diff --git a/google/cloud/firestore_admin_v1/gapic_metadata.json b/google/cloud/firestore_admin_v1/gapic_metadata.json index e2c91bdb5..b8d4cb298 100644 --- a/google/cloud/firestore_admin_v1/gapic_metadata.json +++ b/google/cloud/firestore_admin_v1/gapic_metadata.json @@ -15,6 +15,11 @@ "bulk_delete_documents" ] }, + "CloneDatabase": { + "methods": [ + "clone_database" + ] + }, "CreateBackupSchedule": { "methods": [ "create_backup_schedule" @@ -175,6 +180,11 @@ "bulk_delete_documents" ] }, + "CloneDatabase": { + "methods": [ + "clone_database" + ] + }, "CreateBackupSchedule": { "methods": [ "create_backup_schedule" @@ -335,6 +345,11 @@ "bulk_delete_documents" ] }, + "CloneDatabase": { + "methods": [ + "clone_database" + ] + }, "CreateBackupSchedule": { "methods": [ "create_backup_schedule" diff --git a/google/cloud/firestore_admin_v1/services/firestore_admin/async_client.py b/google/cloud/firestore_admin_v1/services/firestore_admin/async_client.py index 56531fa29..a2800e34e 100644 --- a/google/cloud/firestore_admin_v1/services/firestore_admin/async_client.py +++ b/google/cloud/firestore_admin_v1/services/firestore_admin/async_client.py @@ -4111,6 +4111,143 @@ async def sample_delete_backup_schedule(): metadata=metadata, ) + async def clone_database( + self, + request: Optional[Union[firestore_admin.CloneDatabaseRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new database by cloning an existing one. + + The new database must be in the same cloud region or + multi-region location as the existing database. This behaves + similar to + [FirestoreAdmin.CreateDatabase][google.firestore.admin.v1.FirestoreAdmin.CreateDatabase] + except instead of creating a new empty database, a new database + is created with the database type, index configuration, and + documents from an existing database. + + The [long-running operation][google.longrunning.Operation] can + be used to track the progress of the clone, with the Operation's + [metadata][google.longrunning.Operation.metadata] field type + being the + [CloneDatabaseMetadata][google.firestore.admin.v1.CloneDatabaseMetadata]. + The [response][google.longrunning.Operation.response] type is + the [Database][google.firestore.admin.v1.Database] if the clone + was successful. The new database is not readable or writeable + until the LRO has completed. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+            #   - It may require specifying regional endpoints when creating the service
+            #     client as shown in:
+            #     https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import firestore_admin_v1
+
+            async def sample_clone_database():
+                # Create a client
+                client = firestore_admin_v1.FirestoreAdminAsyncClient()
+
+                # Initialize request argument(s)
+                pitr_snapshot = firestore_admin_v1.PitrSnapshot()
+                pitr_snapshot.database = "database_value"
+
+                request = firestore_admin_v1.CloneDatabaseRequest(
+                    parent="parent_value",
+                    database_id="database_id_value",
+                    pitr_snapshot=pitr_snapshot,
+                )
+
+                # Make the request
+                operation = client.clone_database(request=request)
+
+                print("Waiting for operation to complete...")
+
+                response = (await operation).result()
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.cloud.firestore_admin_v1.types.CloneDatabaseRequest, dict]]):
+                The request object. The request message for
+                [FirestoreAdmin.CloneDatabase][google.firestore.admin.v1.FirestoreAdmin.CloneDatabase].
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.firestore_admin_v1.types.Database`
+                A Cloud Firestore Database.
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, firestore_admin.CloneDatabaseRequest):
+            request = firestore_admin.CloneDatabaseRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.clone_database
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^projects/(?P<project_id>[^/]+)(?:/.*)?$")
+        regex_match = routing_param_regex.match(request.pitr_snapshot.database)
+        if regex_match and regex_match.group("project_id"):
+            header_params["project_id"] = regex_match.group("project_id")
+
+        routing_param_regex = re.compile(
+            "^projects/[^/]+/databases/(?P<database_id>[^/]+)(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.pitr_snapshot.database)
+        if regex_match and regex_match.group("database_id"):
+            header_params["database_id"] = regex_match.group("database_id")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            database.Database,
+            metadata_type=gfa_operation.CloneDatabaseMetadata,
+        )
+
+        # Done; return the response.
+ return response + async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, diff --git a/google/cloud/firestore_admin_v1/services/firestore_admin/client.py b/google/cloud/firestore_admin_v1/services/firestore_admin/client.py index d05b82787..991d58ccd 100644 --- a/google/cloud/firestore_admin_v1/services/firestore_admin/client.py +++ b/google/cloud/firestore_admin_v1/services/firestore_admin/client.py @@ -4591,6 +4591,141 @@ def sample_delete_backup_schedule(): metadata=metadata, ) + def clone_database( + self, + request: Optional[Union[firestore_admin.CloneDatabaseRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gac_operation.Operation: + r"""Creates a new database by cloning an existing one. + + The new database must be in the same cloud region or + multi-region location as the existing database. This behaves + similar to + [FirestoreAdmin.CreateDatabase][google.firestore.admin.v1.FirestoreAdmin.CreateDatabase] + except instead of creating a new empty database, a new database + is created with the database type, index configuration, and + documents from an existing database. + + The [long-running operation][google.longrunning.Operation] can + be used to track the progress of the clone, with the Operation's + [metadata][google.longrunning.Operation.metadata] field type + being the + [CloneDatabaseMetadata][google.firestore.admin.v1.CloneDatabaseMetadata]. + The [response][google.longrunning.Operation.response] type is + the [Database][google.firestore.admin.v1.Database] if the clone + was successful. The new database is not readable or writeable + until the LRO has completed. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import firestore_admin_v1 + + def sample_clone_database(): + # Create a client + client = firestore_admin_v1.FirestoreAdminClient() + + # Initialize request argument(s) + pitr_snapshot = firestore_admin_v1.PitrSnapshot() + pitr_snapshot.database = "database_value" + + request = firestore_admin_v1.CloneDatabaseRequest( + parent="parent_value", + database_id="database_id_value", + pitr_snapshot=pitr_snapshot, + ) + + # Make the request + operation = client.clone_database(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.firestore_admin_v1.types.CloneDatabaseRequest, dict]): + The request object. The request message for + [FirestoreAdmin.CloneDatabase][google.firestore.admin.v1.FirestoreAdmin.CloneDatabase]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.firestore_admin_v1.types.Database`
+                A Cloud Firestore Database.
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, firestore_admin.CloneDatabaseRequest):
+            request = firestore_admin.CloneDatabaseRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.clone_database]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^projects/(?P<project_id>[^/]+)(?:/.*)?$")
+        regex_match = routing_param_regex.match(request.pitr_snapshot.database)
+        if regex_match and regex_match.group("project_id"):
+            header_params["project_id"] = regex_match.group("project_id")
+
+        routing_param_regex = re.compile(
+            "^projects/[^/]+/databases/(?P<database_id>[^/]+)(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.pitr_snapshot.database)
+        if regex_match and regex_match.group("database_id"):
+            header_params["database_id"] = regex_match.group("database_id")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            database.Database,
+            metadata_type=gfa_operation.CloneDatabaseMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
     def __enter__(self) -> "FirestoreAdminClient":
         return self
 
diff --git a/google/cloud/firestore_admin_v1/services/firestore_admin/transports/base.py b/google/cloud/firestore_admin_v1/services/firestore_admin/transports/base.py
index f290fcbfe..7d582d9b5 100644
--- a/google/cloud/firestore_admin_v1/services/firestore_admin/transports/base.py
+++ b/google/cloud/firestore_admin_v1/services/firestore_admin/transports/base.py
@@ -81,9 +81,10 @@ def __init__(
                 credentials identify the application to the service; if none
                 are specified, the client will attempt to ascertain the
                 credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
+            credentials_file (Optional[str]): Deprecated. A file with credentials that can
                 be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
+                This argument is mutually exclusive with credentials. This argument will be
+                removed in the next major version of this library.
             scopes (Optional[Sequence[str]]): A list of scopes.
             quota_project_id (Optional[str]): An optional project to use for billing
                 and quota.
@@ -357,6 +358,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.clone_database: gapic_v1.method.wrap_method( + self.clone_database, + default_timeout=120.0, + client_info=client_info, + ), self.cancel_operation: gapic_v1.method.wrap_method( self.cancel_operation, default_timeout=None, @@ -688,6 +694,15 @@ def delete_backup_schedule( ]: raise NotImplementedError() + @property + def clone_database( + self, + ) -> Callable[ + [firestore_admin.CloneDatabaseRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def list_operations( self, diff --git a/google/cloud/firestore_admin_v1/services/firestore_admin/transports/grpc.py b/google/cloud/firestore_admin_v1/services/firestore_admin/transports/grpc.py index c6e7824c2..f6531a190 100644 --- a/google/cloud/firestore_admin_v1/services/firestore_admin/transports/grpc.py +++ b/google/cloud/firestore_admin_v1/services/firestore_admin/transports/grpc.py @@ -192,9 +192,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -328,9 +329,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -1279,6 +1281,50 @@ def delete_backup_schedule( ) return self._stubs["delete_backup_schedule"] + @property + def clone_database( + self, + ) -> Callable[[firestore_admin.CloneDatabaseRequest], operations_pb2.Operation]: + r"""Return a callable for the clone database method over gRPC. + + Creates a new database by cloning an existing one. + + The new database must be in the same cloud region or + multi-region location as the existing database. This behaves + similar to + [FirestoreAdmin.CreateDatabase][google.firestore.admin.v1.FirestoreAdmin.CreateDatabase] + except instead of creating a new empty database, a new database + is created with the database type, index configuration, and + documents from an existing database. 
+ + The [long-running operation][google.longrunning.Operation] can + be used to track the progress of the clone, with the Operation's + [metadata][google.longrunning.Operation.metadata] field type + being the + [CloneDatabaseMetadata][google.firestore.admin.v1.CloneDatabaseMetadata]. + The [response][google.longrunning.Operation.response] type is + the [Database][google.firestore.admin.v1.Database] if the clone + was successful. The new database is not readable or writeable + until the LRO has completed. + + Returns: + Callable[[~.CloneDatabaseRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "clone_database" not in self._stubs: + self._stubs["clone_database"] = self._logged_channel.unary_unary( + "/google.firestore.admin.v1.FirestoreAdmin/CloneDatabase", + request_serializer=firestore_admin.CloneDatabaseRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["clone_database"] + def close(self): self._logged_channel.close() diff --git a/google/cloud/firestore_admin_v1/services/firestore_admin/transports/grpc_asyncio.py b/google/cloud/firestore_admin_v1/services/firestore_admin/transports/grpc_asyncio.py index 9dd9d6155..117707853 100644 --- a/google/cloud/firestore_admin_v1/services/firestore_admin/transports/grpc_asyncio.py +++ b/google/cloud/firestore_admin_v1/services/firestore_admin/transports/grpc_asyncio.py @@ -189,8 +189,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -241,9 +242,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -1331,6 +1333,52 @@ def delete_backup_schedule( ) return self._stubs["delete_backup_schedule"] + @property + def clone_database( + self, + ) -> Callable[ + [firestore_admin.CloneDatabaseRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the clone database method over gRPC. + + Creates a new database by cloning an existing one. 
+ + The new database must be in the same cloud region or + multi-region location as the existing database. This behaves + similar to + [FirestoreAdmin.CreateDatabase][google.firestore.admin.v1.FirestoreAdmin.CreateDatabase] + except instead of creating a new empty database, a new database + is created with the database type, index configuration, and + documents from an existing database. + + The [long-running operation][google.longrunning.Operation] can + be used to track the progress of the clone, with the Operation's + [metadata][google.longrunning.Operation.metadata] field type + being the + [CloneDatabaseMetadata][google.firestore.admin.v1.CloneDatabaseMetadata]. + The [response][google.longrunning.Operation.response] type is + the [Database][google.firestore.admin.v1.Database] if the clone + was successful. The new database is not readable or writeable + until the LRO has completed. + + Returns: + Callable[[~.CloneDatabaseRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "clone_database" not in self._stubs: + self._stubs["clone_database"] = self._logged_channel.unary_unary( + "/google.firestore.admin.v1.FirestoreAdmin/CloneDatabase", + request_serializer=firestore_admin.CloneDatabaseRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["clone_database"] + def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { @@ -1544,6 +1592,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.clone_database: self._wrap_method( + self.clone_database, + default_timeout=120.0, + client_info=client_info, + ), self.cancel_operation: self._wrap_method( self.cancel_operation, default_timeout=None, diff --git a/google/cloud/firestore_admin_v1/services/firestore_admin/transports/rest.py b/google/cloud/firestore_admin_v1/services/firestore_admin/transports/rest.py index c96be2e32..41e819c87 100644 --- a/google/cloud/firestore_admin_v1/services/firestore_admin/transports/rest.py +++ b/google/cloud/firestore_admin_v1/services/firestore_admin/transports/rest.py @@ -97,6 +97,14 @@ def post_bulk_delete_documents(self, response): logging.log(f"Received response: {response}") return response + def pre_clone_database(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_clone_database(self, response): + logging.log(f"Received response: {response}") + return response + def pre_create_backup_schedule(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -376,6 +384,54 @@ def post_bulk_delete_documents_with_metadata( """ return response, metadata + def pre_clone_database( + self, + request: firestore_admin.CloneDatabaseRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + firestore_admin.CloneDatabaseRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for clone_database + + Override in a subclass to manipulate the request or metadata + before they are sent to the FirestoreAdmin server. 
+ """ + return request, metadata + + def post_clone_database( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for clone_database + + DEPRECATED. Please use the `post_clone_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the FirestoreAdmin server but before + it is returned to user code. This `post_clone_database` interceptor runs + before the `post_clone_database_with_metadata` interceptor. + """ + return response + + def post_clone_database_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for clone_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FirestoreAdmin server but before it is returned to user code. + + We recommend only using this `post_clone_database_with_metadata` + interceptor in new development instead of the `post_clone_database` interceptor. + When both interceptors are used, this `post_clone_database_with_metadata` interceptor runs after the + `post_clone_database` interceptor. The (possibly modified) response returned by + `post_clone_database` will be passed to + `post_clone_database_with_metadata`. + """ + return response, metadata + def pre_create_backup_schedule( self, request: firestore_admin.CreateBackupScheduleRequest, @@ -1857,9 +1913,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -2115,6 +2172,158 @@ def __call__( ) return resp + class _CloneDatabase( + _BaseFirestoreAdminRestTransport._BaseCloneDatabase, FirestoreAdminRestStub + ): + def __hash__(self): + return hash("FirestoreAdminRestTransport.CloneDatabase") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: firestore_admin.CloneDatabaseRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the clone database method over HTTP. + + Args: + request (~.firestore_admin.CloneDatabaseRequest): + The request object. The request message for + [FirestoreAdmin.CloneDatabase][google.firestore.admin.v1.FirestoreAdmin.CloneDatabase]. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseFirestoreAdminRestTransport._BaseCloneDatabase._get_http_options() + ) + + request, metadata = self._interceptor.pre_clone_database(request, metadata) + transcoded_request = _BaseFirestoreAdminRestTransport._BaseCloneDatabase._get_transcoded_request( + http_options, request + ) + + body = _BaseFirestoreAdminRestTransport._BaseCloneDatabase._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseFirestoreAdminRestTransport._BaseCloneDatabase._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.firestore.admin_v1.FirestoreAdminClient.CloneDatabase", + extra={ + "serviceName": "google.firestore.admin.v1.FirestoreAdmin", + "rpcName": "CloneDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = FirestoreAdminRestTransport._CloneDatabase._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_clone_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_clone_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.firestore.admin_v1.FirestoreAdminClient.clone_database", + extra={ + "serviceName": "google.firestore.admin.v1.FirestoreAdmin", + "rpcName": "CloneDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + class _CreateBackupSchedule( _BaseFirestoreAdminRestTransport._BaseCreateBackupSchedule, FirestoreAdminRestStub, @@ -6507,6 +6716,14 @@ def bulk_delete_documents( # In C++ this would require a dynamic_cast return self._BulkDeleteDocuments(self._session, self._host, self._interceptor) # type: ignore + @property + def clone_database( + self, + ) -> Callable[[firestore_admin.CloneDatabaseRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CloneDatabase(self._session, self._host, self._interceptor) # type: ignore + @property def create_backup_schedule( self, diff --git a/google/cloud/firestore_admin_v1/services/firestore_admin/transports/rest_base.py b/google/cloud/firestore_admin_v1/services/firestore_admin/transports/rest_base.py index 19a0c9856..56b6ce93f 100644 --- a/google/cloud/firestore_admin_v1/services/firestore_admin/transports/rest_base.py +++ b/google/cloud/firestore_admin_v1/services/firestore_admin/transports/rest_base.py @@ -156,6 +156,63 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseCloneDatabase: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{parent=projects/*}/databases:clone", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = firestore_admin.CloneDatabaseRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + 
_BaseFirestoreAdminRestTransport._BaseCloneDatabase._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseCreateBackupSchedule: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") diff --git a/google/cloud/firestore_admin_v1/types/__init__.py b/google/cloud/firestore_admin_v1/types/__init__.py index 249147d52..c76372e5d 100644 --- a/google/cloud/firestore_admin_v1/types/__init__.py +++ b/google/cloud/firestore_admin_v1/types/__init__.py @@ -25,6 +25,7 @@ from .firestore_admin import ( BulkDeleteDocumentsRequest, BulkDeleteDocumentsResponse, + CloneDatabaseRequest, CreateBackupScheduleRequest, CreateDatabaseMetadata, CreateDatabaseRequest, @@ -73,6 +74,7 @@ ) from .operation import ( BulkDeleteDocumentsMetadata, + CloneDatabaseMetadata, ExportDocumentsMetadata, ExportDocumentsResponse, FieldOperationMetadata, @@ -87,6 +89,9 @@ DailyRecurrence, WeeklyRecurrence, ) +from .snapshot import ( + PitrSnapshot, +) from .user_creds import ( UserCreds, ) @@ -97,6 +102,7 @@ "Field", "BulkDeleteDocumentsRequest", "BulkDeleteDocumentsResponse", + "CloneDatabaseRequest", "CreateBackupScheduleRequest", "CreateDatabaseMetadata", "CreateDatabaseRequest", @@ -139,6 +145,7 @@ "Index", "LocationMetadata", "BulkDeleteDocumentsMetadata", + "CloneDatabaseMetadata", "ExportDocumentsMetadata", "ExportDocumentsResponse", "FieldOperationMetadata", @@ -150,5 +157,6 @@ "BackupSchedule", "DailyRecurrence", "WeeklyRecurrence", + "PitrSnapshot", "UserCreds", ) diff --git a/google/cloud/firestore_admin_v1/types/database.py b/google/cloud/firestore_admin_v1/types/database.py index eafa21df1..f46bede62 100644 --- a/google/cloud/firestore_admin_v1/types/database.py +++ b/google/cloud/firestore_admin_v1/types/database.py @@ -213,9 +213,9 @@ class PointInTimeRecoveryEnablement(proto.Enum): Reads are supported on selected versions of the data from within the past 7 days: - - Reads against any timestamp within the past hour - - Reads against 1-minute snapshots beyond 1 hour and within - 7 days + - Reads against any timestamp within the past hour + - Reads against 1-minute snapshots beyond 1 hour and within + 7 days ``version_retention_period`` and ``earliest_version_time`` can be used to determine the supported versions. 
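Note: the `_BaseCloneDatabase` helpers above define the REST mapping for the new RPC: a `POST` to `/v1/{parent=projects/*}/databases:clone` with `"body": "*"`, meaning the entire `CloneDatabaseRequest` (minus fields lifted into the URL) is serialized as the JSON payload. A minimal sketch of the resulting HTTP exchange, assuming the public `firestore.googleapis.com` endpoint and standard proto3 JSON field names (`databaseId`, `pitrSnapshot`, `snapshotTime`); the resource names below are placeholders, not values from this change:

import json

# Placeholder resource names, for illustration only.
parent = "projects/my-project"
url = f"https://firestore.googleapis.com/v1/{parent}/databases:clone"

# With body "*" in the http_options, the remaining request fields are
# serialized into the JSON body using proto3 JSON names.
body = json.dumps(
    {
        "databaseId": "my-clone",
        "pitrSnapshot": {
            "database": "projects/my-project/databases/(default)",
            "snapshotTime": "2025-06-01T12:00:00Z",
        },
    }
)
# POSTing `body` to `url` with OAuth credentials returns a
# google.longrunning.Operation whose response resolves to the new Database.
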
diff --git a/google/cloud/firestore_admin_v1/types/firestore_admin.py b/google/cloud/firestore_admin_v1/types/firestore_admin.py index a4b577b78..9ede35cac 100644 --- a/google/cloud/firestore_admin_v1/types/firestore_admin.py +++ b/google/cloud/firestore_admin_v1/types/firestore_admin.py @@ -24,6 +24,7 @@ from google.cloud.firestore_admin_v1.types import field as gfa_field from google.cloud.firestore_admin_v1.types import index as gfa_index from google.cloud.firestore_admin_v1.types import schedule +from google.cloud.firestore_admin_v1.types import snapshot from google.cloud.firestore_admin_v1.types import user_creds as gfa_user_creds from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore @@ -73,6 +74,7 @@ "ListBackupsResponse", "DeleteBackupRequest", "RestoreDatabaseRequest", + "CloneDatabaseRequest", }, ) @@ -951,7 +953,7 @@ class ListBackupsRequest(proto.Message): [Backup][google.firestore.admin.v1.Backup] are eligible for filtering: - - ``database_uid`` (supports ``=`` only) + - ``database_uid`` (supports ``=`` only) """ parent: str = proto.Field( @@ -1079,4 +1081,70 @@ class RestoreDatabaseRequest(proto.Message): ) +class CloneDatabaseRequest(proto.Message): + r"""The request message for + [FirestoreAdmin.CloneDatabase][google.firestore.admin.v1.FirestoreAdmin.CloneDatabase]. + + Attributes: + parent (str): + Required. The project to clone the database in. Format is + ``projects/{project_id}``. + database_id (str): + Required. The ID to use for the database, which will become + the final component of the database's resource name. This + database ID must not be associated with an existing + database. + + This value should be 4-63 characters. Valid characters are + /[a-z][0-9]-/ with first character a letter and the last a + letter or a number. Must not be UUID-like + /[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}/. + + "(default)" database ID is also valid. + pitr_snapshot (google.cloud.firestore_admin_v1.types.PitrSnapshot): + Required. Specification of the PITR data to + clone from. The source database must exist. + + The cloned database will be created in the same + location as the source database. + encryption_config (google.cloud.firestore_admin_v1.types.Database.EncryptionConfig): + Optional. Encryption configuration for the cloned database. + + If this field is not specified, the cloned database will use + the same encryption configuration as the source database, + namely + [use_source_encryption][google.firestore.admin.v1.Database.EncryptionConfig.use_source_encryption]. + tags (MutableMapping[str, str]): + Optional. Immutable. Tags to be bound to the cloned + database. + + The tags should be provided in the format of + ``tagKeys/{tag_key_id} -> tagValues/{tag_value_id}``. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + database_id: str = proto.Field( + proto.STRING, + number=2, + ) + pitr_snapshot: snapshot.PitrSnapshot = proto.Field( + proto.MESSAGE, + number=6, + message=snapshot.PitrSnapshot, + ) + encryption_config: gfa_database.Database.EncryptionConfig = proto.Field( + proto.MESSAGE, + number=4, + message=gfa_database.Database.EncryptionConfig, + ) + tags: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/firestore_admin_v1/types/operation.py b/google/cloud/firestore_admin_v1/types/operation.py index c58f24273..c50455693 100644 --- a/google/cloud/firestore_admin_v1/types/operation.py +++ b/google/cloud/firestore_admin_v1/types/operation.py @@ -20,6 +20,7 @@ import proto # type: ignore from google.cloud.firestore_admin_v1.types import index as gfa_index +from google.cloud.firestore_admin_v1.types import snapshot from google.protobuf import timestamp_pb2 # type: ignore @@ -34,6 +35,7 @@ "BulkDeleteDocumentsMetadata", "ExportDocumentsResponse", "RestoreDatabaseMetadata", + "CloneDatabaseMetadata", "Progress", }, ) @@ -558,6 +560,60 @@ class RestoreDatabaseMetadata(proto.Message): ) +class CloneDatabaseMetadata(proto.Message): + r"""Metadata for the [long-running + operation][google.longrunning.Operation] from the + [CloneDatabase][google.firestore.admin.v1.CloneDatabase] request. + + Attributes: + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time the clone was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + The time the clone finished, unset for + ongoing clones. + operation_state (google.cloud.firestore_admin_v1.types.OperationState): + The operation state of the clone. + database (str): + The name of the database being cloned to. + pitr_snapshot (google.cloud.firestore_admin_v1.types.PitrSnapshot): + The snapshot from which this database was + cloned. + progress_percentage (google.cloud.firestore_admin_v1.types.Progress): + How far along the clone is as an estimated + percentage of remaining time. + """ + + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + operation_state: "OperationState" = proto.Field( + proto.ENUM, + number=3, + enum="OperationState", + ) + database: str = proto.Field( + proto.STRING, + number=4, + ) + pitr_snapshot: snapshot.PitrSnapshot = proto.Field( + proto.MESSAGE, + number=7, + message=snapshot.PitrSnapshot, + ) + progress_percentage: "Progress" = proto.Field( + proto.MESSAGE, + number=6, + message="Progress", + ) + + class Progress(proto.Message): r"""Describes the progress of the operation. Unit of work is generic and must be interpreted based on where diff --git a/google/cloud/firestore_admin_v1/types/snapshot.py b/google/cloud/firestore_admin_v1/types/snapshot.py new file mode 100644 index 000000000..e56a125f5 --- /dev/null +++ b/google/cloud/firestore_admin_v1/types/snapshot.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.firestore.admin.v1", + manifest={ + "PitrSnapshot", + }, +) + + +class PitrSnapshot(proto.Message): + r"""A consistent snapshot of a database at a specific point in + time. A PITR (Point-in-time recovery) snapshot with previous + versions of a database's data is available for every minute up + to the associated database's data retention period. If the PITR + feature is enabled, the retention period is 7 days; otherwise, + it is one hour. + + Attributes: + database (str): + Required. The name of the database that this was a snapshot + of. Format: ``projects/{project}/databases/{database}``. + database_uid (bytes): + Output only. Public UUID of the database the + snapshot was associated with. + snapshot_time (google.protobuf.timestamp_pb2.Timestamp): + Required. Snapshot time of the database. + """ + + database: str = proto.Field( + proto.STRING, + number=1, + ) + database_uid: bytes = proto.Field( + proto.BYTES, + number=2, + ) + snapshot_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/firestore_v1/services/firestore/transports/base.py b/google/cloud/firestore_v1/services/firestore/transports/base.py index 66d81748c..02d6c0bbc 100644 --- a/google/cloud/firestore_v1/services/firestore/transports/base.py +++ b/google/cloud/firestore_v1/services/firestore/transports/base.py @@ -75,9 +75,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. diff --git a/google/cloud/firestore_v1/services/firestore/transports/grpc.py b/google/cloud/firestore_v1/services/firestore/transports/grpc.py index c302a73c2..3c5bded2d 100644 --- a/google/cloud/firestore_v1/services/firestore/transports/grpc.py +++ b/google/cloud/firestore_v1/services/firestore/transports/grpc.py @@ -164,9 +164,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. 
This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -299,9 +300,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/firestore_v1/services/firestore/transports/grpc_asyncio.py b/google/cloud/firestore_v1/services/firestore/transports/grpc_asyncio.py index f46162296..6cc93e21a 100644 --- a/google/cloud/firestore_v1/services/firestore/transports/grpc_asyncio.py +++ b/google/cloud/firestore_v1/services/firestore/transports/grpc_asyncio.py @@ -161,8 +161,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -213,9 +214,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/firestore_v1/services/firestore/transports/rest.py b/google/cloud/firestore_v1/services/firestore/transports/rest.py index 8c038348c..a32a7e84e 100644 --- a/google/cloud/firestore_v1/services/firestore/transports/rest.py +++ b/google/cloud/firestore_v1/services/firestore/transports/rest.py @@ -945,9 +945,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. 
A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -1118,6 +1119,22 @@ def __call__( resp, _ = self._interceptor.post_batch_get_documents_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.firestore_v1.FirestoreClient.batch_get_documents", + extra={ + "serviceName": "google.firestore.v1.Firestore", + "rpcName": "BatchGetDocuments", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _BatchWrite(_BaseFirestoreRestTransport._BaseBatchWrite, FirestoreRestStub): @@ -2736,6 +2753,22 @@ def __call__( resp, _ = self._interceptor.post_run_aggregation_query_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.firestore_v1.FirestoreClient.run_aggregation_query", + extra={ + "serviceName": "google.firestore.v1.Firestore", + "rpcName": "RunAggregationQuery", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _RunQuery(_BaseFirestoreRestTransport._BaseRunQuery, FirestoreRestStub): @@ -2866,6 +2899,22 @@ def __call__( resp, _ = self._interceptor.post_run_query_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.firestore_v1.FirestoreClient.run_query", + extra={ + "serviceName": "google.firestore.v1.Firestore", + "rpcName": "RunQuery", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateDocument( diff --git a/google/cloud/firestore_v1/types/document.py b/google/cloud/firestore_v1/types/document.py index 0942354f5..22fe79b73 100644 --- a/google/cloud/firestore_v1/types/document.py +++ b/google/cloud/firestore_v1/types/document.py @@ -72,7 +72,7 @@ class Document(proto.Message): may contain any character. Some characters, including :literal:`\``, must be escaped using a ``\``. For example, :literal:`\`x&y\`` represents ``x&y`` and - :literal:`\`bak\`tik\`` represents :literal:`bak`tik`. + :literal:`\`bak\\`tik\`` represents :literal:`bak`tik`. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time at which the document was created. 
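Note: the docstring fix above tightens the field-name quoting example: :literal:`\`bak\\`tik\`` (a backtick-quoted name with an escaped inner backtick) stands for the literal name ``bak`tik``. A small sketch of the quoting rule it documents — the simple-identifier pattern here is an assumption for illustration, not something this change defines:

import re

def quote_field_name(name: str) -> str:
    # Wrap non-simple field names in backticks, escaping embedded
    # backslashes and backticks, per the Document.fields docs above.
    # The simple-name pattern below is an assumed identifier convention.
    if re.fullmatch(r"[A-Za-z_][A-Za-z_0-9]*", name):
        return name
    escaped = name.replace("\\", "\\\\").replace("`", "\\`")
    return f"`{escaped}`"

assert quote_field_name("simple") == "simple"
assert quote_field_name("x&y") == "`x&y`"
assert quote_field_name("bak`tik") == "`bak\\`tik`"
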
diff --git a/google/cloud/firestore_v1/types/firestore.py b/google/cloud/firestore_v1/types/firestore.py index 53a6c6e7a..190f55d28 100644 --- a/google/cloud/firestore_v1/types/firestore.py +++ b/google/cloud/firestore_v1/types/firestore.py @@ -1017,8 +1017,8 @@ class PartitionQueryRequest(proto.Message): For example, two subsequent calls using a page_token may return: - - cursor B, cursor M, cursor Q - - cursor A, cursor U, cursor W + - cursor B, cursor M, cursor Q + - cursor A, cursor U, cursor W To obtain a complete result set ordered with respect to the results of the query supplied to PartitionQuery, the results @@ -1092,9 +1092,9 @@ class PartitionQueryResponse(proto.Message): cursors A and B, running the following three queries will return the entire result set of the original query: - - query, end_at A - - query, start_at A, end_at B - - query, start_at B + - query, end_at A + - query, start_at A, end_at B + - query, start_at B An empty result may indicate that the query has too few results to be partitioned, or that the query is not yet diff --git a/google/cloud/firestore_v1/types/query.py b/google/cloud/firestore_v1/types/query.py index 9aa8977dd..c2856d0b4 100644 --- a/google/cloud/firestore_v1/types/query.py +++ b/google/cloud/firestore_v1/types/query.py @@ -66,25 +66,25 @@ class StructuredQuery(proto.Message): Firestore guarantees a stable ordering through the following rules: - - The ``order_by`` is required to reference all fields used - with an inequality filter. - - All fields that are required to be in the ``order_by`` - but are not already present are appended in - lexicographical ordering of the field name. - - If an order on ``__name__`` is not specified, it is - appended by default. + - The ``order_by`` is required to reference all fields used + with an inequality filter. + - All fields that are required to be in the ``order_by`` but + are not already present are appended in lexicographical + ordering of the field name. + - If an order on ``__name__`` is not specified, it is + appended by default. Fields are appended with the same sort direction as the last order specified, or 'ASCENDING' if no order was specified. For example: - - ``ORDER BY a`` becomes ``ORDER BY a ASC, __name__ ASC`` - - ``ORDER BY a DESC`` becomes - ``ORDER BY a DESC, __name__ DESC`` - - ``WHERE a > 1`` becomes - ``WHERE a > 1 ORDER BY a ASC, __name__ ASC`` - - ``WHERE __name__ > ... AND a > 1`` becomes - ``WHERE __name__ > ... AND a > 1 ORDER BY a ASC, __name__ ASC`` + - ``ORDER BY a`` becomes ``ORDER BY a ASC, __name__ ASC`` + - ``ORDER BY a DESC`` becomes + ``ORDER BY a DESC, __name__ DESC`` + - ``WHERE a > 1`` becomes + ``WHERE a > 1 ORDER BY a ASC, __name__ ASC`` + - ``WHERE __name__ > ... AND a > 1`` becomes + ``WHERE __name__ > ... AND a > 1 ORDER BY a ASC, __name__ ASC`` start_at (google.cloud.firestore_v1.types.Cursor): A potential prefix of a position in the result set to start the query at. @@ -106,10 +106,10 @@ class StructuredQuery(proto.Message): Continuing off the example above, attaching the following start cursors will have varying impact: - - ``START BEFORE (2, /k/123)``: start the query right - before ``a = 1 AND b > 2 AND __name__ > /k/123``. - - ``START AFTER (10)``: start the query right after - ``a = 1 AND b > 10``. + - ``START BEFORE (2, /k/123)``: start the query right before + ``a = 1 AND b > 2 AND __name__ > /k/123``. + - ``START AFTER (10)``: start the query right after + ``a = 1 AND b > 10``. 
Unlike ``OFFSET`` which requires scanning over the first N results to skip, a start cursor allows the query to begin at @@ -119,8 +119,8 @@ class StructuredQuery(proto.Message): Requires: - - The number of values cannot be greater than the number of - fields specified in the ``ORDER BY`` clause. + - The number of values cannot be greater than the number of + fields specified in the ``ORDER BY`` clause. end_at (google.cloud.firestore_v1.types.Cursor): A potential prefix of a position in the result set to end the query at. @@ -130,8 +130,8 @@ class StructuredQuery(proto.Message): Requires: - - The number of values cannot be greater than the number of - fields specified in the ``ORDER BY`` clause. + - The number of values cannot be greater than the number of + fields specified in the ``ORDER BY`` clause. offset (int): The number of documents to skip before returning the first result. @@ -142,8 +142,8 @@ class StructuredQuery(proto.Message): Requires: - - The value must be greater than or equal to zero if - specified. + - The value must be greater than or equal to zero if + specified. limit (google.protobuf.wrappers_pb2.Int32Value): The maximum number of results to return. @@ -151,8 +151,8 @@ class StructuredQuery(proto.Message): Requires: - - The value must be greater than or equal to zero if - specified. + - The value must be greater than or equal to zero if + specified. find_nearest (google.cloud.firestore_v1.types.StructuredQuery.FindNearest): Optional. A potential nearest neighbors search. @@ -256,7 +256,7 @@ class CompositeFilter(proto.Message): Requires: - - At least one filter is present. + - At least one filter is present. """ class Operator(proto.Enum): @@ -310,27 +310,27 @@ class Operator(proto.Enum): Requires: - - That ``field`` come first in ``order_by``. + - That ``field`` come first in ``order_by``. LESS_THAN_OR_EQUAL (2): The given ``field`` is less than or equal to the given ``value``. Requires: - - That ``field`` come first in ``order_by``. + - That ``field`` come first in ``order_by``. GREATER_THAN (3): The given ``field`` is greater than the given ``value``. Requires: - - That ``field`` come first in ``order_by``. + - That ``field`` come first in ``order_by``. GREATER_THAN_OR_EQUAL (4): The given ``field`` is greater than or equal to the given ``value``. Requires: - - That ``field`` come first in ``order_by``. + - That ``field`` come first in ``order_by``. EQUAL (5): The given ``field`` is equal to the given ``value``. NOT_EQUAL (6): @@ -338,9 +338,9 @@ class Operator(proto.Enum): Requires: - - No other ``NOT_EQUAL``, ``NOT_IN``, ``IS_NOT_NULL``, or - ``IS_NOT_NAN``. - - That ``field`` comes first in the ``order_by``. + - No other ``NOT_EQUAL``, ``NOT_IN``, ``IS_NOT_NULL``, or + ``IS_NOT_NAN``. + - That ``field`` comes first in the ``order_by``. ARRAY_CONTAINS (7): The given ``field`` is an array that contains the given ``value``. @@ -350,31 +350,31 @@ class Operator(proto.Enum): Requires: - - That ``value`` is a non-empty ``ArrayValue``, subject to - disjunction limits. - - No ``NOT_IN`` filters in the same query. + - That ``value`` is a non-empty ``ArrayValue``, subject to + disjunction limits. + - No ``NOT_IN`` filters in the same query. ARRAY_CONTAINS_ANY (9): The given ``field`` is an array that contains any of the values in the given array. Requires: - - That ``value`` is a non-empty ``ArrayValue``, subject to - disjunction limits. - - No other ``ARRAY_CONTAINS_ANY`` filters within the same - disjunction. - - No ``NOT_IN`` filters in the same query. 
+ - That ``value`` is a non-empty ``ArrayValue``, subject to + disjunction limits. + - No other ``ARRAY_CONTAINS_ANY`` filters within the same + disjunction. + - No ``NOT_IN`` filters in the same query. NOT_IN (10): The value of the ``field`` is not in the given array. Requires: - - That ``value`` is a non-empty ``ArrayValue`` with at most - 10 values. - - No other ``OR``, ``IN``, ``ARRAY_CONTAINS_ANY``, - ``NOT_IN``, ``NOT_EQUAL``, ``IS_NOT_NULL``, or - ``IS_NOT_NAN``. - - That ``field`` comes first in the ``order_by``. + - That ``value`` is a non-empty ``ArrayValue`` with at most + 10 values. + - No other ``OR``, ``IN``, ``ARRAY_CONTAINS_ANY``, + ``NOT_IN``, ``NOT_EQUAL``, ``IS_NOT_NULL``, or + ``IS_NOT_NAN``. + - That ``field`` comes first in the ``order_by``. """ OPERATOR_UNSPECIFIED = 0 LESS_THAN = 1 @@ -433,17 +433,17 @@ class Operator(proto.Enum): Requires: - - No other ``NOT_EQUAL``, ``NOT_IN``, ``IS_NOT_NULL``, or - ``IS_NOT_NAN``. - - That ``field`` comes first in the ``order_by``. + - No other ``NOT_EQUAL``, ``NOT_IN``, ``IS_NOT_NULL``, or + ``IS_NOT_NAN``. + - That ``field`` comes first in the ``order_by``. IS_NOT_NULL (5): The given ``field`` is not equal to ``NULL``. Requires: - - A single ``NOT_EQUAL``, ``NOT_IN``, ``IS_NOT_NULL``, or - ``IS_NOT_NAN``. - - That ``field`` comes first in the ``order_by``. + - A single ``NOT_EQUAL``, ``NOT_IN``, ``IS_NOT_NULL``, or + ``IS_NOT_NAN``. + - That ``field`` comes first in the ``order_by``. """ OPERATOR_UNSPECIFIED = 0 IS_NAN = 2 @@ -493,9 +493,9 @@ class FieldReference(proto.Message): Requires: - - MUST be a dot-delimited (``.``) string of segments, where - each segment conforms to [document field - name][google.firestore.v1.Document.fields] limitations. + - MUST be a dot-delimited (``.``) string of segments, where + each segment conforms to [document field + name][google.firestore.v1.Document.fields] limitations. """ field_path: str = proto.Field( @@ -555,9 +555,9 @@ class FindNearest(proto.Message): when the vectors are more similar, the comparison is inverted. - - For EUCLIDEAN, COSINE: WHERE distance <= - distance_threshold - - For DOT_PRODUCT: WHERE distance >= distance_threshold + - For EUCLIDEAN, COSINE: WHERE distance <= + distance_threshold + - For DOT_PRODUCT: WHERE distance >= distance_threshold """ class DistanceMeasure(proto.Enum): @@ -688,8 +688,8 @@ class StructuredAggregationQuery(proto.Message): Requires: - - A minimum of one and maximum of five aggregations per - query. + - A minimum of one and maximum of five aggregations per + query. """ class Aggregation(proto.Message): @@ -749,9 +749,9 @@ class Aggregation(proto.Message): Requires: - - Must be unique across all aggregation aliases. - - Conform to [document field - name][google.firestore.v1.Document.fields] limitations. + - Must be unique across all aggregation aliases. + - Conform to [document field + name][google.firestore.v1.Document.fields] limitations. """ class Count(proto.Message): @@ -778,7 +778,7 @@ class Count(proto.Message): Requires: - - Must be greater than zero when present. + - Must be greater than zero when present. """ up_to: wrappers_pb2.Int64Value = proto.Field( @@ -790,26 +790,26 @@ class Count(proto.Message): class Sum(proto.Message): r"""Sum of the values of the requested field. - - Only numeric values will be aggregated. All non-numeric values - including ``NULL`` are skipped. + - Only numeric values will be aggregated. All non-numeric values + including ``NULL`` are skipped. - - If the aggregated values contain ``NaN``, returns ``NaN``. 
- Infinity math follows IEEE-754 standards. + - If the aggregated values contain ``NaN``, returns ``NaN``. + Infinity math follows IEEE-754 standards. - - If the aggregated value set is empty, returns 0. + - If the aggregated value set is empty, returns 0. - - Returns a 64-bit integer if all aggregated numbers are integers - and the sum result does not overflow. Otherwise, the result is - returned as a double. Note that even if all the aggregated values - are integers, the result is returned as a double if it cannot fit - within a 64-bit signed integer. When this occurs, the returned - value will lose precision. + - Returns a 64-bit integer if all aggregated numbers are integers + and the sum result does not overflow. Otherwise, the result is + returned as a double. Note that even if all the aggregated values + are integers, the result is returned as a double if it cannot fit + within a 64-bit signed integer. When this occurs, the returned + value will lose precision. - - When underflow occurs, floating-point aggregation is - non-deterministic. This means that running the same query - repeatedly without any changes to the underlying values could - produce slightly different results each time. In those cases, - values should be stored as integers over floating-point numbers. + - When underflow occurs, floating-point aggregation is + non-deterministic. This means that running the same query + repeatedly without any changes to the underlying values could + produce slightly different results each time. In those cases, + values should be stored as integers over floating-point numbers. Attributes: field (google.cloud.firestore_v1.types.StructuredQuery.FieldReference): @@ -825,15 +825,15 @@ class Sum(proto.Message): class Avg(proto.Message): r"""Average of the values of the requested field. - - Only numeric values will be aggregated. All non-numeric values - including ``NULL`` are skipped. + - Only numeric values will be aggregated. All non-numeric values + including ``NULL`` are skipped. - - If the aggregated values contain ``NaN``, returns ``NaN``. - Infinity math follows IEEE-754 standards. + - If the aggregated values contain ``NaN``, returns ``NaN``. + Infinity math follows IEEE-754 standards. - - If the aggregated value set is empty, returns ``NULL``. + - If the aggregated value set is empty, returns ``NULL``. - - Always returns the result as a double. + - Always returns the result as a double. 
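Reviewer note: the ``Sum``/``Avg`` semantics spelled out above are exposed through aggregation queries in the client. A hedged sketch (collection, field, and alias names are illustrative; up to five aggregations may ride on one query, per the ``StructuredAggregationQuery`` requirement above):

.. code-block:: python

    from google.cloud import firestore

    db = firestore.Client()

    # COUNT, SUM, and AVG over one result set in a single request.
    # SUM returns an int64 when it fits, otherwise a double; AVG is
    # always a double and NULL over an empty set, as documented above.
    agg = (
        db.collection("orders")
        .count(alias="n")
        .sum("total", alias="sum_total")
        .avg("total", alias="avg_total")
    )
    for group in agg.get():
        for result in group:
            print(result.alias, result.value)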
Attributes: field (google.cloud.firestore_v1.types.StructuredQuery.FieldReference): diff --git a/scripts/fixup_firestore_admin_v1_keywords.py b/scripts/fixup_firestore_admin_v1_keywords.py index 0920ce408..05bd87f0e 100644 --- a/scripts/fixup_firestore_admin_v1_keywords.py +++ b/scripts/fixup_firestore_admin_v1_keywords.py @@ -46,6 +46,7 @@ class firestore_adminCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'bulk_delete_documents': ('name', 'collection_ids', 'namespace_ids', ), + 'clone_database': ('parent', 'database_id', 'pitr_snapshot', 'encryption_config', 'tags', ), 'create_backup_schedule': ('parent', 'backup_schedule', ), 'create_database': ('parent', 'database', 'database_id', ), 'create_index': ('parent', 'index', ), diff --git a/tests/unit/gapic/firestore_admin_v1/test_firestore_admin.py b/tests/unit/gapic/firestore_admin_v1/test_firestore_admin.py index db8276d57..1b9184b8a 100644 --- a/tests/unit/gapic/firestore_admin_v1/test_firestore_admin.py +++ b/tests/unit/gapic/firestore_admin_v1/test_firestore_admin.py @@ -75,6 +75,7 @@ from google.cloud.firestore_admin_v1.types import index as gfa_index from google.cloud.firestore_admin_v1.types import operation as gfa_operation from google.cloud.firestore_admin_v1.types import schedule +from google.cloud.firestore_admin_v1.types import snapshot from google.cloud.firestore_admin_v1.types import user_creds from google.cloud.firestore_admin_v1.types import user_creds as gfa_user_creds from google.cloud.location import locations_pb2 @@ -11859,6 +11860,192 @@ async def test_delete_backup_schedule_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + firestore_admin.CloneDatabaseRequest, + dict, + ], +) +def test_clone_database(request_type, transport: str = "grpc"): + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.clone_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = firestore_admin.CloneDatabaseRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_clone_database_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = firestore_admin.CloneDatabaseRequest( + parent="parent_value", + database_id="database_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
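Reviewer note: the fixup entry above registers ``clone_database``'s keyword order so the one-time upgrade script can rewrite old positional call sites into an explicit ``request``. Since ``clone_database`` is brand new there are no call sites to migrate; the entry keeps the table exhaustive. An illustration of what the transformer does (argument values are hypothetical):

.. code-block:: python

    # Before fixup (hypothetical positional call):
    client.clone_database(
        "projects/my-project",
        "new-db",
        pitr_snapshot,
    )

    # After running scripts/fixup_firestore_admin_v1_keywords.py:
    client.clone_database(
        request={
            "parent": "projects/my-project",
            "database_id": "new-db",
            "pitr_snapshot": pitr_snapshot,
        },
    )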
+ with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.clone_database(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == firestore_admin.CloneDatabaseRequest( + parent="parent_value", + database_id="database_id_value", + ) + + +def test_clone_database_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.clone_database in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.clone_database] = mock_rpc + request = {} + client.clone_database(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.clone_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_clone_database_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = FirestoreAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.clone_database + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.clone_database + ] = mock_rpc + + request = {} + await client.clone_database(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
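Reviewer note: what the ``use_cached_wrapped_rpc`` tests here pin down is that retry/timeout wrapping happens exactly once, at client construction, keyed by the raw transport method. A standalone toy of the pattern (simplified names; not the library's actual internals):

.. code-block:: python

    import functools

    def wrap_method(fn):
        # Stand-in for google.api_core.gapic_v1.method.wrap_method:
        # attaches default retry/timeout policy around a raw callable.
        @functools.wraps(fn)
        def wrapped(request, retry=None, timeout=None, metadata=()):
            return fn(request)
        return wrapped

    class ToyTransport:
        def __init__(self):
            # Wrap exactly once; this is the dict the tests swap for a mock.
            self._wrapped_methods = {
                self.clone_database: wrap_method(self.clone_database),
            }

        def clone_database(self, request):
            return "operations/spam"

    t = ToyTransport()
    # Every call reuses the cached wrapper; no re-wrapping per call,
    # which is what ``wrapper_fn.call_count == 0`` asserts in the tests.
    assert t._wrapped_methods[t.clone_database]({}) == "operations/spam"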
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.clone_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_clone_database_async( + transport: str = "grpc_asyncio", request_type=firestore_admin.CloneDatabaseRequest +): + client = FirestoreAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.clone_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = firestore_admin.CloneDatabaseRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_clone_database_async_from_dict(): + await test_clone_database_async(request_type=dict) + + def test_create_index_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -17625,6 +17812,141 @@ def test_delete_backup_schedule_rest_flattened_error(transport: str = "rest"): ) +def test_clone_database_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.clone_database in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.clone_database] = mock_rpc + + request = {} + client.clone_database(request) + + # Establish that the underlying gRPC stub method was called. 
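Reviewer note: a recurring idiom in these tests is ``mock.patch.object(type(client.transport.clone_database), "__call__")``. gRPC stub methods are callable objects (multicallables), not plain functions, and Python resolves special methods on the type, so that is the attribute to fake. A minimal demonstration of why the pattern works:

.. code-block:: python

    from unittest import mock

    class Multicallable:
        # Stand-in for a gRPC UnaryUnaryMultiCallable.
        def __call__(self, request):
            return "real RPC"

    mc = Multicallable()
    with mock.patch.object(type(mc), "__call__") as call:
        call.return_value = "faked"
        # __call__ is looked up on the type, so the instance call is faked.
        assert mc("request") == "faked"
    assert mc("request") == "real RPC"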
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.clone_database(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_clone_database_rest_required_fields( + request_type=firestore_admin.CloneDatabaseRequest, +): + transport_class = transports.FirestoreAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["database_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).clone_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["databaseId"] = "database_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).clone_database._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "databaseId" in jsonified_request + assert jsonified_request["databaseId"] == "database_id_value" + + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
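Reviewer note: ``transcode()`` is stubbed in the required-fields test because it normally maps the proto request onto the HTTP verb, URI, and body declared in the ``google.api.http`` annotation, and a placeholder request would fail URI template matching. For orientation, a real result looks roughly like the sketch below; the concrete URI template is my assumption about the v1 admin surface, not something this diff states:

.. code-block:: python

    clone_request = {"parent": "projects/my-project", "database_id": "new-db"}

    # Hedged sketch of a transcode() result for CloneDatabase.
    transcoded = {
        "method": "post",
        "uri": "/v1/projects/my-project/databases:clone",  # assumed template
        "body": clone_request,   # the whole request rides in the HTTP body
        "query_params": {},      # nothing left over for the query string
    }
    print(transcoded["method"], transcoded["uri"])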
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.clone_database(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_clone_database_rest_unset_required_fields(): + transport = transports.FirestoreAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.clone_database._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "databaseId", + "pitrSnapshot", + ) + ) + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.FirestoreAdminGrpcTransport( @@ -18398,11 +18720,96 @@ def test_delete_backup_schedule_empty_call_grpc(): # Establish that the underlying stub method was called. call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = firestore_admin.DeleteBackupScheduleRequest() + _, args, _ = call.mock_calls[0] + request_msg = firestore_admin.DeleteBackupScheduleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_clone_database_empty_call_grpc(): + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.clone_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = firestore_admin.CloneDatabaseRequest() + + assert args[0] == request_msg + + +def test_clone_database_routing_parameters_request_1_grpc(): + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.clone_database( + request={"pitr_snapshot": {"database": "projects/sample1/sample2"}} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = firestore_admin.CloneDatabaseRequest( + **{"pitr_snapshot": {"database": "projects/sample1/sample2"}} + ) + + assert args[0] == request_msg + + expected_headers = {"project_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_clone_database_routing_parameters_request_2_grpc(): + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
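Reviewer note: the routing-parameter tests here and below pin down the implicit request headers derived from ``pitr_snapshot.database``. The extraction reduces to two anchored regexes; a standalone check (group names inferred from the ``expected_headers`` the tests assert):

.. code-block:: python

    import re

    database = "projects/sample1/databases/sample2/sample3"

    project = re.match(r"^projects/(?P<project_id>[^/]+)(?:/.*)?$", database)
    db = re.match(
        r"^projects/[^/]+/databases/(?P<database_id>[^/]+)(?:/.*)?$", database
    )

    # Matches the expected_headers asserted in the request_2 variants.
    assert project.group("project_id") == "sample1"
    assert db.group("database_id") == "sample2"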
+ with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.clone_database( + request={ + "pitr_snapshot": { + "database": "projects/sample1/databases/sample2/sample3" + } + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = firestore_admin.CloneDatabaseRequest( + **{ + "pitr_snapshot": { + "database": "projects/sample1/databases/sample2/sample3" + } + } + ) assert args[0] == request_msg + expected_headers = {"project_id": "sample1", "database_id": "sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + def test_transport_kind_grpc_asyncio(): transport = FirestoreAdminAsyncClient.get_transport_class("grpc_asyncio")( @@ -19270,6 +19677,103 @@ async def test_delete_backup_schedule_empty_call_grpc_asyncio(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_clone_database_empty_call_grpc_asyncio(): + client = FirestoreAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.clone_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = firestore_admin.CloneDatabaseRequest() + + assert args[0] == request_msg + + +@pytest.mark.asyncio +async def test_clone_database_routing_parameters_request_1_grpc_asyncio(): + client = FirestoreAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.clone_database( + request={"pitr_snapshot": {"database": "projects/sample1/sample2"}} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = firestore_admin.CloneDatabaseRequest( + **{"pitr_snapshot": {"database": "projects/sample1/sample2"}} + ) + + assert args[0] == request_msg + + expected_headers = {"project_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_clone_database_routing_parameters_request_2_grpc_asyncio(): + client = FirestoreAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.clone_database( + request={ + "pitr_snapshot": { + "database": "projects/sample1/databases/sample2/sample3" + } + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = firestore_admin.CloneDatabaseRequest( + **{ + "pitr_snapshot": { + "database": "projects/sample1/databases/sample2/sample3" + } + } + ) + + assert args[0] == request_msg + + expected_headers = {"project_id": "sample1", "database_id": "sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + def test_transport_kind_rest(): transport = FirestoreAdminClient.get_transport_class("rest")( credentials=ga_credentials.AnonymousCredentials() @@ -23839,6 +24343,129 @@ def test_delete_backup_schedule_rest_interceptors(null_interceptor): pre.assert_called_once() +def test_clone_database_rest_bad_request( + request_type=firestore_admin.CloneDatabaseRequest, +): + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.clone_database(request) + + +@pytest.mark.parametrize( + "request_type", + [ + firestore_admin.CloneDatabaseRequest, + dict, + ], +) +def test_clone_database_rest_call_success(request_type): + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.clone_database(request) + + # Establish that the response is the type that we expect. 
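Reviewer nit (hedged): after the "type that we expect" comment, ``test_clone_database_rest_call_success`` only re-serializes the expected ``Operation``; nothing is asserted on ``response`` itself. If the generator template permits, an explicit check would make the comment accurate:

.. code-block:: python

    # Suggested addition; assumes ``response`` is the
    # google.api_core.operation.Operation wrapping the mocked result.
    assert response.operation.name == "operations/spam"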
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_clone_database_rest_interceptors(null_interceptor): + transport = transports.FirestoreAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.FirestoreAdminRestInterceptor(), + ) + client = FirestoreAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.FirestoreAdminRestInterceptor, "post_clone_database" + ) as post, mock.patch.object( + transports.FirestoreAdminRestInterceptor, "post_clone_database_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.FirestoreAdminRestInterceptor, "pre_clone_database" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = firestore_admin.CloneDatabaseRequest.pb( + firestore_admin.CloneDatabaseRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = firestore_admin.CloneDatabaseRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.clone_database( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + def test_cancel_operation_rest_bad_request( request_type=operations_pb2.CancelOperationRequest, ): @@ -24736,6 +25363,88 @@ def test_delete_backup_schedule_empty_call_rest(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_clone_database_empty_call_rest(): + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + client.clone_database(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = firestore_admin.CloneDatabaseRequest() + + assert args[0] == request_msg + + +def test_clone_database_routing_parameters_request_1_rest(): + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + client.clone_database( + request={"pitr_snapshot": {"database": "projects/sample1/sample2"}} + ) + + # Establish that the underlying stub method was called. 
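Reviewer note: the interceptor test above establishes the three hook points for this RPC (``pre_clone_database``, ``post_clone_database``, ``post_clone_database_with_metadata``). For downstream users on the REST transport, wiring a custom hook looks roughly like this (hedged sketch; hook names and signatures mirror what the test patches):

.. code-block:: python

    from google.auth import credentials as ga_credentials
    from google.cloud.firestore_admin_v1 import FirestoreAdminClient
    from google.cloud.firestore_admin_v1.services.firestore_admin import transports

    class AuditInterceptor(transports.FirestoreAdminRestInterceptor):
        def pre_clone_database(self, request, metadata):
            # Observe or amend the outbound request and metadata.
            print("cloning into:", request.database_id)
            return request, metadata

        def post_clone_database(self, response):
            # Observe the raw long-running Operation before wrapping.
            return response

    transport = transports.FirestoreAdminRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=AuditInterceptor(),
    )
    client = FirestoreAdminClient(transport=transport)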
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = firestore_admin.CloneDatabaseRequest( + **{"pitr_snapshot": {"database": "projects/sample1/sample2"}} + ) + + assert args[0] == request_msg + + expected_headers = {"project_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_clone_database_routing_parameters_request_2_rest(): + client = FirestoreAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.clone_database), "__call__") as call: + client.clone_database( + request={ + "pitr_snapshot": { + "database": "projects/sample1/databases/sample2/sample3" + } + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = firestore_admin.CloneDatabaseRequest( + **{ + "pitr_snapshot": { + "database": "projects/sample1/databases/sample2/sample3" + } + } + ) + + assert args[0] == request_msg + + expected_headers = {"project_id": "sample1", "database_id": "sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + def test_firestore_admin_rest_lro_client(): client = FirestoreAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -24817,6 +25526,7 @@ def test_firestore_admin_base_transport(): "list_backup_schedules", "update_backup_schedule", "delete_backup_schedule", + "clone_database", "get_operation", "cancel_operation", "delete_operation", @@ -25189,6 +25899,9 @@ def test_firestore_admin_client_transport_session_collision(transport_name): session1 = client1.transport.delete_backup_schedule._session session2 = client2.transport.delete_backup_schedule._session assert session1 != session2 + session1 = client1.transport.clone_database._session + session2 = client2.transport.clone_database._session + assert session1 != session2 def test_firestore_admin_grpc_transport_channel():