diff --git a/UPGRADING.md b/UPGRADING.md
new file mode 100644
index 000000000..9cdcf21a6
--- /dev/null
+++ b/UPGRADING.md
@@ -0,0 +1,18 @@
+# 3.0.0 Migration Guide
+
+The v3.0.0 release of `google-cloud-bigtable` deprecates the previous `google.cloud.bigtable.Client` class in favor of distinct clients for the two API surfaces, each offered in both sync and async variants:
+- Data API:
+  - `google.cloud.bigtable.data.BigtableDataClient`
+  - `google.cloud.bigtable.data.BigtableDataClientAsync`
+- Admin API:
+  - `google.cloud.bigtable.admin.BigtableInstanceAdminClient`
+  - `google.cloud.bigtable.admin.BigtableInstanceAdminAsyncClient`
+  - `google.cloud.bigtable.admin.BigtableTableAdminClient`
+  - `google.cloud.bigtable.admin.BigtableTableAdminAsyncClient`
+
+The deprecated client will remain available as an alternative API surface, internally delegating calls to the respective new clients. For most users, existing code will continue to work as before; however, this update does include some breaking changes, which are detailed in this document.
+
+If you experience technical issues or have questions, please file an [issue](https://github.com/googleapis/python-bigtable/issues).
+
+## Breaking Changes
+- **The `table` argument of [MutationsBatcher](https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py#L151) and [MutationsBatcherAsync](https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable/data/_async/mutations_batcher.py#L182) was renamed to `target`**, since the argument also accepts [Authorized View](https://github.com/googleapis/python-bigtable/pull/1034) instances. This matches the naming used in other classes (PR: https://github.com/googleapis/python-bigtable/pull/1153).
diff --git a/google/cloud/bigtable/data/_async/mutations_batcher.py b/google/cloud/bigtable/data/_async/mutations_batcher.py
index a8e99ea9e..6d87ff5d2 100644
--- a/google/cloud/bigtable/data/_async/mutations_batcher.py
+++ b/google/cloud/bigtable/data/_async/mutations_batcher.py
@@ -193,7 +193,7 @@ class MutationsBatcherAsync:
         - when batcher is closed or destroyed

     Args:
-        table: table or autrhorized_view used to preform rpc calls
+        target: table or authorized_view used to perform rpc calls
         flush_interval: Automatically flush every flush_interval seconds.
             If None, no time-based flushing is performed.
         flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count
@@ -212,7 +212,7 @@ class MutationsBatcherAsync:

     def __init__(
         self,
-        table: TargetType,
+        target: TargetType,
         *,
         flush_interval: float | None = 5,
         flush_limit_mutation_count: int | None = 1000,
@@ -225,14 +225,14 @@ def __init__(
         | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
     ):
         self._operation_timeout, self._attempt_timeout = _get_timeouts(
-            batch_operation_timeout, batch_attempt_timeout, table
+            batch_operation_timeout, batch_attempt_timeout, target
         )
         self._retryable_errors: list[type[Exception]] = _get_retryable_errors(
-            batch_retryable_errors, table
+            batch_retryable_errors, target
         )
         self._closed = CrossSync.Event()
-        self._target = table
+        self._target = target
         self._staged_entries: list[RowMutationEntry] = []
         self._staged_count, self._staged_bytes = 0, 0
         self._flow_control = CrossSync._FlowControl(
@@ -374,7 +374,7 @@ async def _execute_mutate_rows(
         Args:
             batch: list of RowMutationEntry objects to send to server
             timeout: timeout in seconds. Used as operation_timeout and attempt_timeout.
-                If not given, will use table defaults
+                If not given, will use target defaults
         Returns:
             list[FailedMutationEntryError]: list of FailedMutationEntryError objects
                 for mutations that failed.
diff --git a/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py b/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py
index 84f0ba8c0..c14606de9 100644
--- a/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py
+++ b/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py
@@ -162,7 +162,7 @@ class MutationsBatcher:
         - when batcher is closed or destroyed

     Args:
-        table: table or autrhorized_view used to preform rpc calls
+        target: table or authorized_view used to perform rpc calls
         flush_interval: Automatically flush every flush_interval seconds.
             If None, no time-based flushing is performed.
         flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count
@@ -181,7 +181,7 @@ class MutationsBatcher:

     def __init__(
         self,
-        table: TargetType,
+        target: TargetType,
         *,
         flush_interval: float | None = 5,
         flush_limit_mutation_count: int | None = 1000,
@@ -194,13 +194,13 @@ def __init__(
         | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
     ):
         (self._operation_timeout, self._attempt_timeout) = _get_timeouts(
-            batch_operation_timeout, batch_attempt_timeout, table
+            batch_operation_timeout, batch_attempt_timeout, target
         )
         self._retryable_errors: list[type[Exception]] = _get_retryable_errors(
-            batch_retryable_errors, table
+            batch_retryable_errors, target
         )
         self._closed = CrossSync._Sync_Impl.Event()
-        self._target = table
+        self._target = target
         self._staged_entries: list[RowMutationEntry] = []
         (self._staged_count, self._staged_bytes) = (0, 0)
         self._flow_control = CrossSync._Sync_Impl._FlowControl(
@@ -319,7 +319,7 @@ def _execute_mutate_rows(
         Args:
             batch: list of RowMutationEntry objects to send to server
             timeout: timeout in seconds. Used as operation_timeout and attempt_timeout.
-                If not given, will use table defaults
+                If not given, will use target defaults
         Returns:
             list[FailedMutationEntryError]: list of FailedMutationEntryError objects
                 for mutations that failed.
diff --git a/tests/unit/data/_async/test_mutations_batcher.py b/tests/unit/data/_async/test_mutations_batcher.py index 29f2f1026..e8df3eb42 100644 --- a/tests/unit/data/_async/test_mutations_batcher.py +++ b/tests/unit/data/_async/test_mutations_batcher.py @@ -304,22 +304,22 @@ class TestMutationsBatcherAsync: def _get_target_class(self): return CrossSync.MutationsBatcher - def _make_one(self, table=None, **kwargs): + def _make_one(self, target=None, **kwargs): from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import ServiceUnavailable - if table is None: - table = mock.Mock() - table._request_path = {"table_name": "table"} - table.app_profile_id = None - table.default_mutate_rows_operation_timeout = 10 - table.default_mutate_rows_attempt_timeout = 10 - table.default_mutate_rows_retryable_errors = ( + if target is None: + target = mock.Mock() + target._request_path = {"table_name": "table"} + target.app_profile_id = None + target.default_mutate_rows_operation_timeout = 10 + target.default_mutate_rows_attempt_timeout = 10 + target.default_mutate_rows_retryable_errors = ( DeadlineExceeded, ServiceUnavailable, ) - return self._get_target_class()(table, **kwargs) + return self._get_target_class()(target, **kwargs) @staticmethod def _make_mutation(count=1, size=1): @@ -333,12 +333,12 @@ async def test_ctor_defaults(self): with mock.patch.object( self._get_target_class(), "_timer_routine", return_value=CrossSync.Future() ) as flush_timer_mock: - table = mock.Mock() - table.default_mutate_rows_operation_timeout = 10 - table.default_mutate_rows_attempt_timeout = 8 - table.default_mutate_rows_retryable_errors = [Exception] - async with self._make_one(table) as instance: - assert instance._target == table + target = mock.Mock() + target.default_mutate_rows_operation_timeout = 10 + target.default_mutate_rows_attempt_timeout = 8 + target.default_mutate_rows_retryable_errors = [Exception] + async with self._make_one(target) as instance: + assert instance._target == target assert instance.closed is False assert instance._flush_jobs == set() assert len(instance._staged_entries) == 0 @@ -353,15 +353,15 @@ async def test_ctor_defaults(self): assert instance._entries_processed_since_last_raise == 0 assert ( instance._operation_timeout - == table.default_mutate_rows_operation_timeout + == target.default_mutate_rows_operation_timeout ) assert ( instance._attempt_timeout - == table.default_mutate_rows_attempt_timeout + == target.default_mutate_rows_attempt_timeout ) assert ( instance._retryable_errors - == table.default_mutate_rows_retryable_errors + == target.default_mutate_rows_retryable_errors ) await CrossSync.yield_to_event_loop() assert flush_timer_mock.call_count == 1 @@ -374,7 +374,7 @@ async def test_ctor_explicit(self): with mock.patch.object( self._get_target_class(), "_timer_routine", return_value=CrossSync.Future() ) as flush_timer_mock: - table = mock.Mock() + target = mock.Mock() flush_interval = 20 flush_limit_count = 17 flush_limit_bytes = 19 @@ -384,7 +384,7 @@ async def test_ctor_explicit(self): attempt_timeout = 2 retryable_errors = [Exception] async with self._make_one( - table, + target, flush_interval=flush_interval, flush_limit_mutation_count=flush_limit_count, flush_limit_bytes=flush_limit_bytes, @@ -394,7 +394,7 @@ async def test_ctor_explicit(self): batch_attempt_timeout=attempt_timeout, batch_retryable_errors=retryable_errors, ) as instance: - assert instance._target == table + assert instance._target == target assert instance.closed is False 
assert instance._flush_jobs == set() assert len(instance._staged_entries) == 0 @@ -426,20 +426,20 @@ async def test_ctor_no_flush_limits(self): with mock.patch.object( self._get_target_class(), "_timer_routine", return_value=CrossSync.Future() ) as flush_timer_mock: - table = mock.Mock() - table.default_mutate_rows_operation_timeout = 10 - table.default_mutate_rows_attempt_timeout = 8 - table.default_mutate_rows_retryable_errors = () + target = mock.Mock() + target.default_mutate_rows_operation_timeout = 10 + target.default_mutate_rows_attempt_timeout = 8 + target.default_mutate_rows_retryable_errors = () flush_interval = None flush_limit_count = None flush_limit_bytes = None async with self._make_one( - table, + target, flush_interval=flush_interval, flush_limit_mutation_count=flush_limit_count, flush_limit_bytes=flush_limit_bytes, ) as instance: - assert instance._target == table + assert instance._target == target assert instance.closed is False assert instance._staged_entries == [] assert len(instance._oldest_exceptions) == 0 @@ -480,7 +480,7 @@ def test_default_argument_consistency(self): batcher_init_signature = dict( inspect.signature(self._get_target_class()).parameters ) - batcher_init_signature.pop("table") + batcher_init_signature.pop("target") # both should have same number of arguments assert len(get_batcher_signature.keys()) == len(batcher_init_signature.keys()) assert len(get_batcher_signature) == 8 # update if expected params change diff --git a/tests/unit/data/_sync_autogen/test_mutations_batcher.py b/tests/unit/data/_sync_autogen/test_mutations_batcher.py index 72db64146..60a6708ba 100644 --- a/tests/unit/data/_sync_autogen/test_mutations_batcher.py +++ b/tests/unit/data/_sync_autogen/test_mutations_batcher.py @@ -254,21 +254,21 @@ class TestMutationsBatcher: def _get_target_class(self): return CrossSync._Sync_Impl.MutationsBatcher - def _make_one(self, table=None, **kwargs): + def _make_one(self, target=None, **kwargs): from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import ServiceUnavailable - if table is None: - table = mock.Mock() - table._request_path = {"table_name": "table"} - table.app_profile_id = None - table.default_mutate_rows_operation_timeout = 10 - table.default_mutate_rows_attempt_timeout = 10 - table.default_mutate_rows_retryable_errors = ( + if target is None: + target = mock.Mock() + target._request_path = {"table_name": "table"} + target.app_profile_id = None + target.default_mutate_rows_operation_timeout = 10 + target.default_mutate_rows_attempt_timeout = 10 + target.default_mutate_rows_retryable_errors = ( DeadlineExceeded, ServiceUnavailable, ) - return self._get_target_class()(table, **kwargs) + return self._get_target_class()(target, **kwargs) @staticmethod def _make_mutation(count=1, size=1): @@ -283,12 +283,12 @@ def test_ctor_defaults(self): "_timer_routine", return_value=CrossSync._Sync_Impl.Future(), ) as flush_timer_mock: - table = mock.Mock() - table.default_mutate_rows_operation_timeout = 10 - table.default_mutate_rows_attempt_timeout = 8 - table.default_mutate_rows_retryable_errors = [Exception] - with self._make_one(table) as instance: - assert instance._target == table + target = mock.Mock() + target.default_mutate_rows_operation_timeout = 10 + target.default_mutate_rows_attempt_timeout = 8 + target.default_mutate_rows_retryable_errors = [Exception] + with self._make_one(target) as instance: + assert instance._target == target assert instance.closed is False assert instance._flush_jobs == set() 
assert len(instance._staged_entries) == 0 @@ -303,15 +303,15 @@ def test_ctor_defaults(self): assert instance._entries_processed_since_last_raise == 0 assert ( instance._operation_timeout - == table.default_mutate_rows_operation_timeout + == target.default_mutate_rows_operation_timeout ) assert ( instance._attempt_timeout - == table.default_mutate_rows_attempt_timeout + == target.default_mutate_rows_attempt_timeout ) assert ( instance._retryable_errors - == table.default_mutate_rows_retryable_errors + == target.default_mutate_rows_retryable_errors ) CrossSync._Sync_Impl.yield_to_event_loop() assert flush_timer_mock.call_count == 1 @@ -325,7 +325,7 @@ def test_ctor_explicit(self): "_timer_routine", return_value=CrossSync._Sync_Impl.Future(), ) as flush_timer_mock: - table = mock.Mock() + target = mock.Mock() flush_interval = 20 flush_limit_count = 17 flush_limit_bytes = 19 @@ -335,7 +335,7 @@ def test_ctor_explicit(self): attempt_timeout = 2 retryable_errors = [Exception] with self._make_one( - table, + target, flush_interval=flush_interval, flush_limit_mutation_count=flush_limit_count, flush_limit_bytes=flush_limit_bytes, @@ -345,7 +345,7 @@ def test_ctor_explicit(self): batch_attempt_timeout=attempt_timeout, batch_retryable_errors=retryable_errors, ) as instance: - assert instance._target == table + assert instance._target == target assert instance.closed is False assert instance._flush_jobs == set() assert len(instance._staged_entries) == 0 @@ -378,20 +378,20 @@ def test_ctor_no_flush_limits(self): "_timer_routine", return_value=CrossSync._Sync_Impl.Future(), ) as flush_timer_mock: - table = mock.Mock() - table.default_mutate_rows_operation_timeout = 10 - table.default_mutate_rows_attempt_timeout = 8 - table.default_mutate_rows_retryable_errors = () + target = mock.Mock() + target.default_mutate_rows_operation_timeout = 10 + target.default_mutate_rows_attempt_timeout = 8 + target.default_mutate_rows_retryable_errors = () flush_interval = None flush_limit_count = None flush_limit_bytes = None with self._make_one( - table, + target, flush_interval=flush_interval, flush_limit_mutation_count=flush_limit_count, flush_limit_bytes=flush_limit_bytes, ) as instance: - assert instance._target == table + assert instance._target == target assert instance.closed is False assert instance._staged_entries == [] assert len(instance._oldest_exceptions) == 0 @@ -428,7 +428,7 @@ def test_default_argument_consistency(self): batcher_init_signature = dict( inspect.signature(self._get_target_class()).parameters ) - batcher_init_signature.pop("table") + batcher_init_signature.pop("target") assert len(get_batcher_signature.keys()) == len(batcher_init_signature.keys()) assert len(get_batcher_signature) == 8 assert set(get_batcher_signature.keys()) == set(batcher_init_signature.keys())
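To make the `UPGRADING.md` entry above concrete, here is a minimal migration sketch for callers that construct a batcher directly with a keyword argument. Only the `table` -> `target` keyword rename comes from this change; the import locations, the `get_table()` call, and the project/instance/table IDs are illustrative assumptions rather than part of this PR.

```python
# Sketch only: import paths, get_table(), and the IDs below are assumptions
# for illustration; the keyword rename itself is what this PR changes.
from google.cloud.bigtable.data import BigtableDataClient, MutationsBatcher
from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell

client = BigtableDataClient(project="my-project")
table = client.get_table("my-instance", "my-table")

# Before this change the first parameter was named `table`:
#     batcher = MutationsBatcher(table=table, flush_interval=10)
# After this change it is named `target`, since an AuthorizedView may be
# passed in place of a Table:
with MutationsBatcher(target=table, flush_interval=10) as batcher:
    batcher.append(
        RowMutationEntry(b"row-key", [SetCell("family", "qualifier", b"value")])
    )

# Callers that pass the table positionally, or that obtain a batcher from the
# table object (e.g. table.mutations_batcher(...)), are unaffected.
```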