From 7ae1028bb2f8d3d6a9e26795e00255dceb983d7c Mon Sep 17 00:00:00 2001 From: Linchin Date: Mon, 19 Aug 2024 14:26:11 -0700 Subject: [PATCH] use class method get_explain_metrics() instead of property explain_metrics --- google/cloud/firestore_v1/aggregation.py | 2 +- google/cloud/firestore_v1/query.py | 2 +- google/cloud/firestore_v1/query_results.py | 16 ++- google/cloud/firestore_v1/stream_generator.py | 3 +- google/cloud/firestore_v1/vector_query.py | 2 +- tests/system/test_system.py | 100 ++++++++++-------- tests/unit/v1/test_aggregation.py | 14 +-- tests/unit/v1/test_base_document.py | 4 +- tests/unit/v1/test_query.py | 14 +-- tests/unit/v1/test_query_results.py | 2 +- tests/unit/v1/test_stream_generator.py | 38 ++++--- tests/unit/v1/test_vector_query.py | 7 +- 12 files changed, 114 insertions(+), 90 deletions(-) diff --git a/google/cloud/firestore_v1/aggregation.py b/google/cloud/firestore_v1/aggregation.py index 9da7c2f5f..60659ff6b 100644 --- a/google/cloud/firestore_v1/aggregation.py +++ b/google/cloud/firestore_v1/aggregation.py @@ -98,7 +98,7 @@ def get( if explain_options is None: explain_metrics = None else: - explain_metrics = result.explain_metrics + explain_metrics = result.get_explain_metrics() return QueryResultsList(result_list, explain_options, explain_metrics) diff --git a/google/cloud/firestore_v1/query.py b/google/cloud/firestore_v1/query.py index ceecab0b5..53a477d2e 100644 --- a/google/cloud/firestore_v1/query.py +++ b/google/cloud/firestore_v1/query.py @@ -196,7 +196,7 @@ def get( if explain_options is None: explain_metrics = None else: - explain_metrics = result.explain_metrics + explain_metrics = result.get_explain_metrics() return QueryResultsList(result_list, explain_options, explain_metrics) diff --git a/google/cloud/firestore_v1/query_results.py b/google/cloud/firestore_v1/query_results.py index 5b45afa00..870be2453 100644 --- a/google/cloud/firestore_v1/query_results.py +++ b/google/cloud/firestore_v1/query_results.py @@ -53,11 +53,21 @@ def __init__( self._explain_metrics = explain_metrics @property - def explain_options(self): + def explain_options(self) -> Optional[ExplainOptions]: + """Query profiling options for getting these query results.""" return self._explain_options - @property - def explain_metrics(self): + def get_explain_metrics(self) -> ExplainMetrics: + """ + Get the metrics associated with the query execution. + Metrics are only available when explain_options is set on the query. If + ExplainOptions.analyze is False, only plan_summary is available. If it is + True, execution_stats is also available. + :rtype: :class:`~google.cloud.firestore_v1.query_profile.ExplainMetrics` + :returns: The metrics associated with the query execution. + :raises: :class:`~google.cloud.firestore_v1.query_profile.QueryExplainError` + if explain_metrics is not available on the query. + """ if self._explain_options is None: raise QueryExplainError("explain_options not set on query.") else: diff --git a/google/cloud/firestore_v1/stream_generator.py b/google/cloud/firestore_v1/stream_generator.py index b170b0ef3..5d674c348 100644 --- a/google/cloud/firestore_v1/stream_generator.py +++ b/google/cloud/firestore_v1/stream_generator.py @@ -73,8 +73,7 @@ def explain_options(self) -> ExplainOptions | None: """Query profiling options for this stream request.""" return self._explain_options - @property - def explain_metrics(self) -> ExplainMetrics: + def get_explain_metrics(self) -> ExplainMetrics: """ Get the metrics associated with the query execution. 
Metrics are only available when explain_options is set on the query. If diff --git a/google/cloud/firestore_v1/vector_query.py b/google/cloud/firestore_v1/vector_query.py index 356bc60be..a1fef88b0 100644 --- a/google/cloud/firestore_v1/vector_query.py +++ b/google/cloud/firestore_v1/vector_query.py @@ -98,7 +98,7 @@ def get( if explain_options is None: explain_metrics = None else: - explain_metrics = result.explain_metrics + explain_metrics = result.get_explain_metrics() return QueryResultsList(result_list, explain_options, explain_metrics) diff --git a/tests/system/test_system.py b/tests/system/test_system.py index 0858f5b78..04f8ca4b5 100644 --- a/tests/system/test_system.py +++ b/tests/system/test_system.py @@ -118,7 +118,7 @@ def test_collection_stream_or_get_w_no_explain_options(database, query_docs, met QueryExplainError, match="explain_options not set on query.", ): - results.explain_metrics + results.get_explain_metrics() @pytest.mark.skipif( @@ -142,10 +142,10 @@ def test_collection_stream_or_get_w_explain_options_analyze_false( method_under_test = getattr(collection, method) results = method_under_test(explain_options=ExplainOptions(analyze=False)) - assert isinstance(results.explain_metrics, ExplainMetrics) - - # Verify plan_summary. - plan_summary = results.explain_metrics.plan_summary + # Verify explain_metrics and plan_summary. + explain_metrics = results.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + plan_summary = explain_metrics.plan_summary assert isinstance(plan_summary, PlanSummary) assert len(plan_summary.indexes_used) > 0 assert plan_summary.indexes_used[0]["properties"] == "(__name__ ASC)" @@ -156,7 +156,7 @@ def test_collection_stream_or_get_w_explain_options_analyze_false( QueryExplainError, match="execution_stats not available when explain_options.analyze=False", ): - results.explain_metrics.execution_stats + explain_metrics.execution_stats @pytest.mark.skipif( @@ -188,21 +188,22 @@ def test_collection_stream_or_get_w_explain_options_analyze_true( QueryExplainError, match="explain_metrics not available until query is complete", ): - results.explain_metrics + results.get_explain_metrics() # Finish iterating results, and explain_metrics should be available. num_results = len(list(results)) - assert isinstance(results.explain_metrics, ExplainMetrics) - # Verify plan_summary. - plan_summary = results.explain_metrics.plan_summary + # Verify explain_metrics and plan_summary. + explain_metrics = results.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + plan_summary = explain_metrics.plan_summary assert isinstance(plan_summary, PlanSummary) assert len(plan_summary.indexes_used) > 0 assert plan_summary.indexes_used[0]["properties"] == "(__name__ ASC)" assert plan_summary.indexes_used[0]["query_scope"] == "Collection" # Verify execution_stats. 
- execution_stats = results.explain_metrics.execution_stats + execution_stats = explain_metrics.execution_stats assert isinstance(execution_stats, ExecutionStats) assert execution_stats.results_returned == num_results assert execution_stats.read_operations == num_results @@ -406,7 +407,7 @@ def test_vector_query_stream_or_get_w_no_explain_options(client, database, metho QueryExplainError, match="explain_options not set on query.", ): - results.explain_metrics + results.get_explain_metrics() @pytest.mark.skipif( @@ -446,14 +447,15 @@ def test_vector_query_stream_or_get_w_explain_options_analyze_true( QueryExplainError, match="explain_metrics not available until query is complete", ): - results.explain_metrics + results.get_explain_metrics() # Finish iterating results, and explain_metrics should be available. num_results = len(list(results)) - assert isinstance(results.explain_metrics, ExplainMetrics) - # Verify plan_summary. - plan_summary = results.explain_metrics.plan_summary + # Verify explain_metrics and plan_summary. + explain_metrics = results.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + plan_summary = explain_metrics.plan_summary assert isinstance(plan_summary, PlanSummary) assert len(plan_summary.indexes_used) > 0 assert ( @@ -463,7 +465,7 @@ def test_vector_query_stream_or_get_w_explain_options_analyze_true( assert plan_summary.indexes_used[0]["query_scope"] == "Collection group" # Verify execution_stats. - execution_stats = results.explain_metrics.execution_stats + execution_stats = explain_metrics.execution_stats assert isinstance(execution_stats, ExecutionStats) assert execution_stats.results_returned == num_results assert execution_stats.read_operations > 0 @@ -505,12 +507,13 @@ def test_vector_query_stream_or_get_w_explain_options_analyze_false( method_under_test = getattr(vector_query, method) results = method_under_test(explain_options=ExplainOptions(analyze=False)) - assert isinstance(results.explain_metrics, ExplainMetrics) results_list = list(results) assert len(results_list) == 0 - # Verify plan_summary. - plan_summary = results.explain_metrics.plan_summary + # Verify explain_metrics and plan_summary. + explain_metrics = results.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + plan_summary = explain_metrics.plan_summary assert isinstance(plan_summary, PlanSummary) assert len(plan_summary.indexes_used) > 0 assert ( @@ -524,7 +527,7 @@ def test_vector_query_stream_or_get_w_explain_options_analyze_false( QueryExplainError, match="execution_stats not available when explain_options.analyze=False", ): - results.explain_metrics.execution_stats + explain_metrics.execution_stats @pytest.mark.parametrize("database", [None, FIRESTORE_OTHER_DB], indirect=True) @@ -1188,7 +1191,7 @@ def test_query_stream_or_get_w_no_explain_options(query_docs, database, method): # If no explain_option is passed, raise an exception if explain_metrics # is called with pytest.raises(QueryExplainError, match="explain_options not set on query"): - results.explain_metrics + results.get_explain_metrics() @pytest.mark.skipif( @@ -1222,21 +1225,22 @@ def test_query_stream_or_get_w_explain_options_analyze_true( QueryExplainError, match="explain_metrics not available until query is complete", ): - results.explain_metrics + results.get_explain_metrics() # Finish iterating results, and explain_metrics should be available. num_results = len(list(results)) - assert isinstance(results.explain_metrics, ExplainMetrics) - # Verify plan_summary. 
- plan_summary = results.explain_metrics.plan_summary + # Verify explain_metrics and plan_summary. + explain_metrics = results.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + plan_summary = explain_metrics.plan_summary assert isinstance(plan_summary, PlanSummary) assert len(plan_summary.indexes_used) > 0 assert plan_summary.indexes_used[0]["properties"] == "(a ASC, __name__ ASC)" assert plan_summary.indexes_used[0]["query_scope"] == "Collection" # Verify execution_stats. - execution_stats = results.explain_metrics.execution_stats + execution_stats = explain_metrics.execution_stats assert isinstance(execution_stats, ExecutionStats) assert execution_stats.results_returned == num_results assert execution_stats.read_operations == num_results @@ -1273,13 +1277,13 @@ def test_query_stream_or_get_w_explain_options_analyze_false( method_under_test = getattr(query, method) results = method_under_test(explain_options=ExplainOptions(analyze=False)) - assert isinstance(results.explain_metrics, ExplainMetrics) - results_list = list(results) assert len(results_list) == 0 - # Verify plan_summary. - plan_summary = results.explain_metrics.plan_summary + # Verify explain_metrics and plan_summary. + explain_metrics = results.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + plan_summary = explain_metrics.plan_summary assert isinstance(plan_summary, PlanSummary) assert len(plan_summary.indexes_used) > 0 assert plan_summary.indexes_used[0]["properties"] == "(a ASC, __name__ ASC)" @@ -1290,7 +1294,7 @@ def test_query_stream_or_get_w_explain_options_analyze_false( QueryExplainError, match="execution_stats not available when explain_options.analyze=False", ): - results.explain_metrics.execution_stats + explain_metrics.execution_stats @pytest.mark.parametrize("database", [None, FIRESTORE_OTHER_DB], indirect=True) @@ -2688,7 +2692,7 @@ def test_aggregation_query_stream_or_get_w_no_explain_options(query, database, m # If no explain_option is passed, raise an exception if explain_metrics # is called with pytest.raises(QueryExplainError, match="explain_options not set on query"): - results.explain_metrics + results.get_explain_metrics() @pytest.mark.skipif( @@ -2726,21 +2730,22 @@ def test_aggregation_query_stream_or_get_w_explain_options_analyze_true( QueryExplainError, match="explain_metrics not available until query is complete", ): - results.explain_metrics + results.get_explain_metrics() # Finish iterating results, and explain_metrics should be available. num_results = len(list(results)) - assert isinstance(results.explain_metrics, ExplainMetrics) - # Verify plan_summary. - plan_summary = results.explain_metrics.plan_summary + # Verify explain_metrics and plan_summary. + explain_metrics = results.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + plan_summary = explain_metrics.plan_summary assert isinstance(plan_summary, PlanSummary) assert len(plan_summary.indexes_used) > 0 assert plan_summary.indexes_used[0]["properties"] == "(a ASC, __name__ ASC)" assert plan_summary.indexes_used[0]["query_scope"] == "Collection" # Verify execution_stats. 
- execution_stats = results.explain_metrics.execution_stats + execution_stats = explain_metrics.execution_stats assert isinstance(execution_stats, ExecutionStats) assert execution_stats.results_returned == num_results assert execution_stats.read_operations == num_results @@ -2781,10 +2786,10 @@ def test_aggregation_query_stream_or_get_w_explain_options_analyze_false( method_under_test = getattr(count_query, method) results = method_under_test(explain_options=ExplainOptions(analyze=False)) - assert isinstance(results.explain_metrics, ExplainMetrics) - - # Verify plan_summary. - plan_summary = results.explain_metrics.plan_summary + # Verify explain_metrics and plan_summary. + explain_metrics = results.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + plan_summary = explain_metrics.plan_summary assert isinstance(plan_summary, PlanSummary) assert len(plan_summary.indexes_used) > 0 assert plan_summary.indexes_used[0]["properties"] == "(a ASC, __name__ ASC)" @@ -2795,7 +2800,7 @@ def test_aggregation_query_stream_or_get_w_explain_options_analyze_false( QueryExplainError, match="execution_stats not available when explain_options.analyze=False", ): - results.explain_metrics.execution_stats + explain_metrics.execution_stats @pytest.mark.parametrize("database", [None, FIRESTORE_OTHER_DB], indirect=True) @@ -3009,15 +3014,16 @@ def in_transaction(transaction): with pytest.raises( QueryExplainError, match="explain_options not set on query." ): - result_1.explain_metrics + result_1.get_explain_metrics() result_2 = query.get( transaction=transaction, explain_options=ExplainOptions(analyze=True), ) - assert isinstance(result_2.explain_metrics, ExplainMetrics) - assert result_2.explain_metrics.plan_summary is not None - assert result_2.explain_metrics.execution_stats is not None + explain_metrics = result_2.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + assert explain_metrics.plan_summary is not None + assert explain_metrics.execution_stats is not None inner_fn_ran = True diff --git a/tests/unit/v1/test_aggregation.py b/tests/unit/v1/test_aggregation.py index 0c03dbdbf..4d1eed198 100644 --- a/tests/unit/v1/test_aggregation.py +++ b/tests/unit/v1/test_aggregation.py @@ -443,10 +443,11 @@ def _aggregation_query_get_helper( if explain_options is None: with pytest.raises(QueryExplainError, match="explain_options not set"): - returned.explain_metrics + returned.get_explain_metrics() else: - assert isinstance(returned.explain_metrics, ExplainMetrics) - assert returned.explain_metrics.execution_stats.results_returned == 1 + actual_explain_metrics = returned.get_explain_metrics() + assert isinstance(actual_explain_metrics, ExplainMetrics) + assert actual_explain_metrics.execution_stats.results_returned == 1 parent_path, _ = parent._parent_info() expected_request = { @@ -728,10 +729,11 @@ def _aggregation_query_stream_helper( if explain_options is None: with pytest.raises(QueryExplainError, match="explain_options not set"): - returned.explain_metrics + returned.get_explain_metrics() else: - assert isinstance(returned.explain_metrics, ExplainMetrics) - assert returned.explain_metrics.execution_stats.results_returned == 1 + explain_metrics = returned.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + assert explain_metrics.execution_stats.results_returned == 1 parent_path, _ = parent._parent_info() expected_request = { diff --git a/tests/unit/v1/test_base_document.py b/tests/unit/v1/test_base_document.py index a1f2a85b4..0dffbb294 
100644 --- a/tests/unit/v1/test_base_document.py +++ b/tests/unit/v1/test_base_document.py @@ -433,7 +433,7 @@ def test_query_results_list_explain_metrics_w_explain_options(): explain_metrics=explain_metrics, ) - assert snapshot_list.explain_metrics == explain_metrics + assert snapshot_list.get_explain_metrics() == explain_metrics def test_query_results_list_explain_metrics_wo_explain_options(): @@ -442,7 +442,7 @@ def test_query_results_list_explain_metrics_wo_explain_options(): snapshot_list = _make_query_results_list([]) with pytest.raises(QueryExplainError): - snapshot_list.explain_metrics + snapshot_list.get_explain_metrics() def test__get_document_path(): diff --git a/tests/unit/v1/test_query.py b/tests/unit/v1/test_query.py index e8ce6d1b3..177487902 100644 --- a/tests/unit/v1/test_query.py +++ b/tests/unit/v1/test_query.py @@ -82,10 +82,11 @@ def _query_get_helper( if explain_options is None: with pytest.raises(QueryExplainError, match="explain_options not set"): - returned.explain_metrics + returned.get_explain_metrics() else: - assert isinstance(returned.explain_metrics, ExplainMetrics) - assert returned.explain_metrics.execution_stats.results_returned == 1 + actual_explain_metrics = returned.get_explain_metrics() + assert isinstance(actual_explain_metrics, ExplainMetrics) + assert actual_explain_metrics.execution_stats.results_returned == 1 # Create expected request body. parent_path, _ = parent._parent_info() @@ -379,10 +380,11 @@ def _query_stream_helper( if explain_options is None: with pytest.raises(QueryExplainError, match="explain_options not set"): - get_response.explain_metrics + get_response.get_explain_metrics() else: - assert isinstance(get_response.explain_metrics, ExplainMetrics) - assert get_response.explain_metrics.execution_stats.results_returned == 1 + explain_metrics = get_response.get_explain_metrics() + assert isinstance(explain_metrics, ExplainMetrics) + assert explain_metrics.execution_stats.results_returned == 1 # Create expected request body. parent_path, _ = parent._parent_info() diff --git a/tests/unit/v1/test_query_results.py b/tests/unit/v1/test_query_results.py index b01a3c029..7ec1b7a67 100644 --- a/tests/unit/v1/test_query_results.py +++ b/tests/unit/v1/test_query_results.py @@ -99,4 +99,4 @@ def test_query_results_list_explain_metrics_w_explain_options(): explain_metrics=explain_metrics, ) - assert snapshot_list.explain_metrics == explain_metrics + assert snapshot_list.get_explain_metrics() == explain_metrics diff --git a/tests/unit/v1/test_stream_generator.py b/tests/unit/v1/test_stream_generator.py index 7afc0fc9a..0e8a55260 100644 --- a/tests/unit/v1/test_stream_generator.py +++ b/tests/unit/v1/test_stream_generator.py @@ -152,27 +152,28 @@ def test_stream_generator_explain_metrics_explain_options_analyze_true(): ) explain_options = query_profile.ExplainOptions(analyze=True) - explain_metrics = query_profile_pb2.ExplainMetrics( + expected_explain_metrics = query_profile_pb2.ExplainMetrics( plan_summary=plan_summary, execution_stats=execution_stats, ) - inst = _make_stream_generator(iterator, explain_options, explain_metrics) + inst = _make_stream_generator(iterator, explain_options, expected_explain_metrics) # Raise an exception if query isn't complete when explain_metrics is called. 
with pytest.raises( query_profile.QueryExplainError, match="explain_metrics not available until query is complete.", ): - inst.explain_metrics + inst.get_explain_metrics() list(inst) - assert isinstance(inst.explain_metrics, query_profile._ExplainAnalyzeMetrics) - assert inst.explain_metrics == query_profile.ExplainMetrics._from_pb( - explain_metrics + actual_explain_metrics = inst.get_explain_metrics() + assert isinstance(actual_explain_metrics, query_profile._ExplainAnalyzeMetrics) + assert actual_explain_metrics == query_profile.ExplainMetrics._from_pb( + expected_explain_metrics ) - assert inst.explain_metrics.plan_summary.indexes_used == [ + assert actual_explain_metrics.plan_summary.indexes_used == [ { "indexes_used": { "query_scope": "Collection", @@ -180,17 +181,17 @@ def test_stream_generator_explain_metrics_explain_options_analyze_true(): } } ] - assert inst.explain_metrics.execution_stats.results_returned == 1 - duration = inst.explain_metrics.execution_stats.execution_duration.total_seconds() + assert actual_explain_metrics.execution_stats.results_returned == 1 + duration = actual_explain_metrics.execution_stats.execution_duration.total_seconds() assert duration == 2 - assert inst.explain_metrics.execution_stats.read_operations == 3 + assert actual_explain_metrics.execution_stats.read_operations == 3 expected_debug_stats = { "billing_details": "billing_details_results", "documents_scanned": "documents_scanned_results", "index_entries_scanned": "index_entries_scanned", } - assert inst.explain_metrics.execution_stats.debug_stats == expected_debug_stats + assert actual_explain_metrics.execution_stats.debug_stats == expected_debug_stats def test_stream_generator_explain_metrics_explain_options_analyze_false(): @@ -214,11 +215,14 @@ def test_stream_generator_explain_metrics_explain_options_analyze_false(): } plan_summary = query_profile_pb2.PlanSummary() plan_summary.indexes_used.append(indexes_used_dict) - explain_metrics = query_profile_pb2.ExplainMetrics(plan_summary=plan_summary) + expected_explain_metrics = query_profile_pb2.ExplainMetrics( + plan_summary=plan_summary + ) - inst = _make_stream_generator(iterator, explain_options, explain_metrics) - assert isinstance(inst.explain_metrics, query_profile.ExplainMetrics) - assert inst.explain_metrics.plan_summary.indexes_used == [ + inst = _make_stream_generator(iterator, explain_options, expected_explain_metrics) + actual_explain_metrics = inst.get_explain_metrics() + assert isinstance(actual_explain_metrics, query_profile.ExplainMetrics) + assert actual_explain_metrics.plan_summary.indexes_used == [ { "indexes_used": { "query_scope": "Collection", @@ -236,7 +240,7 @@ def test_stream_generator_explain_metrics_missing_explain_options_analyze_false( with pytest.raises( query_profile.QueryExplainError, match="Did not receive explain_metrics" ): - inst.explain_metrics + inst.get_explain_metrics() def test_stream_generator_explain_metrics_no_explain_options(): @@ -248,4 +252,4 @@ def test_stream_generator_explain_metrics_no_explain_options(): QueryExplainError, match="explain_options not set on query.", ): - inst.explain_metrics + inst.get_explain_metrics() diff --git a/tests/unit/v1/test_vector_query.py b/tests/unit/v1/test_vector_query.py index 1fb06b714..cd8aca3f3 100644 --- a/tests/unit/v1/test_vector_query.py +++ b/tests/unit/v1/test_vector_query.py @@ -152,10 +152,11 @@ def _vector_query_get_helper(distance_measure, expected_distance, explain_option if explain_options is None: with pytest.raises(QueryExplainError, 
match="explain_options not set"): - returned.explain_metrics + returned.get_explain_metrics() else: - assert isinstance(returned.explain_metrics, ExplainMetrics) - assert returned.explain_metrics.execution_stats.results_returned == 1 + actual_explain_metrics = returned.get_explain_metrics() + assert isinstance(actual_explain_metrics, ExplainMetrics) + assert actual_explain_metrics.execution_stats.results_returned == 1 expected_pb = _expected_pb( parent=parent,