diff --git a/tests/webapp/api/test_perfcompare_api.py b/tests/webapp/api/test_perfcompare_api.py
index af955fb1cff..3e6956eccbd 100644
--- a/tests/webapp/api/test_perfcompare_api.py
+++ b/tests/webapp/api/test_perfcompare_api.py
@@ -113,7 +113,6 @@ def test_perfcompare_results_against_no_base(
             "framework_id": base_sig.framework.id,
             "platform": base_sig.platform.platform,
             "suite": base_sig.suite,
-            "is_empty": False,
             "header_name": response["header_name"],
             "base_repository_name": base_sig.repository.name,
             "new_repository_name": new_sig.repository.name,
@@ -283,7 +282,6 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
             "framework_id": base_sig.framework.id,
             "platform": base_sig.platform.platform,
             "suite": base_sig.suite,
-            "is_empty": False,
             "header_name": response["header_name"],
             "base_repository_name": base_sig.repository.name,
             "new_repository_name": new_sig.repository.name,
@@ -352,6 +350,147 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
     assert expected[0] == response.json()[0]
 
 
+def test_perfcompare_results_without_base_signature(
+    client,
+    create_signature,
+    create_perf_datum,
+    test_perf_signature,
+    test_repository,
+    try_repository,
+    eleven_jobs_stored,
+    test_perfcomp_push,
+    test_perfcomp_push_2,
+    test_linux_platform,
+    test_option_collection,
+):
+    perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by("push__time").all()
+
+    test_perfcomp_push.time = THREE_DAYS_AGO
+    test_perfcomp_push.repository = try_repository
+    test_perfcomp_push.save()
+
+    test_perfcomp_push_2.time = datetime.datetime.now()
+    test_perfcomp_push_2.save()
+
+    suite = "a11yr"
+    test = "dhtml.html"
+    extra_options = "e10s fission stylo webrender"
+    measurement_unit = "ms"
+    new_application = "geckoview"
+
+    new_perf_data_values = [40.2]
+
+    new_sig = create_signature(
+        signature_hash=(20 * "t2"),
+        extra_options=extra_options,
+        platform=test_linux_platform,
+        measurement_unit=measurement_unit,
+        suite=suite,
+        test=test,
+        test_perf_signature=test_perf_signature,
+        repository=test_repository,
+        application=new_application,
+    )
+
+    job = perf_jobs[1]
+    job.push = test_perfcomp_push_2
+    job.save()
+    perf_datum = PerformanceDatum.objects.create(
+        value=new_perf_data_values[0],
+        push_timestamp=job.push.time,
+        job=job,
+        push=job.push,
+        repository=job.repository,
+        signature=new_sig,
+    )
+    perf_datum.push.time = job.push.time
+    perf_datum.push.save()
+
+    response = get_expected(
+        None,
+        new_sig,
+        extra_options,
+        test_option_collection,
+        new_perf_data_values,
+        [],
+    )
+
+    expected = [
+        {
+            "base_rev": test_perfcomp_push.revision,
+            "new_rev": test_perfcomp_push_2.revision,
+            "framework_id": new_sig.framework.id,
+            "platform": new_sig.platform.platform,
+            "suite": "",
+            "header_name": response["header_name"],
+            "base_repository_name": try_repository.name,
+            "new_repository_name": new_sig.repository.name,
+            "base_app": "",
+            "new_app": "geckoview",
+            "is_complete": False,
+            "base_measurement_unit": "",
+            "new_measurement_unit": new_sig.measurement_unit,
+            "base_retriggerable_job_ids": [],
+            "new_retriggerable_job_ids": [2],
+            "base_runs": [],
+            "new_runs": new_perf_data_values,
+            "base_runs_replicates": [],
+            "new_runs_replicates": [],
+            "base_avg_value": 0,
+            "new_avg_value": round(response["new_avg_value"], 2),
+            "base_median_value": 0,
+            "new_median_value": round(response["new_median_value"], 2),
+            "test": "",
+            "option_name": "",
+            "extra_options": "",
+            "base_stddev": 0,
+            "new_stddev": 0,
+            "base_stddev_pct": 0,
+            "new_stddev_pct": 0,
+            "confidence": 0,
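+            # base_* entries above stay empty/0: this scenario has no base signature to compare against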
"confidence_text": response["confidence_text"], + "delta_value": round(response["delta_value"], 2), + "delta_percentage": 0, + "magnitude": 0, + "new_is_better": True, + "lower_is_better": False, + "is_confident": response["is_confident"], + "more_runs_are_needed": False, + "noise_metric": False, + "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&" + f"highlightedRevisions={test_perfcomp_push_2.revision}&" + f"series={try_repository.name}%2C{new_sig.signature_hash}%2C1%2C{new_sig.framework.id}&" + f"series={test_repository.name}%2C{new_sig.signature_hash}%2C1%2C{new_sig.framework.id}&" + f"timerange=604800", + "is_improvement": response["is_improvement"], + "is_regression": response["is_regression"], + "is_meaningful": response["is_meaningful"], + "base_parent_signature": response["base_parent_signature"], + "new_parent_signature": response["new_parent_signature"], + "base_signature_id": response["base_signature_id"], + "new_signature_id": response["new_signature_id"], + "has_subtests": response["has_subtests"], + }, + ] + + query_params = ( + "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={" + "}&no_subtests=true".format( + try_repository.name, + test_repository.name, + test_perfcomp_push.revision, + test_perfcomp_push_2.revision, + test_perf_signature.framework_id, + ) + ) + + response = client.get(reverse("perfcompare-results") + query_params) + + print("+++++++++++++++++++", response.json()) + assert response.status_code == 200 + assert expected[0] == response.json()[0] + + def test_perfcompare_results_subtests_support( client, create_signature, @@ -456,7 +595,6 @@ def test_perfcompare_results_subtests_support( "framework_id": base_sig.framework.id, "platform": base_sig.platform.platform, "suite": base_sig.suite, - "is_empty": False, "header_name": response["header_name"], "base_repository_name": base_sig.repository.name, "new_repository_name": new_sig.repository.name, @@ -625,7 +763,6 @@ def test_perfcompare_results_multiple_runs( "framework_id": sig1.framework.id, "platform": sig1.platform.platform, "suite": sig1.suite, - "is_empty": False, "header_name": first_row["header_name"], "base_repository_name": sig1.repository.name, "new_repository_name": sig2.repository.name, @@ -674,7 +811,6 @@ def test_perfcompare_results_multiple_runs( "framework_id": sig3.framework.id, "platform": sig3.platform.platform, "suite": sig3.suite, - "is_empty": False, "header_name": second_row["header_name"], "base_repository_name": sig3.repository.name, "new_repository_name": sig4.repository.name, @@ -800,8 +936,9 @@ def get_expected( new_perf_data_values, base_perf_data_values, ): - response = {"option_name": test_option_collection.get(base_sig.option_collection_id, "")} - test_suite = perfcompare_utils.get_test_suite(base_sig.suite, base_sig.test) + sig = base_sig if base_sig else new_sig + response = {"option_name": test_option_collection.get(sig.option_collection_id, "")} + test_suite = perfcompare_utils.get_test_suite(sig.suite, sig.test) response["header_name"] = perfcompare_utils.get_header_name( extra_options, response["option_name"], test_suite ) @@ -833,9 +970,9 @@ def get_expected( ) response["magnitude"] = perfcompare_utils.get_magnitude(response["delta_pct"]) response["new_is_better"] = perfcompare_utils.is_new_better( - response["delta_value"], base_sig.lower_is_better + response["delta_value"], sig.lower_is_better ) - response["lower_is_better"] = base_sig.lower_is_better + 
response["lower_is_better"] = sig.lower_is_better response["confidence"] = perfcompare_utils.get_abs_ttest_value( base_perf_data_values, new_perf_data_values ) @@ -857,12 +994,14 @@ def get_expected( response["is_regression"] = class_name == "danger" response["is_meaningful"] = class_name == "" response["base_parent_signature"] = ( - base_sig.parent_signature.id if base_sig.parent_signature else None + base_sig.parent_signature.id if base_sig and base_sig.parent_signature else None ) response["new_parent_signature"] = ( - new_sig.parent_signature.id if base_sig.parent_signature else None + new_sig.parent_signature.id if new_sig and new_sig.parent_signature else None + ) + response["base_signature_id"] = base_sig.id if base_sig else None + response["new_signature_id"] = new_sig.id if new_sig else None + response["has_subtests"] = (base_sig.has_subtests if base_sig else False) or ( + new_sig.has_subtests if new_sig else False ) - response["base_signature_id"] = base_sig.id - response["new_signature_id"] = new_sig.id - response["has_subtests"] = base_sig.has_subtests or new_sig.has_subtests return response diff --git a/treeherder/webapp/api/performance_data.py b/treeherder/webapp/api/performance_data.py index 48c0018c4e2..2b8cec2bc1b 100644 --- a/treeherder/webapp/api/performance_data.py +++ b/treeherder/webapp/api/performance_data.py @@ -952,15 +952,10 @@ def list(self, request): for platform in platforms: sig_identifier = perfcompare_utils.get_sig_identifier(header, platform) base_sig = base_signatures_map.get(sig_identifier, {}) - base_sig_id = base_sig.get("id", "") + base_sig_id = base_sig.get("id", None) new_sig = new_signatures_map.get(sig_identifier, {}) - new_sig_id = new_sig.get("id", "") + new_sig_id = new_sig.get("id", None) lower_is_better = base_sig.get("lower_is_better", "") - is_empty = not ( - base_sig and new_sig - ) # ensures there are signatures for base and new - if is_empty: - continue base_perf_data_values = base_grouped_values.get(base_sig_id, []) new_perf_data_values = new_grouped_values.get(new_sig_id, []) base_perf_data_replicates = base_grouped_replicates.get(base_sig_id, []) @@ -1019,7 +1014,6 @@ def list(self, request): "test": base_sig.get("test", ""), # same test for base_result and new_result "is_complete": is_complete, "framework_id": framework, - "is_empty": is_empty, "option_name": option_collection_map.get( base_sig.get("option_collection_id", ""), "" ), diff --git a/treeherder/webapp/api/performance_serializers.py b/treeherder/webapp/api/performance_serializers.py index 16b4475eca0..2b1a3b24c97 100644 --- a/treeherder/webapp/api/performance_serializers.py +++ b/treeherder/webapp/api/performance_serializers.py @@ -504,7 +504,6 @@ class PerfCompareResultsSerializer(serializers.ModelSerializer): max_length=10, default="", ) - is_empty = serializers.BooleanField() is_complete = serializers.BooleanField() platform = serializers.CharField() header_name = serializers.CharField() @@ -569,7 +568,6 @@ class Meta: "framework_id", "platform", "suite", - "is_empty", "header_name", "base_repository_name", "new_repository_name",