1 change: 1 addition & 0 deletions .gitignore
@@ -10,3 +10,4 @@ lnt/server/ui/static/docs
test_run_tmp
tests/**/Output
venv
*~

Contributor:
This is for what exactly, some kind of temp file?

Contributor Author:
Yeah, for Emacs and some other editors. I didn't mean to include this in this PR though, will take it out.

26 changes: 26 additions & 0 deletions lnt/server/db/migrations/upgrade_17_to_18.py
@@ -0,0 +1,26 @@
"""Adds a ignore_same_hash column to the sample fields table and sets it to
true for execution_time.

"""

from sqlalchemy import Column, Integer, update

from lnt.server.db.migrations.util import introspect_table
from lnt.server.db.util import add_column


def upgrade(engine):
    # Add the new column; it defaults to 0 (false) for every sample field.
    ignore_same_hash = Column("ignore_same_hash", Integer, default=0)
    add_column(engine, "TestSuiteSampleFields", ignore_same_hash)

    # Backfill existing rows, then enable the flag for execution_time.
    test_suite_sample_fields = introspect_table(engine, "TestSuiteSampleFields")
    set_init_value = update(test_suite_sample_fields).values(ignore_same_hash=0)
    set_exec_time = (
        update(test_suite_sample_fields)
        .where(test_suite_sample_fields.c.Name == "execution_time")
        .values(ignore_same_hash=1)
    )

    with engine.begin() as trans:
        trans.execute(set_init_value)
        trans.execute(set_exec_time)
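
For local verification, the upgrade can be run standalone against a copy of a schema-version-17 database (a minimal sketch — the SQLite path here is an assumed example, not part of the PR):

from sqlalchemy import create_engine

from lnt.server.db.migrations.upgrade_17_to_18 import upgrade

# Assumes a copy of an existing v17 LNT database, so that the
# TestSuiteSampleFields table is already present for add_column.
engine = create_engine('sqlite:///scratch_lnt.db')
upgrade(engine)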
12 changes: 11 additions & 1 deletion lnt/server/db/testsuite.py
@@ -173,6 +173,7 @@ def from_json(data):
        for index, metric_desc in enumerate(data['metrics']):
            name = metric_desc['name']
            bigger_is_better = metric_desc.get('bigger_is_better', False)
            ignore_same_hash = metric_desc.get('ignore_same_hash', False)
            metric_type_name = metric_desc.get('type', 'Real')
            display_name = metric_desc.get('display_name')
            unit = metric_desc.get('unit')
@@ -182,8 +183,10 @@
                                     metric_type_name)
            metric_type = SampleType(metric_type_name)
            bigger_is_better_int = 1 if bigger_is_better else 0
            ignore_same_hash_int = 1 if ignore_same_hash else 0
            field = SampleField(name, metric_type, index, status_field=None,
                                bigger_is_better=bigger_is_better_int,
                                ignore_same_hash=ignore_same_hash_int,
                                display_name=display_name, unit=unit,
                                unit_abbrev=unit_abbrev)
            sample_fields.append(field)
@@ -196,6 +199,7 @@ def __json__(self):
        for sample_field in self.sample_fields:
            metric = {
                'bigger_is_better': (sample_field.bigger_is_better != 0),
                'ignore_same_hash': (sample_field.ignore_same_hash != 0),
                'display_name': sample_field.display_name,
                'name': sample_field.name,
                'type': sample_field.type.name,
@@ -340,12 +344,18 @@ class SampleField(FieldMixin, Base):
    # This assumption can be inverted by setting this column to nonzero.
    bigger_is_better = Column("bigger_is_better", Integer)

    # Some fields like execution_time should ignore changes if the binary hash
    # is the same.
    ignore_same_hash = Column("ignore_same_hash", Integer, default=0)

    def __init__(self, name, type, schema_index, status_field=None, bigger_is_better=0,
                 ignore_same_hash=0,
                 display_name=None, unit=None, unit_abbrev=None):
        self.name = name
        self.type = type
        self.status_field = status_field
        self.bigger_is_better = bigger_is_better
        self.ignore_same_hash = ignore_same_hash
        self.display_name = name if display_name is None else display_name
        self.unit = unit
        self.unit_abbrev = unit_abbrev
@@ -367,7 +377,7 @@ def __repr__(self):

    def __copy__(self):
        return SampleField(self.name, self.type, self.schema_index, self.status_field,
                           self.bigger_is_better, self.ignore_same_hash, self.display_name, self.unit,
                           self.unit_abbrev)

    def copy_info(self, other):
19 changes: 15 additions & 4 deletions lnt/server/reporting/analysis.py
@@ -54,7 +54,8 @@ class ComparisonResult:
    def __init__(self, aggregation_fn,
                 cur_failed, prev_failed, samples, prev_samples,
                 cur_hash, prev_hash, cur_profile=None, prev_profile=None,
                 confidence_lv=0.05, bigger_is_better=False,
                 ignore_same_hash=False):
        self.aggregation_fn = aggregation_fn

        # Special case: if we're using the minimum to aggregate, swap it for
@@ -103,6 +104,7 @@ def __init__(self, aggregation_fn,

        self.confidence_lv = confidence_lv
        self.bigger_is_better = bigger_is_better
        self.ignore_same_hash = ignore_same_hash

    def __repr__(self):
        """Print this ComparisonResult's constructor.
@@ -118,7 +120,8 @@ def __repr__(self):
                self.samples,
                self.prev_samples,
                self.confidence_lv,
                bool(self.bigger_is_better),
                bool(self.ignore_same_hash))

    def __json__(self):
        simple_dict = self.__dict__
@@ -176,6 +179,12 @@ def get_value_status(self, confidence_interval=2.576,
        elif self.prev_failed:
            return UNCHANGED_PASS

        # Ignore changes if the hash of the binary is the same and the field is
        # sensitive to the hash, e.g. execution time.
        if self.ignore_same_hash:
            if self.cur_hash and self.prev_hash and self.cur_hash == self.prev_hash:

Contributor:
Not sure about completely ignoring these: if identical binaries show changes, that can be a good indication of the noise level, and changed binaries may also be impacted by the same noise. Not sure if that's possible, but it may be good to display the results for binaries with the same hash separately.

Contributor Author:
FWIW LNT already detects noisy results based on the stddev and ignores them; the code for it is later in this function.

This also only affects when regressions are flagged, i.e. the "Run-over-run changes detail > performance regressions - execution time" table at the top. You can still see the differences between runs in the test results table below when you check "show all values", which will reveal the noisy tests.

Member:
I tend to agree with @fhahn; I don't really understand why we'd ignore subsequent results entirely.

I also don't fully understand the impact of this change: for multi-valued runs (e.g. running the same program multiple times and submitting multiple execution times for it), what does this PR change, if anything? I'm not familiar with how ComparisonResult is used, so that might be part of my confusion.

Contributor Author:
This is addressing a long-standing FIXME; see above in the code.

LNT flags improvements and regressions when a significant change is detected between runs. It still always saves all the results of each run and you can always still view them. This just determines what is flagged to the user in the regressions list, i.e. this page here: https://cc-perf.igalia.com/db_default/v4/nts/regressions/?state=0

It ignores changes that aren't significant or are likely noise, e.g. smaller than MIN_PERCENTAGE_CHANGE. For runs with multiple samples it also uses the standard deviation and the Mann-Whitney U test to ignore changes that are statistically likely to be noise.

LNT has always done this to remove false positives from the list of regressions. This list of regressions is what you read on a daily basis from the LNT reports that are sent out by email etc., so the regressions should be as actionable as possible.

Some tests that are only slightly noisy still slip through the statistical checks, but given that the binary hasn't changed we shouldn't flag them as regressions. Here's an example from cc-perf.igalia.com, where the colour of each run indicates the binary hash. The Equivalencing-flt binary hasn't changed over the past 7 runs, but there are 3 improvements detected in the green boxes. This PR would stop them from being flagged. It would, however, ensure that the improvements in miniFE above are still flagged, because the hashes are different.

[screenshot: run history coloured by binary hash, showing Equivalencing-flt with an unchanged binary and miniFE with changed binaries]
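
To make the order of checks concrete, here's a rough sketch of the logic in get_value_status after this PR (illustrative only — the function name, the threshold value, and the use of scipy here are assumptions, not the exact LNT code):

from scipy.stats import mannwhitneyu

UNCHANGED_PASS, REGRESSED, IMPROVED = 'UNCHANGED_PASS', 'REGRESSED', 'IMPROVED'
MIN_PERCENTAGE_CHANGE = 0.01  # illustrative threshold, not LNT's actual value


def value_status(cur, prev, cur_hash, prev_hash, ignore_same_hash,
                 confidence_lv=0.05):
    # 1. Same binary hash: treat any delta as noise (this PR's change).
    if ignore_same_hash and cur_hash and prev_hash and cur_hash == prev_hash:
        return UNCHANGED_PASS
    # Aggregate with min, as LNT does for execution_time.
    cur_v, prev_v = min(cur), min(prev)
    pct_delta = (cur_v - prev_v) / prev_v
    # 2. Ignore changes below the minimum percentage threshold.
    if abs(pct_delta) < MIN_PERCENTAGE_CHANGE:
        return UNCHANGED_PASS
    # 3. With multiple samples, use Mann-Whitney U to discard likely noise.
    if len(cur) > 1 and len(prev) > 1:
        _, p_value = mannwhitneyu(cur, prev, alternative='two-sided')
        if p_value > confidence_lv:
            return UNCHANGED_PASS
    # Smaller execution time is better.
    return REGRESSED if pct_delta > 0 else IMPROVED


print(value_status([10.0], [5.0], 'abc', 'abc', ignore_same_hash=True))
# -> UNCHANGED_PASS: same hash, so the 2x slowdown is treated as noise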

                return UNCHANGED_PASS

        # Always ignore percentage changes below MIN_PERCENTAGE_CHANGE %; for
        # now, we just don't have enough time to investigate that level of stuff.
        if ignore_small and abs(self.pct_delta) < MIN_PERCENTAGE_CHANGE:
@@ -355,7 +364,8 @@ def get_comparison_result(self, runs, compare_runs, test_id, field,
                             prev_values, cur_hash, prev_hash,
                             cur_profile, prev_profile,
                             self.confidence_lv,
                             bigger_is_better=field.bigger_is_better,
                             ignore_same_hash=field.ignore_same_hash)
        return r

    def get_geomean_comparison_result(self, run, compare_to, field, tests):
@@ -385,7 +395,8 @@ def get_geomean_comparison_result(self, run, compare_to, field, tests):
                                cur_hash=cur_hash,
                                prev_hash=prev_hash,
                                confidence_lv=0,
                                bigger_is_better=field.bigger_is_better,
                                ignore_same_hash=field.ignore_same_hash)

    def _load_samples_for_runs(self, session, run_ids, only_tests):
        # Find the set of new runs to load.
1 change: 1 addition & 0 deletions schemas/nts.yaml
@@ -16,6 +16,7 @@ metrics:
    display_name: Execution Time
    unit: seconds
    unit_abbrev: s
    ignore_same_hash: true

Contributor:
If we go with this, it should also be applied to score for consistency.

  - name: execution_status
    type: Status
  - name: score
22 changes: 22 additions & 0 deletions tests/server/reporting/analysis.py
@@ -319,6 +319,28 @@ def test_handle_zero_sample(self):
                                      None, None)
        self.assertEqual(zeroSample.get_value_status(), UNCHANGED_PASS)

    def test_ignore_same_hash(self):
        """Test that ignore_same_hash ignores regressions with the same hash."""
        same_hash = ComparisonResult(min, False, False, [10.], [5.],
                                     'abc', 'abc', ignore_same_hash=True)
        self.assertEqual(same_hash.get_value_status(), UNCHANGED_PASS)
        self.assertFalse(same_hash.is_result_interesting())

        diff_hash = ComparisonResult(min, False, False, [10.], [5.],
                                     'abc', '123', ignore_same_hash=True)
        self.assertEqual(diff_hash.get_value_status(), REGRESSED)
        self.assertTrue(diff_hash.is_result_interesting())

        no_hash = ComparisonResult(min, False, False, [10.], [5.], None,
                                   '123', ignore_same_hash=True)
        self.assertEqual(no_hash.get_value_status(), REGRESSED)
        self.assertTrue(no_hash.is_result_interesting())

        disabled = ComparisonResult(min, False, False, [10.], [5.],
                                    'abc', 'abc', ignore_same_hash=False)
        self.assertEqual(disabled.get_value_status(), REGRESSED)
        self.assertTrue(disabled.is_result_interesting())


class AbsMinTester(unittest.TestCase):

2 changes: 2 additions & 0 deletions tests/server/ui/test_api.py
@@ -290,6 +290,8 @@ def test_schema(self):
                m['display_name'] = m['name']
            if 'bigger_is_better' not in m:
                m['bigger_is_better'] = False
            if 'ignore_same_hash' not in m:
                m['ignore_same_hash'] = False
        yaml_schema['metrics'].sort(key=lambda x: x['name'])
        yaml_schema['run_fields'].sort(key=lambda x: x['name'])
        yaml_schema['machine_fields'].sort(key=lambda x: x['name'])