diff --git a/src/dbt_score/formatters/human_readable_formatter.py b/src/dbt_score/formatters/human_readable_formatter.py
index b1318b8..8cd10d1 100644
--- a/src/dbt_score/formatters/human_readable_formatter.py
+++ b/src/dbt_score/formatters/human_readable_formatter.py
@@ -35,7 +35,7 @@ def model_evaluated(
             self._failed_models.append((model, score))
         if (
             score.value < self._config.fail_any_model_under
-            or score.value < self._config.fail_project_under
+            or any(isinstance(result, RuleViolation) for result in results.values())
             or self._config.show_all
         ):
             print(
diff --git a/tests/formatters/test_human_readable_formatter.py b/tests/formatters/test_human_readable_formatter.py
index 7649553..43cb728 100644
--- a/tests/formatters/test_human_readable_formatter.py
+++ b/tests/formatters/test_human_readable_formatter.py
@@ -26,7 +26,14 @@ def test_human_readable_formatter_model(
     }
     formatter.model_evaluated(model1, results, Score(10.0, "🥇"))
     stdout = capsys.readouterr().out
-    assert stdout == ""
+    assert (
+        stdout
+        == """🥇 \x1b[1mmodel1\x1b[0m (score: 10.0)
+    \x1b[1;31mERR \x1b[0m tests.conftest.rule_severity_medium: Oh noes
+    \x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error
+
+"""
+    )
 
 
 def test_human_readable_formatter_model_show_all(
@@ -147,14 +154,18 @@ def test_human_readable_formatter_low_model_score(
     )
 
 
-def test_human_readable_formatter_low_project_score(
+def test_human_readable_formatter_low_project_score_high_model_score(
     capsys,
     default_config,
     manifest_loader,
     model1,
     rule_severity_critical,
 ):
-    """Ensure the formatter has the correct output when the projet has a low score."""
+    """Ensure the formatter has the correct output when the project has a low score.
+
+    If the model itself has a high score, the `show_all` flag must be passed
+    to make it visible.
+    """
     default_config.overload({"show_all": True})
     formatter = HumanReadableFormatter(
         manifest_loader=manifest_loader, config=default_config