diff --git a/Orange/widgets/evaluate/tests/test_owpredictions.py b/Orange/widgets/evaluate/tests/test_owpredictions.py
index 302e7f5b9f7..7ddae006d4f 100644
--- a/Orange/widgets/evaluate/tests/test_owpredictions.py
+++ b/Orange/widgets/evaluate/tests/test_owpredictions.py
@@ -877,6 +877,27 @@ def test_change_target(self):
         self.assertEqual(table.model.columnCount(), 4)
         self.assertEqual(float(table.model.data(table.model.index(0, 3))),
                          idx)
+
+    def test_multi_target_input(self):
+        widget = self.widget
+
+        domain = Domain([ContinuousVariable('var1')],
+                        class_vars=[
+                            ContinuousVariable('c1'),
+                            DiscreteVariable('c2', values=('no', 'yes'))
+                        ])
+        data = Table.from_list(domain, [[1, 5, 0], [2, 10, 1]])
+
+        mock_model = Mock(spec=Model, return_value=np.asarray([0.2, 0.1]))
+        mock_model.name = 'Mockery'
+        mock_model.domain = domain
+        mock_learner = Mock(return_value=mock_model)
+        model = mock_learner(data)
+
+        self.send_signal(widget.Inputs.data, data)
+        self.send_signal(widget.Inputs.predictors, model, 1)
+        pred = self.get_output(widget.Outputs.predictions)
+        self.assertIsInstance(pred, Table)
 
     def test_report(self):
         widget = self.widget
diff --git a/Orange/widgets/evaluate/tests/test_owtestandscore.py b/Orange/widgets/evaluate/tests/test_owtestandscore.py
index 08ac61ae561..191f15a4039 100644
--- a/Orange/widgets/evaluate/tests/test_owtestandscore.py
+++ b/Orange/widgets/evaluate/tests/test_owtestandscore.py
@@ -16,7 +16,7 @@
 from Orange.evaluation import Results, TestOnTestData, scoring
 from Orange.evaluation.scoring import ClassificationScore, RegressionScore, \
     Score
-from Orange.base import Learner
+from Orange.base import Learner, Model
 from Orange.modelling import ConstantLearner
 from Orange.regression import MeanLearner
 from Orange.widgets.evaluate.owtestandscore import (
@@ -720,6 +720,42 @@ def test_copy_to_clipboard(self):
                                    for i in (0, 3, 4, 5, 6, 7)]) + "\r\n"
         self.assertEqual(clipboard_text, view_text)
 
+    def test_multi_target_input(self):
+        class NewScorer(Score):
+            class_types = (
+                ContinuousVariable,
+                DiscreteVariable,
+            )
+            problem_type = 'new_problem_type'
+
+            def compute_score(self, results):
+                return [0.75]
+
+        domain = Domain([ContinuousVariable('var1')],
+                        class_vars=[
+                            ContinuousVariable('c1'),
+                            DiscreteVariable('c2', values=('no', 'yes'))
+                        ])
+        data = Table.from_list(domain, [[1, 5, 0], [2, 10, 1], [2, 10, 1]])
+        data.attributes = {'problem_type': 'new_problem_type'}
+
+        mock_model = Mock(spec=Model,
+                          return_value=np.asarray([[0.2, 0.1, 0.2]]))
+        mock_model.name = 'Mockery'
+        mock_model.domain = domain
+        mock_learner = Mock(spec=Learner, return_value=mock_model)
+        mock_learner.name = 'Mockery'
+
+        self.widget.resampling = OWTestAndScore.TestOnTrain
+        self.send_signal(self.widget.Inputs.train_data, data)
+        self.send_signal(self.widget.Inputs.learner, MajorityLearner(), 0)
+        self.send_signal(self.widget.Inputs.learner, mock_learner, 1)
+        _ = self.get_output(self.widget.Outputs.evaluations_results,
+                            wait=5000)
+        self.assertEqual(len(self.widget.scorers), 1)
+        self.assertIn(NewScorer, self.widget.scorers)
+        self.assertEqual(len(self.widget._successful_slots()), 1)
+
 
 class TestHelpers(unittest.TestCase):
     def test_results_one_vs_rest(self):