diff --git a/Orange/widgets/evaluate/owpredictions.py b/Orange/widgets/evaluate/owpredictions.py
index da983b4c436..7eb4c47a38a 100644
--- a/Orange/widgets/evaluate/owpredictions.py
+++ b/Orange/widgets/evaluate/owpredictions.py
@@ -6,9 +6,8 @@
import numpy
from AnyQt.QtWidgets import (
- QTableView, QListWidget, QSplitter, QToolTip, QStyle, QApplication,
- QSizePolicy
-)
+ QTableView, QSplitter, QToolTip, QStyle, QApplication, QSizePolicy,
+ QPushButton)
from AnyQt.QtGui import QPainter, QStandardItem, QPen, QColor
from AnyQt.QtCore import (
Qt, QSize, QRect, QRectF, QPoint, QLocale,
@@ -51,7 +50,7 @@ class OWPredictions(OWWidget):
description = "Display predictions of models for an input dataset."
keywords = []
- buttons_area_orientation = None
+ want_control_area = False
class Inputs:
data = Input("Data", Orange.data.Table)
@@ -76,8 +75,21 @@ class Error(OWWidget.Error):
score_table = settings.SettingProvider(ScoreTable)
#: List of selected class value indices in the `class_values` list
- selected_classes = settings.ContextSetting([])
+    PROB_OPTS = ["(None)",
+                 "Classes in data", "Classes known to the model",
+                 "Classes in data and model"]
+    PROB_TOOLTIPS = ["Don't show probabilities",
+                     "Show probabilities for classes in the data",
+                     "Show probabilities for classes known to the model,\n"
+                     "including those that don't appear in this data",
+                     "Show probabilities for classes in data that are also\n"
+                     "known to the model"]
+ NO_PROBS, DATA_PROBS, MODEL_PROBS, BOTH_PROBS = range(4)
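+    # Values of `shown_probs` at or above len(PROB_OPTS) select a single
+    # class: len(PROB_OPTS) + i shows probabilities for the i-th class value.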
+ shown_probs = settings.ContextSetting(NO_PROBS)
selection = settings.Setting([], schema_only=True)
+ show_scores = settings.Setting(True)
+ TARGET_AVERAGE = "(Average over classes)"
+ target_class = settings.ContextSetting(TARGET_AVERAGE)
def __init__(self):
super().__init__()
@@ -86,22 +98,42 @@ def __init__(self):
self.predictors = [] # type: List[PredictorSlot]
self.class_values = [] # type: List[str]
self._delegates = []
+ self.scorer_errors = []
self.left_width = 10
self.selection_store = None
self.__pending_selection = self.selection
- controlBox = gui.vBox(self.controlArea, "Show probabilities for")
+ self._prob_controls = []
- gui.listBox(controlBox, self, "selected_classes", "class_values",
- callback=self._update_prediction_delegate,
- selectionMode=QListWidget.ExtendedSelection,
- sizePolicy=(QSizePolicy.Preferred, QSizePolicy.MinimumExpanding),
- sizeHint=QSize(1, 350),
- minimumHeight=100)
- self.reset_button = gui.button(
- controlBox, self, "Restore Original Order",
- callback=self._reset_order,
- tooltip="Show rows in the original order")
+ predopts = gui.hBox(
+ None, sizePolicy=(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed))
+ self._prob_controls = [
+ gui.widgetLabel(predopts, "Show probabilities for"),
+ gui.comboBox(
+ predopts, self, "shown_probs", contentsLength=30,
+ callback=self._update_prediction_delegate)
+ ]
+ gui.rubber(predopts)
+ self.reset_button = button = QPushButton("Restore Original Order")
+ button.clicked.connect(self._reset_order)
+ button.setToolTip("Show rows in the original order")
+ button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+ predopts.layout().addWidget(self.reset_button)
+
+ self.score_opt_box = scoreopts = gui.hBox(
+ None, sizePolicy=(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed))
+ gui.checkBox(
+            scoreopts, self, "show_scores", "Show performance scores",
+ callback=self._update_score_table_visibility
+ )
+ gui.separator(scoreopts, 32)
+ self._target_controls = [
+ gui.widgetLabel(scoreopts, "Target class:"),
+ gui.comboBox(
+ scoreopts, self, "target_class", items=[], contentsLength=30,
+ sendSelectedValue=True, callback=self._on_target_changed)
+ ]
+ gui.rubber(scoreopts)
table_opts = dict(horizontalScrollBarPolicy=Qt.ScrollBarAlwaysOn,
horizontalScrollMode=QTableView.ScrollPerPixel,
@@ -136,9 +168,12 @@ def __init__(self):
self.splitter.addWidget(self.dataview)
self.score_table = ScoreTable(self)
- self.vsplitter = gui.vBox(self.mainArea)
- self.vsplitter.layout().addWidget(self.splitter)
- self.vsplitter.layout().addWidget(self.score_table.view)
+ self.mainArea.layout().setSpacing(0)
+ self.mainArea.layout().setContentsMargins(4, 0, 4, 4)
+ self.mainArea.layout().addWidget(predopts)
+ self.mainArea.layout().addWidget(self.splitter)
+ self.mainArea.layout().addWidget(scoreopts)
+ self.mainArea.layout().addWidget(self.score_table.view)
def get_selection_store(self, model):
# Both models map the same, so it doesn't matter which one is used
@@ -151,6 +186,7 @@ def get_selection_store(self, model):
@check_sql_input
def set_data(self, data):
self.Warning.empty_data(shown=data is not None and not data)
+ self.closeContext()
self.data = data
self.selection_store = None
if not data:
@@ -177,6 +213,9 @@ def set_data(self, data):
self._update_data_sort_order, self.dataview,
self.predictionsview))
+ self._set_target_combos()
+ if self.is_discrete_class:
+ self.openContext(self.class_var.values)
self._invalidate_predictions()
def _store_selection(self):
@@ -184,7 +223,11 @@ def _store_selection(self):
@property
def class_var(self):
- return self.data and self.data.domain.class_var
+ return self.data is not None and self.data.domain.class_var
+
+ @property
+ def is_discrete_class(self):
+ return bool(self.class_var) and self.class_var.is_discrete
@Inputs.predictors
def set_predictor(self, index, predictor: Model):
@@ -202,24 +245,47 @@ def insert_predictor(self, index, predictor: Model):
def remove_predictor(self, index):
self.predictors.pop(index)
+ def _set_target_combos(self):
+ prob_combo = self.controls.shown_probs
+ target_combo = self.controls.target_class
+ prob_combo.clear()
+ target_combo.clear()
+
+ self._update_control_visibility()
+
+ # Set these to prevent warnings when setting self.shown_probs
+ target_combo.addItem(self.TARGET_AVERAGE)
+ prob_combo.addItems(self.PROB_OPTS)
+
+ if self.is_discrete_class:
+ target_combo.addItems(self.class_var.values)
+ prob_combo.addItems(self.class_var.values)
+ for i, tip in enumerate(self.PROB_TOOLTIPS):
+ prob_combo.setItemData(i, tip, Qt.ToolTipRole)
+ self.shown_probs = self.DATA_PROBS
+ self.target_class = self.TARGET_AVERAGE
+ else:
+ self.shown_probs = self.NO_PROBS
+
+ def _update_control_visibility(self):
+ for widget in self._prob_controls:
+ widget.setVisible(self.is_discrete_class)
+
+ for widget in self._target_controls:
+ widget.setVisible(self.is_discrete_class and self.show_scores)
+
+ self.score_opt_box.setVisible(bool(self.class_var))
+
def _set_class_values(self):
- class_values = []
+ self.class_values = []
+ if self.is_discrete_class:
+ self.class_values += self.data.domain.class_var.values
for slot in self.predictors:
class_var = slot.predictor.domain.class_var
- if class_var and class_var.is_discrete:
+ if class_var.is_discrete:
for value in class_var.values:
- if value not in class_values:
- class_values.append(value)
-
- if self.class_var and self.class_var.is_discrete:
- values = self.class_var.values
- self.class_values = sorted(
- class_values, key=lambda val: val not in values)
- self.selected_classes = [
- i for i, name in enumerate(class_values) if name in values]
- else:
- self.class_values = class_values # This assignment updates listview
- self.selected_classes = []
+ if value not in self.class_values:
+ self.class_values.append(value)
def handleNewSignals(self):
# Disconnect the model: the model and the delegate will be inconsistent
@@ -233,6 +299,9 @@ def handleNewSignals(self):
self._set_errors()
self.commit()
+ def _on_target_changed(self):
+ self._update_scores()
+
def _call_predictors(self):
if not self.data:
return
@@ -283,17 +352,20 @@ def _call_predictors(self):
def _update_scores(self):
model = self.score_table.model
+ if self.is_discrete_class and self.target_class != self.TARGET_AVERAGE:
+ target = self.class_var.values.index(self.target_class)
+ else:
+ target = None
model.clear()
scorers = usable_scorers(self.class_var) if self.class_var else []
self.score_table.update_header(scorers)
- errors = []
+ self.scorer_errors = errors = []
for pred in self.predictors:
results = pred.results
if not isinstance(results, Results) or results.predicted is None:
continue
row = [QStandardItem(learner_name(pred.predictor)),
QStandardItem("N/A"), QStandardItem("N/A")]
-
try:
actual = results.actual
predicted = results.predicted
@@ -311,7 +383,8 @@ def _update_scores(self):
item.setText("NA")
else:
try:
- score = scorer_caller(scorer, results)()[0]
+ score = scorer_caller(scorer, results,
+ target=target)()[0]
item.setText(f"{score:.3f}")
except Exception as exc: # pylint: disable=broad-except
item.setToolTip(str(exc))
@@ -326,17 +399,26 @@ def _update_scores(self):
results.predicted = predicted
results.probabilities = probabilities
+ self._update_score_table_visibility()
+
+ def _update_score_table_visibility(self):
+ self._update_control_visibility()
view = self.score_table.view
- if model.rowCount():
+ nmodels = self.score_table.model.rowCount()
+ if nmodels and self.show_scores:
view.setVisible(True)
view.ensurePolished()
+ view.resizeColumnsToContents()
+ view.resizeRowsToContents()
view.setFixedHeight(
5 + view.horizontalHeader().height() +
- view.verticalHeader().sectionSize(0) * model.rowCount())
+ view.verticalHeader().sectionSize(0) * nmodels)
+
+ errors = "\n".join(self.scorer_errors)
+ self.Error.scorer_failed(errors, shown=bool(errors))
else:
view.setVisible(False)
-
- self.Error.scorer_failed("\n".join(errors), shown=bool(errors))
+ self.Error.scorer_failed.clear()
def _set_errors(self):
# Not all predictors are run every time, so errors can't be collected
@@ -438,11 +520,13 @@ def _update_predictions_model(self):
hheader.setSectionsClickable(True)
def _update_data_sort_order(self, sort_source_view, sort_dest_view):
- sort_source_view.horizontalHeader().setSortIndicatorShown(True)
+ sort_source = sort_source_view.model()
+ sort_dest = sort_dest_view.model()
+
+ sort_source_view.horizontalHeader().setSortIndicatorShown(
+ sort_source.sortColumn() != -1)
sort_dest_view.horizontalHeader().setSortIndicatorShown(False)
- sort_dest = sort_dest_view.model()
- sort_source = sort_source_view.model()
if sort_dest is not None:
if sort_source is not None and sort_source.sortColumn() >= 0:
sort_dest.setSortIndices(sort_source.mapToSourceRows(...))
@@ -517,20 +601,33 @@ def _get_colors(self):
def _update_prediction_delegate(self):
self._delegates.clear()
colors = self._get_colors()
+ shown_class = "" # just to silence warnings about undefined var
+ if self.shown_probs == self.NO_PROBS:
+ tooltip_probs = ()
+ elif self.shown_probs == self.DATA_PROBS:
+ tooltip_probs = self.class_var.values
+ elif self.shown_probs >= len(self.PROB_OPTS):
+ shown_class = self.class_var.values[self.shown_probs
+ - len(self.PROB_OPTS)]
+ tooltip_probs = (shown_class, )
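+        # For MODEL_PROBS and BOTH_PROBS the tooltip depends on the classes
+        # known to each model, so it is set inside the loop below.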
+ sort_col_indices = []
for col, slot in enumerate(self.predictors):
target = slot.predictor.domain.class_var
- shown_probs = (
- () if target.is_continuous else
- [val if self.class_values[val] in target.values else None
- for val in self.selected_classes]
- )
- delegate = PredictionsItemDelegate(
- None if target.is_continuous else self.class_values,
- colors,
- shown_probs,
- target.format_str if target.is_continuous else None,
- parent=self.predictionsview
- )
+ if target.is_continuous:
+ delegate = PredictionsItemDelegate(
+ None, colors, (), (), target.format_str,
+ parent=self.predictionsview)
+ sort_col_indices.append(None)
+ else:
+ shown_probs = self._shown_prob_indices(target, in_target=True)
+ if self.shown_probs in (self.MODEL_PROBS, self.BOTH_PROBS):
+ tooltip_probs = [self.class_values[i]
+ for i in shown_probs if i is not None]
+ delegate = PredictionsItemDelegate(
+ self.class_values, colors, shown_probs, tooltip_probs,
+ parent=self.predictionsview)
+ sort_col_indices.append([col for col in shown_probs
+ if col is not None])
# QAbstractItemView does not take ownership of delegates, so we must
self._delegates.append(delegate)
self.predictionsview.setItemDelegateForColumn(col, delegate)
@@ -539,7 +636,27 @@ def _update_prediction_delegate(self):
self.predictionsview.resizeColumnsToContents()
self._recompute_splitter_sizes()
if self.predictionsview.model() is not None:
- self.predictionsview.model().setProbInd(self.selected_classes)
+ self.predictionsview.model().setProbInd(sort_col_indices)
+
+ def _shown_prob_indices(self, target: DiscreteVariable, in_target):
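+        # With in_target=True, classes unknown to the model map to None (the
+        # view shows a dash for them); with in_target=False their indices are
+        # kept, so the output can include zero-probability columns for them.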
+ if self.shown_probs == self.NO_PROBS:
+ values = []
+ elif self.shown_probs == self.DATA_PROBS:
+ values = self.class_var.values
+ elif self.shown_probs == self.MODEL_PROBS:
+ values = target.values
+ elif self.shown_probs == self.BOTH_PROBS:
+ # Don't use set intersection because it's unordered!
+ values = (value for value in self.class_var.values
+ if value in target.values)
+ else:
+ shown_cls_idx = self.shown_probs - len(self.PROB_OPTS)
+ values = [self.class_var.values[shown_cls_idx]]
+
+ return [self.class_values.index(value)
+ if not in_target or value in target.values
+ else None
+ for value in values]
def _recompute_splitter_sizes(self):
if not self.data:
@@ -575,7 +692,7 @@ def _commit_evaluation_results(self):
results.actual = data.Y.ravel()
results.predicted = numpy.vstack(
tuple(p.results.predicted[0][~nanmask] for p in slots))
- if self.class_var and self.class_var.is_discrete:
+ if self.is_discrete_class:
results.probabilities = numpy.array(
[p.results.probabilities[0][~nanmask] for p in slots])
results.learner_names = [p.name for p in slots]
@@ -610,7 +727,7 @@ def _commit_predictions(self):
predictions = self.data.transform(domain)
if newcolumns:
newcolumns = numpy.hstack(
- [numpy.atleast_2d(cols) for cols in newcolumns])
+ [col.reshape((-1, 1)) for col in newcolumns])
with predictions.unlocked(predictions.metas):
predictions.metas[:, -newcolumns.shape[1]:] = newcolumns
@@ -630,23 +747,30 @@ def _commit_predictions(self):
predictions = predictions[datamodel.mapToSourceRows(...)]
self.Outputs.predictions.send(predictions)
- @staticmethod
- def _add_classification_out_columns(slot, newmetas, newcolumns):
- # Mapped or unmapped predictions?!
- # Or provide a checkbox so the user decides?
+ def _add_classification_out_columns(self, slot, newmetas, newcolumns):
pred = slot.predictor
name = pred.name
values = pred.domain.class_var.values
+ probs = slot.results.unmapped_probabilities
+
+ # Column with class prediction
newmetas.append(DiscreteVariable(name=name, values=values))
- newcolumns.append(slot.results.unmapped_predicted.reshape(-1, 1))
- newmetas += [ContinuousVariable(name=f"{name} ({value})")
- for value in values]
- newcolumns.append(slot.results.unmapped_probabilities)
+ newcolumns.append(slot.results.unmapped_predicted)
+
+ # Columns with probability predictions (same as shown in the view)
+ for cls_idx in self._shown_prob_indices(pred.domain.class_var,
+ in_target=False):
+ value = self.class_values[cls_idx]
+ newmetas.append(ContinuousVariable(f"{name} ({value})"))
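+            # Classes unknown to this model get a column of zeros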
+ if value in values:
+ newcolumns.append(probs[:, values.index(value)])
+ else:
+ newcolumns.append(numpy.zeros(probs.shape[0]))
@staticmethod
def _add_regression_out_columns(slot, newmetas, newcolumns):
newmetas.append(ContinuousVariable(name=slot.predictor.name))
- newcolumns.append(slot.results.unmapped_predicted.reshape((-1, 1)))
+ newcolumns.append(slot.results.unmapped_predicted)
def send_report(self):
def merge_data_with_predictions():
@@ -682,15 +806,26 @@ def merge_data_with_predictions():
if self.data:
             text = self._get_details().replace('\n', '<br>')
-        if self.selected_classes:
-            text += '<br>Showing probabilities for: '
-            text += ', '. join([self.class_values[i]
-                                for i in self.selected_classes])
+        if self.is_discrete_class and self.shown_probs != self.NO_PROBS:
+            text += '<br>Showing probabilities for '
+ if self.shown_probs == self.MODEL_PROBS:
+ text += "all classes known to the model"
+ elif self.shown_probs == self.DATA_PROBS:
+ text += "all classes that appear in the data"
+ elif self.shown_probs == self.BOTH_PROBS:
+ text += "all classes that appear in the data " \
+ "and are known to the model"
+ else:
+ class_idx = self.shown_probs - len(self.PROB_OPTS)
+ text += f"'{self.class_var.values[class_idx]}'"
self.report_paragraph('Info', text)
self.report_table("Data & Predictions", merge_data_with_predictions(),
header_rows=1, header_columns=1)
- self.report_table("Scores", self.score_table.view)
+ self.report_name("Scores")
+ if self.is_discrete_class:
+ self.report_items([("Target class", self.target_class)])
+ self.report_table(self.score_table.view)
def resizeEvent(self, event):
super().resizeEvent(event)
@@ -723,6 +858,7 @@ class PredictionsItemDelegate(ItemDelegate):
def __init__(
self, class_values, colors, shown_probabilities=(),
+ tooltip_probabilities=(),
target_format=None, parent=None,
):
super().__init__(parent)
@@ -730,9 +866,9 @@ def __init__(
self.colors = [QColor(*c) for c in colors]
self.target_format = target_format # target format for cont. vars
self.shown_probabilities = self.fmt = self.tooltip = None # set below
- self.setFormat(shown_probabilities)
+ self.setFormat(shown_probabilities, tooltip_probabilities)
- def setFormat(self, shown_probabilities=()):
+ def setFormat(self, shown_probabilities=(), tooltip_probabilities=()):
self.shown_probabilities = shown_probabilities
if self.class_values is None:
# is continuous class
@@ -743,13 +879,10 @@ def setFormat(self, shown_probabilities=()):
for i in shown_probabilities)]
* bool(shown_probabilities)
+ ["{value!s}"])
- self.tooltip = ""
- if shown_probabilities:
- val = ', '.join(
- self.class_values[i] if i is not None else "-"
- for i in shown_probabilities if i is not None
- )
- self.tooltip = f"p({val})"
+ if tooltip_probabilities:
+ self.tooltip = f"p({', '.join(tooltip_probabilities)})"
+ else:
+ self.tooltip = ""
def displayText(self, value, _):
try:
@@ -904,17 +1037,20 @@ def headerData(self, section, orientation, role=Qt.DisplayRole):
return self._header[section]
return None
- def setProbInd(self, indices):
- self.__probInd = indices
- self.sort(self.sortColumn())
+ def setProbInd(self, indicess):
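+        # One list of probability column indices per model column, or None
+        # for regression models; used to build the sort key in sortColumnData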
+ self.__probInd = indicess
+ self.sort(self.sortColumn(), self.sortOrder())
def sortColumnData(self, column):
values = self._values[column]
probs = self._probs[column]
# Let us assume that probs can be None, numpy array or list of arrays
+        # self.__probInd[column] can be None (numeric) or empty (no probs
+        # shown for a particular model)
if probs is not None and len(probs) and len(probs[0]) \
- and self.__probInd is not None and len(self.__probInd):
- return probs[:, self.__probInd]
+ and self.__probInd is not None \
+ and self.__probInd[column]:
+ return probs[:, self.__probInd[column]]
else:
return values
@@ -924,8 +1060,8 @@ def sort(self, column, order=Qt.AscendingOrder):
# PredictionsModel and DataModel have the same signal and sort method, but
-# extracting it into a common base class would require diamond inheritance
-# because they're derived from different classes. It's not worth it.
+# extracting them into a mixin (they derive from different base classes)
+# would be more complicated than this bit of code repetition.
class DataModel(TableModel):
list_sorted = pyqtSignal()
@@ -1195,7 +1331,7 @@ def sizeHintForColumn(self, column):
def tool_tip(value):
value, dist = value
if dist is not None:
- return "{!s} {!s}".format(value, dist)
+            return f"{value!s} {dist!s}"
else:
return str(value)
@@ -1233,5 +1369,5 @@ def pred_error(data, *args, **kwargs):
predictors_ = [pred_error]
WidgetPreview(OWPredictions).run(
- set_data=iris2,
+ set_data=iris,
insert_predictor=list(enumerate(predictors_)))
diff --git a/Orange/widgets/evaluate/tests/test_owpredictions.py b/Orange/widgets/evaluate/tests/test_owpredictions.py
index 26fbfbfdd59..9273c617181 100644
--- a/Orange/widgets/evaluate/tests/test_owpredictions.py
+++ b/Orange/widgets/evaluate/tests/test_owpredictions.py
@@ -2,16 +2,18 @@
# pylint: disable=protected-access
import io
import unittest
-from unittest.mock import Mock
+from unittest.mock import Mock, patch
import numpy as np
from AnyQt.QtCore import QItemSelectionModel, QItemSelection, Qt
from Orange.base import Model
-from Orange.classification import LogisticRegressionLearner
+from Orange.classification import LogisticRegressionLearner, NaiveBayesLearner
from Orange.data.io import TabReader
-from Orange.regression import LinearRegressionLearner
+from Orange.evaluation.scoring import TargetScore
+from Orange.preprocess import Remove
+from Orange.regression import LinearRegressionLearner, MeanLearner
from Orange.widgets.tests.base import WidgetTest
from Orange.widgets.evaluate.owpredictions import (
OWPredictions, SharedSelectionModel, SharedSelectionStore, DataModel,
@@ -21,7 +23,7 @@
from Orange.widgets.evaluate.owliftcurve import OWLiftCurve
from Orange.widgets.evaluate.owrocanalysis import OWROCAnalysis
-from Orange.data import Table, Domain, DiscreteVariable
+from Orange.data import Table, Domain, DiscreteVariable, ContinuousVariable
from Orange.modelling import ConstantLearner, TreeLearner
from Orange.evaluation import Results
from Orange.widgets.tests.utils import excepthook_catch, \
@@ -577,6 +579,320 @@ def test_missing_target_reg(self):
self.assertFalse(self.widget.Warning.missing_targets.is_shown())
self.assertFalse(self.widget.Error.scorer_failed.is_shown())
+ def _mock_predictors(self):
+ def pred(values):
+ slot = Mock()
+ slot.predictor.domain.class_var = DiscreteVariable("c", tuple(values))
+ return slot
+
+ def predc():
+ slot = Mock()
+ slot.predictor.domain.class_var = ContinuousVariable("c")
+ return slot
+
+ widget = self.widget
+ model = Mock()
+ model.setProbInd = Mock()
+ widget.predictionsview.model = Mock(return_value=model)
+
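+        # Four mock classifiers that know different subsets of classes
+        # ("abc", "ab", "cbd", "e") and one mock regressor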
+ widget.predictors = \
+ [pred(values) for values in ("abc", "ab", "cbd", "e")] + [predc()]
+
+ def test_update_prediction_delegate_discrete(self):
+ self._mock_predictors()
+
+ widget = self.widget
+ prob_combo = widget.controls.shown_probs
+ set_prob_ind = widget.predictionsview.model().setProbInd
+
+ widget.data = Table.from_list(
+ Domain([], DiscreteVariable("c", values=tuple("abc"))), [])
+
+ widget._update_control_visibility()
+ self.assertFalse(prob_combo.isHidden())
+
+ widget._set_class_values()
+ self.assertEqual(widget.class_values, list("abcde"))
+
+ widget._set_target_combos()
+ self.assertEqual(
+ [prob_combo.itemText(i) for i in range(prob_combo.count())],
+ widget.PROB_OPTS + list("abc"))
+
+ widget.shown_probs = widget.NO_PROBS
+ widget._update_prediction_delegate()
+ for delegate in widget._delegates:
+ self.assertEqual(list(delegate.shown_probabilities), [])
+ self.assertEqual(delegate.tooltip, "")
+ set_prob_ind.assert_called_with([[], [], [], [], None])
+
+ widget.shown_probs = widget.DATA_PROBS
+ widget._update_prediction_delegate()
+ self.assertEqual(widget._delegates[0].shown_probabilities, [0, 1, 2])
+        self.assertEqual(widget._delegates[1].shown_probabilities, [0, 1, None])
+ self.assertEqual(widget._delegates[2].shown_probabilities, [None, 1, 2])
+ self.assertEqual(widget._delegates[3].shown_probabilities, [None, None, None])
+ self.assertEqual(widget._delegates[4].shown_probabilities, ())
+ self.assertEqual(widget._delegates[4].tooltip, "")
+ for delegate in widget._delegates[:-1]:
+ self.assertEqual(delegate.tooltip, "p(a, b, c)")
+ set_prob_ind.assert_called_with([[0, 1, 2], [0, 1], [1, 2], [], None])
+
+ widget.shown_probs = widget.MODEL_PROBS
+ widget._update_prediction_delegate()
+ self.assertEqual(widget._delegates[0].shown_probabilities, [0, 1, 2])
+ self.assertEqual(widget._delegates[0].tooltip, "p(a, b, c)")
+ self.assertEqual(widget._delegates[1].shown_probabilities, [0, 1])
+ self.assertEqual(widget._delegates[1].tooltip, "p(a, b)")
+ self.assertEqual(widget._delegates[2].shown_probabilities, [2, 1, 3])
+ self.assertEqual(widget._delegates[2].tooltip, "p(c, b, d)")
+ self.assertEqual(widget._delegates[3].shown_probabilities, [4])
+ self.assertEqual(widget._delegates[3].tooltip, "p(e)")
+ self.assertEqual(widget._delegates[4].shown_probabilities, ())
+ self.assertEqual(widget._delegates[4].tooltip, "")
+ set_prob_ind.assert_called_with([[0, 1, 2], [0, 1], [2, 1, 3], [4], None])
+
+ widget.shown_probs = widget.BOTH_PROBS
+ widget._update_prediction_delegate()
+ self.assertEqual(widget._delegates[0].shown_probabilities, [0, 1, 2])
+ self.assertEqual(widget._delegates[0].tooltip, "p(a, b, c)")
+ self.assertEqual(widget._delegates[1].shown_probabilities, [0, 1])
+ self.assertEqual(widget._delegates[1].tooltip, "p(a, b)")
+ self.assertEqual(widget._delegates[2].shown_probabilities, [1, 2])
+ self.assertEqual(widget._delegates[2].tooltip, "p(b, c)")
+ self.assertEqual(widget._delegates[3].shown_probabilities, [])
+ self.assertEqual(widget._delegates[3].tooltip, "")
+ self.assertEqual(widget._delegates[4].shown_probabilities, ())
+ self.assertEqual(widget._delegates[4].tooltip, "")
+ set_prob_ind.assert_called_with([[0, 1, 2], [0, 1], [1, 2], [], None])
+
+ n_fixed = len(widget.PROB_OPTS)
+ widget.shown_probs = n_fixed # a
+ widget._update_prediction_delegate()
+ self.assertEqual(widget._delegates[0].shown_probabilities, [0])
+ self.assertEqual(widget._delegates[1].shown_probabilities, [0])
+ self.assertEqual(widget._delegates[2].shown_probabilities, [None])
+ self.assertEqual(widget._delegates[3].shown_probabilities, [None])
+ self.assertEqual(widget._delegates[4].shown_probabilities, ())
+ for delegate in widget._delegates[:-1]:
+ self.assertEqual(delegate.tooltip, "p(a)")
+ set_prob_ind.assert_called_with([[0], [0], [], [], None])
+
+ n_fixed = len(widget.PROB_OPTS)
+ widget.shown_probs = n_fixed + 1 # b
+ widget._update_prediction_delegate()
+ self.assertEqual(widget._delegates[0].shown_probabilities, [1])
+ self.assertEqual(widget._delegates[1].shown_probabilities, [1])
+ self.assertEqual(widget._delegates[2].shown_probabilities, [1])
+ self.assertEqual(widget._delegates[3].shown_probabilities, [None])
+ self.assertEqual(widget._delegates[4].shown_probabilities, ())
+ for delegate in widget._delegates[:-1]:
+ self.assertEqual(delegate.tooltip, "p(b)")
+ set_prob_ind.assert_called_with([[1], [1], [1], [], None])
+
+ n_fixed = len(widget.PROB_OPTS)
+ widget.shown_probs = n_fixed + 2 # c
+ widget._update_prediction_delegate()
+ self.assertEqual(widget._delegates[0].shown_probabilities, [2])
+ self.assertEqual(widget._delegates[1].shown_probabilities, [None])
+ self.assertEqual(widget._delegates[2].shown_probabilities, [2])
+ self.assertEqual(widget._delegates[3].shown_probabilities, [None])
+ self.assertEqual(widget._delegates[4].shown_probabilities, ())
+ for delegate in widget._delegates[:-1]:
+ self.assertEqual(delegate.tooltip, "p(c)")
+ set_prob_ind.assert_called_with([[2], [], [2], [], None])
+
+ def test_update_delegates_continuous(self):
+ self._mock_predictors()
+
+ widget = self.widget
+ widget.shown_probs = widget.DATA_PROBS
+ set_prob_ind = widget.predictionsview.model().setProbInd
+
+ widget.data = Table.from_list(Domain([], ContinuousVariable("c")), [])
+
+ widget._update_control_visibility()
+ self.assertTrue(widget.controls.shown_probs.isHidden())
+ self.assertTrue(widget.controls.target_class.isHidden())
+
+ widget._set_class_values()
+ self.assertEqual(widget.class_values, list("abcde"))
+
+ widget._set_target_combos()
+ self.assertEqual(widget.shown_probs, widget.NO_PROBS)
+
+ widget._update_prediction_delegate()
+ for delegate in widget._delegates:
+ self.assertEqual(list(delegate.shown_probabilities), [])
+ self.assertEqual(delegate.tooltip, "")
+ set_prob_ind.assert_called_with([[], [], [], [], None])
+
+ class _Scorer(TargetScore):
+ # pylint: disable=arguments-differ
+ def compute_score(self, _, target, **__):
+ return [42 if target is None else target]
+
+ def test_output_wrt_shown_probs_1(self):
+ """Data has one class less, models have same, different or one more"""
+ widget = self.widget
+ iris012 = self.iris
+ purge = Remove(class_flags=Remove.RemoveUnusedValues)
+ iris01 = purge(iris012[:100])
+ iris12 = purge(iris012[50:])
+
+ bayes01 = NaiveBayesLearner()(iris01)
+ bayes12 = NaiveBayesLearner()(iris12)
+ bayes012 = NaiveBayesLearner()(iris012)
+
+ self.send_signal(widget.Inputs.data, iris01)
+ self.send_signal(widget.Inputs.predictors, bayes01, 0)
+ self.send_signal(widget.Inputs.predictors, bayes12, 1)
+ self.send_signal(widget.Inputs.predictors, bayes012, 2)
+
+ for i, pred in enumerate(widget.predictors):
+ p = pred.results.unmapped_probabilities
+ p[0] = 10 + 100 * i + np.arange(p.shape[1])
+ pred.results.unmapped_predicted[:] = i
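+        # Model i now predicts class i, with row-0 probabilities 100 * i + 10,
+        # 100 * i + 11, ..., so the expected metas below identify which
+        # prediction and probability columns were output.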
+
+ widget.shown_probs = widget.NO_PROBS
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 1, 2])
+
+ widget.shown_probs = widget.DATA_PROBS
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 0, 110, 2, 210, 211])
+
+ widget.shown_probs = widget.MODEL_PROBS
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 111, 2, 210, 211, 212])
+
+ widget.shown_probs = widget.BOTH_PROBS
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 2, 210, 211])
+
+ widget.shown_probs = widget.BOTH_PROBS + 1
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 10, 1, 0, 2, 210])
+
+ widget.shown_probs = widget.BOTH_PROBS + 2
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 11, 1, 110, 2, 211])
+
+ def test_output_wrt_shown_probs_2(self):
+ """One model misses one class"""
+ widget = self.widget
+ iris012 = self.iris
+ purge = Remove(class_flags=Remove.RemoveUnusedValues)
+ iris01 = purge(iris012[:100])
+
+ bayes01 = NaiveBayesLearner()(iris01)
+ bayes012 = NaiveBayesLearner()(iris012)
+
+ self.send_signal(widget.Inputs.data, iris012)
+ self.send_signal(widget.Inputs.predictors, bayes01, 0)
+ self.send_signal(widget.Inputs.predictors, bayes012, 1)
+
+ for i, pred in enumerate(widget.predictors):
+ p = pred.results.unmapped_probabilities
+ p[0] = 10 + 100 * i + np.arange(p.shape[1])
+ pred.results.unmapped_predicted[:] = i
+
+ widget.shown_probs = widget.NO_PROBS
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 1])
+
+ widget.shown_probs = widget.DATA_PROBS
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 10, 11, 0, 1, 110, 111, 112])
+
+ widget.shown_probs = widget.MODEL_PROBS
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 111, 112])
+
+ widget.shown_probs = widget.BOTH_PROBS
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 10, 11, 1, 110, 111, 112])
+
+ widget.shown_probs = widget.BOTH_PROBS + 1
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 10, 1, 110])
+
+ widget.shown_probs = widget.BOTH_PROBS + 2
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 11, 1, 111])
+
+ widget.shown_probs = widget.BOTH_PROBS + 3
+ widget._commit_predictions()
+ out = self.get_output(widget.Outputs.predictions)
+ self.assertEqual(list(out.metas[0]), [0, 0, 1, 112])
+
+ def test_output_regression(self):
+ widget = self.widget
+ self.send_signal(widget.Inputs.data, self.housing)
+ self.send_signal(widget.Inputs.predictors,
+ LinearRegressionLearner()(self.housing), 0)
+ self.send_signal(widget.Inputs.predictors,
+ MeanLearner()(self.housing), 1)
+ out = self.get_output(widget.Outputs.predictions)
+ np.testing.assert_equal(
+ out.metas,
+ np.hstack([pred.results.predicted.T for pred in widget.predictors]))
+
+ @patch("Orange.widgets.evaluate.owpredictions.usable_scorers",
+ Mock(return_value=[_Scorer]))
+ def test_change_target(self):
+ widget = self.widget
+ table = widget.score_table
+ combo = widget.controls.target_class
+
+ log_reg_iris = LogisticRegressionLearner()(self.iris)
+ self.send_signal(widget.Inputs.predictors, log_reg_iris)
+ self.send_signal(widget.Inputs.data, self.iris)
+
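+        # The patched _Scorer returns 42 for the average target and the class
+        # index otherwise, so the score cell shows which target was used.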
+ self.assertEqual(table.model.rowCount(), 1)
+ self.assertEqual(table.model.columnCount(), 4)
+ self.assertEqual(float(table.model.data(table.model.index(0, 3))), 42)
+
+ for idx, value in enumerate(widget.class_var.values):
+ combo.setCurrentText(value)
+ combo.activated[str].emit(value)
+ self.assertEqual(table.model.rowCount(), 1)
+ self.assertEqual(table.model.columnCount(), 4)
+ self.assertEqual(float(table.model.data(table.model.index(0, 3))),
+ idx)
+
+ def test_report(self):
+ widget = self.widget
+
+ log_reg_iris = LogisticRegressionLearner()(self.iris)
+ self.send_signal(widget.Inputs.predictors, log_reg_iris)
+ self.send_signal(widget.Inputs.data, self.iris)
+
+ widget.report_paragraph = Mock()
+ reports = set()
+ for widget.shown_probs in range(len(widget.PROB_OPTS)):
+ widget.send_report()
+ reports.add(widget.report_paragraph.call_args[0][1])
+ self.assertEqual(len(reports), len(widget.PROB_OPTS))
+
+ for widget.shown_probs, value in enumerate(
+ widget.class_var.values, start=widget.shown_probs + 1):
+ widget.send_report()
+ self.assertIn(value, widget.report_paragraph.call_args[0][1])
+
class SelectionModelTest(unittest.TestCase):
def setUp(self):
@@ -879,7 +1195,7 @@ def test_sorting_classification(self):
self.assertEqual(val, 1)
np.testing.assert_equal(prob, [0.1, 0.6, 0.3])
- model.setProbInd([2])
+ model.setProbInd([[2], [2]])
model.sort(0, Qt.DescendingOrder)
val, prob = model.data(model.index(0, 0))
self.assertEqual(val, 2)
@@ -888,7 +1204,7 @@ def test_sorting_classification(self):
self.assertEqual(val, 1)
np.testing.assert_equal(prob, [0.1, 0.6, 0.3])
- model.setProbInd([2])
+ model.setProbInd([[2], [2]])
model.sort(1, Qt.AscendingOrder)
val, prob = model.data(model.index(0, 1))
self.assertEqual(val, 0)
@@ -897,26 +1213,45 @@ def test_sorting_classification(self):
self.assertEqual(val, 1)
np.testing.assert_equal(prob, [0.3, 0.7, 0])
- model.setProbInd([1, 0])
+ model.setProbInd([[1, 0], [1, 0]])
model.sort(0, Qt.AscendingOrder)
np.testing.assert_equal(model.data(model.index(0, 0))[1], [0, .1, .9])
np.testing.assert_equal(model.data(model.index(1, 0))[1], [0.8, .1, .1])
- model.setProbInd([1, 2])
+ model.setProbInd([[1, 2], [1, 2]])
model.sort(0, Qt.AscendingOrder)
np.testing.assert_equal(model.data(model.index(0, 0))[1], [0.8, .1, .1])
np.testing.assert_equal(model.data(model.index(1, 0))[1], [0, .1, .9])
- model.setProbInd([])
+ model.setProbInd([[], []])
model.sort(0, Qt.AscendingOrder)
self.assertEqual([model.data(model.index(i, 0))[0]
for i in range(model.rowCount())], [0, 0, 1, 1, 2])
- model.setProbInd([])
+ model.setProbInd([[], []])
model.sort(0, Qt.DescendingOrder)
self.assertEqual([model.data(model.index(i, 0))[0]
for i in range(model.rowCount())], [2, 1, 1, 0, 0])
+ def test_sorting_classification_different(self):
+ model = PredictionsModel(self.values, self.probs)
+
+ model.setProbInd([[2], [0]])
+ model.sort(0, Qt.DescendingOrder)
+ val, prob = model.data(model.index(0, 0))
+ self.assertEqual(val, 2)
+ np.testing.assert_equal(prob, [0, 0.1, 0.9])
+ val, prob = model.data(model.index(0, 1))
+ self.assertEqual(val, 1)
+ np.testing.assert_equal(prob, [0.1, 0.6, 0.3])
+ model.sort(1, Qt.DescendingOrder)
+ val, prob = model.data(model.index(0, 0))
+ self.assertEqual(val, 1)
+ np.testing.assert_equal(prob, [0.3, 0.7, 0])
+ val, prob = model.data(model.index(0, 1))
+ self.assertEqual(val, 0)
+ np.testing.assert_equal(prob, [0.9, 0.05, 0.05])
+
def test_sorting_regression(self):
model = PredictionsModel(self.values, self.no_probs)