diff --git a/src/highdicom/frame.py b/src/highdicom/frame.py
index 16bd28b5..091fe85f 100644
--- a/src/highdicom/frame.py
+++ b/src/highdicom/frame.py
@@ -257,8 +257,9 @@ def encode_frame(
         if transfer_syntax_uid == JPEG2000Lossless:
             if bits_allocated not in (1, 8, 16):
                 raise ValueError(
-                    'Bits Allocated must be 1, 8, or 16 for encoding of '
-                    f'monochrome image frames with with {name} codec.'
+                    'Bits Allocated must be 1, 8, or 16 for encoding '
+                    f'of monochrome image frames with {name} '
+                    'codec.'
                 )
         else:
             if bits_allocated not in (8, 16):
@@ -287,9 +288,9 @@ def encode_frame(
 
         if photometric_interpretation != required_pi.value:
             raise ValueError(
-                f'Photometric interpretation must be "{required_pi.value}" '
-                'for encoding of color image frames with '
-                f'{name} codec.'
+                'Photometric interpretation must be '
+                f'"{required_pi.value}" for encoding of color image '
+                f'frames with {name} codec.'
             )
 
         if transfer_syntax_uid == JPEG2000:
diff --git a/src/highdicom/sr/sop.py b/src/highdicom/sr/sop.py
index f58c5018..f60eaeeb 100644
--- a/src/highdicom/sr/sop.py
+++ b/src/highdicom/sr/sop.py
@@ -389,7 +389,9 @@ def extract_evidence(
                     UID(instance_ds.ReferencedSOPClassUID),
                 )
 
-        current_evidence_seq = self.get('CurrentRequestedProcedureEvidenceSequence')
+        current_evidence_seq = self.get(
+            'CurrentRequestedProcedureEvidenceSequence'
+        )
         if current_evidence_seq is not None:
             current_evidence = extract_evidence(current_evidence_seq)
         else:
@@ -440,7 +442,9 @@ def extract_evidence_series(
                     UID(series_ds.SeriesInstanceUID),
                 )
 
-        current_evidence_seq = self.get('CurrentRequestedProcedureEvidenceSequence')
+        current_evidence_seq = self.get(
+            'CurrentRequestedProcedureEvidenceSequence'
+        )
         if current_evidence_seq is not None:
             current_evidence = extract_evidence_series(current_evidence_seq)
         else:
@@ -459,6 +463,7 @@ def extract_evidence_series(
 
         return evidence
 
+
 class EnhancedSR(_SR):
 
     """SOP class for an Enhanced Structured Report (SR) document, whose
diff --git a/tests/test_base.py b/tests/test_base.py
index c3a7f8f2..ce547eb3 100644
--- a/tests/test_base.py
+++ b/tests/test_base.py
@@ -74,7 +74,7 @@ def test_big_endian(self):
         )
 
     def test_explicit_vr(self):
-        sop_class = SOPClass(
+        _ = SOPClass(
            study_instance_uid=UID(),
            series_instance_uid=UID(),
            series_number=1,
@@ -87,7 +87,7 @@ def test_explicit_vr(self):
         )
 
     def test_implicit_vr(self):
-        sop_class = SOPClass(
+        _ = SOPClass(
            study_instance_uid=UID(),
            series_instance_uid=UID(),
            series_number=1,
diff --git a/tests/test_frame.py b/tests/test_frame.py
index 1698f1e9..a84c8e2e 100644
--- a/tests/test_frame.py
+++ b/tests/test_frame.py
@@ -275,7 +275,7 @@ def test_jpeg2000lossless_monochrome(self):
 
     def test_jpeg2000lossless_single_bit(self):
         bits_allocated = 1
-        frame = np.zeros((48, 32), dtype=np.dtype(f'uint8'))
+        frame = np.zeros((48, 32), dtype=np.dtype('uint8'))
         frame[12:45, 3:6] = 1
         compressed_frame = encode_frame(
             frame,
diff --git a/tests/test_sc.py b/tests/test_sc.py
index c0a05b74..48137598 100644
--- a/tests/test_sc.py
+++ b/tests/test_sc.py
@@ -485,7 +485,7 @@ def test_monochrome_jpegls(self):
             frame
         )
 
-    def test_monochrome_jpegls(self):
+    def test_monochrome_jpegls_near_lossless(self):
         pytest.importorskip("libjpeg")
         bits_allocated = 16
         photometric_interpretation = 'MONOCHROME2'
diff --git a/tests/test_seg.py b/tests/test_seg.py
index e9703a41..d77f018f 100644
--- a/tests/test_seg.py
+++ b/tests/test_seg.py
@@ -663,8 +663,8 @@ def setUp(self):
         frame = np.pad(frame, ((0, 0), (12, 12), (12, 12), (0, 0)))
         self._sm_image.PixelData = frame.flatten().tobytes()
         self._sm_image.TotalPixelMatrixRows = (
-            frame.shape[1] *
-            int(self._sm_image.TotalPixelMatrixRows / self._sm_image.Rows)
+            frame.shape[1] *
+            int(self._sm_image.TotalPixelMatrixRows / self._sm_image.Rows)
         )
         self._sm_image.TotalPixelMatrixColumns = (
             frame.shape[2] *
@@ -3871,11 +3871,14 @@ def setUp(self):
 
             resized_multisegment = np.stack(
                 [
-                    im.resize(out_size, Image.Resampling.NEAREST) for im in seg_pil_multisegment
+                    im.resize(out_size, Image.Resampling.NEAREST)
+                    for im in seg_pil_multisegment
                 ],
                 axis=-1
             )[None]
-            self._downsampled_pix_arrays_multisegment.append(resized_multisegment)
+            self._downsampled_pix_arrays_multisegment.append(
+                resized_multisegment
+            )
 
         # Mock lower-resolution source images. No need to have their pixel
         # data correctly set as it isn't used. Just update the relevant
diff --git a/tests/test_sr.py b/tests/test_sr.py
index 90069ada..128d4166 100644
--- a/tests/test_sr.py
+++ b/tests/test_sr.py
@@ -1479,8 +1479,9 @@ def test_from_image(self):
 
         for item in subject_context:
             # SpecimenUID
+            specimen_uid = '2.25.281821656492584880365678271074145532563'
            if item.ConceptNameCodeSequence[0].CodeValue == '121039':
-                assert item.UID == '2.25.281821656492584880365678271074145532563'
+                assert item.UID == specimen_uid
                 has_specimen_uid = True
 
             # Specimen Identifier