From 06d27c6fc6a8d540144bd61ab1625caf016bd90f Mon Sep 17 00:00:00 2001 From: Roy Shilkrot Date: Mon, 17 Jun 2024 13:10:26 -0400 Subject: [PATCH] chore: Update dot detector settings in UI and storage --- camera_view.py | 2 +- defaults.py | 3 + main.py | 4 + mainwindow.ui | 69 +++--- storage.py | 1 + tesseract.py | 537 ++++++++++++++++++++++++----------------------- ui_mainwindow.py | 40 ++-- 7 files changed, 357 insertions(+), 299 deletions(-) diff --git a/camera_view.py b/camera_view.py index 62c5533..5d9939f 100644 --- a/camera_view.py +++ b/camera_view.py @@ -265,7 +265,7 @@ def run(self): if not self.detectionTargetsStorage.is_empty(): detectionTargets = self.detectionTargetsStorage.get_data() texts = self.textDetector.detect_multi_text( - binary, gray, detectionTargets, multi_crop=True + binary, gray, detectionTargets ) if len(texts) > 0 and len(detectionTargets) == len(texts): # augment the text detection targets with the results diff --git a/defaults.py b/defaults.py index c1361f0..2048485 100644 --- a/defaults.py +++ b/defaults.py @@ -189,6 +189,9 @@ def normalize_settings_dict(settings, box_info): "invert_patch": ( settings["invert_patch"] if "invert_patch" in settings else False ), + "dot_detector": ( + settings["dot_detector"] if "dot_detector" in settings else False + ), "binarization_method": ( settings["binarization_method"] if "binarization_method" in settings else 0 ), diff --git a/main.py b/main.py index 0a11c0e..9481ead 100644 --- a/main.py +++ b/main.py @@ -237,6 +237,9 @@ def __init__(self, translator: QTranslator, parent: QObject): self.ui.checkBox_invertPatch.toggled.connect( partial(self.genericSettingsChanged, "invert_patch") ) + self.ui.checkBox_dotDetector.toggled.connect( + partial(self.genericSettingsChanged, "dot_detector") + ) self.ui.checkBox_ordinalIndicator.toggled.connect( partial(self.genericSettingsChanged, "ordinal_indicator") ) @@ -751,6 +754,7 @@ def populateSettings(self, name): item_obj.settings["normalize_wh_ratio"] ) self.ui.checkBox_invertPatch.setChecked(item_obj.settings["invert_patch"]) + self.ui.checkBox_dotDetector.setChecked(item_obj.settings["dot_detector"]) self.ui.checkBox_ordinalIndicator.setChecked( item_obj.settings["ordinal_indicator"] ) diff --git a/mainwindow.ui b/mainwindow.ui index 1952223..10a1b85 100644 --- a/mainwindow.ui +++ b/mainwindow.ui @@ -529,6 +529,9 @@ + + 3 + 0 @@ -600,10 +603,41 @@ - - - Remove leading 0s - + + + + 3 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + Remove leading 0s + + + + + + + Count dots/blobs instead of detecting characters + + + Dot Counter + + + + @@ -1602,16 +1636,10 @@ 3 - + false - - - 0 - 0 - - Binary View @@ -1621,16 +1649,10 @@ - + false - - - 0 - 0 - - 4-corner Correction @@ -1640,16 +1662,10 @@ - + false - - - 0 - 0 - - Stabilize @@ -1705,6 +1721,9 @@ + + Reset zoom + 1:1 diff --git a/storage.py b/storage.py index e8a0812..9a8b51f 100644 --- a/storage.py +++ b/storage.py @@ -242,6 +242,7 @@ def getBoxesForStorage(self): "normalize_wh_ratio" ), "invert_patch": detectionTarget.settings.get("invert_patch"), + "dot_detector": detectionTarget.settings.get("dot_detector"), "ordinal_indicator": detectionTarget.settings.get( "ordinal_indicator" ), diff --git a/tesseract.py b/tesseract.py index a737e29..fe25e70 100644 --- a/tesseract.py +++ b/tesseract.py @@ -9,6 +9,7 @@ import re from PySide6.QtCore import QRectF from threading import Lock +from sc_logging import logger def autocrop(image_in): @@ -158,7 +159,7 @@ def detect_text(self, image): return text.strip() def detect_multi_text( - self, binary, 
gray, rects: list[TextDetectionTarget], multi_crop=False + self, binary, gray, rects: list[TextDetectionTarget] ) -> list[TextDetectionResult]: if binary is None: return [] @@ -168,288 +169,308 @@ def detect_multi_text( if len(binary.shape) < 2 or binary.shape[0] < 1 or binary.shape[1] < 1: return [] - if not multi_crop: - pilimage = Image.fromarray(binary) - with self.api_lock: - self.api.SetImage(pilimage) - texts = [] for rect in rects: effectiveRect = None scale_x = 1.0 scale_y = 1.0 - if multi_crop: - if ( - rect is None - or rect.x() < 0 - or rect.y() < 0 - or rect.width() < 1 - or rect.height() < 1 - ): - texts.append( - TextDetectionResult( - "", TextDetectionTargetWithResult.ResultState.Empty, None - ) - ) - continue - - if rect.x() >= binary.shape[1]: - # move the rect inside the image - rect.setX(binary.shape[1] - rect.width()) - if rect.y() >= binary.shape[0]: - # move the rect inside the image - rect.setY(binary.shape[0] - rect.height()) - if rect.x() + rect.width() > binary.shape[1]: - rect.setWidth(binary.shape[1] - rect.x()) - if rect.y() + rect.height() > binary.shape[0]: - rect.setHeight(binary.shape[0] - rect.y()) + if ( + rect is None + or rect.x() < 0 + or rect.y() < 0 + or rect.width() < 1 + or rect.height() < 1 + ): + texts.append( + TextDetectionResult( + "", TextDetectionTargetWithResult.ResultState.Empty, None + ) + ) + continue + + if rect.x() >= binary.shape[1]: + # move the rect inside the image + rect.setX(binary.shape[1] - rect.width()) + if rect.y() >= binary.shape[0]: + # move the rect inside the image + rect.setY(binary.shape[0] - rect.height()) + if rect.x() + rect.width() > binary.shape[1]: + rect.setWidth(binary.shape[1] - rect.x()) + if rect.y() + rect.height() > binary.shape[0]: + rect.setHeight(binary.shape[0] - rect.y()) + + if ( + rect.settings is not None + and "binarization_method" in rect.settings + and rect.settings["binarization_method"] + != TextDetector.BinarizationMethod.GLOBAL + ): if ( - rect.settings is not None - and "binarization_method" in rect.settings - and rect.settings["binarization_method"] - != TextDetector.BinarizationMethod.GLOBAL + rect.settings["binarization_method"] + == TextDetector.BinarizationMethod.NO_BINARIZATION ): - if ( - rect.settings["binarization_method"] - == TextDetector.BinarizationMethod.NO_BINARIZATION - ): - # no binarization - imagecrop = gray[ - int(rect.y()) : int(rect.y() + rect.height()), - int(rect.x()) : int(rect.x() + rect.width()), - ] - elif ( - rect.settings["binarization_method"] - == TextDetector.BinarizationMethod.LOCAL - ): - # local binarization using Otsu's method - _, imagecrop = cv2.threshold( - gray[ - int(rect.y()) : int(rect.y() + rect.height()), - int(rect.x()) : int(rect.x() + rect.width()), - ], - 0, - 255, - cv2.THRESH_BINARY + cv2.THRESH_OTSU, - ) - elif ( - rect.settings["binarization_method"] - == TextDetector.BinarizationMethod.ADAPTIVE - ): - # apply adaptive binarization - imagecrop = cv2.adaptiveThreshold( - gray[ - int(rect.y()) : int(rect.y() + rect.height()), - int(rect.x()) : int(rect.x() + rect.width()), - ], - 255, - cv2.ADAPTIVE_THRESH_GAUSSIAN_C, - cv2.THRESH_BINARY, - # use a fraction of the patch area - max(int(rect.width() * rect.height() * 0.01), 3) | 1, - 2, - ) - # update the binary image for visualisation in the binary mode - binary[ - int(rect.y()) : int(rect.y() + rect.height()), - int(rect.x()) : int(rect.x() + rect.width()), - ] = imagecrop - else: - imagecrop = binary[ + # no binarization + imagecrop = gray[ int(rect.y()) : int(rect.y() + rect.height()), 
int(rect.x()) : int(rect.x() + rect.width()), ] - - if ( - rect.settings is not None - and "cleanup_thresh" in rect.settings - and rect.settings["cleanup_thresh"] > 0 - ): - # cleanup image from small components: find contours and remove small ones - contours, _ = cv2.findContours( - imagecrop, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE - ) - # cleanup_thresh is [0, 1.0], convert to [0, 0.05] - cleanup_thresh = rect.settings["cleanup_thresh"] * 0.05 - img_area_thresh = ( - imagecrop.shape[0] * imagecrop.shape[1] * cleanup_thresh - ) - for contour in contours: - if cv2.contourArea(contour) < img_area_thresh: - cv2.drawContours(imagecrop, [contour], 0, 0, -1) - - if ( - rect.settings is not None - and "vscale" in rect.settings - and rect.settings["vscale"] != 10 + elif ( + rect.settings["binarization_method"] + == TextDetector.BinarizationMethod.LOCAL ): - # vertical scale the image - # the vscale input is in the range [1, 10] where 10 is the default (1:1) - # scale the image in the y direction about the center - rows, cols = imagecrop.shape - # calculate the target height - target_height = int(rows * (rect.settings["vscale"] / 10.0)) - scaled = cv2.resize( - imagecrop, (cols, target_height), 0, 0, cv2.INTER_AREA - ) - # add padding to the top and bottom - pad_top = (rows - target_height) // 2 - pad_bottom = rows - target_height - pad_top - scaled = cv2.copyMakeBorder( - scaled, pad_top, pad_bottom, 0, 0, cv2.BORDER_REPLICATE + # local binarization using Otsu's method + _, imagecrop = cv2.threshold( + gray[ + int(rect.y()) : int(rect.y() + rect.height()), + int(rect.x()) : int(rect.x() + rect.width()), + ], + 0, + 255, + cv2.THRESH_BINARY + cv2.THRESH_OTSU, ) - # make sure the image is the same size as the original - scaled = scaled[:rows, :] - # copy back into imagecrop and binary display - binary[ - int(rect.y()) : int(rect.y() + rect.height()), - int(rect.x()) : int(rect.x() + rect.width()), - ] = scaled - imagecrop = scaled - - if ( - rect.settings is not None - and "skew" in rect.settings - and rect.settings["skew"] != 0 + elif ( + rect.settings["binarization_method"] + == TextDetector.BinarizationMethod.ADAPTIVE ): - # skew the image in the x direction about the center - rows, cols = imagecrop.shape - # identity 2x2 matrix - M = np.float32([[1, 0, 0], [0, 1, 0]]) - # add skew factor to matrix - M[0, 1] = rect.settings["skew"] / 40.0 - try: - skewed = cv2.warpAffine(imagecrop, M, (cols, rows)) - binary[ + # apply adaptive binarization + imagecrop = cv2.adaptiveThreshold( + gray[ int(rect.y()) : int(rect.y() + rect.height()), int(rect.x()) : int(rect.x() + rect.width()), - ] = skewed - imagecrop = skewed - except: - pass - - if ( - rect.settings is not None - and "dilate" in rect.settings - and rect.settings["dilate"] > 0 - and imagecrop.shape[0] > 0 - and imagecrop.shape[1] > 0 - ): - # dilate the image - kernel = np.ones((3, 3), np.uint8) - dilated = cv2.dilate( - imagecrop.copy(), - kernel, - iterations=int(rect.settings["dilate"]), + ], + 255, + cv2.ADAPTIVE_THRESH_GAUSSIAN_C, + cv2.THRESH_BINARY, + # use a fraction of the patch area + max(int(rect.width() * rect.height() * 0.01), 3) | 1, + 2, ) - # copy back into image crop + # update the binary image for visualisation in the binary mode + binary[ + int(rect.y()) : int(rect.y() + rect.height()), + int(rect.x()) : int(rect.x() + rect.width()), + ] = imagecrop + else: + imagecrop = binary[ + int(rect.y()) : int(rect.y() + rect.height()), + int(rect.x()) : int(rect.x() + rect.width()), + ] + + if ( + rect.settings is not None + and 
"cleanup_thresh" in rect.settings + and rect.settings["cleanup_thresh"] > 0 + ): + # cleanup image from small components: find contours and remove small ones + contours, _ = cv2.findContours( + imagecrop, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE + ) + # cleanup_thresh is [0, 1.0], convert to [0, 0.05] + cleanup_thresh = rect.settings["cleanup_thresh"] * 0.05 + img_area_thresh = ( + imagecrop.shape[0] * imagecrop.shape[1] * cleanup_thresh + ) + for contour in contours: + if cv2.contourArea(contour) < img_area_thresh: + cv2.drawContours(imagecrop, [contour], 0, 0, -1) + + if ( + rect.settings is not None + and "vscale" in rect.settings + and rect.settings["vscale"] != 10 + ): + # vertical scale the image + # the vscale input is in the range [1, 10] where 10 is the default (1:1) + # scale the image in the y direction about the center + rows, cols = imagecrop.shape + # calculate the target height + target_height = int(rows * (rect.settings["vscale"] / 10.0)) + scaled = cv2.resize( + imagecrop, (cols, target_height), 0, 0, cv2.INTER_AREA + ) + # add padding to the top and bottom + pad_top = (rows - target_height) // 2 + pad_bottom = rows - target_height - pad_top + scaled = cv2.copyMakeBorder( + scaled, pad_top, pad_bottom, 0, 0, cv2.BORDER_REPLICATE + ) + # make sure the image is the same size as the original + scaled = scaled[:rows, :] + # copy back into imagecrop and binary display + binary[ + int(rect.y()) : int(rect.y() + rect.height()), + int(rect.x()) : int(rect.x() + rect.width()), + ] = scaled + imagecrop = scaled + + if ( + rect.settings is not None + and "skew" in rect.settings + and rect.settings["skew"] != 0 + ): + # skew the image in the x direction about the center + rows, cols = imagecrop.shape + # identity 2x2 matrix + M = np.float32([[1, 0, 0], [0, 1, 0]]) + # add skew factor to matrix + M[0, 1] = rect.settings["skew"] / 40.0 + try: + skewed = cv2.warpAffine(imagecrop, M, (cols, rows)) binary[ int(rect.y()) : int(rect.y() + rect.height()), int(rect.x()) : int(rect.x() + rect.width()), - ] = dilated - - if ( - rect.settings is not None - and "invert_patch" in rect.settings - and rect.settings["invert_patch"] - ): - # invert the image - imagecrop = 255 - imagecrop - + ] = skewed + imagecrop = skewed + except: + pass + + if ( + rect.settings is not None + and "dilate" in rect.settings + and rect.settings["dilate"] > 0 + and imagecrop.shape[0] > 0 + and imagecrop.shape[1] > 0 + ): + # dilate the image + kernel = np.ones((3, 3), np.uint8) + dilated = cv2.dilate( + imagecrop.copy(), + kernel, + iterations=int(rect.settings["dilate"]), + ) + # copy back into image crop + binary[ + int(rect.y()) : int(rect.y() + rect.height()), + int(rect.x()) : int(rect.x() + rect.width()), + ] = dilated + + if ( + rect.settings is not None + and "invert_patch" in rect.settings + and rect.settings["invert_patch"] + ): + # invert the image + imagecrop = 255 - imagecrop + + if ( + rect.settings is not None + and "skip_similar_image" in rect.settings + and rect.settings["skip_similar_image"] + ): + # compare the image with the last image if ( - rect.settings is not None - and "skip_similar_image" in rect.settings - and rect.settings["skip_similar_image"] + rect.last_image is not None + and rect.last_image.shape == imagecrop.shape ): - # compare the image with the last image - if ( - rect.last_image is not None - and rect.last_image.shape == imagecrop.shape - ): - # check if the difference is less than 5% - diff = cv2.absdiff(rect.last_image, imagecrop) - diff = diff.astype(np.float32) - diff = diff / 
255.0 - diff = diff.sum() / (imagecrop.shape[0] * imagecrop.shape[1]) - if diff < 0.05: - # skip this image - texts.append( - TextDetectionResult( - "SIM", - TextDetectionTargetWithResult.ResultState.FailedFilter, - effectiveRect, - ) + # check if the difference is less than 5% + diff = cv2.absdiff(rect.last_image, imagecrop) + diff = diff.astype(np.float32) + diff = diff / 255.0 + diff = diff.sum() / (imagecrop.shape[0] * imagecrop.shape[1]) + if diff < 0.05: + # skip this image + texts.append( + TextDetectionResult( + "SIM", + TextDetectionTargetWithResult.ResultState.FailedFilter, + effectiveRect, ) - continue - rect.last_image = imagecrop.copy() - - if ( - rect.settings is not None - and "autocrop" in rect.settings - and rect.settings["autocrop"] - ): - # auto crop the binary image around the text - imagecrop, (first_row, last_row, first_col, last_col) = autocrop( - imagecrop - ) - effectiveRect = QRectF( - first_col, - first_row, - last_col - first_col, - last_row - first_row, - ) - - # check if image is size 0 - if imagecrop.shape[0] == 0 or imagecrop.shape[1] == 0: - texts.append( - TextDetectionResult( - "", - TextDetectionTargetWithResult.ResultState.Empty, - effectiveRect, ) + continue + rect.last_image = imagecrop.copy() + + if ( + rect.settings is not None + and "autocrop" in rect.settings + and rect.settings["autocrop"] + ): + # auto crop the binary image around the text + imagecrop, (first_row, last_row, first_col, last_col) = autocrop( + imagecrop + ) + effectiveRect = QRectF( + first_col, + first_row, + last_col - first_col, + last_row - first_row, + ) + + # check if image is size 0 + if imagecrop.shape[0] == 0 or imagecrop.shape[1] == 0: + texts.append( + TextDetectionResult( + "", + TextDetectionTargetWithResult.ResultState.Empty, + effectiveRect, ) - continue - - if ( - rect.settings is not None - and "rescale_patch" in rect.settings - and rect.settings["rescale_patch"] - ): - # rescale the image to 35 pixels height - scale_x = 35 / imagecrop.shape[0] - scale_y = scale_x - - if ( - rect.settings is not None - and "normalize_wh_ratio" in rect.settings - and rect.settings["normalize_wh_ratio"] - and "median_wh_ratio" in rect.settings - and rect.settings["median_wh_ratio"] > 0 - ): - # rescale the image in x or in y such that the width-to-height ratio is 0.5 - scale_x *= 0.5 / rect.settings["median_wh_ratio"] - - if scale_x != 1.0 or scale_y != 1.0: - imagecrop = cv2.resize( - imagecrop, - None, - fx=scale_x, - fy=scale_y, - interpolation=cv2.INTER_AREA, + ) + continue + + if ( + rect.settings is not None + and "rescale_patch" in rect.settings + and rect.settings["rescale_patch"] + ): + # rescale the image to 35 pixels height + scale_x = 35 / imagecrop.shape[0] + scale_y = scale_x + + if ( + rect.settings is not None + and "normalize_wh_ratio" in rect.settings + and rect.settings["normalize_wh_ratio"] + and "median_wh_ratio" in rect.settings + and rect.settings["median_wh_ratio"] > 0 + ): + # rescale the image in x or in y such that the width-to-height ratio is 0.5 + scale_x *= 0.5 / rect.settings["median_wh_ratio"] + + if scale_x != 1.0 or scale_y != 1.0: + imagecrop = cv2.resize( + imagecrop, + None, + fx=scale_x, + fy=scale_y, + interpolation=cv2.INTER_AREA, + ) + + # if dot detector count the blobs in the patch + if ( + rect.settings is not None + and "dot_detector" in rect.settings + and rect.settings["dot_detector"] + ): + # find the contours + contours, _ = cv2.findContours( + imagecrop, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE + ) + # count the number of contours 
+ count = 0 + for contour in contours: + if cv2.contourArea(contour) > 5: + count += 1 + + texts.append( + TextDetectionResult( + str(count), + TextDetectionTargetWithResult.ResultState.Success, + effectiveRect, ) + ) + continue - try: - pilimage = Image.fromarray(imagecrop) - with self.api_lock: - self.api.SetImage(pilimage) - except: - texts.append( - TextDetectionResult( - "", TextDetectionTargetWithResult.ResultState.Empty, None - ) + try: + pilimage = Image.fromarray(imagecrop) + with self.api_lock: + self.api.SetImage(pilimage) + except: + texts.append( + TextDetectionResult( + "", TextDetectionTargetWithResult.ResultState.Empty, None ) - continue + ) + continue if rect.settings["type"] == FieldType.NUMBER: with self.api_lock: @@ -464,12 +485,6 @@ def detect_multi_text( "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .,;:!?-_()[]{}<>@#$%^&*+=|\\~`\"'", ) - if not multi_crop: - with self.api_lock: - self.api.SetRectangle( - rect.x(), rect.y(), rect.width(), rect.height() - ) - text = "" extras = {} with self.api_lock: diff --git a/ui_mainwindow.py b/ui_mainwindow.py index 73ff416..24df919 100644 --- a/ui_mainwindow.py +++ b/ui_mainwindow.py @@ -273,6 +273,7 @@ def setupUi(self, MainWindow): sizePolicy2.setHeightForWidth(self.widget_11.sizePolicy().hasHeightForWidth()) self.widget_11.setSizePolicy(sizePolicy2) self.horizontalLayout_13 = QHBoxLayout(self.widget_11) + self.horizontalLayout_13.setSpacing(3) self.horizontalLayout_13.setObjectName(u"horizontalLayout_13") self.horizontalLayout_13.setContentsMargins(0, 0, 0, 0) self.checkBox_skip_empty = QCheckBox(self.widget_11) @@ -309,10 +310,24 @@ def setupUi(self, MainWindow): self.verticalLayout_5.addWidget(self.widget_15) - self.checkBox_removeLeadingZeros = QCheckBox(self.groupBox_target_settings) + self.widget_22 = QWidget(self.groupBox_target_settings) + self.widget_22.setObjectName(u"widget_22") + self.horizontalLayout_25 = QHBoxLayout(self.widget_22) + self.horizontalLayout_25.setSpacing(3) + self.horizontalLayout_25.setObjectName(u"horizontalLayout_25") + self.horizontalLayout_25.setContentsMargins(0, 0, 0, 0) + self.checkBox_removeLeadingZeros = QCheckBox(self.widget_22) self.checkBox_removeLeadingZeros.setObjectName(u"checkBox_removeLeadingZeros") - self.verticalLayout_5.addWidget(self.checkBox_removeLeadingZeros) + self.horizontalLayout_25.addWidget(self.checkBox_removeLeadingZeros) + + self.checkBox_dotDetector = QCheckBox(self.widget_22) + self.checkBox_dotDetector.setObjectName(u"checkBox_dotDetector") + + self.horizontalLayout_25.addWidget(self.checkBox_dotDetector) + + + self.verticalLayout_5.addWidget(self.widget_22) self.widget_9 = QWidget(self.groupBox_target_settings) self.widget_9.setObjectName(u"widget_9") @@ -496,7 +511,7 @@ def setupUi(self, MainWindow): self.tab_textFiles.setSizePolicy(sizePolicy3) self.formLayout_2 = QFormLayout(self.tab_textFiles) self.formLayout_2.setObjectName(u"formLayout_2") - self.formLayout_2.setVerticalSpacing(0) + self.formLayout_2.setVerticalSpacing(3) self.formLayout_2.setContentsMargins(-1, -1, -1, 0) self.label_7 = QLabel(self.tab_textFiles) self.label_7.setObjectName(u"label_7") @@ -833,29 +848,23 @@ def setupUi(self, MainWindow): self.horizontalLayout_10.setSpacing(3) self.horizontalLayout_10.setObjectName(u"horizontalLayout_10") self.horizontalLayout_10.setContentsMargins(0, 0, 0, 3) - self.pushButton_binary = QPushButton(self.widget_viewTools) + self.pushButton_binary = QToolButton(self.widget_viewTools) self.pushButton_binary.setObjectName(u"pushButton_binary") 
self.pushButton_binary.setEnabled(False) - sizePolicy7.setHeightForWidth(self.pushButton_binary.sizePolicy().hasHeightForWidth()) - self.pushButton_binary.setSizePolicy(sizePolicy7) self.pushButton_binary.setCheckable(True) self.horizontalLayout_10.addWidget(self.pushButton_binary) - self.pushButton_fourCorner = QPushButton(self.widget_viewTools) + self.pushButton_fourCorner = QToolButton(self.widget_viewTools) self.pushButton_fourCorner.setObjectName(u"pushButton_fourCorner") self.pushButton_fourCorner.setEnabled(False) - sizePolicy7.setHeightForWidth(self.pushButton_fourCorner.sizePolicy().hasHeightForWidth()) - self.pushButton_fourCorner.setSizePolicy(sizePolicy7) self.pushButton_fourCorner.setCheckable(True) self.horizontalLayout_10.addWidget(self.pushButton_fourCorner) - self.pushButton_stabilize = QPushButton(self.widget_viewTools) + self.pushButton_stabilize = QToolButton(self.widget_viewTools) self.pushButton_stabilize.setObjectName(u"pushButton_stabilize") self.pushButton_stabilize.setEnabled(False) - sizePolicy7.setHeightForWidth(self.pushButton_stabilize.sizePolicy().hasHeightForWidth()) - self.pushButton_stabilize.setSizePolicy(sizePolicy7) self.pushButton_stabilize.setCheckable(True) self.horizontalLayout_10.addWidget(self.pushButton_stabilize) @@ -977,6 +986,10 @@ def retranslateUi(self, MainWindow): self.checkBox_autocrop.setText(QCoreApplication.translate("MainWindow", u"Auto Crop", None)) self.checkBox_invertPatch.setText(QCoreApplication.translate("MainWindow", u"Invert Input", None)) self.checkBox_removeLeadingZeros.setText(QCoreApplication.translate("MainWindow", u"Remove leading 0s", None)) +#if QT_CONFIG(tooltip) + self.checkBox_dotDetector.setToolTip(QCoreApplication.translate("MainWindow", u"Count dots/blobs instead of detecting characters", None)) +#endif // QT_CONFIG(tooltip) + self.checkBox_dotDetector.setText(QCoreApplication.translate("MainWindow", u"Dot Counter", None)) #if QT_CONFIG(tooltip) self.checkBox_rescalePatch.setToolTip(QCoreApplication.translate("MainWindow", u"Scale the image to 35 pixels height, a favorable size for OCR", None)) #endif // QT_CONFIG(tooltip) @@ -1055,6 +1068,9 @@ def retranslateUi(self, MainWindow): self.toolButton_showOCRrects.setToolTip(QCoreApplication.translate("MainWindow", u"Show OCR Detection Boxes", None)) #endif // QT_CONFIG(tooltip) self.toolButton_showOCRrects.setText(QCoreApplication.translate("MainWindow", u"OCR", None)) +#if QT_CONFIG(tooltip) + self.toolButton_zoomReset.setToolTip(QCoreApplication.translate("MainWindow", u"Reset zoom", None)) +#endif // QT_CONFIG(tooltip) self.toolButton_zoomReset.setText(QCoreApplication.translate("MainWindow", u"1:1", None)) self.label_11.setText(QCoreApplication.translate("MainWindow", u"Ctrl-scroll to zoom", None)) self.label_12.setText(QCoreApplication.translate("MainWindow", u"### Open a Camera or Load a File", None))
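
For reference, the dot_detector branch added to detect_multi_text() above skips the Tesseract call for that target and instead reports the number of blobs found in the binarized crop. Below is a minimal, self-contained sketch of that counting step; it assumes a white-on-black binarized patch and reuses the same area floor of 5 used in the diff, while the names count_dots, patch and min_area are illustrative and do not appear in the patch itself.

import cv2
import numpy as np

def count_dots(patch: np.ndarray, min_area: float = 5.0) -> int:
    """Count distinct blobs (dots) in a binarized patch (white blobs on black)."""
    # External contours only: each connected white region yields one contour.
    contours, _ = cv2.findContours(
        patch, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )
    # Discard tiny specks below the area floor, as the dot_detector branch does.
    return sum(1 for c in contours if cv2.contourArea(c) > min_area)

if __name__ == "__main__":
    # Synthetic example: three filled circles on a black background -> prints 3.
    demo = np.zeros((60, 120), dtype=np.uint8)
    for cx in (20, 60, 100):
        cv2.circle(demo, (cx, 30), 8, 255, -1)
    print(count_dots(demo))

Using cv2.RETR_EXTERNAL means holes nested inside a blob are not double-counted; each connected white region contributes exactly one contour, and the area floor filters out binarization noise before the count is reported as the target's result string.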