From b7f6cc3e7483acfdf7a2dbab90d56c5c02b0c148 Mon Sep 17 00:00:00 2001
From: itqop
Date: Thu, 28 Nov 2024 20:44:26 +0300
Subject: [PATCH] 1

---
 car-yolo-test.py | 31 ++++++++++++++++++++++++
 kalentev.py      | 63 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 94 insertions(+)
 create mode 100644 car-yolo-test.py
 create mode 100644 kalentev.py

diff --git a/car-yolo-test.py b/car-yolo-test.py
new file mode 100644
index 0000000..ea011c6
--- /dev/null
+++ b/car-yolo-test.py
@@ -0,0 +1,31 @@
+import easyocr
+import cv2
+
+def ocr_image(img_path):
+    """
+    Runs OCR on an image of a licence plate.
+
+    :param img_path: Path to the image.
+    :return: Recognized text.
+    """
+    reader = easyocr.Reader(['en'], gpu=True)  # Initialise the easyOCR reader
+    img = cv2.imread(img_path)  # Load the image
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert to grayscale
+
+    # Run OCR
+    results = reader.readtext(gray)
+    print(results)  # Raw detections, useful for debugging
+    text = ""
+    for res in results:
+        if len(results) == 1:
+            text = res[1]
+        if len(results) > 1 and len(res[1]) > 6 and res[2] > 0.2:  # Keep only long, confident detections
+            text = res[1]
+
+    return text
+
+# Example usage
+if __name__ == "__main__":
+    img_path = "result.png"  # Path to the plate image
+    recognized_text = ocr_image(img_path)
+    print(f"Recognized text: {recognized_text}")
diff --git a/kalentev.py b/kalentev.py
new file mode 100644
index 0000000..7cde18d
--- /dev/null
+++ b/kalentev.py
@@ -0,0 +1,63 @@
+import cv2
+
+# Input images with visible licence plates
+images = ['img/1.jpg', 'img/2.jpg', 'img/3.jpg', 'img/26417135.jpg', 'img/ru4018185.jpg']
+
+processed_plates = []
+
+for img_path in images:
+    image = cv2.imread(img_path)
+
+    # Threshold the grayscale image and look for plate-like contours
+    img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    ret, thresh = cv2.threshold(img_gray, 100, 200, cv2.THRESH_TOZERO_INV)
+    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+
+    plate_rect = None
+    for contour in contours:
+        x, y, w, h = cv2.boundingRect(contour)
+        area = w * h
+        aspect_ratio = float(w) / h
+        # A plate candidate is a wide, reasonably large, roughly rectangular contour
+        if aspect_ratio >= 3 and area > 600:
+            approx = cv2.approxPolyDP(contour, 0.05 * cv2.arcLength(contour, True), True)
+            if len(approx) <= 4 and x > 15:
+                plate_rect = (x, y, x + w, y + h)
+                break
+
+    if plate_rect is None:
+        print(f'No plate-like contour found in {img_path}, skipping')
+        continue
+
+    start_x, start_y, end_x, end_y = plate_rect
+    plate = image[start_y:end_y, start_x:end_x]
+
+    # Estimate the plate skew from the largest contour of the binarised crop
+    gray = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
+    _, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
+    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+    largest_contour = max(contours, key=cv2.contourArea)
+    rect = cv2.minAreaRect(largest_contour)
+    angle = rect[-1]
+
+    # Normalise the minAreaRect angle to a small deskew correction
+    if angle < -45:
+        angle += 90
+    elif angle >= 90:
+        angle = 0.5
+
+    # Rotate the cropped plate so it is horizontal
+    (h, w) = plate.shape[:2]
+    center = (w // 2, h // 2)
+    M = cv2.getRotationMatrix2D(center, angle, 1.0)
+    rotated = cv2.warpAffine(plate, M, (w, h))
+
+    processed_plates.append(rotated)
+
+# Show each deskewed plate
+for i, rotated_plate in enumerate(processed_plates):
+    cv2.imshow(f'Plate {i+1}', rotated_plate)
+
+cv2.waitKey(0)
+cv2.destroyAllWindows()
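
A possible way to chain the two new scripts (a minimal sketch only; it assumes the snippet is appended to the end of kalentev.py, where processed_plates is already populated, and the plate_<idx>.png output names are made up for illustration):

# Sketch: feed the deskewed crops collected by kalentev.py into ocr_image()
# from car-yolo-test.py. The module is loaded by path because of the hyphen
# in its file name; ocr_image expects an image path, so each crop is saved first.
import importlib.util

import cv2

spec = importlib.util.spec_from_file_location("car_yolo_test", "car-yolo-test.py")
car_yolo_test = importlib.util.module_from_spec(spec)
spec.loader.exec_module(car_yolo_test)

for idx, plate_img in enumerate(processed_plates):
    out_path = f"plate_{idx}.png"  # hypothetical output name, not part of the patch
    cv2.imwrite(out_path, plate_img)
    print(out_path, car_yolo_test.ocr_image(out_path))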