Spaces: Runtime error
Damanger committed · Commit ec11250 · Parent(s): c9e715d
fixing app

app.py CHANGED
@@ -49,23 +49,55 @@ reader = easyocr.Reader(
 
 ALLOW = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
 
-def …
-…
-…
-…
-…
+def clamp(v, lo, hi):
+    return max(lo, min(hi, v))
+
+def expand_box(xyxy, w, h, pad_ratio=0.10):
+    x1, y1, x2, y2 = [int(v) for v in xyxy]
+    bw, bh = x2 - x1, y2 - y1
+    px, py = int(bw * pad_ratio), int(bh * pad_ratio)
+    nx1 = clamp(x1 - px, 0, w - 1)
+    ny1 = clamp(y1 - py, 0, h - 1)
+    nx2 = clamp(x2 + px, 0, w - 1)
+    ny2 = clamp(y2 + py, 0, h - 1)
+    return nx1, ny1, nx2, ny2
+
+def ensure_min_size(img_bgr, target_long=320):
+    h, w = img_bgr.shape[:2]
+    m = max(h, w)
+    if m < target_long:
+        scale = target_long / float(m)
+        nh, nw = int(round(h * scale)), int(round(w * scale))
+        img_bgr = cv2.resize(img_bgr, (nw, nh), interpolation=cv2.INTER_CUBIC)
+    return img_bgr
+
+def preproc_adaptive(plate_bgr):
+    img = ensure_min_size(plate_bgr)  # ensure a minimum size
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     gray = cv2.bilateralFilter(gray, 7, 50, 50)
-    th = cv2.adaptiveThreshold(
-…
+    th = cv2.adaptiveThreshold(
+        gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 5
+    )
+    # optional: close thin gaps
+    k = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
+    th = cv2.morphologyEx(th, cv2.MORPH_CLOSE, k, iterations=1)
     return th
 
-def …
-    img = …
-…
+def preproc_clahe_otsu(plate_bgr):
+    img = ensure_min_size(plate_bgr)
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
+    eq = clahe.apply(gray)
+    _, th = cv2.threshold(eq, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+    k = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
+    th = cv2.morphologyEx(th, cv2.MORPH_CLOSE, k, iterations=1)
+    return th
+
+def read_easy(img, allow=ALLOW):
+    out = reader.readtext(img, detail=1, allowlist=allow)
     cands = []
     for _, text, score in out:
-        t = "".join(…
+        t = "".join(c for c in (text or "").upper() if c in allow)
         if len(t) >= 4:
             cands.append((t, float(score)))
     if not cands:
@@ -73,6 +105,25 @@ def ocr_plate(plate_bgr):
     cands.sort(key=lambda x: (x[1], len(x[0])), reverse=True)
     return cands[0]
 
+def preprocess_for_ocr(plate_bgr):
+    img = plate_bgr.copy()
+    h, w = img.shape[:2]
+    if max(h, w) < 160:
+        img = cv2.resize(img, (w*2, h*2), interpolation=cv2.INTER_CUBIC)
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    gray = cv2.bilateralFilter(gray, 7, 50, 50)
+    th = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                               cv2.THRESH_BINARY, 31, 5)
+    return th
+
+def ocr_plate(plate_bgr):
+    # 1) adaptive threshold
+    t, s = read_easy(preproc_adaptive(plate_bgr))
+    if t:
+        return t, s
+    # 2) CLAHE + Otsu (fallback)
+    return read_easy(preproc_clahe_otsu(plate_bgr))
+
 def draw_box_text(img, xyxy, text, color=(0, 255, 0)):
     x1, y1, x2, y2 = [int(v) for v in xyxy]
     cv2.rectangle(img, (x1,y1), (x2,y2), color, 2)
@@ -83,9 +134,7 @@ def draw_box_text(img, xyxy, text, color=(0, 255, 0)):
 
 def detect_plates_bgr(bgr, conf=0.25, iou=0.45):
     # imgsz 512 is usually ~2× faster than 640 on CPU
-    res = yolo.predict(
-        bgr, conf=conf, iou=iou, imgsz=512, max_det=2, verbose=False
-    )[0]
+    res = yolo.predict(bgr, conf=conf, iou=iou, imgsz=640, max_det=2, verbose=False)[0]
     boxes = res.boxes.xyxy.cpu().numpy() if res.boxes is not None else np.empty((0,4))
     confs = res.boxes.conf.cpu().numpy() if res.boxes is not None else np.empty((0,))
     return boxes, confs
@@ -96,21 +145,20 @@ def run_on_image_bgr(bgr, conf=0.25, iou=0.45, with_ocr=True, annotate=True, max
     t0 = time.time()
     boxes, confs = detect_plates_bgr(bgr, conf, iou)
 
-    # Sort by confidence and keep the top-K
     idx = np.argsort(-confs)[:max_plates]
     boxes = boxes[idx]
     confs = confs[idx]
 
     detections = []
     for xyxy, c in zip(boxes, confs):
-        x1, y1, x2, y2 = …
-        crop = bgr[…
+        x1, y1, x2, y2 = expand_box(xyxy, w, h, pad_ratio=0.12)  # 👈 12% border padding
+        crop = bgr[y1:y2, x1:x2]
         txt, s = ("", 0.0)
         if with_ocr and crop.size:
             txt, s = ocr_plate(crop)
         if annotate:
             label = f"{txt or 'plate'} {c:.2f}"
-            draw_box_text(vis, …
+            draw_box_text(vis, (x1, y1, x2, y2), label)
         detections.append({
             "box_xyxy": [x1, y1, x2, y2],
             "det_conf": float(c),
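After this change, each detected plate is cropped with a padded, clamped box before OCR, and OCR first tries the adaptive-threshold pipeline, falling back to CLAHE + Otsu. A minimal sketch of how the new helpers compose, assuming it runs inside app.py where `yolo` and `reader` are already loaded; the image path is hypothetical:

# Sketch only (not part of the commit): exercise the detect -> expand_box -> ocr_plate path
# on a single frame. "example.jpg" is a hypothetical test image.
import cv2

bgr = cv2.imread("example.jpg")
h, w = bgr.shape[:2]
boxes, confs = detect_plates_bgr(bgr, conf=0.25, iou=0.45)
for xyxy, c in zip(boxes, confs):
    x1, y1, x2, y2 = expand_box(xyxy, w, h, pad_ratio=0.12)  # padded, clamped to image bounds
    crop = bgr[y1:y2, x1:x2]
    if crop.size:
        text, score = ocr_plate(crop)  # adaptive threshold first, CLAHE + Otsu as fallback
        print(text, score, f"det_conf={c:.2f}")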