Update app.py

app.py CHANGED
@@ -1,22 +1,26 @@
 import gradio as gr
 import torch
 import json
-import yolov5
+#import yolov5
+from ultralytics import YOLO
 
 # Images
-torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
-torch.hub.download_url_to_file('https://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/image3.jpg', 'image3.jpg')
-torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt','yolov5s.pt')
+#torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
+#torch.hub.download_url_to_file('https://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/image3.jpg', 'image3.jpg')
+#torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt','yolov5s.pt')
 
-model_path = "yolov5x.pt" #"yolov5x.pt" "yolov5m.pt", "yolov5l.pt", "yolov5x.pt",
-image_size = 640,
-conf_threshold = 0.25,
-iou_threshold = 0.
-model = yolov5.load(model_path, device="cpu")
+#model_path = "yolov5x.pt" #"yolov5x.pt" "yolov5m.pt", "yolov5l.pt", "yolov5x.pt",
+image_size = 640 #640 #320,
+conf_threshold = 0.25 #0.30,
+iou_threshold = 0.15
+#model = yolov5.load(model_path, device="cpu")
+model = YOLO("yolo11x.pt") #YOLO("yolo11x.pt")
 
 def yolov5_inference(
-    image: gr.inputs.Image = None
-
+    image: gr.inputs.Image = None #,
+#    image_size: gr.inputs.Slider = 640,
+#    conf_threshold: gr.inputs.Slider = 0.25,
+#    iou_threshold: gr.inputs.Slider = 0.45
 ):
     """
     YOLOv5 inference function
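The substantive change in this hunk is the move off the archived `yolov5` pip package onto the `ultralytics` package, loading `yolo11x.pt` weights in place of the old YOLOv5 checkpoints. A minimal sketch of the new loading path, assuming only that `ultralytics` is installed (the sample URL is the one used in the Ultralytics docs, not part of this commit):

# Minimal sketch, assuming the `ultralytics` package is installed;
# YOLO() downloads the named weights file automatically if it is absent.
from ultralytics import YOLO

model = YOLO("yolo11x.pt")
# The model call accepts a local path, a PIL.Image, a numpy array, or a URL
results = model("https://ultralytics.com/images/bus.jpg")
print(f"{len(results)} image(s) processed")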
@@ -29,14 +33,15 @@ def yolov5_inference(
     Returns:
         Rendered image
     """
-
-
+    model.conf = conf_threshold
+    model.iou = iou_threshold
+    results = model([image]) #, size=image_size)
     tensor = {
         "tensorflow": [
         ]
     }
 
-    if results.pred is not None:
+    ''' if results.pred is not None:
         for i, element in enumerate(results.pred[0]):
             object = {}
             #print (element[0])
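The `model.conf` / `model.iou` assignments carry over the configuration idiom of the old `yolov5` package; with `ultralytics` the documented route is to pass thresholds to the call itself, and the attribute assignments appear to have no effect there. A hedged alternative for the `results = model([image])` line, reusing the commit's own variable names:

# Sketch: pass thresholds per call; conf, iou and imgsz are documented
# predict() arguments in `ultralytics`, unlike the attribute assignments above.
results = model([image], conf=conf_threshold, iou=iou_threshold, imgsz=image_size)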
@@ -49,16 +54,44 @@ def yolov5_inference(
             object["w"] = element[2].item()
             object["h"] = element[3].item()
             tensor["tensorflow"].append(object)
-
-
+    '''
+    for result in results:
+
+        # The bounding boxes are accessible via result.boxes
+        boxes = result.boxes
+
+        # Each individual detection in boxes has attributes such as xyxy, conf, cls
+        for box in boxes:
+            object = {}
+            # Box coordinates (xyxy format)
+            x1, y1, x2, y2 = box.xyxy[0].cpu().numpy().astype(int)
+
+            # Detection confidence
+            numpy_array = box.conf[0].cpu().numpy()
+            object["score"] = numpy_array.item() if numpy_array.size > 0 else 0.0
+
+            # Class ID
+            class_id = int(box.cls[0].cpu().numpy())
+            object["classe"] = class_id
+            # Class name
+            # result.names is a dictionary that maps class IDs to names
+            object["nome"] = result.names[class_id]
+
+            object["x"] = int(x1)
+            object["y"] = int(y1)
+            object["w"] = int(x2)
+            object["h"] = int(y2)
+            tensor["tensorflow"].append(object)
 
     text = json.dumps(tensor)
-    #print (text)
     return text #results.render()[0]
 
 
 inputs = [
     gr.inputs.Image(type="pil", label="Input Image"),
+    # gr.inputs.Slider(minimum=0, maximum=1280, default=640, step=8, label="Image Size"),
+    # gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.01, label="conf_threshold"),
+    # gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.01, label="iou_threshold"),
 ]
 
 outputs = gr.outputs.Image(type="filepath", label="Output Image")
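Worth flagging in the new loop: the JSON keys "w" and "h" receive the raw x2/y2 corner from the xyxy box (mirroring the old code's `element[2]`/`element[3]`), so consumers of this payload get a bottom-right corner rather than a width and height. A hypothetical variant of the four assignments that stores an actual width/height instead:

# Hypothetical variant: store a real width/height rather than the x2/y2
# corner; x1, y1, x2, y2 come from box.xyxy[0] exactly as in the commit.
object["x"] = int(x1)
object["y"] = int(y1)
object["w"] = int(x2 - x1)  # width  = right edge minus left edge
object["h"] = int(y2 - y1)  # height = bottom edge minus top edge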
@@ -70,10 +103,12 @@ demo_app = gr.Interface(
     fn=yolov5_inference,
     inputs=inputs,
     outputs=["text"],
+    #outputs=outputs,
     title=title,
-    examples=examples,
+    #examples=examples,
     #cache_examples=True,
     #live=True,
     #theme='huggingface',
 )
-demo_app.launch(debug=True, server_port=8087, enable_queue=True)
+demo_app.launch(enable_queue=True, share=True)
+#demo_app.launch(debug=True, server_port=8087, enable_queue=True)
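Note that `gr.inputs.*`, `gr.outputs.*`, and `launch(enable_queue=...)` are Gradio 2/3-era APIs that were removed in later releases, so this app implicitly pins an older Gradio. A rough sketch of equivalent wiring on Gradio 4.x, assuming the same `yolov5_inference` function and the `title` variable defined in the part of app.py not shown in this diff:

# Rough Gradio 4.x equivalent (assumption: the Space upgrades Gradio;
# gr.inputs/gr.outputs and enable_queue= no longer exist in 4.x).
import gradio as gr

demo_app = gr.Interface(
    fn=yolov5_inference,
    inputs=gr.Image(type="pil", label="Input Image"),
    outputs="text",
    title=title,  # defined in the elided part of app.py
)
demo_app.queue().launch(share=True)  # queue() replaces enable_queue=True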