asad231 committed on
Commit
c2db124
·
verified ·
1 Parent(s): ba491b6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -132
app.py CHANGED
@@ -1,4 +1,4 @@
1
- import streamlit as st
2
  from PIL import Image
3
  import torch
4
  from transformers import pipeline
@@ -7,140 +7,106 @@ import matplotlib.pyplot as plt
7
  from reportlab.pdfgen import canvas
8
  import io
9
  import folium
10
- from streamlit_folium import st_folium
11
- import cv2
12
  import tempfile
 
13
  import numpy as np
14
 
15
- st.set_page_config(page_title="InfraGuard Ultimate – Smart City AI", layout="wide")
16
- st.title("InfraGuard Ultimate – Live Smart City Incident Detection")
17
-
18
  # ---------------- Load Models ----------------
19
- @st.cache_resource
20
- def load_detection_model():
21
- model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
22
- return model
23
-
24
- @st.cache_resource
25
- def load_summarizer():
26
- summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
27
- return summarizer
28
-
29
- detection_model = load_detection_model()
30
- summarizer = load_summarizer()
31
-
32
- # ---------------- Sidebar / Tabs ----------------
33
- tab1, tab2, tab3 = st.tabs(["Live Detection", "History", "About"])
34
 
35
  # ---------- Incident History ----------
36
- if "history" not in st.session_state:
37
- st.session_state["history"] = pd.DataFrame(columns=["Image", "Objects", "Severity", "Summary", "Latitude", "Longitude"])
38
-
39
- # ---------------- Tab 1: Live Detection ----------------
40
- with tab1:
41
- st.markdown("### Live Webcam / Upload Video or Image")
42
- video_file = st.file_uploader("Upload Video", type=["mp4"])
43
- uploaded_file = st.file_uploader("Upload Image", type=["jpg", "png", "jpeg"])
44
- camera_input = st.camera_input("Or capture live image")
45
-
46
- # Input GPS coordinates (simulate or from device)
47
- latitude = st.number_input("Latitude", value=24.8607) # default Karachi
48
- longitude = st.number_input("Longitude", value=67.0011)
49
-
50
- # Process Image
51
- input_image = uploaded_file or camera_input
52
- if input_image:
53
- image = Image.open(input_image)
54
- st.image(image, caption="Input Image", use_column_width=True)
55
-
56
- results = detection_model(image)
57
- st.image(results.render()[0], caption="Detected Objects", use_column_width=True)
58
-
59
- detected_objects = results.pandas().xyxy[0]['name'].tolist()
60
- st.markdown(f"**Detected Objects:** {', '.join(detected_objects)}")
61
-
62
- severity = "Low"
63
- if "fire" in detected_objects or "person" in detected_objects and len(detected_objects) > 15:
64
- severity = "High"
65
- elif "car" in detected_objects or "truck" in detected_objects:
66
- severity = "Medium"
67
- st.markdown(f"**Severity:** {severity}")
68
-
69
- incident_text = f"Detected objects: {', '.join(detected_objects)}. Severity: {severity}."
70
- summary = summarizer(incident_text, max_length=60, min_length=25, do_sample=False)
71
- ai_summary = summary[0]['summary_text']
72
- st.markdown("### AI-Generated Incident Summary")
73
- st.write(ai_summary)
74
-
75
- st.session_state["history"] = pd.concat([st.session_state["history"],
76
- pd.DataFrame([[input_image, ', '.join(detected_objects), severity, ai_summary, latitude, longitude]],
77
- columns=["Image", "Objects", "Severity", "Summary", "Latitude", "Longitude"])],
78
- ignore_index=True)
79
-
80
- buffer = io.BytesIO()
81
- c = canvas.Canvas(buffer)
82
- c.drawString(100, 800, "InfraGuard Incident Report")
83
- c.drawString(100, 780, f"Detected Objects: {', '.join(detected_objects)}")
84
- c.drawString(100, 760, f"Severity: {severity}")
85
- c.drawString(100, 740, f"AI Summary: {ai_summary}")
86
- c.drawString(100, 720, f"Location: {latitude}, {longitude}")
87
- c.save()
88
- buffer.seek(0)
89
- st.download_button("Download PDF Report", buffer, file_name="incident_report.pdf", mime="application/pdf")
90
-
91
- # Process Video / Live Streaming
92
- if video_file:
93
- tfile = tempfile.NamedTemporaryFile(delete=False)
94
- tfile.write(video_file.read())
95
- cap = cv2.VideoCapture(tfile.name)
96
-
97
- stframe = st.empty()
98
- while cap.isOpened():
99
- ret, frame = cap.read()
100
- if not ret:
101
- break
102
- # Convert to RGB
103
- frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
104
- # Convert to PIL for YOLO
105
- pil_img = Image.fromarray(frame_rgb)
106
- results = detection_model(pil_img)
107
- frame_result = np.squeeze(results.render())
108
- stframe.image(frame_result, channels="RGB")
109
- cap.release()
110
-
111
- # ---------------- Tab 2: History ----------------
112
- with tab2:
113
- st.markdown("### Incident History")
114
- st.dataframe(st.session_state["history"])
115
-
116
- if not st.session_state["history"].empty:
117
- all_objects = ','.join(st.session_state["history"]["Objects"].tolist()).split(',')
118
- object_counts = pd.Series(all_objects).value_counts()
119
- st.markdown("### Most Frequent Detected Objects")
120
- fig, ax = plt.subplots()
121
- object_counts.plot(kind='bar', ax=ax)
122
- st.pyplot(fig)
123
-
124
- st.markdown("### Incident Locations Map")
125
- m = folium.Map(location=[24.8607, 67.0011], zoom_start=12)
126
- for idx, row in st.session_state["history"].iterrows():
127
- folium.Marker(
128
- location=[row["Latitude"], row["Longitude"]],
129
- popup=f"{row['Objects']} | Severity: {row['Severity']}",
130
- icon=folium.Icon(color="red" if row["Severity"]=="High" else "orange" if row["Severity"]=="Medium" else "green")
131
- ).add_to(m)
132
- st_data = st_folium(m, width=700, height=500)
133
-
134
- # ---------------- Tab 3: About ----------------
135
- with tab3:
136
- st.markdown("""
137
- **InfraGuard Ultimate – Smart City AI System**
138
- - Real-time live webcam / video detection
139
- - Object detection using YOLOv5
140
- - AI-generated incident summary using BART
141
- - Severity scoring & real-time alerts
142
- - Incident history with analytics
143
- - Interactive map showing all incidents
144
- - Downloadable PDF incident reports
145
- - Fully deployable on Hugging Face Spaces
146
- """)
 
1
+ import gradio as gr
2
  from PIL import Image
3
  import torch
4
  from transformers import pipeline
 
7
  from reportlab.pdfgen import canvas
8
  import io
9
  import folium
 
 
10
  import tempfile
11
+ import cv2
12
  import numpy as np
13
 
 
 
 
14
# ---------------- Load Models ----------------
# NOTE(review): both models are instantiated at import time; the first run
# downloads weights over the network, so app start-up can be slow.
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
detection_model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
# ---------- Incident History ----------
# Module-level, in-memory incident log; process_image() appends one row per
# detection.  Lost on restart — there is no persistence layer.
history_df = pd.DataFrame(
    columns=["Image", "Objects", "Severity", "Summary", "Latitude", "Longitude"]
)
20
+
21
+ # ---------------- Helper Functions ----------------
22
def process_image(img, latitude=24.8607, longitude=67.0011):
    """Run incident detection on a single image and log the result.

    Parameters
    ----------
    img : PIL.Image.Image or numpy.ndarray
        Input picture (Gradio may deliver either type).
    latitude, longitude : float
        Incident coordinates; defaults are Karachi city centre.

    Returns
    -------
    tuple
        ``(annotated_image, ai_summary, severity, pdf_path)`` where
        ``pdf_path`` is the path of a generated PDF report — ``gr.File``
        requires a file *path*, not an in-memory buffer.

    Side effects: appends one row to the module-level ``history_df`` and
    writes a temporary PDF file to disk.
    """
    global history_df

    # Normalise to PIL so YOLOv5 gets a consistent input type.
    image = Image.fromarray(img) if isinstance(img, np.ndarray) else img
    results = detection_model(image)
    detected_objects = results.pandas().xyxy[0]['name'].tolist()

    # Severity scoring.  Parentheses make the evaluated precedence explicit:
    # fire alone, or a person among a crowd of >15 detections, is High.
    severity = "Low"
    if ("fire" in detected_objects) or (
            "person" in detected_objects and len(detected_objects) > 15):
        severity = "High"
    elif "car" in detected_objects or "truck" in detected_objects:
        severity = "Medium"

    # AI summary — render an empty detection list as "none" so the prompt
    # and the report still read sensibly.
    object_list = ', '.join(detected_objects) if detected_objects else "none"
    incident_text = f"Detected objects: {object_list}. Severity: {severity}."
    summary = summarizer(incident_text, max_length=60, min_length=25, do_sample=False)
    ai_summary = summary[0]['summary_text']

    # Append this incident to the in-memory history table.
    history_df = pd.concat(
        [history_df,
         pd.DataFrame([[image, object_list, severity, ai_summary, latitude, longitude]],
                      columns=["Image", "Objects", "Severity", "Summary",
                               "Latitude", "Longitude"])],
        ignore_index=True)

    # Annotated frame with bounding boxes drawn in.
    output_image = np.squeeze(results.render())

    # Build the PDF report.  gr.File cannot serve a BytesIO object, so the
    # report is written to a temporary file and its path returned instead.
    buffer = io.BytesIO()
    c = canvas.Canvas(buffer)
    c.drawString(100, 800, "InfraGuard Incident Report")
    c.drawString(100, 780, f"Detected Objects: {object_list}")
    c.drawString(100, 760, f"Severity: {severity}")
    c.drawString(100, 740, f"AI Summary: {ai_summary}")
    c.drawString(100, 720, f"Location: {latitude}, {longitude}")
    c.save()
    buffer.seek(0)
    pdf_file = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
    pdf_file.write(buffer.getvalue())
    pdf_file.close()

    return output_image, ai_summary, severity, pdf_file.name
61
+
62
def show_history():
    """Return the accumulated incident history table.

    Read-only access to the module-level DataFrame, so no ``global``
    declaration is required.
    """
    return history_df
65
+
66
def show_map():
    """Render every recorded incident as a marker on a folium map.

    Returns
    -------
    str
        The map as an HTML fragment, ready for a ``gr.HTML`` component.
        (The previous version returned a temp-file *path*, which gr.HTML
        would display as literal text, and also leaked the temp file.)
    """
    m = folium.Map(location=[24.8607, 67.0011], zoom_start=12)
    # Marker colour reflects severity; anything unrecognised falls back to green.
    colors = {"High": "red", "Medium": "orange"}
    for _, row in history_df.iterrows():
        folium.Marker(
            location=[row["Latitude"], row["Longitude"]],
            popup=f"{row['Objects']} | Severity: {row['Severity']}",
            icon=folium.Icon(color=colors.get(row["Severity"], "green")),
        ).add_to(m)
    # folium maps render themselves as a self-contained HTML fragment.
    return m._repr_html_()
78
+
79
+ # ---------------- Gradio Interface ----------------
80
# ---------------- Gradio Interface ----------------
with gr.Blocks() as demo:
    gr.Markdown("## InfraGuard Ultimate – Smart City AI (Gradio Version)")

    with gr.Tab("Live Detection"):
        with gr.Row():
            image_in = gr.Image(type="pil", label="Upload Image / Webcam")
            lat_in = gr.Number(value=24.8607, label="Latitude")
            lon_in = gr.Number(value=67.0011, label="Longitude")
        with gr.Row():
            run_btn = gr.Button("Detect Incident")
        with gr.Row():
            annotated_out = gr.Image(label="Detection Output")
        with gr.Row():
            summary_box = gr.Textbox(label="AI Incident Summary")
            severity_box = gr.Textbox(label="Severity")
        with gr.Row():
            report_file = gr.File(label="Download PDF Report")

        # Wire the button to the detection pipeline.
        run_btn.click(
            process_image,
            inputs=[image_in, lat_in, lon_in],
            outputs=[annotated_out, summary_box, severity_box, report_file],
        )

    with gr.Tab("History & Analytics"):
        history_btn = gr.Button("Show Incident History")
        history_table = gr.Dataframe()
        history_btn.click(show_history, outputs=[history_table])

    with gr.Tab("Map"):
        render_map_btn = gr.Button("Show Incident Map")
        map_view = gr.HTML()
        render_map_btn.click(show_map, outputs=[map_view])

demo.launch()