import cv2
import base64
import numpy as np
from flask import Flask, render_template, Response, request, jsonify
from PIL import Image
from time import time as unix_time
import os
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
import time
from mediapipe.framework.formats import landmark_pb2
from mediapipe import solutions
from tflite_support.task import vision as vision2
from tflite_support.task import core
from tflite_support.task import processor
from numpy.linalg import norm
from collections import defaultdict
import re
# ======= Global Variables ========
char_list = []
letter_result = 0
old_letter_result = 0
result_to_show = 0
cresult_to_show = 0
text_x = 0
text_y = 0
cwhich = 0
lastwidth = 400
letterscore = 0
frame_time = 0
same_letter_time = 0
no_hand_flag = 1
# ====== N-gram Word Buffer + Itsekiri Translation ======
letter_buffer = []
MAX_BUFFER_SIZE = 20
letter_list = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ#")
word_end_chars = set(['#', '>'])
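# Recognised letters accumulate in letter_buffer; '>' and '#' act as word delimiters
# when the buffer is decoded into words (see decode_letters_to_words below).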
# Placeholder English-to-Itsekiri dictionary (demo entries only)
itsekiri_dict = {
    "HELLO": "MIGWO",
    "HOW": "KÉDÙ",
    "ARE": "WÈRÈ",
    "YOU": "ÉRÉ",
    "I": "MÉ",
    "LOVE": "FÈ",
    "SCHOOL": "ÍGÚE",
    "YES": "BÈÈNÈ",
    "NO": "MÀ",
    "MY": "MÉ",
    "NAME": "ÉRÉMÉ",
    "IS": "NÌ",
    "WHAT": "KÍN",
    "GOOD": "DÉ"
}
def decode_letters_to_words(buffer):
    # Keep only letters and word separators, split on '>' or '#', and drop empty fragments.
    cleaned = ''.join(ch for ch in buffer if ch in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ#>')
    words = re.split(r'[>#]', cleaned)
    return ' '.join(w for w in words if w)
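# Illustrative usage (assumed example, not exercised elsewhere in this file):
# decode_letters_to_words(['H', 'I', '>', 'Y', 'O', 'U', '#']) returns 'HI YOU'.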
def translate_to_itsekiri(sentence):
    # Translate word by word, falling back to the original word when no dictionary entry exists.
    words = sentence.upper().split()
    translated = [itsekiri_dict.get(word, word) for word in words]
    return ' '.join(translated)
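# Illustrative usage with the placeholder dictionary above (assumed example):
# translate_to_itsekiri("hello how are you") returns 'MIGWO KÉDÙ WÈRÈ ÉRÉ'.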
def brightness(img):
    # Mean pixel magnitude: per-pixel Euclidean norm for colour images (divided by
    # sqrt(3) to stay in the 0-255 range), plain mean for grayscale images.
    return np.average(norm(img, axis=2)) / np.sqrt(3) if len(img.shape) == 3 else np.average(img)
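# Illustrative check (assumed example): brightness(np.full((10, 10, 3), 128, dtype=np.uint8))
# evaluates to roughly 128.0.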
def draw_landmarks_on_image(rgb_image, detection_result):
    hand_landmarks_list = detection_result.hand_landmarks
    handedness_list = detection_result.handedness
    annotated_image = np.copy(rgb_image)
    crop = []
    image_height, image_width, _ = annotated_image.shape
    for idx in range(len(hand_landmarks_list)):
        hand_landmarks = hand_landmarks_list[idx]
        handedness = handedness_list[idx]
        # Draw the landmark skeleton onto the frame.
        hand_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
        hand_landmarks_proto.landmark.extend([
            landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z)
            for landmark in hand_landmarks
        ])
        solutions.drawing_utils.draw_landmarks(
            annotated_image,
            hand_landmarks_proto,
            solutions.hands.HAND_CONNECTIONS,
            solutions.drawing_styles.get_default_hand_landmarks_style(),
            solutions.drawing_styles.get_default_hand_connections_style())
        # Compute a padded square bounding box around the hand.
        x_coordinates = [landmark.x for landmark in hand_landmarks]
        y_coordinates = [landmark.y for landmark in hand_landmarks]
        min_x = int(min(x_coordinates) * image_width)
        min_y = int(min(y_coordinates) * image_height)
        max_x = int(max(x_coordinates) * image_width)
        max_y = int(max(y_coordinates) * image_height)
        sect_diameter = max(max_y - min_y, max_x - min_x) + 50
        sect_radius = sect_diameter // 2
        center_x = (min_x + max_x) // 2
        center_y = (min_y + max_y) // 2
        crop_top = max(center_y - sect_radius, 0)
        crop_bottom = min(center_y + sect_radius, image_height)
        crop_left = max(center_x - sect_radius, 0)
        crop_right = min(center_x + sect_radius, image_width)
        annotated_image = cv2.rectangle(annotated_image, (crop_left, crop_top), (crop_right, crop_bottom), (255, 0, 0), 6)
        global text_x, text_y
        text_x, text_y = crop_left, crop_top
        # Crop the hand region and scale it to a fixed width for the classifier.
        crop = annotated_image[crop_top:crop_bottom, crop_left:crop_right]
        h, w = crop.shape[:2]
        crop = cv2.resize(crop, (150, int(150 * h / w)))
    return [annotated_image, crop]
# ====== MediaPipe Setup ======
RESULT = None
BaseOptions = mp.tasks.BaseOptions
HandLandmarker = mp.tasks.vision.HandLandmarker
HandLandmarkerOptions = mp.tasks.vision.HandLandmarkerOptions
HandLandmarkerResult = mp.tasks.vision.HandLandmarkerResult
VisionRunningMode = mp.tasks.vision.RunningMode
# Two TFLite sign classifiers: "./better_exported/model.tflite" is the newer model
# (labelled "New" in the handler below) and "./exported/model.tflite" is the earlier
# one (labelled "Old"); their top-1 predictions are compared on every frame.
cbase_options = core.BaseOptions(file_name="./better_exported/model.tflite")
ccbase_options = core.BaseOptions(file_name="./exported/model.tflite")
cclassification_options = processor.ClassificationOptions(max_results=1)
coptions = vision2.ImageClassifierOptions(base_options=cbase_options, classification_options=cclassification_options)
ccoptions = vision2.ImageClassifierOptions(base_options=ccbase_options, classification_options=cclassification_options)
cclassifier = vision2.ImageClassifier.create_from_options(coptions)
ccclassifier = vision2.ImageClassifier.create_from_options(ccoptions)
def print_result(result: HandLandmarkerResult, output_image: mp.Image, timestamp_ms: int):
    # LIVE_STREAM callback: stash the latest detection for the Flask handler to read.
    global RESULT
    RESULT = result
options = HandLandmarkerOptions(
    base_options=BaseOptions(model_asset_path='hand_landmarker.task'),
    running_mode=VisionRunningMode.LIVE_STREAM,
    result_callback=print_result
)
detector = vision.HandLandmarker.create_from_options(options)
# ====== Flask App ======
app = Flask(__name__)
@app.route('/')
def index():
    return render_template('index.html')
@app.route('/api/data', methods=['POST'])
def handle_video_frame():
    frame = request.json.get('key')
    response_frame = data_uri_to_image(frame)
    decimg = response_frame
    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=decimg)
    detector.detect_async(mp_image, mp.Timestamp.from_seconds(time.time()).value)
    global no_hand_flag, frame_time, same_letter_time, letter_result, old_letter_result, char_list, letterscore
    try:
        result_images = draw_landmarks_on_image(mp_image.numpy_view(), RESULT)
        annotated_image = result_images[0]
        cropped_image = result_images[1]
        h, w = annotated_image.shape[0:2]
        neww = 500
        newh = int(neww * (h / w))
        final_image = cv2.resize(annotated_image, (neww, newh))
        if RESULT.handedness != []:
            no_hand_flag = 0
            if RESULT.handedness[0][0].display_name == 'Right':
                # Right hand: run both classifiers on the crop and keep the higher-scoring prediction.
                tf_image = vision2.TensorImage.create_from_array(cropped_image)
                classification_result = cclassifier.classify(tf_image)
                cclassification_result = ccclassifier.classify(tf_image)
                result_to_show = classification_result.classifications[0].categories[0].category_name
                cresult_to_show = cclassification_result.classifications[0].categories[0].category_name
                if cclassification_result.classifications[0].categories[0].score > classification_result.classifications[0].categories[0].score:
                    letter_result = cresult_to_show
                    cwhich = "Old"
                    # Override: trust the new model whenever it reports 'P'.
                    if result_to_show == "P" and cresult_to_show != "P":
                        cwhich = "New"
                        letter_result = result_to_show
                else:
                    letter_result = result_to_show
                    cwhich = "New"
                    # Override: trust the old model for 'M', 'R' and 'T'.
                    if cresult_to_show == "M" and result_to_show != "M":
                        cwhich = "Old"
                        letter_result = cresult_to_show
if result_to_show != "R" and cresult_to_show == "R":
cwhich = "Old"
letter_result = cresult_to_show
if result_to_show != "T" and cresult_to_show == "T":
cwhich = "Old"
letter_result = cresult_to_show
letterscore = cclassification_result.classifications[0].categories[0].score if cwhich == "Old" else classification_result.classifications[0].categories[0].score
            else:
                # Other hand: a 'B' sign produces the word separator '>', anything else maps to '_' and is ignored.
                tf_image = vision2.TensorImage.create_from_array(cropped_image)
                classification_result = cclassifier.classify(tf_image)
                result_to_show = classification_result.classifications[0].categories[0].category_name
                letter_result = '_' if result_to_show != "B" else '>'
            # Append to buffer if valid
            if letter_result in letter_list:
                letter_buffer.append(letter_result)