import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_id = "ranggafermata/Fermata-v1.2-light" # replace with your actual repo
# Load model and tokenizer (attn_implementation is a model argument, not a tokenizer one)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    attn_implementation="eager",
)
model.eval()
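# Optional: instruction-tuned Gemma checkpoints are trained on chat-formatted
# turns, so wrapping the raw prompt with the tokenizer's chat template usually
# improves responses. A minimal sketch, assuming the tokenizer ships a chat
# template; format_prompt is a hypothetical helper, not part of the original app.
def format_prompt(user_prompt):
    messages = [{"role": "user", "content": user_prompt}]
    return tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )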
# Generation function
def chat(prompt, max_new_tokens=256, temperature=0.8, top_p=0.95):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens so the echoed prompt is not returned
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
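# Optional streaming variant: a sketch using transformers' TextIteratorStreamer
# so partial text appears while the model is still generating. Gradio treats a
# generator function as a streaming endpoint; chat_stream is a hypothetical
# name, not part of the original app.
def chat_stream(prompt, max_new_tokens=256, temperature=0.8, top_p=0.95):
    from threading import Thread
    from transformers import TextIteratorStreamer
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    # generate() blocks, so it runs in a background thread while we consume chunks
    Thread(target=model.generate, kwargs=dict(
        **inputs,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        streamer=streamer,
    )).start()
    text = ""
    for chunk in streamer:
        text += chunk
        yield text  # Gradio re-renders the output box on each yield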
# Gradio interface
gr.Interface(
    fn=chat,
    inputs=[
        gr.Textbox(lines=4, label="Prompt"),
        gr.Slider(64, 1024, value=256, step=64, label="Max New Tokens"),
        gr.Slider(0.1, 1.5, value=0.8, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="Fermata Assistant (Gemma 3 - 1B - IT)",
    description="A smart assistant built on Gemma 3 1B IT with personality from the Fermata project.",
).launch()
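# Note: on Hugging Face Spaces the default launch() is enough; share=True is
# only needed to expose a temporary public URL when running locally.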