from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# Fine-tuned CodeT5+ 770M checkpoint for English-to-VHDL generation
model_id = "OSS-Forge/codet5p-770m-vhdl"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)


def generate_output(input_text):
    # Tokenize the English description and generate VHDL with beam search
    inputs = tokenizer.encode(input_text, return_tensors='pt')
    outputs = model.generate(
        inputs,
        max_length=256,
        num_beams=5,
        early_stopping=True,
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text


# Simple Gradio UI: English description in, generated VHDL out
iface = gr.Interface(
    fn=generate_output,
    inputs=gr.Textbox(lines=5, placeholder='Insert the English description here...'),
    outputs=gr.Textbox(),
    title='VHDL Code Generator (CodeT5+ 770M)',
    description='Generate VHDL code from an English description using a fine-tuned CodeT5+ model.',
)
iface.launch()
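
Once the Space is running, the same interface can also be queried from a script instead of the web UI. Below is a minimal sketch using the gradio_client package; the Space id and the example prompt are placeholders, and the '/predict' endpoint name assumes the default that a single gr.Interface exposes.

from gradio_client import Client

# Hypothetical Space id -- replace with the actual "owner/space-name"
client = Client("your-username/your-vhdl-space")

# Example English description (illustrative only)
result = client.predict(
    "Write a VHDL entity for a 4-bit up counter with a synchronous reset.",
    api_name="/predict",  # default endpoint name for a single gr.Interface
)
print(result)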