# Hugging Face Spaces demo (page scrape header: Space status "Sleeping").
# Image classification -> promotional tweet generation.
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Image-classification pipeline: ViT fine-tuned on ImageNet-1k.
classifier = pipeline("image-classification", model="google/vit-base-patch16-224")

# Causal language model used to turn the predicted label into tweet text.
model_name = "EleutherAI/gpt-neo-2.7B"  # Using GPT-Neo for demonstration
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
def generate_tweet(label):
    """Generate a tweet about *label* using the causal language model.

    Args:
        label: Class name produced by the image classifier.

    Returns:
        The generated tweet text. Only the newly generated tokens are
        decoded, so the prompt is not echoed back to the caller.
    """
    prompt = f"write a tweet about {label}"
    inputs = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=True)
    outputs = model.generate(
        inputs,
        max_length=280,  # NOTE: this limits *tokens*, not the 280-char tweet limit
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id,  # GPT-Neo defines no pad token
    )
    # Skip the prompt tokens so the returned text is just the generation.
    generated_tokens = outputs[0][inputs.shape[-1]:]
    tweet = tokenizer.decode(generated_tokens, skip_special_tokens=True)
    return tweet
def predict(image):
    """Classify *image* and return a generated tweet about the top label.

    Args:
        image: PIL image supplied by the Gradio input component.

    Returns:
        Tweet text generated from the highest-confidence class label.
    """
    predictions = classifier(image)
    # Pick the highest-confidence prediction directly (O(n), no full sort).
    top_prediction = max(predictions, key=lambda p: p["score"])
    # ImageNet labels look like "tabby, tabby cat" -> keep the first synonym.
    label = top_prediction["label"].split(",")[0]
    return generate_tweet(label)
# --- Gradio UI wiring: image in, generated tweet out. ---
title = "Image Classifier to Generative Tweet"
description = (
    "This demo recognizes and classifies images using the "
    "'google/vit-base-patch16-224' model and generates a tweet about the top "
    "prediction using the GPT-Neo model for generating creative and engaging "
    "content."
)
input_component = gr.Image(type="pil", label="Upload an image here")
output_component = gr.Textbox(label="Generated Promotional Tweet")
gr.Interface(
    fn=predict,
    inputs=input_component,
    outputs=output_component,
    title=title,
    description=description,
).launch()