import gradio as gr
import torch
import numpy as np
import sentencepiece
import spaces
import random
from diffusers import DiffusionPipeline
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast

# gr.load("models/black-forest-labs/FLUX.1-dev").launch()
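
# Load the merged FLUX.1 pipeline in bfloat16 and move it to the GPU when one is available.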
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained("sayakpaul/FLUX.1-merged", torch_dtype=dtype).to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
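
# The `spaces` import suggests this app targets Hugging Face ZeroGPU hardware; if so, the
# usual pattern is to decorate the inference function so it gets scheduled onto a GPU.
# This decorator is an assumption based on that import; it is a no-op outside ZeroGPU Spaces.
@spaces.GPU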
def inferee(prompt, seed=42, randomize_seed=True, width=400, height=400, guidance_scale=3.5, num_inference_steps=8):
    # Pick a fresh seed unless the caller asked for a reproducible run.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    # Run the text-to-image pipeline and return the single generated image.
    image = pipe(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=guidance_scale,
    ).images[0]
    return image
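
# Gradio input/output components, created up front and handed to gr.Interface below.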
prompt = gr.Text(
    label="Prompt",
    show_label=False,
    max_lines=1,
    placeholder="Enter your prompt",
    container=False,
)
run_button = gr.Button("Run", scale=0)  # unused: gr.Interface supplies its own submit button
result = gr.Image(label="Result", show_label=False)
interface = gr.Interface(
    fn=inferee,
    inputs=[prompt],
    outputs=[result],
)

interface.launch()