---
base_model:
- Tongyi-MAI/Z-Image-Turbo
---

# Usage

```python
from diffusers import ZImagePipeline, ZImageTransformer2DModel, GGUFQuantizationConfig
import torch

prompt = "Young Chinese woman in red Hanfu, intricate embroidery. Impeccable makeup, red floral forehead pattern. Elaborate high bun, golden phoenix headdress, red flowers, beads. Holds round folding fan with lady, trees, bird. Neon lightning-bolt lamp (⚡️), bright yellow glow, above extended left palm. Soft-lit outdoor night background, silhouetted tiered pagoda (西安大雁塔), blurred colorful distant lights."
height = 1024
width = 1024
seed = 42

# Path to the GGUF file; you can use snapshot_download from
# huggingface_hub to fetch it (see the sketch below).
local_path = "path/to/model.gguf"

transformer = ZImageTransformer2DModel.from_single_file(
    local_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    dtype=torch.bfloat16,
)

pipeline = ZImagePipeline.from_pretrained(
    "Tongyi-MAI/Z-Image-Turbo",
    transformer=transformer,
    dtype=torch.bfloat16,
).to("cuda")

image = pipeline(
    prompt=prompt,
    num_inference_steps=9,  # 9 steps actually result in 8 DiT forward passes
    guidance_scale=0.0,     # guidance should be 0 for the Turbo models
    height=height,
    width=width,
    generator=torch.Generator("cuda").manual_seed(seed),
).images[0]
image.save("output.png")
```
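
The snippet above assumes `local_path` already points at a downloaded GGUF file. A minimal download sketch using `huggingface_hub.snapshot_download`, as mentioned in the comment; `<gguf-repo-id>` is a placeholder for the repository that actually hosts the GGUF weights:

```python
from pathlib import Path

from huggingface_hub import snapshot_download

# Placeholder repo_id: point this at the repository hosting the
# GGUF weights for Z-Image-Turbo.
local_dir = snapshot_download(repo_id="<gguf-repo-id>", allow_patterns=["*.gguf"])

# Pick the quantization variant you want from the downloaded files;
# here we simply take the first .gguf file found.
local_path = str(next(iter(Path(local_dir).glob("*.gguf"))))
```

For a single known file, `huggingface_hub.hf_hub_download(repo_id=..., filename=...)` is an equivalent alternative that returns the file path directly.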