Optimize LDM model to reduce GPU quota consumption
app.py
CHANGED
@@ -389,15 +389,24 @@ def load_image_model(model_name):
     # Special configuration for LDM
     elif "ldm-text2im" in model_name:
         try:
+            print("🎨 Loading Latent Diffusion Model with optimizations...")
             from diffusers import DiffusionPipeline
+
+            # Optimized configuration for LDM (more conservative)
             pipe = DiffusionPipeline.from_pretrained(
                 model_name,
-                torch_dtype=
-                safety_checker=None
+                torch_dtype=torch.float32,  # Use float32 for broader compatibility
+                safety_checker=None,
+                low_cpu_mem_usage=True,  # Reduce memory usage
+                device_map="auto"  # Automatic device assignment
             )
+
+            print("✅ LDM loaded with memory optimizations")
+
         except Exception as e:
-            print(f"Error loading LDM: {e}")
-
+            print(f"❌ Error loading LDM: {e}")
+            print("🔄 Falling back to SD 1.4 (more efficient)...")
+            # Fall back to SD 1.4, which is much more efficient
             pipe = StableDiffusionPipeline.from_pretrained(
                 "CompVis/stable-diffusion-v1-4",
                 torch_dtype=torch_dtype,
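For context, the load-with-fallback pattern in this hunk can be exercised on its own. The sketch below is illustrative rather than part of the commit: MODEL_ID and load_ldm_or_fallback are hypothetical names, the checkpoint is presumed to be CompVis/ldm-text2im-large-256 (the repo the "ldm-text2im" substring check suggests), and device_map is omitted because pipeline-level device_map support varies across diffusers releases (recent versions accept only "balanced"), so the "auto" value in the diff is worth verifying against the pinned version.

import torch
from diffusers import DiffusionPipeline, StableDiffusionPipeline

# Hypothetical standalone version of the loading logic above.
MODEL_ID = "CompVis/ldm-text2im-large-256"  # assumed LDM checkpoint

def load_ldm_or_fallback(model_name: str = MODEL_ID):
    try:
        # float32 trades memory for numerical compatibility;
        # low_cpu_mem_usage streams weights instead of building a full
        # in-memory copy before loading, cutting peak RAM.
        return DiffusionPipeline.from_pretrained(
            model_name,
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True,
        )
    except Exception as e:
        print(f"LDM load failed ({e}); falling back to SD 1.4")
        # app.py passes its own torch_dtype here; float16 is a stand-in.
        return StableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            torch_dtype=torch.float16,
            safety_checker=None,
        )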
@@ -810,6 +819,13 @@ def generate_image(prompt, model_name, negative_prompt="", seed=0, width=1024, h
             guidance_scale = max(3.5, min(guidance_scale, 4.5))
             num_inference_steps = max(20, num_inference_steps)  # Reduced from 28 to 20
             print(f"🌟 SD 3.5 Large - Adjusting parameters: guidance={guidance_scale}, steps={num_inference_steps}")
+        elif "ldm-text2im" in model_name.lower():
+            # LDM is very heavy; use very conservative parameters
+            guidance_scale = min(guidance_scale, 2.0)  # Very low guidance
+            num_inference_steps = min(num_inference_steps, 8)  # Very few steps
+            width = min(width, 512)  # Smaller resolution
+            height = min(height, 512)
+            print(f"🎨 LDM - Applying ultra-conservative parameters: guidance={guidance_scale}, steps={num_inference_steps}, size={width}x{height}")
         else:
             # For other models, use fewer steps by default
             num_inference_steps = min(num_inference_steps, 15)  # Reduced from 20 to 15
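The parameter clamping added in this hunk is easier to test when read as a pure function of the request. The sketch below is a hypothetical refactor for illustration only: clamp_generation_params does not exist in app.py, and the SD 3.5 Large guard is assumed, since the hunk does not show that branch's condition.

# Hypothetical pure-function view of the clamping logic above.
def clamp_generation_params(model_name, guidance_scale, num_inference_steps,
                            width, height):
    name = model_name.lower()
    if "stable-diffusion-3.5" in name:  # assumed guard; not shown in the hunk
        guidance_scale = max(3.5, min(guidance_scale, 4.5))
        num_inference_steps = max(20, num_inference_steps)
    elif "ldm-text2im" in name:
        # LDM burns GPU quota quickly: low guidance, few steps, <=512 px.
        guidance_scale = min(guidance_scale, 2.0)
        num_inference_steps = min(num_inference_steps, 8)
        width, height = min(width, 512), min(height, 512)
    else:
        num_inference_steps = min(num_inference_steps, 15)
    return guidance_scale, num_inference_steps, width, height

# An LDM request at defaults (28 steps, 1024x1024) is clamped hard:
print(clamp_generation_params("CompVis/ldm-text2im-large-256", 7.5, 28, 1024, 1024))
# -> (2.0, 8, 512, 512)

Keeping all of the clamps in one branch chain makes the quota trade-offs explicit and lets the UI report the adjusted values back to the user, as the print statements in the diff do.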