Roman190928 committed on
Commit ee69716 · verified · 1 Parent(s): 0ae001f

Update app.py

Files changed (1)
1. app.py +10 -3
app.py CHANGED
@@ -6,7 +6,7 @@ import random
 # import spaces #[uncomment to use ZeroGPU]
 from diffusers import DiffusionPipeline
 import torch
-
+targets = {"pussy", "boobs", "breasts", "vagina", "penis", "sex", "oral", "anal", "butt", "ass"}
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model_repo_id = "John6666/spicy-realism-nsfw-mix-v30-sdxl"  # Replace with the model you would like to use
 
@@ -20,8 +20,15 @@ print(pipe)
 pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
+MAX_IMAGE_SIZE = 2048
+def safe_infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_steps):
+    # check if the prompt contains any banned words
+    if any(word in prompt.lower() for word in targets):
+        print("Found at least one banned word!")
+        return "Refused due to safety.", seed
 
+    # otherwise run normal inference
+    return infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_steps)
 
 # @spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
 
@@ -139,7 +146,7 @@ with gr.Blocks(css=css) as demo:
         gr.Examples(examples=examples, inputs=[prompt])
     gr.on(
         triggers=[run_button.click, prompt.submit],
-        fn=infer,
+        fn=safe_infer,
         inputs=[
             prompt,
             negative_prompt,
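
Note on the refusal path: safe_infer returns the string "Refused due to safety." as its first output, which in the stock text-to-image Space template is wired to a gr.Image component, so Gradio would raise an error at runtime instead of showing the refusal. A minimal sketch of a friendlier refusal, assuming that [result, seed] output wiring and a recent Gradio version (gr.Warning shows a toast; returning None leaves the image empty):

import gradio as gr

def safe_infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_steps):
    # substring check against the banned-word set defined earlier in app.py
    if any(word in prompt.lower() for word in targets):
        gr.Warning("Prompt refused due to safety.")  # surface the refusal in the UI
        return None, seed  # leave the image output empty instead of erroring
    return infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_steps)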
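
Separately, the substring test over-blocks: "ass" matches inside "glass" or "class", and "sex" inside "Sussex". A hedged alternative sketch using word-boundary regex matching; contains_banned_word is a hypothetical helper, not part of this commit:

import re

targets = {"sex", "ass"}  # abbreviated stand-in for the set defined in app.py

# one case-insensitive pattern; \b anchors each banned word at word boundaries
banned_pattern = re.compile(
    r"\b(?:" + "|".join(re.escape(word) for word in sorted(targets)) + r")\b",
    flags=re.IGNORECASE,
)

def contains_banned_word(prompt: str) -> bool:
    return banned_pattern.search(prompt) is not None

print(contains_banned_word("a glass of water"))  # False: "ass" sits inside "glass"
print(contains_banned_word("kick his ass"))      # True: standalone word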