frogleo committed on
Commit
7b2052a
·
verified ·
1 Parent(s): 0c454d8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -40
app.py CHANGED
@@ -10,9 +10,9 @@ import torch
10
  import uuid
11
  from diffusers import Flux2Pipeline, Flux2Transformer2DModel
12
  from diffusers import BitsAndBytesConfig as DiffBitsAndBytesConfig
13
- import requests
14
  from PIL import Image
15
- import json
16
  import base64
17
  from huggingface_hub import InferenceClient
18
  import logging
@@ -33,10 +33,10 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
33
  MAX_SEED = np.iinfo(np.int32).max
34
  MAX_IMAGE_SIZE = 1024
35
 
36
- hf_client = InferenceClient(
37
- api_key=os.environ.get("HF_TOKEN"),
38
- )
39
- VLM_MODEL = "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT"
40
 
41
  SYSTEM_PROMPT_TEXT_ONLY = """You are an expert prompt engineer for FLUX.2 by Black Forest Labs. Rewrite user prompts to be more descriptive while strictly preserving their core subject and intent.
42
  Guidelines:
@@ -132,44 +132,44 @@ def image_to_data_uri(img):
132
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
133
  return f"data:image/png;base64,{img_str}"
134
 
135
- def upsample_prompt_logic(prompt, image_list):
136
- try:
137
- if image_list and len(image_list) > 0:
138
- # Image + Text Editing Mode
139
- system_content = SYSTEM_PROMPT_WITH_IMAGES
140
 
141
- # Construct user message with text and images
142
- user_content = [{"type": "text", "text": prompt}]
143
 
144
- for img in image_list:
145
- data_uri = image_to_data_uri(img)
146
- user_content.append({
147
- "type": "image_url",
148
- "image_url": {"url": data_uri}
149
- })
150
 
151
- messages = [
152
- {"role": "system", "content": system_content},
153
- {"role": "user", "content": user_content}
154
- ]
155
- else:
156
- # Text Only Mode
157
- system_content = SYSTEM_PROMPT_TEXT_ONLY
158
- messages = [
159
- {"role": "system", "content": system_content},
160
- {"role": "user", "content": prompt}
161
- ]
162
-
163
- completion = hf_client.chat.completions.create(
164
- model=VLM_MODEL,
165
- messages=messages,
166
- max_tokens=1024
167
- )
168
 
169
- return completion.choices[0].message.content
170
- except Exception as e:
171
- print(f"Upsampling failed: {e}")
172
- return prompt
173
 
174
  def update_dimensions_from_image(image_list, width, height):
175
  """Update width/height sliders based on uploaded image aspect ratio.
 
10
  import uuid
11
  from diffusers import Flux2Pipeline, Flux2Transformer2DModel
12
  from diffusers import BitsAndBytesConfig as DiffBitsAndBytesConfig
13
+ # import requests
14
  from PIL import Image
15
+ # import json
16
  import base64
17
  from huggingface_hub import InferenceClient
18
  import logging
 
33
  MAX_SEED = np.iinfo(np.int32).max
34
  MAX_IMAGE_SIZE = 1024
35
 
36
+ # hf_client = InferenceClient(
37
+ # api_key=os.environ.get("HF_TOKEN"),
38
+ # )
39
+ # VLM_MODEL = "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT"
40
 
41
  SYSTEM_PROMPT_TEXT_ONLY = """You are an expert prompt engineer for FLUX.2 by Black Forest Labs. Rewrite user prompts to be more descriptive while strictly preserving their core subject and intent.
42
  Guidelines:
 
132
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
133
  return f"data:image/png;base64,{img_str}"
134
 
135
+ # def upsample_prompt_logic(prompt, image_list):
136
+ # try:
137
+ # if image_list and len(image_list) > 0:
138
+ # # Image + Text Editing Mode
139
+ # system_content = SYSTEM_PROMPT_WITH_IMAGES
140
 
141
+ # # Construct user message with text and images
142
+ # user_content = [{"type": "text", "text": prompt}]
143
 
144
+ # for img in image_list:
145
+ # data_uri = image_to_data_uri(img)
146
+ # user_content.append({
147
+ # "type": "image_url",
148
+ # "image_url": {"url": data_uri}
149
+ # })
150
 
151
+ # messages = [
152
+ # {"role": "system", "content": system_content},
153
+ # {"role": "user", "content": user_content}
154
+ # ]
155
+ # else:
156
+ # # Text Only Mode
157
+ # system_content = SYSTEM_PROMPT_TEXT_ONLY
158
+ # messages = [
159
+ # {"role": "system", "content": system_content},
160
+ # {"role": "user", "content": prompt}
161
+ # ]
162
+
163
+ # completion = hf_client.chat.completions.create(
164
+ # model=VLM_MODEL,
165
+ # messages=messages,
166
+ # max_tokens=1024
167
+ # )
168
 
169
+ # return completion.choices[0].message.content
170
+ # except Exception as e:
171
+ # print(f"Upsampling failed: {e}")
172
+ # return prompt
173
 
174
  def update_dimensions_from_image(image_list, width, height):
175
  """Update width/height sliders based on uploaded image aspect ratio.