import gradio as gr
import requests
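
# Gradio demo: chat with a GPT-J-6B persona through the Hugging Face Inference API
# and optionally turn the latest reply into an image via a Latent Diffusion Space.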

# GPT-J-6B API
API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"
headers = {"Authorization": "Bearer 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"}
prompt = """
me: I'm Twimbly Twombly. I live in California. I'm a robot."""

prev_chat = """
you: Hello. How are you?
me: I'm fine"""

examples = [["how are you?"], ["hello"]]


def chat_generate(word):
  # prev_chat is reassigned below, so it must be declared global here;
  # otherwise Python raises UnboundLocalError when it is read on the next line.
  global prev_chat
  # Build the prompt: persona + chat history + the new user turn
  p = prompt + prev_chat + "\n" + "you: " + word.lower() + "\n" + "me: "
  print(f"*****Inside chat_generate - Prompt is: {p}")
  json_ = {"inputs": p,
            "parameters":
            {
            "top_p": 0.9,
          "temperature": 1.1,
          "max_new_tokens": 50,
          "return_full_text": False
          }}
  response = requests.post(API_URL, headers=headers, json=json_)
  output = response.json()
  output_tmp = output[0]['generated_text']
  # Keep only the bot's turn, dropping any hallucinated "you:" continuation
  reply = output_tmp.split("you:")[0]
  print(f"Chat Response being returned is: {reply}")
  # Append this exchange to the history so the next prompt includes it
  prev_chat = prev_chat + "\n" + "you: " + word.lower() + "\n" + "me: " + reply
  return reply

def text_to_image(reply):
  print("*****Inside Text_to_image")
  # Flatten the reply to a single line and append a style hint for the image model
  reply = " ".join(reply.split('\n'))
  reply = reply + " oil on canvas."
  # Arguments expected by the latentdiffusion Space: steps, width, height, number of images, diversity scale
  steps, width, height, images, diversity = '50', '256', '256', '1', 15
  img = gr.Interface.load("spaces/multimodalart/latentdiffusion")(reply, steps, width, height, images, diversity)[0]
  return img
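
# Build the Gradio UI: a chat input box, the bot's reply, and the generated image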

demo = gr.Blocks()

with demo:
  gr.Markdown("<h1><center>Twimbly Twombly</center></h1>")
  gr.Markdown(
        "<div>Hi, I'm Twimbly Twombly, ready to talk to you.</div>"
        "<div>Generate an illustration 🎨 with the Latent Diffusion model.</div>"
    )
  with gr.Row():
    input_word = gr.Textbox(placeholder="Enter a word here to chat...")
    chat_txt = gr.Textbox(lines=1)
    output_image = gr.Image(type="filepath", shape=(256,256))
  
  b1 = gr.Button("Send")
  b2 = gr.Button("Imagine")
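
  # Wire the buttons: "Send" produces a chat reply, "Imagine" renders it as an image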

  b1.click(chat_generate, input_word, chat_txt)
  b2.click(text_to_image, chat_txt, output_image)
  #examples=examples

demo.launch(enable_queue=True, debug=True)