import gradio as gr
import torch
from transformers import AutoTokenizer, LlamaForCausalLM
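
# Hugging Face Hub id of the UrbanGPT checkpoint (Llama-based, per the class below).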
model_name = "bjdwh/UrbanGPT"
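
# Load the tokenizer and the model in half precision to reduce memory use.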
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = LlamaForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
)
model.eval()

# Llama tokenizers often ship without a pad token; fall back to EOS so that
# padding (and the pad_token_id passed to generate) is well defined.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token


def generate_response(
    message,
    history: list[tuple[str, str]],
    max_tokens,
    temperature,
    top_p,
):
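    """Build a User:/Assistant: transcript prompt from the chat history,
    sample a completion from UrbanGPT, and yield the reply text."""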
    # Fold prior turns into a plain "User:/Assistant:" transcript prompt.
    input_text = message
    if history:
        input_text = (
            "\n".join(f"User: {u}\nAssistant: {a}" for u, a in history)
            + f"\nUser: {message}"
        )

    # Tokenize the assembled prompt; keep the attention mask for generate().
    inputs = tokenizer(input_text, return_tensors="pt", padding=True)
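
    # Run generation without tracking gradients; sampling kwargs come from the UI sliders.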
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=max_tokens,  # cap *new* tokens; max_length would count the prompt too
            do_sample=True,  # temperature/top_p only take effect when sampling
            temperature=temperature,
            top_p=top_p,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens so the echoed prompt (and any
    # "User:/Assistant:" scaffolding) is not returned as part of the reply.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

    yield response
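

# Gradio chat UI; the extra sliders feed max_tokens / temperature / top_p above.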
demo = gr.ChatInterface(
    generate_response,
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    title="UrbanGPT Chat Assistant",
    description="A Chinese urban-planning dialogue model based on UrbanGPT.",
)


if __name__ == "__main__":
    demo.launch()