from threading import Thread

import gradio as gr
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer, TextIteratorStreamer

# Load the fine-tuned PEFT (LoRA) checkpoint; the weights come from the Rui-0.5B adapter repo,
# while the tokenizer is loaded from the Rui-3B repo.
model = AutoPeftModelForCausalLM.from_pretrained("adlsdztony/Rui-0.5B")
tokenizer = AutoTokenizer.from_pretrained("adlsdztony/Rui-3B")
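
# Hedged sketch, not part of the original Space: with a PEFT checkpoint the LoRA weights can
# be merged into the base model and moved to a GPU for faster inference. This assumes enough
# memory and a CUDA device are available; as written, the Space runs the adapter as-is.
# model = model.merge_and_unload()
# model = model.to("cuda")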

# from huggingface_hub import InferenceClient
# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("adlsdztony/Rui-3B")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Rebuild the full conversation (system prompt, prior turns, new user message)
    # in the role/content format expected by the chat template.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer([prompt], return_tensors="pt", padding=True, truncation=True)

    # Stream tokens as they are produced; skip_prompt/skip_special_tokens keep the echoed
    # prompt and end-of-turn markers out of the displayed reply.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )

    # model.generate() blocks, so run it on a background thread and consume the streamer here.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    generate_text = ""
    for new_text in streamer:
        if new_text:
            generate_text += new_text
            yield generate_text

    # Alternative: stream from the Hugging Face Inference API instead of the local model.
    # response = ""
    # for message in client.chat_completion(
    #     messages,
    #     max_tokens=max_tokens,
    #     stream=True,
    #     temperature=temperature,
    #     top_p=top_p,
    # ):
    #     token = message.choices[0].delta.content
    #     response += token
    #     yield response
| """ | |
| For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
| """ | |
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        # Default system prompt (in Chinese): "You are Xiao Rui. You only speak Chinese and
        # refer to yourself as 'Rui'. Every day you tell classmates tomorrow's weather and
        # some recent events, and you finish by wishing them good night."
        gr.Textbox(
            value="你是小锐，你只会说中文，你会自称为‘锐’，你的工作是每天告诉同学明天的天气和一些最近发生的事情，最后你会跟同学说晚安",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
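
# Hedged sketch of further customization (not in the original Space); `title` and `description`
# are standard gr.ChatInterface keyword arguments documented at the link above:
# demo = gr.ChatInterface(
#     respond,
#     title="Rui",
#     description="A small chat Space built on the adlsdztony/Rui checkpoints.",
#     additional_inputs=[...],  # same controls as above
# )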

if __name__ == "__main__":
    demo.launch()
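
# Hedged smoke test (an assumption, not part of the Space's UI wiring): respond() is a
# generator that yields progressively longer strings, so iterating over it prints the
# growing reply without launching Gradio. The strings below are illustrative only.
# for partial in respond(
#     "Hello",
#     history=[],
#     system_message="You are Rui.",
#     max_tokens=64,
#     temperature=0.7,
#     top_p=0.95,
# ):
#     print(partial)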