Spaces: tonic (Sleeping)
tonic committed · Commit ea0c6be · Parent(s): 0c6cf3e
Update app.py
app.py CHANGED

@@ -18,13 +18,13 @@ Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder
 model_name = 'TencentARC/Mistral_Pro_8B_v0.1'
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
-model.generation_config = GenerationConfig.from_pretrained(model_name)
+# model.generation_config = GenerationConfig.from_pretrained(model_name)
 model.generation_config.pad_token_id = model.generation_config.eos_token_id
 
 @torch.inference_mode()
 @spaces.GPU
-def predict_math_bot(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty):
-    prompt = f"<|system|>\n{…
+def predict_math_bot(user_message, system_message="", max_new_tokens=125, temperature=0.1, top_p=0.9, repetition_penalty=1.9, do_sample=False):
+    prompt = f"<|system|>\n{system_message}\n<|user|>\n{user_message}<|assistant|>" if system_message else user_message
     inputs = tokenizer(prompt, return_tensors='pt', add_special_tokens=False)
     input_ids = inputs["input_ids"].to(model.device)

@@ -35,7 +35,7 @@ def predict_math_bot(user_message, system_message, max_new_tokens, temperature,
         top_p=top_p,
         repetition_penalty=repetition_penalty,
         pad_token_id=tokenizer.eos_token_id,
-        do_sample=…
+        do_sample=do_sample
     )
 
     response = tokenizer.decode(output_ids[0], skip_special_tokens=True)

@@ -52,15 +52,15 @@ def main():
         temperature = gr.Slider(label="Temperature", value=0.1, minimum=0.05, maximum=1.0)
         top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.01, maximum=0.99)
         repetition_penalty = gr.Slider(label="Repetition penalty", value=1.9, minimum=1.0, maximum=2.0)
-
+        do_sample = gr.Checkbox(label="Uncheck for faster inference", value=False)
 
         with gr.Row():
             user_message = gr.Textbox(label="🫡Your Message", lines=3, placeholder="Enter your math query here...")
             system_message = gr.Textbox(label="📉System Prompt", lines=2, placeholder="Optional: Set a scene or introduce a character...")
 
-        gr.Button("…
+        gr.Button("Try🫡📉MetaMath").click(
             predict_math_bot,
-            inputs=[user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty,…
+            inputs=[user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample],
             outputs=output_text
         )
 
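The hunk at lines 52-65 shows only part of main(): the enclosing Blocks context, the max_new_tokens slider, and the output_text component (lines 40-51 and 56-61 of the full file) never appear in the diff. A plausible reconstruction, assuming the predict_math_bot sketch above and with the invented pieces marked as assumptions:

import gradio as gr

def main():
    with gr.Blocks() as demo:  # assumed container; the diff starts mid-function
        max_new_tokens = gr.Slider(label="Max new tokens", value=125, minimum=1, maximum=1024)  # assumed
        temperature = gr.Slider(label="Temperature", value=0.1, minimum=0.05, maximum=1.0)
        top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.01, maximum=0.99)
        repetition_penalty = gr.Slider(label="Repetition penalty", value=1.9, minimum=1.0, maximum=2.0)
        do_sample = gr.Checkbox(label="Uncheck for faster inference", value=False)

        with gr.Row():
            user_message = gr.Textbox(label="🫡Your Message", lines=3, placeholder="Enter your math query here...")
            system_message = gr.Textbox(label="📉System Prompt", lines=2, placeholder="Optional: Set a scene or introduce a character...")

        output_text = gr.Textbox(label="Response")  # assumed; referenced but not defined in the hunk

        # The new checkbox rides along as the last entry of inputs, matching
        # the new do_sample parameter at the end of predict_math_bot's signature.
        gr.Button("Try🫡📉MetaMath").click(
            predict_math_bot,
            inputs=[user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample],
            outputs=output_text,
        )
    demo.launch()

if __name__ == "__main__":
    main()

Gradio passes component values to the callback positionally, so the order of the inputs list must match the function signature; appending do_sample last is what lets the existing five sliders keep their positions.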