entfane committed on
Commit
578eb94
·
verified ·
1 Parent(s): 208d759

Added chat template support

Browse files
Files changed (1) hide show
  1. README.md +11 -3
README.md CHANGED
@@ -25,12 +25,20 @@ Model was fine-tuned on [qwedsacf/grade-school-math-instructions](https://huggin
25
  ### Inference
26
 
27
  ```python
28
- !pip install transformers accelerate
29
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 
30
  model_name = "entfane/math-professor-3B"
 
31
  tokenizer = AutoTokenizer.from_pretrained(model_name)
32
  model = AutoModelForCausalLM.from_pretrained(model_name)
33
- input = "### User: what is the derivative of 2x^2 \n### Assistant:"
 
 
 
 
 
34
  encoded_input = tokenizer(input, return_tensors = "pt").to(model.device)
35
  output = model.generate(**encoded_input, max_new_tokens=1024)
36
  print(tokenizer.decode(output[0], skip_special_tokens=False))
 
25
  ### Inference
26
 
27
  ```python
28
+ !pip install transformers accelerate
29
+
30
+ from transformers import AutoTokenizer, AutoModelForCausalLM
31
+
32
  model_name = "entfane/math-professor-3B"
33
+
34
  tokenizer = AutoTokenizer.from_pretrained(model_name)
35
  model = AutoModelForCausalLM.from_pretrained(model_name)
36
+
37
+ messages = [
38
+ {"role": "user", "content": "What's the derivative of 2x^2?"}
39
+ ]
40
+
41
+ input = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
42
  encoded_input = tokenizer(input, return_tensors = "pt").to(model.device)
43
  output = model.generate(**encoded_input, max_new_tokens=1024)
44
  print(tokenizer.decode(output[0], skip_special_tokens=False))