himu1780 committed on
Commit
d30017d
·
verified ·
1 Parent(s): eafe2d3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -10
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
- # 1. Load YOUR Model (The one currently training)
5
  MODEL_NAME = "himu1780/ai-python-model"
6
 
7
  print(f"Loading {MODEL_NAME}...")
@@ -17,33 +17,30 @@ generator = pipeline(
17
  )
18
 
19
  def chat_function(user_message, history):
20
- # 2. Format the prompt exactly like the training data
21
- # This teaches the model: "Hey, I am giving you an Instruction, now give me the Response."
22
  prompt = f"### Instruction:\n{user_message}\n\n### Response:\n"
23
 
24
  # 3. Generate the answer
25
  response = generator(
26
  prompt,
27
- max_new_tokens=200, # Max length of answer
28
- temperature=0.6, # Creativity (Low = better for code)
29
  do_sample=True,
30
  pad_token_id=tokenizer.eos_token_id
31
  )
32
 
33
- # 4. Clean up the text (Remove the prompt so we only see the answer)
34
  full_text = response[0]['generated_text']
35
- # Split by "### Response:" and take the second part
36
  answer = full_text.split("### Response:")[-1].strip()
37
 
38
  return answer
39
 
40
- # 5. Launch the Chat UI
41
  demo = gr.ChatInterface(
42
  fn=chat_function,
43
  title="🐍 AI Python Assistant",
44
  description="Ask me to write Python code!",
45
- examples=["Write a function to calculate area of circle", "Create a list of numbers 1 to 10"],
46
- theme="soft"
47
  )
48
 
49
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
+ # 1. Load YOUR Model
5
  MODEL_NAME = "himu1780/ai-python-model"
6
 
7
  print(f"Loading {MODEL_NAME}...")
 
17
  )
18
 
19
  def chat_function(user_message, history):
20
+ # 2. Format the prompt
 
21
  prompt = f"### Instruction:\n{user_message}\n\n### Response:\n"
22
 
23
  # 3. Generate the answer
24
  response = generator(
25
  prompt,
26
+ max_new_tokens=200,
27
+ temperature=0.6,
28
  do_sample=True,
29
  pad_token_id=tokenizer.eos_token_id
30
  )
31
 
32
+ # 4. Clean up the text
33
  full_text = response[0]['generated_text']
 
34
  answer = full_text.split("### Response:")[-1].strip()
35
 
36
  return answer
37
 
38
+ # 5. Launch the Chat UI (Fixed: Removed 'theme' argument)
39
  demo = gr.ChatInterface(
40
  fn=chat_function,
41
  title="🐍 AI Python Assistant",
42
  description="Ask me to write Python code!",
43
+ examples=["Write a function to calculate area of circle", "Create a list of numbers 1 to 10"]
 
44
  )
45
 
46
  demo.launch()