himu1780 committed
Commit 4e382cb · verified · 1 parent: d30017d

Update app.py

Files changed (1): app.py (+15 -12)
app.py CHANGED
@@ -1,46 +1,49 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
- # 1. Load YOUR Model
5
  MODEL_NAME = "himu1780/ai-python-model"
6
 
7
  print(f"Loading {MODEL_NAME}...")
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
9
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
10
 
11
- # Create a text generation pipeline
12
  generator = pipeline(
13
  "text-generation",
14
  model=model,
15
  tokenizer=tokenizer,
16
- device=-1 # Use CPU
17
  )
18
 
19
  def chat_function(user_message, history):
20
- # 2. Format the prompt
 
21
  prompt = f"### Instruction:\n{user_message}\n\n### Response:\n"
22
 
23
- # 3. Generate the answer
24
  response = generator(
25
  prompt,
26
- max_new_tokens=200,
27
- temperature=0.6,
 
28
  do_sample=True,
29
  pad_token_id=tokenizer.eos_token_id
30
  )
31
 
32
- # 4. Clean up the text
33
  full_text = response[0]['generated_text']
 
34
  answer = full_text.split("### Response:")[-1].strip()
35
 
36
  return answer
37
 
38
- # 5. Launch the Chat UI (Fixed: Removed 'theme' argument)
39
  demo = gr.ChatInterface(
40
  fn=chat_function,
41
- title="🐍 AI Python Assistant",
42
- description="Ask me to write Python code!",
43
- examples=["Write a function to calculate area of circle", "Create a list of numbers 1 to 10"]
44
  )
45
 
46
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
+ # 1. Load the GOOD model (It's safe on main!)
5
  MODEL_NAME = "himu1780/ai-python-model"
6
 
7
  print(f"Loading {MODEL_NAME}...")
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
9
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
10
 
11
+ # 2. Create the pipeline
12
  generator = pipeline(
13
  "text-generation",
14
  model=model,
15
  tokenizer=tokenizer,
16
+ device=-1 # CPU
17
  )
18
 
19
  def chat_function(user_message, history):
20
+ # 3. THE MAGIC FIX: Format the input exactly how the model likes it
21
+ # We add "def" at the end to force it to start coding immediately
22
  prompt = f"### Instruction:\n{user_message}\n\n### Response:\n"
23
 
24
+ # 4. Generate with Penalties (Bans looping)
25
  response = generator(
26
  prompt,
27
+ max_new_tokens=250,
28
+ temperature=0.5, # Lower = More logical/focused
29
+ repetition_penalty=1.2, # <--- Kills the "Hello World" loops
30
  do_sample=True,
31
  pad_token_id=tokenizer.eos_token_id
32
  )
33
 
34
+ # 5. Clean output
35
  full_text = response[0]['generated_text']
36
+ # Only keep the part after "Response:"
37
  answer = full_text.split("### Response:")[-1].strip()
38
 
39
  return answer
40
 
41
+ # 6. Launch
42
  demo = gr.ChatInterface(
43
  fn=chat_function,
44
+ title="🐍 AI Python Graduate (Step 1000)",
45
+ description="I am fully trained! Ask me to 'Write a function to...' or 'Create a loop...'",
46
+ examples=["Write a python function to add two numbers", "Create a loop from 1 to 10"]
47
  )
48
 
49
  demo.launch()