himu1780 commited on
Commit
07fb626
·
verified ·
1 Parent(s): 4e382cb

Update app.py

Browse files
Files changed (1): app.py (+31 lines, −18 lines)
app.py CHANGED
@@ -1,49 +1,62 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
- # 1. Load the GOOD model (It's safe on main!)
5
  MODEL_NAME = "himu1780/ai-python-model"
6
 
7
  print(f"Loading {MODEL_NAME}...")
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
9
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
10
 
11
- # 2. Create the pipeline
12
  generator = pipeline(
13
  "text-generation",
14
  model=model,
15
  tokenizer=tokenizer,
16
- device=-1 # CPU
17
  )
18
 
19
  def chat_function(user_message, history):
20
- # 3. THE MAGIC FIX: Format the input exactly how the model likes it
21
- # We add "def" at the end to force it to start coding immediately
22
- prompt = f"### Instruction:\n{user_message}\n\n### Response:\n"
 
23
 
24
- # 4. Generate with Penalties (Bans looping)
25
  response = generator(
26
  prompt,
27
- max_new_tokens=250,
28
- temperature=0.5, # Lower = More logical/focused
29
- repetition_penalty=1.2, # <--- Kills the "Hello World" loops
30
  do_sample=True,
31
  pad_token_id=tokenizer.eos_token_id
32
  )
33
 
34
- # 5. Clean output
35
  full_text = response[0]['generated_text']
36
- # Only keep the part after "Response:"
37
- answer = full_text.split("### Response:")[-1].strip()
38
 
39
- return answer
 
 
 
 
 
 
 
 
 
40
 
41
- # 6. Launch
42
  demo = gr.ChatInterface(
43
  fn=chat_function,
44
- title="🐍 AI Python Graduate (Step 1000)",
45
- description="I am fully trained! Ask me to 'Write a function to...' or 'Create a loop...'",
46
- examples=["Write a python function to add two numbers", "Create a loop from 1 to 10"]
 
 
 
 
 
47
  )
48
 
49
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
+ # 1. Load Your Smart Model (Step 1000)
5
  MODEL_NAME = "himu1780/ai-python-model"
6
 
7
  print(f"Loading {MODEL_NAME}...")
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
9
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
10
 
11
+ # 2. Create the Generator Pipeline
12
  generator = pipeline(
13
  "text-generation",
14
  model=model,
15
  tokenizer=tokenizer,
16
+ device=-1 # Use CPU
17
  )
18
 
19
def chat_function(user_message, history):
    """Generate a Python-code answer for the Gradio chat UI.

    Args:
        user_message: The user's latest chat message.
        history: Prior turns supplied by gr.ChatInterface (unused —
            the model is prompted single-turn).

    Returns:
        A markdown string containing one properly closed ```python
        fenced code block.
    """
    # 3. Seed the response with an opening code fence so the model
    # starts writing code immediately instead of prose.
    prompt = f"### Instruction:\n{user_message}\n\n### Response:\n```python\n"

    # 4. Low temperature keeps sampling focused; the repetition penalty
    # suppresses the degenerate repeated-output loops this checkpoint shows.
    response = generator(
        prompt,
        max_new_tokens=250,
        temperature=0.4,
        repetition_penalty=1.2,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )

    # 5. The pipeline echoes the prompt; keep only what follows the
    # response marker (which starts with the seeded ```python fence).
    full_text = response[0]['generated_text']
    answer_part = full_text.split("### Response:")[-1].strip()

    # Re-add the opening fence if the model/tokenizer dropped it.
    if not answer_part.startswith("```"):
        answer_part = "```python\n" + answer_part

    # BUG FIX: the model frequently stops before emitting a closing
    # fence (or rambles past one), which breaks markdown rendering in
    # the chat UI. Truncate at the first closing fence, if any, and
    # always terminate the block.
    opening, _, body = answer_part.partition("\n")
    code = body.split("```")[0].rstrip()
    return f"{opening}\n{code}\n```"
+ # 6. Launch the Chat Interface
50
  demo = gr.ChatInterface(
51
  fn=chat_function,
52
+ title="🐍 AI Python Assistant",
53
+ description="I am trained! Ask me to 'Write a function to...' or 'Create a loop...'",
54
+ examples=[
55
+ "Write a python function to add two numbers",
56
+ "Create a loop from 1 to 10",
57
+ "Write a script to calculate the area of a circle"
58
+ ],
59
+ theme="soft"
60
  )
61
 
62
  demo.launch()