himu1780 committed on
Commit
d66ab50
·
verified ·
1 Parent(s): b5e85ee

Create App.py

Browse files
Files changed (1) hide show
  1. App.py +49 -0
App.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
+
4
# 1. Load YOUR Model (The one currently training)
# Hub repo id of the fine-tuned causal LM this app serves.
MODEL_NAME = "himu1780/ai-python-model"

print(f"Loading {MODEL_NAME}...")
# Download (or load from cache) the tokenizer and model weights from the Hub.
# NOTE(review): this runs at import time and may take a while / hit the network.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Create a text generation pipeline
# Shared module-level pipeline used by chat_function below.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=-1  # Use CPU
)
18
+
19
def chat_function(user_message, history):
    """Produce the model's reply to one chat message.

    Wraps *user_message* in the instruction template the model was
    fine-tuned on, runs the module-level text-generation pipeline, and
    returns only the text that follows the last "### Response:" marker.
    The *history* argument is required by gr.ChatInterface but unused.
    """
    # Mirror the training-data template so the model recognises the task.
    instruction_prompt = "### Instruction:\n" + user_message + "\n\n### Response:\n"

    outputs = generator(
        instruction_prompt,
        max_new_tokens=200,                   # cap on answer length
        temperature=0.6,                      # low temperature suits code
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )

    # The pipeline echoes the prompt in 'generated_text'; keep only what
    # comes after the final response marker (rpartition returns the whole
    # string as the tail when the marker is absent, matching split()[-1]).
    generated = outputs[0]["generated_text"]
    _, _, answer = generated.rpartition("### Response:")
    return answer.strip()
39
+
40
# 5. Launch the Chat UI
# Gradio supplies (message, history) to fn on every user turn and renders
# the returned string as the assistant reply.
demo = gr.ChatInterface(
    fn=chat_function,
    title="🐍 AI Python Assistant",
    description="Ask me to write Python code!",
    examples=["Write a function to calculate area of circle", "Create a list of numbers 1 to 10"],
    theme="soft"
)

# Start the web server (blocks until the app is stopped).
demo.launch()