Hoglet-33 committed on
Commit
b766ca1
·
verified ·
1 Parent(s): dadefd1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -8
app.py CHANGED
@@ -1,13 +1,36 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
- tokenizer = AutoTokenizer.from_pretrained("Lucid-research/lucentcode-1-py")
5
- model = AutoModelForCausalLM.from_pretrained("Lucid-research/lucentcode-1-py")
6
 
7
- def generate_text(prompt):
 
 
 
 
 
 
 
8
  inputs = tokenizer(prompt, return_tensors="pt")
9
- outputs = model.generate(**inputs, max_length=1000)
10
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
13
- iface.launch()
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
# Hugging Face Hub repository ID of the model this Space serves.
MODEL_ID = "Lucid-research/lucentcode-1-py"  # Change this to your model repo ID

# Load the tokenizer and weights once at import time so every request
# reuses the same in-memory model instead of reloading from disk.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
8
+
9
def format_prompt(user_input):
    """Wrap *user_input* in the instruction/output template the model expects.

    NOTE(review): the exact "### Instruction:" / "### Output:" framing is
    presumably the fine-tuning prompt format — confirm against the model card.
    """
    header = "### Instruction:\n"
    separator = "\n\n### Output:\n"
    return header + user_input + separator
11
+
12
def generate_code(user_input):
    """Generate model output for a free-form instruction.

    Parameters
    ----------
    user_input : str
        Instruction text entered by the user in the Gradio textbox.

    Returns
    -------
    str
        Whatever the model produced after the final "### Output:" marker,
        stripped of surrounding whitespace.
    """
    prompt = format_prompt(user_input)
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        # Bug fix: max_length bounds prompt + generation combined, so a long
        # prompt silently shrinks (or exhausts) the output budget and recent
        # transformers versions warn about it. max_new_tokens bounds only the
        # newly generated tokens.
        max_new_tokens=1000,
        temperature=0.7,
        do_sample=True,
        top_p=0.9,
        # Avoids the "pad_token_id not set" warning for models without one.
        pad_token_id=tokenizer.eos_token_id,
    )
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # The decoded sequence echoes the prompt back; keep only the text after
    # the last "### Output:" marker.
    return text.split("### Output:")[-1].strip()
26
+
27
# Wire the generator into a minimal web UI: one text box in, one text box out.
iface = gr.Interface(
    fn=generate_code,
    inputs=gr.Textbox(lines=4, label="Instruction"),
    outputs=gr.Textbox(lines=8, label="Generated Output"),
    title="Code Generation with Your Model",
    description="Enter an instruction and get a generated Python function.",
)

# Start the server only when executed as a script, not when imported.
if __name__ == "__main__":
    iface.launch()