Update app.py
app.py
CHANGED
@@ -33,6 +33,8 @@ def main(
         base_model
     ), "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"

+    f = open("debuginfo.txt", "a")
+
     DebugInfo=[] #this is mainly for debug 2023.08.25

     prompter = Prompter(prompt_template)
@@ -94,10 +96,13 @@ def main(
         **kwargs,
     ):
         DebugInfo.append("1.Enter in evaluate.")#TBD
+        f.write("1.Enter in evaluate.")
+        #f.close()

         prompt = prompter.generate_prompt(instruction, input)
         inputs = tokenizer(prompt, return_tensors="pt")
         input_ids = inputs["input_ids"].to(device)
+        f.write("2.after input_ids.")
         generation_config = GenerationConfig(
             temperature=temperature,
             top_p=top_p,
@@ -106,6 +111,7 @@ def main(
             **kwargs,
         )
         with torch.no_grad():
+            f.write("3.before model.generate(..).")
             generation_output = model.generate(
                 input_ids=input_ids,
                 generation_config=generation_config,
@@ -116,6 +122,7 @@ def main(
         s = generation_output.sequences[0]
         output = tokenizer.decode(s)
         DebugInfo.append("2.Generate out decode completed.")#TBD
+        f.write("4.Generate out decode completed.")
         return prompter.get_response(output),DebugInfo

     gr.Interface(
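A note on the tracing approach in this change: `debuginfo.txt` is opened once in `main()` and never closed or flushed (the `f.close()` call is left commented out), so buffered writes may never reach disk if the process hangs or is killed inside `model.generate(...)`, which is exactly the situation being debugged. The `f.write` calls also omit newlines, so the entries run together on one line. A minimal alternative sketch, assuming the same `debuginfo.txt` target (the `debug_log` helper name is hypothetical, not part of this commit):

    def debug_log(msg, path="debuginfo.txt"):
        # Hypothetical helper, not in this commit: open per call and let the
        # with-block close (and flush) the file, so each entry reaches disk
        # even if the process dies immediately afterwards.
        with open(path, "a") as f:
            f.write(msg + "\n")  # trailing newline keeps entries on separate lines

    debug_log("1.Enter in evaluate.")
    debug_log("3.before model.generate(..).")

Python's standard `logging` module with a `FileHandler` would give the same crash-safe behavior (it flushes after every record) plus timestamps for free.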