# Gradio app: generate commit messages from git diffs via a local llama-cli binary.
import subprocess
import gradio as gr
# Paths — both are relative to the directory the app is launched from.
BINARY_PATH = "./bin/llama-cli" # prebuilt binary from zip
MODEL_PATH = "./qwen0.5-finetuned.gguf"
# Commit message prompt template; the single `{}` placeholder is filled with
# the raw git diff by generate_commit() via str.format.
commit_prompt = """Generate a meaningful commit message explaining all the changes in the provided Git diff.
### Git Diff:
{}
### Commit Message:
"""
# Example git diff prefilled in the Gradio textbox so users can try the app
# without pasting their own diff first (a table replaced by a paragraph).
git_diff_example = """
diff --git a/index.html b/index.html
index 89abcde..f123456 100644
--- a/index.html
+++ b/index.html
@@ -5,16 +5,6 @@ <body>
<h1>Welcome to My Page</h1>
- <table border="1">
- <tr>
- <th>Name</th>
- <th>Age</th>
- </tr>
- <tr>
- <td>John Doe</td>
- <td>30</td>
- </tr>
- </table>
+ <p>This is a newly added paragraph replacing the table.</p>
</body>
</html>
"""
def generate_commit(git_diff: str, max_tokens: int = 64, timeout: float = 300.0) -> str:
    """Generate a commit message for *git_diff* using the llama-cli binary.

    Args:
        git_diff: Unified diff text to summarize. Blank/whitespace-only input
            is rejected with a user-facing message instead of invoking the model.
        max_tokens: Token budget passed to llama-cli via ``-n``.
        timeout: Seconds to wait for the binary before giving up; without it a
            wedged subprocess would hang the UI handler forever.

    Returns:
        The model's completion on success, otherwise a human-readable error
        string — this function never raises, so the Gradio handler always
        receives text to display.
    """
    if not git_diff.strip():
        return "Please provide a git diff to summarize."
    prompt_text = commit_prompt.format(git_diff)
    # Argument list (shell=False) keeps the untrusted diff out of a shell string.
    cmd = [
        BINARY_PATH,
        "-m", MODEL_PATH,
        "-p", prompt_text,
        "-n", str(max_tokens),
    ]
    try:
        # Run the binary and capture output
        result = subprocess.run(
            cmd, capture_output=True, text=True, check=True, timeout=timeout
        )
    except FileNotFoundError:
        # Binary path is wrong or the zip was never unpacked.
        return f"Error: llama-cli binary not found at {BINARY_PATH}."
    except subprocess.TimeoutExpired:
        return f"Error: generation timed out after {timeout:.0f} seconds."
    except subprocess.CalledProcessError as e:
        return f"Error running binary: {e}\n{e.stderr}"
    output = result.stdout.strip()
    # llama-cli typically echoes the prompt before the completion; return only
    # the generated tail when that echo is present, else the full output.
    echoed = prompt_text.strip()
    if output.startswith(echoed):
        output = output[len(echoed):].strip()
    return output
if __name__ == "__main__":
    # Assemble the UI pieces first so the Interface call stays readable:
    # a large diff textbox (prefilled with the example) plus a token-budget slider.
    diff_input = gr.Textbox(lines=30, label="Git Diff", value=git_diff_example)
    token_budget = gr.Slider(1, 2048, value=64, step=1, label="max_tokens")
    message_output = gr.Textbox(label="Commit Message", lines=8)

    demo = gr.Interface(
        fn=generate_commit,
        inputs=[diff_input, token_budget],
        outputs=message_output,
        title="Commit Message Generator",
        description="Paste a git diff and generate a concise commit message using the GGUF model via llama-cli binary.",
        allow_flagging="never",
    )
    # Local-only server; share=False avoids creating a public tunnel.
    demo.launch(share=False)