Update README.md
README.md CHANGED

@@ -43,8 +43,13 @@ I enjoy exploring AI pipelines, natural language processing, and building tools
 ```
 from llama_cpp import Llama
 
-
-
+modelGGUF = Llama.from_pretrained(
+    repo_id="seniruk/qwen2.5coder-0.5B_commit_msg",
+    filename="qwen0.5-finetuned.gguf",
+    rope_scaling={"type": "linear", "factor": 2.0},
+    chat_format=None,  # Disables any chat formatting
+    n_ctx=32768,  # Set the context size explicitly
+)
 
 # Define the commit message prompt (Minimal format, avoids assistant behavior)
 commit_prompt = """Generate a meaningful commit message explaining all the changes in the provided Git diff.
@@ -79,14 +84,6 @@ index 89abcde..f123456 100644
 </html>
 """
 
-# Load the GGUF model with increased context size (32768)
-modelGGUF = Llama(
-    model_path=gguf_model_path,
-    rope_scaling={"type": "linear", "factor": 2.0},
-    chat_format=None,  # Disables any chat formatting
-    n_ctx=32768,  # Set the context size explicitly
-)
-
 # Prepare the raw input prompt
 input_prompt = commit_prompt.format(git_diff_example)
 
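In short, the README snippet now pulls the fine-tuned GGUF model straight from the Hugging Face Hub with `Llama.from_pretrained` instead of loading it from a local `model_path`. Below is a minimal sketch of how the updated snippet might be used end to end, assuming `modelGGUF` and `input_prompt` are defined as in the README code above; the generation parameters and stop sequence are illustrative assumptions, not part of this commit.

```
# Sketch only: run the prepared prompt through the loaded model and read back the text.
# modelGGUF and input_prompt come from the README snippet above; max_tokens,
# temperature, and the stop sequence are assumed values for illustration.
output = modelGGUF(
    input_prompt,
    max_tokens=128,      # cap the length of the generated commit message
    temperature=0.2,     # keep the output near-deterministic
    stop=["\n\n"],       # assumed stop sequence; tune to the model's output style
)

commit_message = output["choices"][0]["text"].strip()
print(commit_message)
```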