ytjhai and MrOvkill committed
Commit 28e7087 · verified · 0 parents

Duplicate from MrOvkill/gemma-2-inference-endpoint-GGUF

Co-authored-by: Samuel L Meyers <MrOvkill@users.noreply.huggingface.co>

Files changed (5):
  1. .gitattributes +36 -0
  2. README.md +31 -0
  3. gemma_tools.py +38 -0
  4. handler.py +40 -0
  5. requirements.txt +1 -0
.gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ gemma-2b.q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,31 @@
+ ---
+ license: apache-2.0
+ language:
+ - en
+ library_name: llama.cpp
+ pipeline_tag: text-generation
+ ---
+
+ # Gemma 2 - Inference Endpoint
+
+ ## <span style="color: red">NOTICE:</span> This model does, in fact, run on Inference Endpoints: just click Deploy, unlike with regular GGUF models. The model is no longer stored in this repository, merely linked. Enjoy <span style="color: red">&lt;3</span>
+
+ <label>Code Sample (One-Shot)</label>
+ ```json
+ {
+   "inputs": "A plain old prompt with nothing else"
+ }
+ ```
+
+ ## Multi-turn coming soon...
+
+ Hello! I wrote a simple container that allows for easy running of llama-cpp-python with GGUF models. My goal here was a cheap way to play with Gemma, but then I thought maybe I'd share it just in case it's helpful. I'll probably make a bunch of these, so if you have any requests for GGUF or otherwise quantized llama.cpp models to become inference endpoints, please feel free to reach out!
+
+ # Files
+
+ I used the excellent quant from [lmstudio-ai/gemma-2b-it-GGUF](https://huggingface.co/lmstudio-ai/gemma-2b-it-GGUF).
+
+ My email is newp@justkidding.net
+
+ Just kidding, it's sam att samuellmeyers DOT... com
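
As a quick aside (not part of the commit): the one-shot payload documented above maps directly onto a plain HTTP POST against the deployed endpoint. A minimal sketch with Python `requests`, where the endpoint URL and token are hypothetical placeholders:

```python
# Sketch only: POST the one-shot payload to a deployed Inference Endpoint.
# ENDPOINT_URL and HF_TOKEN are hypothetical placeholders, not part of this repo.
import requests

ENDPOINT_URL = "https://YOUR-ENDPOINT.endpoints.huggingface.cloud"
HF_TOKEN = "hf_..."

response = requests.post(
    ENDPOINT_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
    json={"inputs": "A plain old prompt with nothing else"},
)
print(response.json())
```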
gemma_tools.py ADDED
@@ -0,0 +1,38 @@
+ def get_args_or_none(data):
+     # Pull generation arguments out of the request payload, falling back to defaults.
+     system_prompt = data.pop("system-prompt", "You are Gemma. Assist user with whatever they require, in a safe and moral manner.")
+     inputs = data.pop("inputs", "")
+     temperature = data.pop("temperature", None)
+     if temperature is None:
+         temperature = data.pop("temp", 0.33)
+     if temperature > 3 or temperature < 0:
+         return {
+             0: False,
+             "status": "error",
+             "reason": "temperature",
+             "description": "invalid temperature ( 0.0 - 3.0 only allowed )"
+         }
+     top_p = data.pop("top-p", 0.85)
+     if top_p > 1 or top_p < 0:
+         return {
+             0: False,
+             "status": "error",
+             "reason": "top_p",
+             "description": "invalid top percentage ( 0.01 - 1.00 only allowed )"
+         }
+     top_k = data.pop("top-k", 42)
+     if top_k > 100 or top_k < 0:
+         return {
+             0: False,
+             "status": "error",
+             "reason": "top_k",
+             "description": "invalid top k ( 0 - 100 only allowed )"
+         }
+     # Key 0 is a success flag; the rest are the validated sampling parameters.
+     return {
+         0: True,
+         "inputs": inputs,
+         "system_prompt": system_prompt,
+         "temperature": temperature,
+         "top_p": top_p,
+         "top_k": top_k
+     }
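
A small usage sketch (not part of the commit) of how `get_args_or_none` behaves: it returns the validated sampling parameters with a `True` flag at key `0`, or an error dict describing the first out-of-range parameter.

```python
# Usage sketch for gemma_tools.get_args_or_none (illustration only).
import gemma_tools as gem

ok = gem.get_args_or_none({"inputs": "Hello, Gemma!", "temperature": 0.7, "top-k": 50})
print(ok[0], ok["temperature"], ok["top_k"])  # True 0.7 50

bad = gem.get_args_or_none({"inputs": "Hi", "top-p": 7})
print(bad[0], bad["reason"], bad["description"])  # False top_p invalid top percentage ...
```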
handler.py ADDED
@@ -0,0 +1,40 @@
+ import json
+ from typing import Dict, List, Any
+ from llama_cpp import Llama
+ import gemma_tools as gem
+
+ MAX_TOKENS = 8192
+
+ class EndpointHandler():
+     def __init__(self, path=""):
+         # Pull the GGUF quant straight from the Hub rather than storing it in this repo.
+         self.model = Llama.from_pretrained("lmstudio-ai/gemma-2b-it-GGUF", filename="gemma-2b-it-q4_k_m.gguf", n_ctx=MAX_TOKENS)
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+         args = gem.get_args_or_none(data)
+         # Gemma chat format: system, user, then an open model turn for the completion.
+         fmat = "<start_of_turn>system\n{system_prompt} <end_of_turn>\n<start_of_turn>user\n{prompt} <end_of_turn>\n<start_of_turn>model"
+         print(args, fmat)  # debug logging
+         if not args[0]:
+             return {
+                 "status": args["status"],
+                 "message": args["description"]
+             }
+         try:
+             fmat = fmat.format(system_prompt=args["system_prompt"], prompt=args["inputs"])
+         except Exception:
+             return json.dumps({
+                 "status": "error",
+                 "reason": "invalid format"
+             })
+         max_length = data.pop("max_length", 512)
+         try:
+             max_length = int(max_length)
+         except Exception:
+             return json.dumps({
+                 "status": "error",
+                 "reason": "max_length was not a plain old int"
+             })
+
+         res = self.model(fmat, temperature=args["temperature"], top_p=args["top_p"], top_k=args["top_k"], max_tokens=max_length)
+
+         return res
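
For completeness, a local smoke-test sketch (not part of the commit), assuming llama-cpp-python is installed and the GGUF can be fetched from the Hub on first run:

```python
# Local smoke test for handler.EndpointHandler (illustration only).
from handler import EndpointHandler

handler = EndpointHandler()
out = handler({
    "inputs": "Write a haiku about GGUF quantization.",
    "temperature": 0.5,
    "max_length": 128,
})
print(out)  # llama.cpp completion dict, e.g. out["choices"][0]["text"]
```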
requirements.txt ADDED
@@ -0,0 +1 @@
+ llama-cpp-python