Update modules/local_llm.py

modules/local_llm.py (+27 -33)
@@ -4,10 +4,8 @@ from loguru import logger
 from llama_cpp import Llama
 import threading
 
-# REQUIRED PATHS ON HF SPACES
 MODEL_PATH = "/home/user/models/openhermes-2.5-mistral-7b.Q4_K_M.gguf"
 FINETUNED_PATH = "/home/user/data/finetuned_hermes"
-
 _llm_global = None
 _lock = threading.Lock()
 
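This hunk ends before the body of `_get_llm()`, so the diff never shows where `_lock` is actually acquired. For orientation, a minimal sketch of the double-checked locking pattern these two globals usually imply; `_load_model` is a hypothetical stand-in for the loading code in the next hunk:

import threading

_llm_global = None
_lock = threading.Lock()

def _get_llm():
    # Sketch only: double-checked locking around the module singleton.
    global _llm_global
    if _llm_global is not None:      # fast path, no lock contention
        return _llm_global
    with _lock:                      # serialize first-time initialization
        if _llm_global is None:      # re-check after acquiring the lock
            _llm_global = _load_model()  # hypothetical helper (see next hunk)
    return _llm_global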
@@ -20,35 +18,35 @@ def _get_llm():
     if _llm_global is not None:
         return _llm_global
 
-    logger.info("
+    logger.info("CARREGANDO HERMES 7B TURBO → 8-12 SEGUNDOS MÁXIMO!")
     if not os.path.exists(MODEL_PATH):
-        logger.error("GGUF NÃO ENCONTRADO!
+        logger.error("GGUF NÃO ENCONTRADO!")
         return None
 
     try:
         llm = Llama(
             model_path=MODEL_PATH,
-            n_ctx=
-            n_threads=
-            n_batch=
+            n_ctx=2048,        # ← LESS CONTEXT = FASTER
+            n_threads=2,       # ← ONLY 2 vCPUs ON HF FREE
+            n_batch=256,       # ← SMALLER BATCH = LESS MEMORY
             n_gpu_layers=0,
             verbose=False,
-            logits_all=True
+            logits_all=True,
+            use_mlock=True,    # ← AVOIDS SWAP (BIG SPEEDUP)
+            seed=-1,
         )
 
-        # LOAD THE ANGOLAN LORA
         lora_path = f"{FINETUNED_PATH}/adapter_model.bin"
         if os.path.exists(lora_path):
-            logger.info("LORA ANGOLANO
+            logger.info("LORA ANGOLANO CARREGADO → SOTAQUE LUANDA TURBO!")
             llm.load_lora(lora_path)
-            logger.info("AKIRA COM SOTAQUE DE LUANDA 100% ATIVA! BUÉ FIXE!")
 
         _llm_global = llm
-        logger.info("
+        logger.info("HERMES 7B TURBO ONLINE → 8-12s POR RESPOSTA!")
        return llm
 
     except Exception as e:
-        logger.error(f"ERRO
+        logger.error(f"ERRO HERMES: {e}")
         import traceback
         logger.error(traceback.format_exc())
         return None
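A caveat worth flagging on this hunk: llama-cpp-python's `Llama` class has no public `load_lora` method, and llama.cpp consumes LoRA adapters converted to its own GGUF format rather than PEFT's `adapter_model.bin`, so `llm.load_lora(lora_path)` is likely to raise `AttributeError` at runtime. A minimal sketch of the documented route, assuming the adapter has first been converted (e.g. with llama.cpp's `convert_lora_to_gguf.py`); the converted filename is hypothetical:

from llama_cpp import Llama

MODEL_PATH = "/home/user/models/openhermes-2.5-mistral-7b.Q4_K_M.gguf"
FINETUNED_PATH = "/home/user/data/finetuned_hermes"

# Sketch: hand the (converted) LoRA to the constructor via lora_path
# instead of calling a load method after construction.
llm = Llama(
    model_path=MODEL_PATH,
    lora_path=f"{FINETUNED_PATH}/adapter_model.gguf",  # hypothetical converted adapter
    n_ctx=2048,
    n_threads=2,
    n_batch=256,
    n_gpu_layers=0,
    verbose=False,
)

Relatedly, `logits_all=True` keeps logits for every prompt token and is only needed when inspecting per-token logprobs; for plain text generation it costs memory and prompt-eval time for no benefit.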
@@ -56,27 +54,26 @@ def _get_llm():
 class LocalLLM:
     def __init__(self):
         self.llm = _get_llm()
-
+
     def is_available(self) -> bool:
         return self.llm is not None
 
-    def generate(self, prompt: str, max_tokens: int =
+    def generate(self, prompt: str, max_tokens: int = 35) -> str:  # ← 35 TOKENS = 8 SECONDS!
         if not self.is_available():
-
-
-
+            return "Akira off, kota."
+
         try:
-            # PERFECT PROMPT FOR OPENHERMES GGUF (WORKS 1000000%)
             full_prompt = (
                 "<|im_start|>system\n"
-                "
-                "
+                "Akira é kota fixe de Luanda. Responde bué rápido, curto e com gírias angolanas.\n"
+                "<|im_end|>\n"
                 "<|im_start|>user\n"
-                f"{prompt}
+                f"{prompt}\n"
+                "<|im_end|>\n"
                 "<|im_start|>assistant\n"
             )
 
-            logger.info(f"[HERMES
+            logger.info(f"[HERMES TURBO] Gerando {max_tokens} tokens → 8-12s MÁXIMO!")
 
             output = self.llm(
                 full_prompt,
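Hand-building the ChatML string works, but llama-cpp-python can also apply the chat template itself via `create_chat_completion`, assuming the GGUF carries template metadata (OpenHermes 2.5 builds generally do). A sketch equivalent to the prompt above, reusing `self.llm`, `prompt`, and `max_tokens` from this method:

# Sketch: same conversation, but the ChatML turns are formatted from the
# GGUF's own chat template instead of by hand.
output = self.llm.create_chat_completion(
    messages=[
        {"role": "system",
         "content": "Akira é kota fixe de Luanda. Responde bué rápido, "
                    "curto e com gírias angolanas."},
        {"role": "user", "content": prompt},
    ],
    max_tokens=max_tokens,
    temperature=0.9,
    top_p=0.95,
    repeat_penalty=1.12,
)
text = output["choices"][0]["message"]["content"].strip()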
@@ -84,24 +81,21 @@ class LocalLLM:
                 temperature=0.9,
                 top_p=0.95,
                 repeat_penalty=1.12,
-                stop=["<|im_end|>", "
+                stop=["<|im_end|>", "User:", "Assistant:"],
                 echo=False
             )
 
             text = output["choices"][0]["text"].strip()
 
-            #
-            if
-            text += "\n\
+            # "CONTINUA" BUTTON
+            if len(text.split()) > 12:
+                text += "\n\n*continua* pra mais, kota! 😎"
 
-            logger.info(f"[HERMES
+            logger.info(f"[HERMES 8s] {text[:100]}...")
             return text
 
         except Exception as e:
-            logger.error(f"
-
-            logger.error(traceback.format_exc())
-            return "Desculpa kota, buguei agora. Tenta de novo ou escreve 'continua'."
+            logger.error(f"HERMES BUG: {e}")
+            return "Buguei, tenta de novo."
 
-# MANDATORY GLOBAL INSTANCE
 HermesLLM = LocalLLM()
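For reference, a minimal usage sketch of the resulting module; the import path follows the file location shown above, and the example prompt is illustrative:

from modules.local_llm import HermesLLM

# HermesLLM is the module-level singleton built at import time: the model
# loads once and is reused across calls.
if HermesLLM.is_available():
    reply = HermesLLM.generate("Mano, fala-me de Luanda!", max_tokens=35)
    print(reply)
else:
    print("Model unavailable")  # _get_llm() returned None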