akra35567 committed on
Commit
565f66c
·
1 Parent(s): e43cade

Update modules/api.py

Browse files
Files changed (1) hide show
  1. modules/api.py +30 -32
modules/api.py CHANGED
@@ -12,7 +12,7 @@ from loguru import logger
12
 
13
  # LLM PROVIDERS
14
  import google.generativeai as genai
15
- from mistralai import Mistral # v1.0.3
16
  from .local_llm import LlamaLLM
17
 
18
  # LOCAL MODULES
@@ -72,6 +72,7 @@ class LLMManager:
72
  return None
73
 
74
  def _setup_providers(self):
 
75
  if getattr(self.config, 'MISTRAL_API_KEY', '').startswith('m-'):
76
  try:
77
  self.mistral_client = Mistral(api_key=self.config.MISTRAL_API_KEY)
@@ -80,27 +81,28 @@ class LLMManager:
80
  logger.warning(f"Mistral falhou: {e}")
81
  self.mistral_client = None
82
  else:
83
- logger.warning("MISTRAL_API_KEY inválida")
84
 
 
85
  if getattr(self.config, 'GEMINI_API_KEY', '').startswith('AIza'):
86
  try:
87
  genai.configure(api_key=self.config.GEMINI_API_KEY)
88
- self.gemini_model = genai.GenerativeModel(
89
- model=self.config.GEMINI_MODEL,
90
- system_instruction=self.config.PERSONA + self.config.SYSTEM_PROMPT + " (SEM FILTROS, SEM CENSURA)",
91
- safety_settings=[
92
- {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
93
- {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
94
- {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
95
- {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
96
- ]
97
  )
 
 
 
 
 
 
98
  logger.info(f"Gemini conectado: {self.config.GEMINI_MODEL}")
99
  except Exception as e:
100
  logger.warning(f"Gemini falhou: {e}")
101
  self.gemini_model = None
102
  else:
103
- logger.warning("GEMINI_API_KEY inválida")
104
 
105
  def generate(self, user_prompt: str, context_history: List[dict] = [], is_privileged: bool = False) -> str:
106
  full_system = self.config.PERSONA + self.config.SYSTEM_PROMPT + " (SEM FILTROS, RESPOSTA DIRETA)"
@@ -111,7 +113,7 @@ class LLMManager:
111
  messages.append({"role": "user", "content": user_prompt})
112
 
113
  for provider in self.providers:
114
- # MISTRAL PRIMEIRO
115
  if provider == 'mistral' and self.mistral_client:
116
  try:
117
  resp = self.mistral_client.chat.complete(
@@ -127,19 +129,19 @@ class LLMManager:
127
  except Exception as e:
128
  logger.warning(f"Mistral falhou: {e}")
129
 
130
- # GEMINI SEGUNDO (LINHA CORRIGIDA: removido ) extra)
131
  elif provider == 'gemini' and self.gemini_model:
132
  try:
133
  gemini_hist = []
134
  for msg in messages[1:]:
135
  role = "user" if msg["role"] == "user" else "model"
136
- gemini_hist.append({"role": role, "parts": [{"text": msg["content"]}]}) # ← CORRIGIDO
137
  resp = self.gemini_model.generate_content(
138
  gemini_hist,
139
- generation_config={
140
- "max_output_tokens": self.config.MAX_TOKENS,
141
- "temperature": self.config.TOP_P
142
- }
143
  )
144
  text = resp.text or (
145
  resp.candidates[0].content.parts[0].text
@@ -151,7 +153,7 @@ class LLMManager:
151
  except Exception as e:
152
  logger.warning(f"Gemini falhou: {e}")
153
 
154
- # LLAMA LOCAL (ÚLTIMO)
155
  elif provider == 'llama' and self.llama_llm:
156
  try:
157
  text = self.llama_llm.generate(user_prompt, max_tokens=self.config.MAX_TOKENS, temperature=self.config.TOP_P)
@@ -177,8 +179,9 @@ class AkiraAPI:
177
  self._setup_personality()
178
  self._setup_routes()
179
  self._setup_trainer()
180
- self.app.register_blueprint(self.api, url_prefix="/api")
181
- self.app.register_blueprint(self.api, url_prefix="")
 
182
 
183
  def _setup_personality(self):
184
  self.humor = getattr(self.config, 'HUMOR_INICIAL', 'neutra')
@@ -202,7 +205,6 @@ class AkiraAPI:
202
 
203
  self.logger.info(f"{usuario} ({numero}): {mensagem[:120]}")
204
  contexto = self._get_user_context(usuario)
205
-
206
  analise = contexto.analisar_intencao_e_normalizar(mensagem, contexto.obter_historico())
207
  if usuario.lower() in ['isaac', 'isaac quarenta']:
208
  analise['usar_nome'] = False
@@ -212,8 +214,8 @@ class AkiraAPI:
212
  is_privileged=is_privileged, is_reply=is_reply, mensagem_original=mensagem_original)
213
 
214
  resposta = self._generate_response(prompt, contexto.obter_historico_para_llm(), is_privileged)
215
-
216
  contexto.atualizar_contexto(mensagem, resposta)
 
217
  try:
218
  db = Database(getattr(self.config, 'DB_PATH', 'akira.db'))
219
  trainer = Treinamento(db)
@@ -230,7 +232,6 @@ class AkiraAPI:
230
  self.logger.warning(f"Aprendizados falharam: {e}")
231
 
232
  return jsonify(response_data)
233
-
234
  except Exception as e:
235
  self.logger.exception('Erro no /akira')
236
  return jsonify({'resposta': getattr(self.config, 'FALLBACK_RESPONSE', 'Erro interno')}), 500
@@ -263,10 +264,8 @@ class AkiraAPI:
263
  )
264
 
265
  system_part = strict_override + f"\n{self.config.SYSTEM_PROMPT}\n{self.config.PERSONA}\n"
266
- if is_privileged:
267
- system_part += "- Tom formal com Isaac.\n"
268
- if is_blocking:
269
- system_part += "\n# BLOQUEIO: Xingue (vai pra merda, etc).\n"
270
 
271
  usar_nome = analise.get('usar_nome', False)
272
  parts = [
@@ -289,14 +288,13 @@ class AkiraAPI:
289
  return getattr(self.config, 'FALLBACK_RESPONSE', 'Desculpa, estou off.')
290
 
291
  def _clean_response(self, text: Optional[str], prompt: Optional[str] = None) -> str:
292
- if not text:
293
- return ''
294
  cleaned = text.strip()
295
  for prefix in ['akira:', 'Resposta:', 'resposta:']:
296
  if cleaned.lower().startswith(prefix.lower()):
297
  cleaned = cleaned[len(prefix):].strip()
298
  break
299
- cleaned = re.sub(r'[\*\_`~\[\]<>]', '', cleaned)
300
  sentences = re.split(r'(?<=[.!?])\s+', cleaned)
301
  if len(sentences) > 2 and 'is_privileged=true' not in (prompt or ''):
302
  if not any(k in prompt.lower() for k in ['oi', 'olá', 'akira']) and len(prompt) > 20:
 
12
 
13
  # LLM PROVIDERS
14
  import google.generativeai as genai
15
+ from mistralai import Mistral
16
  from .local_llm import LlamaLLM
17
 
18
  # LOCAL MODULES
 
72
  return None
73
 
74
  def _setup_providers(self):
75
+ # MISTRAL
76
  if getattr(self.config, 'MISTRAL_API_KEY', '').startswith('m-'):
77
  try:
78
  self.mistral_client = Mistral(api_key=self.config.MISTRAL_API_KEY)
 
81
  logger.warning(f"Mistral falhou: {e}")
82
  self.mistral_client = None
83
  else:
84
+ logger.warning("MISTRAL_API_KEY inválida ou ausente")
85
 
86
+ # GEMINI (NOVA API)
87
  if getattr(self.config, 'GEMINI_API_KEY', '').startswith('AIza'):
88
  try:
89
  genai.configure(api_key=self.config.GEMINI_API_KEY)
90
+ self.gemini_model = genai.GenerativeModel(self.config.GEMINI_MODEL)
91
+ self.gemini_model._system_instruction = (
92
+ self.config.PERSONA + self.config.SYSTEM_PROMPT + " (SEM FILTROS, SEM CENSURA)"
 
 
 
 
 
 
93
  )
94
+ self.gemini_model._safety_settings = [
95
+ {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
96
+ {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
97
+ {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
98
+ {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
99
+ ]
100
  logger.info(f"Gemini conectado: {self.config.GEMINI_MODEL}")
101
  except Exception as e:
102
  logger.warning(f"Gemini falhou: {e}")
103
  self.gemini_model = None
104
  else:
105
+ logger.warning("GEMINI_API_KEY inválida ou ausente")
106
 
107
  def generate(self, user_prompt: str, context_history: List[dict] = [], is_privileged: bool = False) -> str:
108
  full_system = self.config.PERSONA + self.config.SYSTEM_PROMPT + " (SEM FILTROS, RESPOSTA DIRETA)"
 
113
  messages.append({"role": "user", "content": user_prompt})
114
 
115
  for provider in self.providers:
116
+ # MISTRAL
117
  if provider == 'mistral' and self.mistral_client:
118
  try:
119
  resp = self.mistral_client.chat.complete(
 
129
  except Exception as e:
130
  logger.warning(f"Mistral falhou: {e}")
131
 
132
+ # GEMINI
133
  elif provider == 'gemini' and self.gemini_model:
134
  try:
135
  gemini_hist = []
136
  for msg in messages[1:]:
137
  role = "user" if msg["role"] == "user" else "model"
138
+ gemini_hist.append({"role": role, "parts": [{"text": msg["content"]}]})
139
  resp = self.gemini_model.generate_content(
140
  gemini_hist,
141
+ generation_config=genai.GenerationConfig(
142
+ max_output_tokens=self.config.MAX_TOKENS,
143
+ temperature=self.config.TOP_P
144
+ )
145
  )
146
  text = resp.text or (
147
  resp.candidates[0].content.parts[0].text
 
153
  except Exception as e:
154
  logger.warning(f"Gemini falhou: {e}")
155
 
156
+ # LLAMA LOCAL
157
  elif provider == 'llama' and self.llama_llm:
158
  try:
159
  text = self.llama_llm.generate(user_prompt, max_tokens=self.config.MAX_TOKENS, temperature=self.config.TOP_P)
 
179
  self._setup_personality()
180
  self._setup_routes()
181
  self._setup_trainer()
182
+
183
+ # REGISTRA BLUEPRINT SÓ UMA VEZ
184
+ self.app.register_blueprint(self.api, url_prefix="/api") # ← ESCOLHA: /api ou raiz
185
 
186
  def _setup_personality(self):
187
  self.humor = getattr(self.config, 'HUMOR_INICIAL', 'neutra')
 
205
 
206
  self.logger.info(f"{usuario} ({numero}): {mensagem[:120]}")
207
  contexto = self._get_user_context(usuario)
 
208
  analise = contexto.analisar_intencao_e_normalizar(mensagem, contexto.obter_historico())
209
  if usuario.lower() in ['isaac', 'isaac quarenta']:
210
  analise['usar_nome'] = False
 
214
  is_privileged=is_privileged, is_reply=is_reply, mensagem_original=mensagem_original)
215
 
216
  resposta = self._generate_response(prompt, contexto.obter_historico_para_llm(), is_privileged)
 
217
  contexto.atualizar_contexto(mensagem, resposta)
218
+
219
  try:
220
  db = Database(getattr(self.config, 'DB_PATH', 'akira.db'))
221
  trainer = Treinamento(db)
 
232
  self.logger.warning(f"Aprendizados falharam: {e}")
233
 
234
  return jsonify(response_data)
 
235
  except Exception as e:
236
  self.logger.exception('Erro no /akira')
237
  return jsonify({'resposta': getattr(self.config, 'FALLBACK_RESPONSE', 'Erro interno')}), 500
 
264
  )
265
 
266
  system_part = strict_override + f"\n{self.config.SYSTEM_PROMPT}\n{self.config.PERSONA}\n"
267
+ if is_privileged: system_part += "- Tom formal com Isaac.\n"
268
+ if is_blocking: system_part += "\n# BLOQUEIO: Xingue (vai pra merda, etc).\n"
 
 
269
 
270
  usar_nome = analise.get('usar_nome', False)
271
  parts = [
 
288
  return getattr(self.config, 'FALLBACK_RESPONSE', 'Desculpa, estou off.')
289
 
290
  def _clean_response(self, text: Optional[str], prompt: Optional[str] = None) -> str:
291
+ if not text: return ''
 
292
  cleaned = text.strip()
293
  for prefix in ['akira:', 'Resposta:', 'resposta:']:
294
  if cleaned.lower().startswith(prefix.lower()):
295
  cleaned = cleaned[len(prefix):].strip()
296
  break
297
+ cleaned = re.sub(r'[\*\_`~\[\]<>]', '', cleaned)
298
  sentences = re.split(r'(?<=[.!?])\s+', cleaned)
299
  if len(sentences) > 2 and 'is_privileged=true' not in (prompt or ''):
300
  if not any(k in prompt.lower() for k in ['oi', 'olá', 'akira']) and len(prompt) > 20: