Spaces:
Running
Running
File size: 587 Bytes
3a3b216 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 |
# TODO: implement ollama client
from typing import Any, List, Optional
from graphgen.bases import BaseLLMClient, Token
class OllamaClient(BaseLLMClient):
    """LLM client backed by an Ollama server (stub — not yet implemented).

    Every generation method raises ``NotImplementedError`` instead of the
    original bare ``pass``: a ``pass`` body silently returns ``None``,
    violating the declared ``str`` / ``List[Token]`` return types and
    deferring the failure to a confusing point downstream. Raising makes
    the unimplemented state explicit at the call site.
    """

    async def generate_answer(
        self, text: str, history: Optional[List[str]] = None, **extra: Any
    ) -> str:
        """Generate a text answer for *text*.

        Args:
            text: The prompt to send to the model.
            history: Optional prior conversation turns — NOTE(review):
                exact format (alternating user/assistant?) should be
                confirmed against the BaseLLMClient contract.
            **extra: Backend-specific generation options passed through.

        Raises:
            NotImplementedError: Always — Ollama support is not implemented.
        """
        raise NotImplementedError("OllamaClient.generate_answer is not implemented")

    async def generate_topk_per_token(
        self, text: str, history: Optional[List[str]] = None, **extra: Any
    ) -> List[Token]:
        """Generate tokens with their top-k alternatives per position
        (presumed from the name — confirm against BaseLLMClient).

        Raises:
            NotImplementedError: Always — Ollama support is not implemented.
        """
        raise NotImplementedError("OllamaClient.generate_topk_per_token is not implemented")

    async def generate_inputs_prob(
        self, text: str, history: Optional[List[str]] = None, **extra: Any
    ) -> List[Token]:
        """Generate per-token probabilities for the input *text*
        (presumed from the name — confirm against BaseLLMClient).

        Raises:
            NotImplementedError: Always — Ollama support is not implemented.
        """
        raise NotImplementedError("OllamaClient.generate_inputs_prob is not implemented")
|