# revision 0f922c9
import ollama
def process_with_llm(llm_model, prompt):
    """Send *prompt* to the Ollama model named *llm_model* and return the reply text.

    Issues a single-turn chat request (one user message) and extracts the
    assistant's message content from the response dict.
    """
    chat_messages = [{"role": "user", "content": prompt}]
    reply = ollama.chat(model=llm_model, messages=chat_messages)
    # Ollama's chat response nests the text under message -> content.
    return reply["message"]["content"]