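# NLP task helpers built on LangChain and Groq: translation, summarization,
# combined translate-and-summarize, and question answering. Each task wires a
# prompt template into a ChatGroq model followed by a string output parser.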
from langchain_groq import ChatGroq
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from dotenv import load_dotenv, find_dotenv
import os
import gradio as gr

# Load environment variables; ChatGroq reads GROQ_API_KEY from the environment.
_ = load_dotenv(find_dotenv())
groq_api_key = os.getenv("GROQ_API_KEY")


class NLP_tasks_crs:

    @classmethod
    def translator(cls, text: str="Hello world!", language: str="French", style: str="polite"):
        # call our prompt engineering class
        prompt = Prompt_engineering_crs(text, language, style)
        prompt = prompt.translator_prompt()
        # call our LLM_crs class
        llm = LLM_crs()
        llm = llm.chain_llm()
        result = llm.invoke(prompt)
        return result
        
    @classmethod
    def summarization(cls, text: str):
        # call our prompt engineering class
        prompt = Prompt_engineering_crs(text)
        prompt = prompt.summarization_prompt()
        # call our LLM_crs class
        llm = LLM_crs()
        llm = llm.chain_llm()
        result = llm.invoke(prompt)
        return result

    @classmethod
    def translator_summarization(cls, text: str, language: str, style: str="polite"):
        # call our prompt engineering class
        prompt = Prompt_engineering_crs(text, language, style)
        prompt = prompt.translate_summarize_prompt()
        # call our LLM_crs class
        llm = LLM_crs()
        llm = llm.chain_llm()
        result = llm.invoke(prompt)
        return result

    @classmethod
    def question_answer(cls, question: str):
        # call our prompt engineering class
        prompt = Prompt_engineering_crs()
        prompt = prompt.question_answer_prompt(question)
        # call our LLM_crs class
        llm = LLM_crs()
        llm = llm.chain_llm()
        result = llm.invoke(prompt)
        return result


class Prompt_engineering_crs:
    def __init__(self, text: str="Hello world.", language: str="English", style: str="calm and respectful"):
        self.text = text
        self.language = language
        self.style = style

    def translator_prompt(self):
        template = """You are the best expert translator of human languages. Your role is to 
         first detect the language of the user text, which is delimited by three 
         backticks. Second, translate that text into the desired language provided by the user, which is 
         {language}. Make sure to use a {style} tone as your style. Finally, make sure to sound like a formal native speaker and provide only the final result without 
         additional information. Thanks.
         text: ```{text}``` """
        prompt_template = ChatPromptTemplate.from_template(template)
        prompt = prompt_template.format_messages(text=self.text, language=self.language, style=self.style)
        return prompt
    
    def summarization_prompt(self):
        summary = """You are the best expert summarizer in the world. Your role is to summarize the user text in its detected language. 
        The provided text is below, between three backticks. Make sure to keep the right context. Don't forget to give
        a title to the summary. Finally, make sure to sound like a formal native speaker and provide 
        only the final result without additional information or comments. Thanks.
        text: ```{text}```"""
        prompt_template = ChatPromptTemplate.from_template(summary)
        prompt = prompt_template.format_messages(text=self.text)
        return prompt
    
    def translate_summarize_prompt(self):
        template = """You are the best translator and summarizer in the world. Your first role is to translate 
        the text below into the {language} language, using the style given below during the
        translation. Your second role is to summarize the result of the translation, in the {language} language,
        with clear and concise words and expressions. Furthermore, use a few emojis in
        the translation. Finally, make sure to sound like a formal native speaker and provide only the final result without 
        additional information or comments. Thanks.
           text: {text} 
           language: {language}
           style: {style} """
        prompt_template = ChatPromptTemplate.from_template(template)
        prompt = prompt_template.format_messages(text=self.text, language=self.language, style=self.style)
        return prompt
    
    @classmethod
    def question_answer_prompt(cls, question: str):
        template = "You are a master of questions and answers. Here, your role is to answer to any questions from " \
        "from the user. If you do not know any questions, please state that you don't know them. Don't " \
        "invent answers for the questions you do not know. If you have many answers for a question, provide the accurate" \
        "ones to the user. Use a polite style to communicate with the user. Indeed, speak like a native speaker. See below the question of the user. " \
        "question: {question}"
        prompt_template = ChatPromptTemplate.from_template(template)
        prompt = prompt_template.format_messages(question = {question})
        return prompt


class LLM_crs:
    def __init__(self, model="moonshotai/kimi-k2-instruct-0905"):
        self.model = model

    def chain_llm(self):
        # Compose the Groq chat model with a string output parser into a single runnable chain.
        llm = ChatGroq(model=self.model)
        parser = StrOutputParser()
        chain_llm = llm | parser
        return chain_llm
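

# Minimal usage sketch (illustrative, not part of the original module). It assumes
# GROQ_API_KEY is set in the environment and that the configured model is available
# on Groq; the example strings below are placeholders.
if __name__ == "__main__":
    print(NLP_tasks_crs.translator("Good morning, how are you?", language="French", style="polite"))
    print(NLP_tasks_crs.summarization(
        "LangChain lets you chain a prompt template, a chat model and an output parser into a single runnable pipeline."
    ))
    print(NLP_tasks_crs.translator_summarization(
        "LangChain lets you chain a prompt template, a chat model and an output parser into a single runnable pipeline.",
        language="Spanish",
    ))
    print(NLP_tasks_crs.question_answer("What is the capital city of Senegal?"))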