import streamlit as st
import os
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage

# Hugging Face token (expected as an env var / Space secret named 'hf')
hf = os.getenv('hf')
if hf:
    os.environ['HUGGINGFACEHUB_API_TOKEN'] = hf
    os.environ['HF_TOKEN'] = hf
# --- Config ---
st.set_page_config(page_title="AI Mentor Chat", layout="centered")
st.title("🤖 AI Mentor Chat")

# --- Sidebar for selections ---
st.sidebar.title("Mentor Preferences")
exp_options = ['<1', '1', '2', '3', '4', '5', '5+']
exp = st.sidebar.selectbox("Select experience:", exp_options)
# Map experience to a label used in the system prompt
experience_map = {
    '<1': 'Newbie',
    '1': '1', '2': '2', '3': '3', '4': '4', '5': '5',
    '5+': 'Professional'
}
experience_label = experience_map[exp]
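# For example: experience_map['<1'] -> 'Newbie' and experience_map['5+'] -> 'Professional',
# while the numeric choices map to themselves, so the prompt below reads e.g.
# "a Python mentor with 3 years of experience".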
# --- Initialize Chat Model ---
# The endpoint wrapper holds the model id, provider and generation settings.
llm_endpoint = HuggingFaceEndpoint(
    repo_id='meta-llama/Llama-3.2-3B-Instruct',
    provider='sambanova',
    temperature=0.7,
    max_new_tokens=150,
    task='conversational'
)
# ChatHuggingFace only needs the wrapped endpoint; repeating the endpoint kwargs here is unnecessary.
chat_model = ChatHuggingFace(llm=llm_endpoint)
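# Optional sanity check (illustrative sketch only, not part of the Streamlit flow):
# invoke the chat model directly with a message list, the same way the form handler
# below does. Uncomment to test the endpoint/token setup outside the UI.
# reply = chat_model.invoke([SystemMessage(content='You are a friendly Python mentor.'),
#                            HumanMessage(content='What is a list comprehension?')])
# print(reply.content)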
# --- Session State ---
PAGE_KEY = "python_chat_history"

try:
    if PAGE_KEY not in st.session_state:
        st.session_state[PAGE_KEY] = []

    # --- Display Chat History ---
    st.subheader("🗨️ Chat History")
    for user, bot in st.session_state[PAGE_KEY]:
        st.markdown(f"**You:** {user}")
        st.markdown(f"**Mentor:** {bot}")
        st.markdown("---")

    # --- Chat Form ---
    with st.form(key="chat_form"):
        user_input = st.text_input("Ask your question:")
        submit = st.form_submit_button("Send")

    # --- Chat Logic ---
    if submit and user_input:
        # System context: restrict the mentor to Python topics
        system_prompt = f"""Act as a Python mentor with {experience_label} years of experience. Teach in a friendly, approachable manner while following these strict rules:
1. Only answer questions related to Python programming (including libraries, frameworks, and tools in the Python ecosystem)
2. For any non-Python query, respond with exactly: "I specialize only in Python programming. This appears to be a non-Python topic."
3. Never suggest you can help with non-Python topics
4. Keep explanations clear, practical, and beginner-friendly when appropriate
5. Include practical examples when explaining concepts
6. For advanced topics, assume the student has basic Python knowledge"""

        # Create message list
        messages = [SystemMessage(content=system_prompt), HumanMessage(content=user_input)]

        # Get model response
        result = chat_model.invoke(messages)

        # Append to history, then rerun so the new exchange appears in the history above
        st.session_state[PAGE_KEY].append((user_input, result.content))
        st.rerun()
except Exception:
    st.warning('The token limit has been reached. Please revisit in 24 hours!')
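# --- Legacy prototype (apparently an earlier draft of the same app, kept commented out for reference) ---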
# import streamlit as st
# import os
# import langchain
# import langchain_huggingface
# from langchain_huggingface import HuggingFaceEndpoint,HuggingFacePipeline,ChatHuggingFace
# from langchain_core.messages import HumanMessage,AIMessage,SystemMessage
# deep_seek_skeleton = HuggingFaceEndpoint(repo_id='meta-llama/Llama-3.2-3B-Instruct',
#                                          provider = 'sambanova',
#                                          temperature=0.7,
#                                          max_new_tokens=150,
#                                          task = 'conversational')
# deep_seek = ChatHuggingFace(llm=deep_seek_skeleton,
#                             repo_id='meta-llama/Llama-3.2-3B-Instruct',
#                             provider = 'sambanova',
#                             temperature=0.7,
#                             max_new_tokens=150,
#                             task = 'conversational')
# exp1 = ['<1', '1', '2', '3', '4', '5', '5+']
# exp = st.selectbox("Select experience:", exp1)
# if exp == '<1':
#     experince = 'New bie mentor'
# elif exp == '1':
#     experince = '1'
# elif exp == '2':
#     experince = '2'
# elif exp == '3':
#     experince = '3'
# elif exp == '4':
#     experince = '4'
# elif exp == '5':
#     experince = '5'
# elif exp == '5+':
#     experince = 'professional'
# selec = ['Python', 'Machine Learning', 'Deep Learning', 'Statistics', 'SQL', 'Excel']
# sub = st.selectbox("Select experience:", selec)
# user_input = st.text_input("Enter your query:")
# l = []
# st.write(l)
# message = [SystemMessage(content=f'Act as {sub} mentor who has {experince} years of experience and the one who teaches in very friendly manner and also he explains everything within 150 words'),
#            HumanMessage(content=user_input)]
# while user_input!='end':
#     l.append(user_input)
#     l.append(result.content)
#     st.write(l)
#     user_input = st.text_input("Enter your query:")
#     message = [SystemMessage(content=f'Act as {sub} mentor who has {experince} years of experience and the one who teaches in very friendly manner and also he explains everything within 150 words'),
#                HumanMessage(content=user_input)]
#     result = deep_seek.invoke(message)