Update app.py
app.py
CHANGED
@@ -56,7 +56,7 @@ load_dotenv()
 
 # langfuse analytics
 from langfuse.callback import CallbackHandler
-
+
 # Inventory API data table
 from tabulate import tabulate
 
@@ -121,13 +121,6 @@ branch = os.getenv("branch")
 langfuse_handler = CallbackHandler()
 langfuse_handler.auth_check() # Optional: Checks if the authentication is successful
 
-# Create the callback manager and add both the response history and langfuse handler
-callback_handler = ResponseHistoryCallback()
-
-
-
-
-
 nltk.download('punkt')
 
 open_api_key_token = os.getenv("OPENAI_API_KEY")
@@ -163,18 +156,7 @@ apis = [
 # LLM setup
 llm = ChatOpenAI(model="gpt-4o-mini", max_tokens=300, temperature=0.1)
 llm_chart = OpenAI(is_safe=False)
-
-class ResponseHistoryCallback(BaseCallbackHandler):
-    def __init__(self):
-        self.history = []  # Initialize history to store responses
-
-    def on_agent_finish(self, output):
-        # Capture the response when the agent finishes
-        self.history.append(output)
-        print(f"Agent's response: {output}")  # Optionally print
-
-    def get_history(self):
-        return self.history  # Return the captured history
+
 def get_schema(_):
     schema_info = db.get_table_info()  # This should be a string of your SQL schema
     return schema_info
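For reference, a minimal sketch of a response-history callback written against LangChain's standard hook signature, which the removed class did not follow: on_agent_finish receives an AgentFinish object rather than a bare string. This sketch is not part of the commit; the import paths and names below are assumptions based on current langchain_core, not code from app.py.

from langchain_core.agents import AgentFinish
from langchain_core.callbacks import BaseCallbackHandler

class ResponseHistorySketch(BaseCallbackHandler):
    """Collects the agent's final outputs; illustrative only."""

    def __init__(self):
        self.history = []  # one entry per completed agent run

    def on_agent_finish(self, finish: AgentFinish, **kwargs):
        # finish.return_values usually carries the final "output" text
        self.history.append(finish.return_values.get("output", ""))

    def get_history(self):
        return self.history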
@@ -521,7 +503,7 @@ def chat_with_llm(df,question):
 def bind_llm(llm, tools,prompt_template):
     llm = llm.bind()
     agent = create_tool_calling_agent(llm, tools, ChatPromptTemplate.from_template(prompt_template))
-    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True
+    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
     return agent_executor
 
 # Define input and output models using Pydantic
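As a usage sketch only: this is roughly how the executor returned by bind_llm is driven elsewhere in app.py (the invoke pattern and langfuse_handler appear in the hunks above and below); the tools list, prompt_template, and the question text are placeholders, not the app's actual values.

agent_executor = bind_llm(llm, tools, prompt_template)  # llm, tools, prompt_template defined earlier in app.py
result = agent_executor.invoke(
    {"input": "How many accounts are on file?"},  # placeholder question
    config={"callbacks": [langfuse_handler]},     # Langfuse tracing handler created near the top of app.py
)
print(result.get("output", ""))  # AgentExecutor returns a dict with an "output" key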
@@ -592,10 +574,8 @@ def handle_query(user_question, chatbot, audio=None):
     Function to handle the processing of user input with `AgentExecutor.invoke()`.
 
     global current_event, stop_event
-
     # Clear previous stop event and current_event
     stop_event.clear()
-
     if current_event and not current_event.done():
         chatbot.append(("","A query is already being processed. Please stop it before starting a new one."))
         return gr.update(value=chatbot)
@@ -617,7 +597,6 @@ def handle_query(user_question, chatbot, audio=None):
             return gr.update(value=chatbot)
 
         time.sleep(1)  # Wait for 1 second before checking again
-
     if current_event.cancelled():
         chatbot.append((user_question, "Sorry, we encountered an error while processing your request. Please try after some time."))
         return gr.update(value=chatbot)
@@ -691,7 +670,6 @@ def answer_question_thread(user_question, chatbot,audio=None):
 
     global iterations
     iterations = 0
-    agent_responses = []
     # Ensure the temporary chart directory exists
     # ensure_temp_chart_dir()
     # Clean the /tmp/gradio/ directory
@@ -709,7 +687,6 @@ def answer_question_thread(user_question, chatbot,audio=None):
         with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
             audio_segment.export(temp_audio_file.name, format="wav")
             temp_audio_file_path = temp_audio_file.name
-
         recognizer = sr.Recognizer()
         with sr.AudioFile(temp_audio_file_path) as source:
             audio_content = recognizer.record(source)
@@ -723,11 +700,7 @@ def answer_question_thread(user_question, chatbot,audio=None):
 
     while iterations < max_iterations:
 
-
-        response = agent_executor.invoke({"input": user_question}, config={"callbacks": [[callback_handler, langfuse_handler]]}, early_stopping_method="generate")
-        # Track the response
-        agent_responses.append(response)
-
+        response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]}, early_stopping_method="generate")
         #create_file_HF()
         if isinstance(response, dict):
             response_text = response.get("output", "")
@@ -736,12 +709,9 @@ def answer_question_thread(user_question, chatbot,audio=None):
         if "invalid" not in response_text.lower():
             break
         iterations += 1
-
-
-
-    print("final response::")
-    print(response)
-    return user_question , "Sorry, I couldn't complete your request" #"The agent could not generate a valid response within the iteration limit."
+
+    if iterations == max_iterations:
+        return user_question , "Sorry, I couldn't complete your request" #"The agent could not generate a valid response within the iteration limit."
 
     if os.getenv("IMAGE_PATH") in response_text:
         # Open the image file
@@ -780,20 +750,9 @@ def answer_question_thread(user_question, chatbot,audio=None):
         except Exception as e:
             print(f"Error loading image file: {e}")
             response_text = "Chart generation failed. Please try again."
-
-        print("max iterations error in 1")
-        if agent_executor.history:
-            print(agent_executor.history[-1])
-            #return agent_executor.history[-1].get("output")
-
+
         return user_question, response_text
     else:
-        if("max iterations" in response_text):
-            print("max iterations error in 2")
-        # Access the response history after execution
-        response_history = callback_handler.get_history()
-        print("Response History:", response_history)
-
         return user_question, response_text
         # response_text = response_text.replace('\n', ' ').replace(' ', ' ').strip()
         # return response_text
@@ -830,7 +789,7 @@ def answer_question(user_question, chatbot, audio=None):
             user_question = "Sorry, I could not understand the audio."
         except sr.RequestError:
             user_question = "Could not request results from Google Speech Recognition service."
-
+
     while iterations < max_iterations:
 
         response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]})
@@ -1262,19 +1221,13 @@ def handle_large_dataset(df, create_document,isDataFrame):
     if len(df.columns) > 1:
         # Skipping the original first column
         # List of required columns
-
         required_columns = ['BROKER', 'ACCOUNT NUMBER', 'EMPLOYEE NAME', 'ACCOUNT NAME', 'ACCOUNT ID']
         # Filter the DataFrame to include only the required columns
         #print(df[required_columns])
         #limited_data = df[required_columns]
-
-
-
-        limited_data_without_first_column = limited_data.iloc[:, 1:]
-    else:
-        limited_data_without_first_column = limited_data_top3
-
-
+        limited_data11 = df.head(3)
+        limited_data = limited_data11[required_columns]
+        limited_data_without_first_column = limited_data.iloc[:, 1:]
     else:
         limited_data = df.head(20)
         limited_data_without_first_column = limited_data
@@ -1382,14 +1335,11 @@ def create_pdf(cname,ename,account_number, directory):
         output.write(filled.read())
 
     """from PyPDF2 import PdfReader, PdfWriter
-
    reader = PdfReader(output_file_name)
    writer = PdfWriter()
-
    for page in reader.pages:
        page.merge_page(page)  # Flatten content
        writer.add_page(page)
-
    with open(output_file_name, "wb") as f:
        writer.write(f)
    """
@@ -1438,7 +1388,6 @@ def get_download_link(file_path,file_name):
     return f"{base_url}/{file_path}/{file_name}"
 
 css = """
-
 /* Example of custom button styling */
 .gr-button {
     background-color: #6366f1; /* Change to your desired button color */
@@ -1449,11 +1398,9 @@ css = """
     font-size: 12px;
     cursor: pointer;
 }
-
 .gr-button:hover {
     background-color: #8a92f7; /* Darker shade on hover */
 }
-
 .gr-buttonbig {
     background-color: #6366f1; /* Change to your desired button color */
     color: white;
@@ -1463,41 +1410,33 @@ css = """
     font-size: 14px;
     cursor: pointer;
 }
-
 .gr-buttonbig:hover {
     background-color: #8a92f7; /* Darker shade on hover */
 }
-
 /* Customizing the Logout link to be on the right */
 .logout-link {
     text-align: right;
     display: inline-block;
     width: 100%;
 }
-
 .logout-link a {
     color: #4A90E2; /* Link color */
     text-decoration: none;
     font-size: 16px;
 }
-
 .chatbot_gpt {
     height: 600px !important; /* Adjust height as needed */
 }
-
 .logout-link a:hover {
     text-decoration: underline; /* Underline on hover */
 }
-
 .message-buttons-right{
     display: none !important;
 }
-
 body, .gradio-container {
     margin: 0;
     padding: 0;
 }
-
 /* Styling the tab header with a blue background */
 .gr-tab-header {
     background-color: #4A90E2; /* Blue background for the tab header */
@@ -1506,17 +1445,14 @@ body, .gradio-container {
     color: white;
     font-size: 16px;
 }
-
 /* Styling the selected tab text color to be green */
 .gr-tab-header .gr-tab-active {
     color: green; /* Change selected tab text to green */
 }
-
 /* Keep non-selected tab text color white */
 .gr-tab-header .gr-tab {
     color: white;
 }
-
 /* Custom CSS for reducing the size of the video element */
 .video-player {
     width: 500px; /* Set a custom width for the video */
@@ -1528,7 +1464,6 @@ body, .gradio-container {
     width:500px;
     justify-content: flex-start; /* Align buttons to the left */
     gap: 10px; /* Space between buttons */
-
 }
 .custom-button {
     width: 100px; /* Small width */
@@ -1541,7 +1476,6 @@ body, .gradio-container {
     border-radius: 5px; /* Rounded corners */
     cursor: pointer; /* Pointer cursor on hover */
 }
-
 .custom-button:hover {
     background-color: #e0e0e0; /* Background color on hover */
 }
@@ -1620,4 +1554,4 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
         u.upload(lambda _: (gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)), None, [title_textbox, summary_textarea, add_button])
 
 
-demo.launch(auth=[("lakshmi", "redmind"), ("admin", "redmind"), ("arun", "redmind"), ("NewageGlobal", "Newage123$")], auth_message="RedMindGPT", inline=False)
+demo.launch(auth=[("lakshmi", "redmind"), ("admin", "redmind"), ("arun", "redmind"), ("NewageGlobal", "Newage123$")], auth_message="RedMindGPT", inline=False)