import time  # to simulate real-time data, time loop
import numpy as np  # np.mean, np.random
import pandas as pd  # read csv, df manipulation
import plotly.express as px  # interactive charts
import streamlit as st  # 🎈 data web app development

# PersistDataset -----
import os
import csv
import gradio as gr
from gradio import inputs, outputs  # legacy Gradio namespaces; unused in this excerpt
import huggingface_hub
from huggingface_hub import Repository, hf_hub_download, upload_file
from datetime import datetime
# Dataset and token links - change awacke1 to your own HF id, and add an HF_TOKEN
# secret to your repo for write permissions. This lets you save results to your
# own dataset hosted on HF (a token sanity check sketch follows the constants below).
# DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/Carddata.csv"
DATASET_REPO_URL = "https://huggingface.co/datasets/" + "awacke1/PrivateASRWithMemory.csv"
# DATASET_REPO_ID = "awacke1/Carddata.csv"
DATASET_REPO_ID = "awacke1/PrivateASRWithMemory.csv"
DATA_FILENAME = "PrivateASRWithMemory.csv"
DATA_FILE = os.path.join("data", DATA_FILENAME)
HF_TOKEN = os.environ.get("HF_TOKEN")
DataText = ""
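
# Optional guard (a minimal sketch, assuming the Space exposes HF_TOKEN as a secret;
# not part of the original app): warn early if the token is missing, since the
# dataset pushes further down will otherwise fail with a less obvious error.
if HF_TOKEN is None:
    print("Warning: HF_TOKEN is not set; pushes to the dataset repo will fail.")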
# ---------------------------------------------
SCRIPT = """
<script>
if (!window.hasBeenRun) {
    window.hasBeenRun = true;
    console.log("should only happen once");
    document.querySelector("button.submit").click();
}
</script>
"""
@st.experimental_singleton
def get_database_session(url):
    # Create a database session object that points to the URL.
    # Placeholder from the Streamlit caching docs; not called in this app.
    raise NotImplementedError("wire up a real database session here")


@st.experimental_memo
def fetch_and_clean_data(url):
    # Fetch data from the URL, then clean it up.
    # Minimal placeholder body; the memoized result can be cleared below.
    return pd.read_csv(url)


# Clear all in-memory and on-disk memo caches.
if st.checkbox("Clear All"):
    # Clear values from *all* memoized functions.
    st.experimental_memo.clear()
try:
    hf_hub_download(
        repo_id=DATASET_REPO_ID,
        filename=DATA_FILENAME,
        repo_type="dataset",  # the repo lives under huggingface.co/datasets/
        cache_dir="data",
        force_filename=DATA_FILENAME,
    )
except Exception:
    print("file not found")

repo = Repository(local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN)
print(repo)
DataText = repo
st.markdown(str(DataText))
def generate_html() -> str:
    with open(DATA_FILE) as csvfile:
        reader = csv.DictReader(csvfile)
        rows = []
        for row in reader:
            rows.append(row)
        rows.reverse()
        if len(rows) == 0:
            return "no messages yet"
        else:
            html = "<div class='chatbot'>"
            for row in rows:
                html += "<div>"
                html += f"<span>{row['inputs']}</span>"
                html += f"<span class='outputs'>{row['outputs']}</span>"
                html += "</div>"
            html += "</div>"
            return html
def store_message(name: str, message: str):
    if name and message:
        with open(DATA_FILE, "a") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
            writer.writerow(
                {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())}
            )
        # push the updated CSV back to the dataset repo (comment this out to disable saving)
        commit_url = repo.push_to_hub()
    return ""
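
# gradio is imported above but never used in this excerpt; one plausible wiring of
# the two persistence helpers (a sketch under that assumption, not the original
# interface) would be:
#   demo = gr.Interface(
#       fn=store_message,
#       inputs=[gr.Textbox(label="name"), gr.Textbox(label="message")],
#       outputs="html",
#       description=generate_html(),
#   )
#   demo.launch()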
# st.set_page_config(
#     page_title="Real-Time Data Science Dashboard",
#     page_icon="✅",
#     layout="wide",
# )
# read csv from a GitHub repo
dataset_url = "https://raw.githubusercontent.com/Lexie88rus/bank-marketing-analysis/master/bank.csv"


# read csv from a URL, memoized so reruns don't refetch it
@st.experimental_memo
def get_data() -> pd.DataFrame:
    return pd.read_csv(dataset_url)


df = get_data()
# dashboard title
st.title("Real-Time / Live Data Science Dashboard")

# top-level filters
job_filter = st.selectbox("Select the Job", pd.unique(df["job"]))

# creating a single-element container
placeholder = st.empty()

# dataframe filter
df = df[df["job"] == job_filter]
# near real-time / live feed simulation
for seconds in range(200):
    df["age_new"] = df["age"] * np.random.choice(range(1, 5))
    df["balance_new"] = df["balance"] * np.random.choice(range(1, 5))

    # creating KPIs
    avg_age = np.mean(df["age_new"])
    count_married = int(
        df[(df["marital"] == "married")]["marital"].count()
        + np.random.choice(range(1, 30))
    )
    balance = np.mean(df["balance_new"])

    with placeholder.container():
        # create three columns
        kpi1, kpi2, kpi3 = st.columns(3)

        # fill in those three columns with respective metrics or KPIs
        kpi1.metric(
            label="Age ⏳",
            value=round(avg_age),
            delta=round(avg_age) - 10,
        )
        kpi2.metric(
            label="Married Count 💍",
            value=int(count_married),
            delta=-10 + count_married,
        )
        kpi3.metric(
            label="A/C Balance ＄",
            value=f"$ {round(balance, 2)} ",
            delta=-round(balance / count_married) * 100,
        )

        # create two columns for charts
        fig_col1, fig_col2 = st.columns(2)
        with fig_col1:
            st.markdown("### First Chart")
            fig = px.density_heatmap(data_frame=df, y="age_new", x="marital")
            st.write(fig)
        with fig_col2:
            st.markdown("### Second Chart")
            fig2 = px.histogram(data_frame=df, x="age_new")
            st.write(fig2)

        st.markdown("### Detailed Data View")
        st.dataframe(df)
        time.sleep(1)