{"text": "# Create app to read and display data from Excel file import pandas as pd from taipy import Gui # ---- READ EXCEL ---- df = pd.read_excel( io=\"data/supermarkt_sales.xlsx\", engine=\"openpyxl\", sheet_name=\"Sales\", skiprows=3, usecols=\"B:R\", nrows=1000, ) # Add 'hour' column to dataframe df[\"hour\"] = pd.to_datetime(df[\"Time\"], format=\"%H:%M:%S\").dt.hour # initialization of variables cities = list(df[\"City\"].unique()) types = list(df[\"Customer_type\"].unique()) genders = list(df[\"Gender\"].unique()) city = cities customer_type = types gender = genders layout = {\"margin\": {\"l\": 220}} # Markdown for the entire page ## NOTE: {: .orange} references a color from main.css use to style my text ## ## \"text\" here is just a name given to my part/my section ## it has no meaning in the code page = \"\"\"<|toggle|theme|> <|layout|columns=20 80|gap=30px| <|{customer_type}|selector|lov={types}|multiple|label=Select the Customer Type|dropdown|on_change=on_filter|width=100%|> <|{gender}|selector|lov={genders}|multiple|label=Select the Gender|dropdown|on_change=on_filter|width=100%|> |sidebar> |total_sales> <|{\"\u2b50\" * int(round(round(df_selection[\"Rating\"].mean(), 1), 0))}|> |average_rating> |average_sale> |>
Display df_selection in an expandable <|Sales Table|expandable|expanded=False| <|{df_selection}|table|width=100%|page_size=5|rebuild|class_name=table|> |> <|{sales_by_product_line}|chart|x=Total|y=Product|type=bar|orientation=h|title=Sales by Product|layout={layout}|color=#ff462b|> |charts> |main_page> |> Code from [Coding is Fun](https://github.com/Sven-Bo) Get the Taipy Code [here](https://github.com/Avaiga/demo-sales-dashboard) and the original code [here](https://github.com/Sven-Bo/streamlit-sales-dashboard) \"\"\" def filter(city, customer_type, gender): df_selection = df[ df[\"City\"].isin(city) & df[\"Customer_type\"].isin(customer_type) & df[\"Gender\"].isin(gender) ] # SALES BY PRODUCT LINE [BAR CHART] sales_by_product_line = ( df_selection[[\"Product line\", \"Total\"]] .groupby(by=[\"Product line\"]) .sum()[[\"Total\"]] .sort_values(by=\"Total\") ) sales_by_product_line[\"Product\"] = sales_by_product_line.index # SALES BY HOUR [BAR CHART] sales_by_hour = ( df_selection[[\"hour\", \"Total\"]].groupby(by=[\"hour\"]).sum()[[\"Total\"]] ) sales_by_hour[\"Hour\"] = sales_by_hour.index return df_selection, sales_by_product_line, sales_by_hour def on_filter(state): state.df_selection, state.sales_by_product_line, state.sales_by_hour = filter( state.city, state.customer_type, state.gender ) if __name__ == \"__main__\": # initialize dataframes df_selection, sales_by_product_line, sales_by_hour = filter( city, customer_type, gender ) # run the app Gui(page).run() "} {"text": "# Create an app with slider and chart from taipy.gui import Gui from math import cos, exp value = 10 page = \"\"\" Markdown # Taipy *Demo* Value: <|{value}|text|> <|{value}|slider|on_change=on_slider|> <|{data}|chart|> \"\"\" def compute_data(decay:int)->list: return [cos(i/6) * exp(-i*decay/600) for i in range(100)] def on_slider(state): state.data = compute_data(state.value) data = compute_data(value) Gui(page).run(use_reloader=True, port=5002)"} {"text": "# Create app to predict covid in the world from taipy.gui import Gui import taipy as tp from pages.country.country import country_md from pages.world.world import world_md from pages.map.map import map_md from pages.predictions.predictions import predictions_md, selected_scenario from pages.root import root, selected_country, selector_country from config.config import Config pages = { '/':root, \"Country\":country_md, \"World\":world_md, \"Map\":map_md, \"Predictions\":predictions_md } gui_multi_pages = Gui(pages=pages) if __name__ == '__main__': tp.Core().run() gui_multi_pages.run(title=\"Covid Dashboard\") "} {"text": "# Create app for finance data analysis import yfinance as yf from taipy.gui import Gui from taipy.gui.data.decimator import MinMaxDecimator, RDP, LTTB df_AAPL = yf.Ticker(\"AAPL\").history(interval=\"1d\", period=\"100Y\") df_AAPL[\"DATE\"] = df_AAPL.index.astype(\"int64\").astype(float) n_out = 500 decimator_instance = MinMaxDecimator(n_out=n_out) decimate_data_count = len(df_AAPL) page = \"\"\" # Decimator From a data length of <|{len(df_AAPL)}|> to <|{n_out}|> ## Without decimator <|{df_AAPL}|chart|x=DATE|y=Open|> ## With decimator <|{df_AAPL}|chart|x=DATE|y=Open|decimator=decimator_instance|> \"\"\" gui = Gui(page) gui.run(port=5026) "} {"text": "# Create an app to upload a csv and display it in a table from taipy.gui import Gui import pandas as pd data = [] data_path = \"\" def data_upload(state): state.data = pd.read_csv(state.data_path) page = \"\"\" <|{data_path}|file_selector|on_action=data_upload|> <|{data}|table|> \"\"\" 
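# Run the GUI: selecting a file with the file_selector triggers data_upload, which reads the CSV into `data` and refreshes the table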
Gui(page).run() "} {"text": "# Create an app to visualize sin and amp with slider and chart from taipy.gui import Gui from math import cos, exp state = {\"amp\": 1, \"data\":[]} def update(state): x = [i/10 for i in range(100)] y = [math.sin(i)*state.amp for i in x] state.data = [{\"data\": y}] page = \"\"\" Amplitude: <|{amp}|slider|> <|Data|chart|data={data}|> \"\"\" Gui(page).run(state=state)"} {"text": "# Create an app to visualize sin, cos with slider and chart from taipy.gui import Gui from math import sin, cos, pi state = { \"frequency\": 1, \"decay\": 0.01, \"data\": [] } page = \"\"\" # Sine and Cosine Functions Frequency: <|{frequency}|slider|min=0|max=10|step=0.1|on_change=update|> Decay: <|{decay}|slider|min=0|max=1|step=0.01|on_change=update|> <|Data|chart|data={data}|> \"\"\" def update(state): x = [i/10 for i in range(100)] y1 = [sin(i*state.frequency*2*pi) * exp(-i*state.decay) for i in x] y2 = [cos(i*state.frequency*2*pi) * exp(-i*state.decay) for i in x] state.data = [ {\"name\": \"Sine\", \"data\": y1}, {\"name\": \"Cosine\", \"data\": y2} ] Gui(page).run(use_reloader=True, state=state)"} {"text": "# Create app to visualize country population import numpy as np import pandas as pd from taipy.gui import Markdown from data.data import data selected_country = 'France' data_country_date = None representation_selector = ['Cumulative', 'Density'] selected_representation = representation_selector[0] layout = {'barmode':'stack', \"hovermode\":\"x\"} options = {\"unselected\":{\"marker\":{\"opacity\":0.5}}} def initialize_case_evolution(data, selected_country='France'): # Aggregation of the dataframe to erase the regions that will not be used here data_country_date = data.groupby([\"Country/Region\",'Date'])\\ .sum()\\ .reset_index() # a country is selected, here France by default data_country_date = data_country_date.loc[data_country_date['Country/Region']==selected_country] return data_country_date data_country_date = initialize_case_evolution(data) pie_chart = pd.DataFrame({\"labels\": [\"Deaths\", \"Recovered\", \"Confirmed\"],\"values\": [data_country_date.iloc[-1, 6], data_country_date.iloc[-1, 5], data_country_date.iloc[-1, 4]]}) def convert_density(state): if state.selected_representation == 'Density': df_temp = state.data_country_date.copy() df_temp['Deaths'] = df_temp['Deaths'].diff().fillna(0) df_temp['Recovered'] = df_temp['Recovered'].diff().fillna(0) df_temp['Confirmed'] = df_temp['Confirmed'].diff().fillna(0) state.data_country_date = df_temp else: state.data_country_date = initialize_case_evolution(data, state.selected_country) def on_change_country(state): # state contains all the Gui variables and this is through this state variable that we can update the Gui # state.selected_country, state.data_country_date, ... # update data_country_date with the right country (use initialize_case_evolution) print(\"Chosen country: \", state.selected_country) state.data_country_date = initialize_case_evolution(data, state.selected_country) state.pie_chart = pd.DataFrame({\"labels\": [\"Deaths\", \"Recovered\", \"Confirmed\"], \"values\": [state.data_country_date.iloc[-1, 6], state.data_country_date.iloc[-1, 5], state.data_country_date.iloc[-1, 4]]}) convert_density(state) page =\"\"\" # **Country**{: .color-primary} Statistics <|layout|columns=1 1 1| <|{selected_country}|selector|lov={selector_country}|on_change=on_change_country|dropdown|label=Country|> <|{selected_representation}|toggle|lov={representation_selector}|on_change=convert_density|> |>
<|layout|columns=1 1 1 1|gap=50px| <|card| **Deaths**{: .color-primary} <|{'{:,}'.format(int(data_country_date.iloc[-1]['Deaths'])).replace(',', ' ')}|text|class_name=h2|> |> <|card| **Recovered**{: .color-primary} <|{'{:,}'.format(int(data_country_date.iloc[-1]['Recovered'])).replace(',', ' ')}|text|class_name=h2|> |> <|card| **Confirmed**{: .color-primary} <|{'{:,}'.format(int(data_country_date.iloc[-1]['Confirmed'])).replace(',', ' ')}|text|class_name=h2|> |> |>
<|layout|columns=2 1| <|{data_country_date}|chart|type=bar|x=Date|y[3]=Deaths|y[2]=Recovered|y[1]=Confirmed|layout={layout}|options={options}|title=Covid Evolution|> <|{pie_chart}|chart|type=pie|values=values|labels=labels|title=Distribution between cases|> |> \"\"\" Gui(page).run(use_reloader=True, state=state)"} {"text": "# Create Taipy app to generate mandelbrot fractals from taipy import Gui import numpy as np from PIL import Image import matplotlib.pyplot as plt WINDOW_SIZE = 500 cm = plt.cm.get_cmap(\"viridis\") def generate_mandelbrot( center: int = WINDOW_SIZE / 2, dx_range: int = 1000, dx_start: float = -0.12, dy_range: float = 1000, dy_start: float = -0.82, iterations: int = 50, max_value: int = 200, i: int = 0, ) -> str: mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE)) for y in range(WINDOW_SIZE): for x in range(WINDOW_SIZE): dx = (x - center) / dx_range + dx_start dy = (y - center) / dy_range + dy_start a = dx b = dy for t in range(iterations): d = (a * a) - (b * b) + dx b = 2 * (a * b) + dy a = d h = d > max_value if h is True: mat[x, y] = t colored_mat = cm(mat / mat.max()) im = Image.fromarray((colored_mat * 255).astype(np.uint8)) path = f\"mandelbrot_{i}.png\" im.save(path) return path def generate(state): state.i = state.i + 1 state.path = generate_mandelbrot( dx_start=-state.dx_start / 100, dy_start=(state.dy_start - 100) / 100, iterations=state.iterations, i=state.i, ) i = 0 dx_start = 11 dy_start = 17 iterations = 50 path = generate_mandelbrot( dx_start=-dx_start / 100, dy_start=(dy_start - 100) / 100, ) page = \"\"\" # Mandelbrot Generator <|layout|columns=35 65| Display image from path <|{path}|image|width=500px|height=500px|class_name=img|> Iterations:
Create a slider to select iterations <|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
X Position:
<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
Y Position:
Slider dx_start <|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
|> \"\"\" Gui(page).run(title=\"Mandelbrot Generator\") "} {"text": "# Create app to auto generate Tweeter status import logging import random import re # Import from 3rd party libraries from taipy.gui import Gui, notify, state import taipy # Import modules import oai # Configure logger logging.basicConfig(format=\"\\n%(asctime)s\\n%(message)s\", level=logging.INFO, force=True) def error_prompt_flagged(state, prompt): \"\"\"Notify user that a prompt has been flagged.\"\"\" notify(state, \"error\", \"Prompt flagged as inappropriate.\") logging.info(f\"Prompt flagged as inappropriate: {prompt}\") def error_too_many_requests(state): \"\"\"Notify user that too many requests have been made.\"\"\" notify( state, \"error\", \"Too many requests. Please wait a few seconds before generating another text or image.\", ) logging.info(f\"Session request limit reached: {state.n_requests}\") state.n_requests = 1 # Define functions def generate_text(state): \"\"\"Generate Tweet text.\"\"\" state.tweet = \"\" state.image = None # Check the number of requests done by the user if state.n_requests >= 5: error_too_many_requests(state) return # Check if the user has put a topic if state.topic == \"\": notify(state, \"error\", \"Please enter a topic\") return # Create the prompt and add a style or not if state.style == \"\": state.prompt = ( f\"Write a {state.mood}Tweet about {state.topic} in less than 120 characters \" f\"and with the style of {state.style}:\\n\\n\\n\\n\" ) else: state.prompt = f\"Write a {state.mood}Tweet about {state.topic} in less than 120 characters:\\n\\n\" # openai configured and check if text is flagged openai = oai.Openai() flagged = openai.moderate(state.prompt) if flagged: error_prompt_flagged(state, f\"Prompt: {state.prompt}\\n\") return else: # Generate the tweet state.n_requests += 1 state.tweet = openai.complete(state.prompt).strip().replace('\"', \"\") # Notify the user in console and in the GUI logging.info( f\"Topic: {state.prompt}{state.mood}{state.style}\\n\" f\"Tweet: {state.tweet}\" ) notify(state, \"success\", \"Tweet created!\") def generate_image(state): \"\"\"Generate Tweet image.\"\"\" notify(state, \"info\", \"Generating image...\") # Check the number of requests done by the user if state.n_requests >= 5: error_too_many_requests(state) return state.image = None # Creates the prompt prompt_wo_hashtags = re.sub(\"#[A-Za-z0-9_]+\", \"\", state.prompt) processing_prompt = ( \"Create a detailed but brief description of an image that captures \" f\"the essence of the following text:\\n{prompt_wo_hashtags}\\n\\n\" ) # Openai configured and check if text is flagged openai = oai.Openai() flagged = openai.moderate(processing_prompt) if flagged: error_prompt_flagged(state, processing_prompt) return else: state.n_requests += 1 # Generate the prompt that will create the image processed_prompt = ( openai.complete(prompt=processing_prompt, temperature=0.5, max_tokens=40) .strip() .replace('\"', \"\") .split(\".\")[0] + \".\" ) # Generate the image state.image = openai.image(processed_prompt) # Notify the user in console and in the GUI logging.info(f\"Tweet: {state.prompt}\\nImage prompt: {processed_prompt}\") notify(state, \"success\", f\"Image created!\") def feeling_lucky(state): \"\"\"Generate a feeling-lucky tweet.\"\"\" with open(\"moods.txt\") as f: sample_moods = f.read().splitlines() state.topic = \"an interesting topic\" state.mood = random.choice(sample_moods) state.style = \"\" generate_text(state) # Variables tweet = \"\" prompt = \"\" n_requests = 0 topic = \"AI\" mood = 
\"inspirational\" style = \"elonmusk\" image = None # Called whenever there is a problem def on_exception(state, function_name: str, ex: Exception): logging.error(f\"Problem {ex} \\nin {function_name}\") notify(state, \"error\", f\"Problem {ex} \\nin {function_name}\") def update_documents(state: taipy.gui.State, docs: list[dict]) -> None: \"\"\" Updates a partial with a list of documents Args: state: The state of the GUI docs: A list of documents \"\"\" updated_part = \"\" for doc in docs: title = doc[\"title\"] summary = doc[\"summary\"] link = doc[\"link\"] updated_part += f\"\"\"

{title}

{summary}


\"\"\" state.p.update_content(state, updated_part) # Markdown for the entire page ## ## \"text\" here is just a name given to my part/my section ## it has no meaning in the code page = \"\"\" <|container| # **Generate**{: .color-primary} Tweets This mini-app generates Tweets using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).




<|layout|columns=1 1 1|gap=30px|class_name=card| |topic> |mood> |style> Create a Generate text button <|Generate text|button|on_action=generate_text|label=Generate text|> <|Feeling lucky|button|on_action=feeling_lucky|label=Feeling Lucky|> |>
---
### Generated **Tweet**{: .color-primary} Create a text input for the tweet <|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|>
<|Generate image|button|on_action=generate_image|label=Generate image|active={prompt!=\"\" and tweet!=\"\"}|>
<|{image}|image|height=400px|> |image> Break line
**Code from [@kinosal](https://twitter.com/kinosal)** Original code can be found [here](https://github.com/kinosal/tweet) |> \"\"\" if __name__ == \"__main__\": Gui(page).run(dark_mode=False, port=5089) "} {"text": "# Create app for py2jsonl3.py py2jsonl3.py\nimport os\nimport json\n\nEXCLUDED_FILES = [\"CODE_OF_CONDUCT.md\", \"CONTRIBUTING.md\", \"INSTALLATION.md\", \"README.md\"]\n\ndef find_files(directory, extensions):\n for root, dirs, files in os.walk(directory):\n for file in files:\n if file.endswith(extensions) and file not in EXCLUDED_FILES:\n yield os.path.join(root, file)\n\ndef extract_content(file_path):\n with open(file_path, 'r', encoding='utf-8') as file:\n return file.read()\n\ndef write_to_jsonl(output_file, data):\n with open(output_file, 'a', encoding='utf-8') as file:\n json_record = json.dumps(data)\n file.write(json_record + '\\n')\n\ndef main(directory, output_file):\n for file_path in find_files(directory, ('.py', '.md')):\n file_content = extract_content(file_path)\n file_comment = f\"# Create app for {os.path.basename(file_path)}\"\n data = {\"text\": file_comment + '\\n' + file_content}\n write_to_jsonl(output_file, data)\n\ndirectory = 'taipy_repos3' # Replace with the path to your directory\noutput_file = 'output.jsonl' # Name of the output JSONL file\n\nmain(directory, output_file)\n"} {"text": "# Create app for demo-remove-background main.py\nfrom taipy.gui import Gui, notify\nfrom rembg import remove\nfrom PIL import Image\nfrom io import BytesIO\n\n\npath_upload = \"\"\npath_download = \"fixed_img.png\"\noriginal_image = None\nfixed_image = None\nfixed = False\n\n\npage = \"\"\"<|toggle|theme|>\n\n\nUpload and download\n<|{path_upload}|file_selector|on_action=fix_image|extensions=.png,.jpg|label=Upload original image|>\n\n
\nDownload it here\n<|{path_download}|file_download|label=Download fixed image|active={fixed}|>\n|>\n\n<|container|\n# Image Background **Eliminator**{: .color-primary}\n\n\ud83d\udc36 Give it a try by uploading an image to witness the seamless removal of the background. You can download images in full quality from the sidebar.\nThis code is open source and accessible on [GitHub](https://github.com/Avaiga/demo-remove-background).\n
\n\n\n\n|col1>\n\n\n|col2>\n|images>\n\n|>\n|page>\n\"\"\"\n\n\ndef convert_image(img):\n buf = BytesIO()\n img.save(buf, format=\"PNG\")\n byte_im = buf.getvalue()\n return byte_im\n\n\ndef fix_image(state):\n notify(state, 'info', 'Uploading original image...')\n image = Image.open(state.path_upload)\n \n notify(state, 'info', 'Removing the background...')\n fixed_image = remove(image)\n fixed_image.save(\"fixed_img.png\")\n\n notify(state, 'success', 'Background removed successfully!')\n state.original_image = convert_image(image)\n state.fixed_image = convert_image(fixed_image)\n state.fixed = True\n\nif __name__ == \"__main__\":\n Gui(page=page).run(margin=\"0px\", title='Background Remover')\n"} {"text": "# Create app for demo-tweet-generation oai.py\n\"\"\"OpenAI API connector.\"\"\"\n\n# Import from standard library\nimport os\nimport logging\n\n# Import from 3rd party libraries\nimport openai\n\nimport os\n\n# Assign credentials from environment variable or streamlit secrets dict\nopenai.api_key = \"Enter your token here\"\n\n# Suppress openai request/response logging\n# Handle by manually changing the respective APIRequestor methods in the openai package\n# Does not work hosted on Streamlit since all packages are re-installed by Poetry\n# Alternatively (affects all messages from this logger):\nlogging.getLogger(\"openai\").setLevel(logging.WARNING)\n\n\nclass Openai:\n \"\"\"OpenAI Connector.\"\"\"\n\n @staticmethod\n def moderate(prompt: str) -> bool:\n \"\"\"Call OpenAI GPT Moderation with text prompt.\n Args:\n prompt: text prompt\n Return: boolean if flagged\n \"\"\"\n try:\n response = openai.Moderation.create(prompt)\n return response[\"results\"][0][\"flagged\"]\n\n except Exception as e:\n logging.error(f\"OpenAI API error: {e}\")\n\n @staticmethod\n def complete(prompt: str, temperature: float = 0.9, max_tokens: int = 50) -> str:\n \"\"\"Call OpenAI GPT Completion with text prompt.\n Args:\n prompt: text prompt\n Return: predicted response text\n \"\"\"\n kwargs = {\n \"engine\": \"text-davinci-003\",\n \"prompt\": prompt,\n \"temperature\": temperature,\n \"max_tokens\": max_tokens,\n \"top_p\": 1, # default\n \"frequency_penalty\": 0, # default,\n \"presence_penalty\": 0, # default\n }\n try:\n response = openai.Completion.create(**kwargs)\n return response[\"choices\"][0][\"text\"]\n\n except Exception as e:\n logging.error(f\"OpenAI API error: {e}\")\n\n @staticmethod\n def image(prompt: str) -> str:\n \"\"\"Call OpenAI Image Create with text prompt.\n Args:\n prompt: text prompt\n Return: image url\n \"\"\"\n try:\n response = openai.Image.create(\n prompt=prompt,\n n=1,\n size=\"512x512\",\n response_format=\"url\",\n )\n return response[\"data\"][0][\"url\"]\n\n except Exception as e:\n logging.error(f\"OpenAI API error: {e}\")"} {"text": "# Create app for demo-tweet-generation main.py\n# Import from standard library\nimport logging\nimport random\nimport re\n\n# Import from 3rd party libraries\nfrom taipy.gui import Gui, notify\n\n# Import modules\nimport oai\n\n# Configure logger\nlogging.basicConfig(format=\"\\n%(asctime)s\\n%(message)s\", level=logging.INFO, force=True)\n\n\ndef error_prompt_flagged(state, prompt):\n \"\"\"Notify user that a prompt has been flagged.\"\"\"\n notify(state, \"error\", \"Prompt flagged as inappropriate.\")\n logging.info(f\"Prompt flagged as inappropriate: {prompt}\")\n\ndef error_too_many_requests(state):\n \"\"\"Notify user that too many requests have been made.\"\"\"\n notify(state, \"error\", \"Too many requests. 
Please wait a few seconds before generating another text or image.\")\n logging.info(f\"Session request limit reached: {state.n_requests}\")\n state.n_requests = 1\n\n\n# Define functions\ndef generate_text(state):\n \"\"\"Generate Tweet text.\"\"\"\n state.tweet = \"\"\n state.image = None\n\n # Check the number of requests done by the user\n if state.n_requests >= 5:\n error_too_many_requests(state)\n return\n\n # Check if the user has put a topic\n if state.topic == \"\":\n notify(state, \"error\", \"Please enter a topic\")\n return\n\n # Create the prompt and add a style or not\n if state.style == \"\":\n state.prompt = (\n f\"Write a {state.mood}Tweet about {state.topic} in less than 120 characters \"\n f\"and with the style of {state.style}:\\n\\n\\n\\n\"\n )\n else:\n state.prompt = f\"Write a {state.mood}Tweet about {state.topic} in less than 120 characters:\\n\\n\"\n\n\n # openai configured and check if text is flagged\n openai = oai.Openai()\n flagged = openai.moderate(state.prompt)\n \n if flagged:\n error_prompt_flagged(state, f\"Prompt: {state.prompt}\\n\")\n return\n else:\n # Generate the tweet\n state.n_requests += 1\n state.tweet = (\n openai.complete(state.prompt).strip().replace('\"', \"\")\n )\n\n # Notify the user in console and in the GUI\n logging.info(\n f\"Topic: {state.prompt}{state.mood}{state.style}\\n\"\n f\"Tweet: {state.tweet}\"\n )\n notify(state, \"success\", \"Tweet created!\")\n\n\ndef generate_image(state):\n \"\"\"Generate Tweet image.\"\"\"\n notify(state, \"info\", \"Generating image...\")\n\n # Check the number of requests done by the user\n if state.n_requests >= 5:\n error_too_many_requests(state)\n return\n\n state.image = None\n\n # Creates the prompt\n prompt_wo_hashtags = re.sub(\"#[A-Za-z0-9_]+\", \"\", state.prompt)\n processing_prompt = (\n \"Create a detailed but brief description of an image that captures \"\n f\"the essence of the following text:\\n{prompt_wo_hashtags}\\n\\n\"\n )\n\n # Openai configured and check if text is flagged\n openai = oai.Openai()\n flagged = openai.moderate(processing_prompt)\n\n if flagged:\n error_prompt_flagged(state, processing_prompt)\n return\n else:\n state.n_requests += 1\n # Generate the prompt that will create the image\n processed_prompt = (\n openai.complete(\n prompt=processing_prompt, temperature=0.5, max_tokens=40\n )\n .strip()\n .replace('\"', \"\")\n .split(\".\")[0]\n + \".\"\n )\n\n # Generate the image\n state.image = openai.image(processed_prompt)\n\n # Notify the user in console and in the GUI\n logging.info(f\"Tweet: {state.prompt}\\nImage prompt: {processed_prompt}\")\n notify(state, \"success\", f\"Image created!\")\n\n\n\n\n# Variables\ntweet = \"\"\nprompt = \"\"\nn_requests = 0\n\ntopic = \"AI\"\nmood = \"inspirational\"\nstyle = \"elonmusk\"\n\nimage = None\n\n# Called whever there is a problem\ndef on_exception(state, function_name: str, ex: Exception):\n logging.error(f\"Problem {ex} \\nin {function_name}\")\n notify(state, 'error', f\"Problem {ex} \\nin {function_name}\")\n\n\n# Markdown for the entire page\n## \n## \"text\" here is just a name given to my part/my section\n## it has no meaning in the code\npage = \"\"\"\n<|container|\n# **Generate**{: .color-primary} Tweets\n\nThis mini-app generates Tweets using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. 
You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n<|layout|columns=1 1 1|gap=30px|class_name=card|\n\n|topic>\n\n\n|mood>\n\n\n|style>\n\n<|Generate text|button|on_action=generate_text|label=Generate text|>\n|>\n\n
\n\n---\n\n
\n\n### Generated **Tweet**{: .color-primary}\n\n<|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|>\n\n
<|Generate image|button|on_action=generate_image|label=Generate image|active={prompt!=\"\" and tweet!=\"\"}|>
\n\n<|{image}|image|height=400px|>\n|image>\n\n
\n\n**Code from [@kinosal](https://twitter.com/kinosal)**\n\nOriginal code can be found [here](https://github.com/kinosal/tweet)\n|>\n\"\"\"\n\n\nif __name__ == \"__main__\":\n Gui(page).run(title='Tweet Generation')\n"} {"text": "# Create app for demo-realtime-pollution sender.py\n# echo-client.py\n\nimport math\nimport time\nimport socket\nimport pickle\nimport numpy as np\n\nHOST = \"127.0.0.1\"\nPORT = 65432\n\ninit_lat = 49.247\ninit_long = 1.377\n\nfactory_lat = 49.246\nfactory_long = 1.369\n\ndiff_lat = abs(init_lat - factory_lat) * 15\ndiff_long = abs(init_long - factory_long) * 15\n\nlats_unique = np.arange(init_lat - diff_lat, init_lat + diff_lat, 0.001)\nlongs_unique = np.arange(init_long - diff_long, init_long + diff_long, 0.001)\n\ncountdown = 20\n\n\ndef pollution(lat: float, long: float):\n \"\"\"\n Return pollution level in percentage\n Pollution should be centered around the factory\n Pollution should decrease with distance to factory\n Pollution should have an added random component\n\n Args:\n - lat: latitude\n - long: longitude\n\n Returns:\n - pollution level\n \"\"\"\n global countdown\n return 80 * (0.5 + 0.5 * math.sin(countdown / 20)) * math.exp(\n -(0.8 * (lat - factory_lat) ** 2 + 0.2 * (long - factory_long) ** 2) / 0.00005\n ) + np.random.randint(0, 50)\n\n\nlats = []\nlongs = []\npollutions = []\n\nfor lat in lats_unique:\n for long in longs_unique:\n lats.append(lat)\n longs.append(long)\n pollutions.append(pollution(lat, long))\n\n\ndef update():\n \"\"\"\n Update the pollution levels\n \"\"\"\n for i, _ in enumerate(lats):\n pollutions[i] = pollution(lats[i], longs[i])\n return pollutions\n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n while True:\n data = pickle.dumps(pollutions)\n s.sendall(data)\n print(f\"Sent Data: {pollutions[:5]}\")\n pollutions = update()\n countdown += 5\n time.sleep(5)\n"} {"text": "# Create app for demo-realtime-pollution receiver.py\nimport socket\nimport pickle\nimport math\nfrom threading import Thread\nfrom taipy.gui import Gui, State, invoke_callback, get_state_id\nimport numpy as np\nimport pandas as pd\n\ninit_lat = 49.247\ninit_long = 1.377\n\nfactory_lat = 49.246\nfactory_long = 1.369\n\ndiff_lat = abs(init_lat - factory_lat) * 15\ndiff_long = abs(init_long - factory_long) * 15\n\nlats_unique = np.arange(init_lat - diff_lat, init_lat + diff_lat, 0.001)\nlongs_unique = np.arange(init_long - diff_long, init_long + diff_long, 0.001)\n\ncountdown = 20\nperiods = 0\nline_data = pd.DataFrame({\"Time\": [], \"Max AQI\": []})\n\ndrone_data = pd.DataFrame(\n {\n \"Drone ID\": [43, 234, 32, 23, 5, 323, 12, 238, 21, 84],\n \"Battery Level\": [\n \"86%\",\n \"56%\",\n \"45%\",\n \"12%\",\n \"85%\",\n \"67%\",\n \"34%\",\n \"78%\",\n \"90%\",\n \"100%\",\n ],\n \"AQI\": [40, 34, 24, 22, 33, 45, 23, 34, 23, 34],\n \"Status\": [\n \"Moving\",\n \"Measuring\",\n \"Measuring\",\n \"Stopped\",\n \"Measuring\",\n \"Moving\",\n \"Moving\",\n \"Measuring\",\n \"Measuring\",\n \"Measuring\",\n ],\n }\n)\n\nHOST = \"127.0.0.1\"\nPORT = 65432\n\nlayout_map = {\n \"mapbox\": {\n \"style\": \"open-street-map\",\n \"center\": {\"lat\": init_lat, \"lon\": init_long},\n \"zoom\": 13,\n },\n \"dragmode\": \"false\",\n \"margin\": {\"l\": 0, \"r\": 0, \"b\": 0, \"t\": 0},\n}\n\nlayout_line = {\n \"title\": \"Max Measured AQI over Time\",\n \"yaxis\": {\"range\": [0, 150]},\n}\n\noptions = {\n \"opacity\": 0.8,\n \"colorscale\": \"Bluered\",\n \"zmin\": 0,\n \"zmax\": 140,\n \"colorbar\": {\"title\": 
\"AQI\"},\n \"hoverinfo\": \"none\",\n}\n\nconfig = {\"scrollZoom\": False, \"displayModeBar\": False}\n\n\ndef pollution(lat: float, long: float):\n \"\"\"\n Return pollution level in percentage\n Pollution should be centered around the factory\n Pollution should decrease with distance to factory\n Pollution should have an added random component\n\n Args:\n - lat: latitude\n - long: longitude\n\n Returns:\n - pollution level\n \"\"\"\n global countdown\n return 80 * (0.5 + 0.5 * math.sin(countdown / 20)) * math.exp(\n -(0.8 * (lat - factory_lat) ** 2 + 0.2 * (long - factory_long) ** 2) / 0.00005\n ) + np.random.randint(0, 50)\n\n\nlats = []\nlongs = []\npollutions = []\ntimes = []\nmax_pollutions = []\n\nfor lat in lats_unique:\n for long in longs_unique:\n lats.append(lat)\n longs.append(long)\n pollutions.append(pollution(lat, long))\n\ndata_province_displayed = pd.DataFrame(\n {\n \"Latitude\": lats,\n \"Longitude\": longs,\n \"Pollution\": pollutions,\n }\n)\n\nmax_pollution = data_province_displayed[\"Pollution\"].max()\n\n\n# Socket handler\ndef client_handler(gui: Gui, state_id_list: list):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((HOST, PORT))\n s.listen()\n conn, _ = s.accept()\n while True:\n if data := conn.recv(1024 * 1024):\n pollutions = pickle.loads(data)\n print(f\"Data received: {pollutions[:5]}\")\n if hasattr(gui, \"_server\") and state_id_list:\n invoke_callback(\n gui,\n state_id_list[0],\n update_pollutions,\n [pollutions],\n )\n else:\n print(\"Connection closed\")\n break\n\n\n# Gui declaration\nstate_id_list = []\n\nGui.add_shared_variable(\"pollutions\")\n\n\ndef on_init(state: State):\n state_id = get_state_id(state)\n if (state_id := get_state_id(state)) is not None and state_id != \"\":\n state_id_list.append(state_id)\n update_pollutions(state, pollutions)\n\n\ndef update_pollutions(state: State, val):\n state.pollutions = val\n state.data_province_displayed = pd.DataFrame(\n {\n \"Latitude\": lats,\n \"Longitude\": longs,\n \"Pollution\": state.pollutions,\n }\n )\n # Add an hour to the time\n state.periods = state.periods + 1\n state.max_pollutions = state.max_pollutions + [max(state.pollutions)]\n state.times = pd.date_range(\n \"2020-11-04\", periods=len(state.max_pollutions), freq=\"H\"\n )\n state.line_data = pd.DataFrame(\n {\n \"Time\": state.times,\n \"Max AQI\": state.max_pollutions,\n }\n )\n\n\npage = \"\"\"\n<|{data_province_displayed}|chart|type=densitymapbox|plot_config={config}|options={options}|lat=Latitude|lon=Longitude|layout={layout_map}|z=Pollution|mode=markers|class_name=map|height=40vh|>\n<|layout|columns=1 2 2|\n<|part|class_name=card|\n**Max Measured AQI:**


\n<|{int(data_province_displayed[\"Pollution\"].max())}|indicator|value={int(data_province_displayed[\"Pollution\"].max())}|min=140|max=0|>\n

\n**Average Measured AQI:**


\n<|{int(data_province_displayed[\"Pollution\"].mean())}|indicator|value={int(data_province_displayed[\"Pollution\"].mean())}|min=140|max=0|>\n|>\n\n<|part|class_name=card|\n<|{drone_data}|table|show_all=True|>\n|>\n\n<|part|class_name=card|\n<|{line_data[-30:]}|chart|type=lines|x=Time|y=Max AQI|layout={layout_line}|height=40vh|>\n|>\n|>\n\"\"\"\ngui = Gui(page=page)\n\nt = Thread(\n target=client_handler,\n args=(\n gui,\n state_id_list,\n ),\n)\nt.start()\ngui.run(run_browser=False)\n"} {"text": "# Create app for demo-pyspark-penguin-app config.py\n### app/config.py\nimport datetime as dt\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nimport pandas as pd\nimport taipy as tp\nfrom taipy import Config\n\nSCRIPT_DIR = Path(__file__).parent\nSPARK_APP_PATH = SCRIPT_DIR / \"penguin_spark_app.py\"\n\n\ninput_csv_path = str(SCRIPT_DIR / \"penguins.csv\")\n\n# -------------------- Data Nodes --------------------\n\ninput_csv_path_cfg = Config.configure_data_node(id=\"input_csv_path\", default_data=input_csv_path)\n# Path to save the csv output of the spark app\noutput_csv_path_cfg = Config.configure_data_node(id=\"output_csv_path\")\n\nprocessed_penguin_df_cfg = Config.configure_parquet_data_node(\n id=\"processed_penguin_df\", validity_period=dt.timedelta(days=1)\n)\n\nspecies_cfg = Config.configure_data_node(id=\"species\") # \"Adelie\", \"Chinstrap\", \"Gentoo\"\nisland_cfg = Config.configure_data_node(id=\"island\") # \"Biscoe\", \"Dream\", \"Torgersen\"\nsex_cfg = Config.configure_data_node(id=\"sex\") # \"male\", \"female\"\n\noutput_cfg = Config.configure_json_data_node(\n id=\"output\",\n)\n\n# -------------------- Tasks --------------------\n\n\ndef spark_process(input_csv_path: str, output_csv_path: str) -> pd.DataFrame:\n proc = subprocess.Popen(\n [\n str(Path(sys.executable).with_name(\"spark-submit\")),\n str(SPARK_APP_PATH),\n \"--input-csv-path\",\n input_csv_path,\n \"--output-csv-path\",\n output_csv_path,\n ],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n try:\n outs, errs = proc.communicate(timeout=15)\n except subprocess.TimeoutExpired:\n proc.kill()\n outs, errs = proc.communicate()\n\n if proc.returncode != os.EX_OK:\n raise Exception(\"Spark training failed\")\n\n df = pd.read_csv(output_csv_path)\n\n return df\n\n\ndef filter(penguin_df: pd.DataFrame, species: str, island: str, sex: str) -> dict:\n df = penguin_df[(penguin_df.species == species) & (penguin_df.island == island) & (penguin_df.sex == sex)]\n output = df[[\"bill_length_mm\", \"bill_depth_mm\", \"flipper_length_mm\", \"body_mass_g\"]].to_dict(orient=\"records\")\n return output[0] if output else dict()\n\n\nspark_process_task_cfg = Config.configure_task(\n id=\"spark_process\",\n function=spark_process,\n skippable=True,\n input=[input_csv_path_cfg, output_csv_path_cfg],\n output=processed_penguin_df_cfg,\n)\n\nfilter_task_cfg = Config.configure_task(\n id=\"filter\",\n function=filter,\n skippable=True,\n input=[processed_penguin_df_cfg, species_cfg, island_cfg, sex_cfg],\n output=output_cfg,\n)\n\nscenario_cfg = Config.configure_scenario(\n id=\"scenario\", task_configs=[spark_process_task_cfg, filter_task_cfg]\n)\n"} {"text": "# Create app for demo-pyspark-penguin-app main.py\n### app/main.py\nfrom pathlib import Path\nfrom typing import Optional\n\nimport taipy as tp\nfrom config import scenario_cfg\nfrom taipy.gui import Gui, notify\n\n\nvalid_features: dict[str, list[str]] = {\n \"species\": [\"Adelie\", \"Chinstrap\", \"Gentoo\"],\n \"island\": [\"Torgersen\", 
\"Biscoe\", \"Dream\"],\n \"sex\": [\"Male\", \"Female\"],\n}\n\nselected_species = valid_features[\"species\"][0]\nselected_island = valid_features[\"island\"][0]\nselected_sex = valid_features[\"sex\"][0]\n\nselected_scenario: Optional[tp.Scenario] = None\n\ndata_dir = Path(__file__).with_name(\"data\")\ndata_dir.mkdir(exist_ok=True)\n\n\ndef scenario_on_creation(state, id, payload):\n _ = payload[\"config\"]\n date = payload[\"date\"]\n label = payload[\"label\"]\n properties = payload[\"properties\"]\n\n # Create scenario with selected configuration\n scenario = tp.create_scenario(scenario_cfg, creation_date=date, name=label)\n scenario.properties.update(properties)\n\n # Write the selected GUI values to the scenario\n scenario.species.write(state.selected_species)\n scenario.island.write(state.selected_island)\n scenario.sex.write(state.selected_sex.lower())\n output_csv_file = data_dir / f\"{scenario.id}.csv\"\n scenario.output_csv_path.write(str(output_csv_file))\n\n notify(state, \"S\", f\"Created {scenario.id}\")\n\n return scenario\n\n\ndef scenario_on_submission_change(state, submittable, details):\n \"\"\"When the selected_scenario's submission status changes, reassign selected_scenario to force a GUI refresh.\"\"\"\n\n state.selected_scenario = submittable\n\n\nselected_data_node = None\n\nmain_md = \"\"\"\n<|layout|columns=1 4|gap=1.5rem|\n\n\n\n----------\n\n## Scenario info\n\n<|{selected_scenario}|scenario|on_submission_change=scenario_on_submission_change|>\n\n|lhs>\n\n\n\n<|{selected_island}|selector|lov={valid_features[\"island\"]}|dropdown|label=Island|>\n\n<|{selected_sex}|selector|lov={valid_features[\"sex\"]}|dropdown|label=Sex|>\n\n|selections>\n\n----------\n\n## Output\n\n**<|{str(selected_scenario.output.read()) if selected_scenario and selected_scenario.output.is_ready_for_reading else 'Submit the scenario using the left panel.'}|text|raw|class_name=color-primary|>**\n\n## Data node inspector\n\n<|{selected_data_node}|data_node_selector|display_cycles=False|>\n\n**Data node value:**\n\n<|{str(selected_data_node.read()) if selected_data_node and selected_data_node.is_ready_for_reading else None}|>\n\n
\n\n----------\n\n## DAG\n\n<|Scenario DAG|expandable|\n<|{selected_scenario}|scenario_dag|>\n|>\n\n|rhs>\n\n|>\n\"\"\"\n\n\ndef on_change(state, var_name: str, var_value):\n if var_name == \"selected_species\":\n state.selected_scenario.species.write(var_value)\n elif var_name == \"selected_island\":\n state.selected_scenario.island.write(var_value)\n elif var_name == \"selected_sex\":\n state.selected_scenario.sex.write(var_value.lower())\n\n\nif __name__ == \"__main__\":\n tp.Core().run()\n\n gui = Gui(main_md)\n gui.run(title=\"Spark with Taipy\")\n"} {"text": "# Create app for demo-pyspark-penguin-app penguin_spark_app.py\n### app/penguin_spark_app.py\nimport argparse\nimport os\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input-csv-path\", required=True, help=\"Path to the input penguin CSV file.\")\nparser.add_argument(\"--output-csv-path\", required=True, help=\"Path to save the output CSV file.\")\nargs = parser.parse_args()\n\nimport pyspark.pandas as ps\nfrom pyspark.sql import SparkSession\n\n\ndef read_penguin_df(csv_path: str):\n penguin_df = ps.read_csv(csv_path)\n return penguin_df\n\n\ndef clean(df: ps.DataFrame) -> ps.DataFrame:\n return df[df.sex.isin([\"male\", \"female\"])].dropna()\n\n\ndef process(df: ps.DataFrame) -> ps.DataFrame:\n \"\"\"The mean of measured penguin values, grouped by island and sex.\"\"\"\n\n mean_df = df.groupby(by=[\"species\", \"island\", \"sex\"]).agg(\"mean\").drop(columns=\"year\").reset_index()\n return mean_df\n\n\nif __name__ == \"__main__\":\n spark = SparkSession.builder.appName(\"Mean Penguin\").getOrCreate()\n\n penguin_df = read_penguin_df(args.input_csv_path)\n cleaned_penguin_df = clean(penguin_df)\n processed_penguin_df = process(cleaned_penguin_df)\n processed_penguin_df.to_pandas().to_csv(args.output_csv_path, index=False)\n\n sys.exit(os.EX_OK)\n"} {"text": "# Create app for demo-dask-customer-analysis config.py\nfrom taipy import Config\n\nfrom algos.algo import (\n preprocess_and_score,\n featurization_and_segmentation,\n segment_analysis,\n high_value_cust_summary_statistics,\n)\n\n# -------------------- Data Nodes --------------------\n\npath_to_data_cfg = Config.configure_data_node(id=\"path_to_data\", default_data=\"data/customers_data.csv\")\n\nscored_df_cfg = Config.configure_data_node(id=\"scored_df\")\n\npayment_threshold_cfg = Config.configure_data_node(id=\"payment_threshold\", default_data=1000)\n\nscore_threshold_cfg = Config.configure_data_node(id=\"score_threshold\", default_data=1.5)\n\nsegmented_customer_df_cfg = Config.configure_data_node(id=\"segmented_customer_df\")\n\nmetric_cfg = Config.configure_data_node(id=\"metric\", default_data=\"mean\")\n\nsegment_result_cfg = Config.configure_data_node(id=\"segment_result\")\n\nsummary_statistic_type_cfg = Config.configure_data_node(id=\"summary_statistic_type\", default_data=\"median\")\n\nhigh_value_summary_df_cfg = Config.configure_data_node(id=\"high_value_summary_df\")\n\n# -------------------- Tasks --------------------\n\npreprocess_and_score_task_cfg = Config.configure_task(\n id=\"preprocess_and_score\",\n function=preprocess_and_score,\n skippable=True,\n input=[path_to_data_cfg],\n output=[scored_df_cfg],\n)\n\nfeaturization_and_segmentation_task_cfg = Config.configure_task(\n id=\"featurization_and_segmentation\",\n function=featurization_and_segmentation,\n skippable=True,\n input=[scored_df_cfg, payment_threshold_cfg, score_threshold_cfg],\n output=[segmented_customer_df_cfg],\n)\n\nsegment_analysis_task_cfg = 
Config.configure_task(\n id=\"segment_analysis\",\n function=segment_analysis,\n skippable=True,\n input=[segmented_customer_df_cfg, metric_cfg],\n output=[segment_result_cfg],\n)\n\nhigh_value_cust_summary_statistics_task_cfg = Config.configure_task(\n id=\"high_value_cust_summary_statistics\",\n function=high_value_cust_summary_statistics,\n skippable=True,\n input=[segment_result_cfg, segmented_customer_df_cfg, summary_statistic_type_cfg],\n output=[high_value_summary_df_cfg],\n)\n\nscenario_cfg = Config.configure_scenario(\n id=\"scenario_1\",\n task_configs=[\n preprocess_and_score_task_cfg,\n featurization_and_segmentation_task_cfg,\n segment_analysis_task_cfg,\n high_value_cust_summary_statistics_task_cfg,\n ],\n)\n"} {"text": "# Create app for demo-dask-customer-analysis algo.py\nimport time\n\nimport dask.dataframe as dd\nimport pandas as pd\n\n\ndef preprocess_and_score(path_to_original_data: str):\n print(\"__________________________________________________________\")\n print(\"1. TASK 1: DATA PREPROCESSING AND CUSTOMER SCORING ...\")\n start_time = time.perf_counter() # Start the timer\n\n # Step 1: Read data using Dask\n df = dd.read_csv(path_to_original_data)\n\n # Step 2: Simplify the customer scoring formula\n df[\"CUSTOMER_SCORE\"] = (\n 0.5 * df[\"TotalPurchaseAmount\"] / 1000 + 0.3 * df[\"NumberOfPurchases\"] / 10 + 0.2 * df[\"AverageReviewScore\"]\n )\n\n # Save all customers to a new CSV file\n scored_df = df[[\"CUSTOMER_SCORE\", \"TotalPurchaseAmount\", \"NumberOfPurchases\", \"TotalPurchaseTime\"]]\n\n pd_df = scored_df.compute()\n\n end_time = time.perf_counter() # Stop the timer\n execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds\n print(f\"Time of Execution: {execution_time:.4f} ms\")\n\n return pd_df\n\n\ndef featurization_and_segmentation(scored_df, payment_threshold, score_threshold):\n print(\"__________________________________________________________\")\n print(\"2. TASK 2: FEATURE ENGINEERING AND SEGMENTATION ...\")\n\n # payment_threshold, score_threshold = float(payment_threshold), float(score_threshold)\n start_time = time.perf_counter() # Start the timer\n\n df = scored_df\n\n # Feature: Indicator if customer's total purchase is above the payment threshold\n df[\"HighSpender\"] = (df[\"TotalPurchaseAmount\"] > payment_threshold).astype(int)\n\n # Feature: Average time between purchases\n df[\"AverageTimeBetweenPurchases\"] = df[\"TotalPurchaseTime\"] / df[\"NumberOfPurchases\"]\n\n # Additional computationally intensive features\n df[\"Interaction1\"] = df[\"TotalPurchaseAmount\"] * df[\"NumberOfPurchases\"]\n df[\"Interaction2\"] = df[\"TotalPurchaseTime\"] * df[\"CUSTOMER_SCORE\"]\n df[\"PolynomialFeature\"] = df[\"TotalPurchaseAmount\"] ** 2\n\n # Segment customers based on the score_threshold\n df[\"ValueSegment\"] = [\"High Value\" if score > score_threshold else \"Low Value\" for score in df[\"CUSTOMER_SCORE\"]]\n\n end_time = time.perf_counter() # Stop the timer\n execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds\n print(f\"Time of Execution: {execution_time:.4f} ms\")\n\n return df\n\n\ndef segment_analysis(df: pd.DataFrame, metric):\n print(\"__________________________________________________________\")\n print(\"3. 
TASK 3: SEGMENT ANALYSIS ...\")\n start_time = time.perf_counter() # Start the timer\n\n # Detailed analysis for each segment: mean/median of various metrics\n segment_analysis = (\n df.groupby(\"ValueSegment\")\n .agg(\n {\n \"CUSTOMER_SCORE\": metric,\n \"TotalPurchaseAmount\": metric,\n \"NumberOfPurchases\": metric,\n \"TotalPurchaseTime\": metric,\n \"HighSpender\": \"sum\", # Total number of high spenders in each segment\n \"AverageTimeBetweenPurchases\": metric,\n }\n )\n .reset_index()\n )\n\n end_time = time.perf_counter() # Stop the timer\n execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds\n print(f\"Time of Execution: {execution_time:.4f} ms\")\n\n return segment_analysis\n\n\ndef high_value_cust_summary_statistics(df: pd.DataFrame, segment_analysis: pd.DataFrame, summary_statistic_type: str):\n print(\"__________________________________________________________\")\n print(\"4. TASK 4: ADDITIONAL ANALYSIS BASED ON SEGMENT ANALYSIS ...\")\n start_time = time.perf_counter() # Start the timer\n\n # Filter out the High Value customers\n high_value_customers = df[df[\"ValueSegment\"] == \"High Value\"]\n\n # Use summary_statistic_type to calculate different types of summary statistics\n if summary_statistic_type == \"mean\":\n average_purchase_high_value = high_value_customers[\"TotalPurchaseAmount\"].mean()\n elif summary_statistic_type == \"median\":\n average_purchase_high_value = high_value_customers[\"TotalPurchaseAmount\"].median()\n elif summary_statistic_type == \"max\":\n average_purchase_high_value = high_value_customers[\"TotalPurchaseAmount\"].max()\n elif summary_statistic_type == \"min\":\n average_purchase_high_value = high_value_customers[\"TotalPurchaseAmount\"].min()\n\n median_score_high_value = high_value_customers[\"CUSTOMER_SCORE\"].median()\n\n # Fetch the summary statistic for 'TotalPurchaseAmount' for High Value customers from segment_analysis\n segment_statistic_high_value = segment_analysis.loc[\n segment_analysis[\"ValueSegment\"] == \"High Value\", \"TotalPurchaseAmount\"\n ].values[0]\n\n # Create a DataFrame to hold the results\n result_df = pd.DataFrame(\n {\n \"SummaryStatisticType\": [summary_statistic_type],\n \"AveragePurchaseHighValue\": [average_purchase_high_value],\n \"MedianScoreHighValue\": [median_score_high_value],\n \"SegmentAnalysisHighValue\": [segment_statistic_high_value],\n }\n )\n\n end_time = time.perf_counter() # Stop the timer\n execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds\n print(f\"Time of Execution: {execution_time:.4f} ms\")\n\n return result_df\n\n\nif __name__ == \"__main__\":\n t1 = preprocess_and_score(\"data/customers_data.csv\")\n\n t2 = featurization_and_segmentation(t1, 1500, 1.5)\n\n t3 = segment_analysis(t2, \"mean\")\n\n t4 = high_value_cust_summary_statistics(t2, t3, \"mean\")\n\n print(t4)\n"} {"text": "# Create app for demo-taipy-gui-starter-1 main.py\nfrom taipy.gui import Gui\nfrom math import cos, exp \n\npage = \"\"\"\n#This is *Taipy* GUI \n\nA value: <|{decay}|>.\n\nA slider:
\n<|{decay}|slider|>\n\nMy chart: \n<|{data}|chart|>\n\"\"\"\n\ndef compute_data(decay):\n return [cos(i/16) * exp(-i*decay/6000) for i in range(720)]\n\ndef on_change(state, var_name, var_value):\n if var_name == 'decay':\n state.data = compute_data(var_value)\n\ndecay = 10\ndata = compute_data(decay) \n\nGui(page=page).run(title='Taipy Demo GUI 1',\n \t\t dark_mode=False)"} {"text": "# Create app for demo-churn-classification main.py\nimport pandas as pd\nimport taipy as tp\nfrom taipy.gui import Gui, Icon, navigate\nfrom config.config import scenario_cfg\nfrom taipy.config import Config \nfrom pages.main_dialog import *\n\nimport warnings\nwith warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n\n# Load configuration\nConfig.load('config/config.toml')\nscenario_cfg = Config.scenarios['churn_classification']\n\n# Execute the scenario\ntp.Core().run()\n\ndef create_first_scenario(scenario_cfg):\n \"\"\"Create and submit the first scenario.\"\"\"\n scenario = tp.create_scenario(scenario_cfg)\n tp.submit(scenario)\n return scenario\n\nscenario = create_first_scenario(scenario_cfg)\n\n# Read datasets\ntrain_dataset = scenario.train_dataset.read()\ntest_dataset = scenario.test_dataset.read()\nroc_dataset = scenario.roc_data_ml.read()\n\n# Process test dataset columns\ntest_dataset.columns = [str(column).upper() for column in test_dataset.columns]\n\n# Prepare data for visualization\nselect_x = test_dataset.drop('EXITED',axis=1).columns.tolist()\nx_selected = select_x[0]\nselect_y = select_x\ny_selected = select_y[1]\n\n# Read results and create charts\nvalues = scenario.results_ml.read()\nforecast_series = values['Forecast']\nscatter_dataset_pred = creation_scatter_dataset_pred(test_dataset, forecast_series)\nhisto_full_pred = creation_histo_full_pred(test_dataset, forecast_series)\nhisto_full = creation_histo_full(test_dataset)\nscatter_dataset = creation_scatter_dataset(test_dataset)\nfeatures_table = scenario.feature_importance_ml.read()\naccuracy_graph, f1_score_graph, score_auc_graph = compare_models_baseline(scenario, ['ml', 'baseline'])\n\ndef create_charts(model_type):\n \"\"\"Create pie charts and metrics for the given model type.\"\"\"\n metrics = c_update_metrics(scenario, model_type)\n (number_of_predictions, accuracy, f1_score, score_auc, \n number_of_good_predictions, number_of_false_predictions, \n fp_, tp_, fn_, tn_) = metrics\n \n pie_plotly = pd.DataFrame({\n \"values\": [number_of_good_predictions, number_of_false_predictions],\n \"labels\": [\"Correct predictions\", \"False predictions\"]\n })\n\n distrib_class = pd.DataFrame({\n \"values\": [len(values[values[\"Historical\"]==0]), len(values[values[\"Historical\"]==1])],\n \"labels\": [\"Stayed\", \"Exited\"]\n })\n\n score_table = pd.DataFrame({\n \"Score\": [\"Predicted stayed\", \"Predicted exited\"],\n \"Stayed\": [tn_, fp_],\n \"Exited\": [fn_, tp_]\n })\n\n pie_confusion_matrix = pd.DataFrame({\n \"values\": [tp_, tn_, fp_, fn_],\n \"labels\": [\"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\"]\n })\n\n return (number_of_predictions, number_of_false_predictions, number_of_good_predictions, \n accuracy, f1_score, score_auc, pie_plotly, distrib_class, score_table, pie_confusion_matrix)\n\n# Initialize charts\nchart_metrics = create_charts('ml')\n(number_of_predictions, number_of_false_predictions, number_of_good_predictions, \n accuracy, f1_score, score_auc, pie_plotly, distrib_class, score_table, pie_confusion_matrix) = chart_metrics\n\ndef 
on_change(state, var_name, var_value):\n \"\"\"Handle variable changes in the GUI.\"\"\"\n if var_name in ['x_selected', 'y_selected']:\n update_histogram_and_scatter(state)\n elif var_name == 'mm_algorithm_selected':\n update_variables(state, var_value.lower())\n elif var_name in ['mm_algorithm_selected', 'db_table_selected']:\n handle_temp_csv_path(state)\n\n# GUI initialization\nmenu_lov = [\n (\"Data Visualization\", Icon('images/histogram_menu.svg', 'Data Visualization')),\n (\"Model Manager\", Icon('images/model.svg', 'Model Manager')),\n (\"Compare Models\", Icon('images/compare.svg', 'Compare Models')),\n ('Databases', Icon('images/Datanode.svg', 'Databases'))\n]\n\nroot_md = \"\"\"\n<|toggle|theme|>\n<|menu|label=Menu|lov={menu_lov}|on_action=menu_fct|>\n\"\"\"\n\npage = \"Data Visualization\"\n\ndef menu_fct(state, var_name, var_value):\n \"\"\"Function that is called when there is a change in the menu control.\"\"\"\n state.page = var_value['args'][0]\n navigate(state, state.page.replace(\" \", \"-\"))\n\ndef update_variables(state, model_type):\n \"\"\"Update the different variables and dataframes used in the application.\"\"\"\n global scenario\n state.values = scenario.data_nodes[f'results_{model_type}'].read()\n state.forecast_series = state.values['Forecast']\n \n metrics = c_update_metrics(scenario, model_type)\n (state.number_of_predictions, state.accuracy, state.f1_score, state.score_auc,\n number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_) = metrics\n \n update_charts(state, model_type, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_)\n\ndef update_charts(state, model_type, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_):\n \"\"\"This function updates all the charts of the GUI.\n\n Args:\n state: object containing all the variables used in the GUI\n model_type (str): the name of the model_type shown\n number_of_good_predictions (int): number of good predictions\n number_of_false_predictions (int): number of false predictions\n fp_ (float): false positive rate\n tp_ (float): true positive rate\n fn_ (float): false negative rate\n tn_ (float): true negative rate\n \"\"\"\n state.roc_dataset = scenario.data_nodes[f'roc_data_{model_type}'].read()\n state.features_table = scenario.data_nodes[f'feature_importance_{model_type}'].read()\n\n state.score_table = pd.DataFrame({\"Score\":[\"Predicted stayed\", \"Predicted exited\"],\n \"Stayed\": [tn_, fp_],\n \"Exited\" : [fn_, tp_]})\n\n state.pie_confusion_matrix = pd.DataFrame({\"values\": [tp_, tn_, fp_, fn_],\n \"labels\" : [\"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\"]})\n\n state.scatter_dataset_pred = creation_scatter_dataset_pred(test_dataset, state.forecast_series)\n state.histo_full_pred = creation_histo_full_pred(test_dataset, state.forecast_series)\n \n # pie charts\n state.pie_plotly = pd.DataFrame({\"values\": [number_of_good_predictions, number_of_false_predictions],\n \"labels\": [\"Correct predictions\", \"False predictions\"]})\n\n state.distrib_class = pd.DataFrame({\"values\": [len(state.values[state.values[\"Historical\"]==0]),\n len(state.values[state.values[\"Historical\"]==1])],\n \"labels\" : [\"Stayed\", \"Exited\"]})\n\ndef on_init(state):\n update_histogram_and_scatter(state)\n\n# Define pages\npages = {\n \"/\": root_md + dialog_md,\n \"Data-Visualization\": dv_data_visualization_md,\n \"Model-Manager\": mm_model_manager_md, \n \"Compare-Models\": cm_compare_models_md,\n \"Databases\": 
db_databases_md,\n}\n\n# Run the GUI\nif __name__ == '__main__':\n gui = Gui(pages=pages)\n gui.run(title=\"Churn classification\", dark_mode=False, port=8494)\n"} {"text": "# Create app for demo-churn-classification config.py\nfrom algos.algos import *\nfrom taipy import Config, Scope\n##############################################################################################################################\n# Creation of the datanodes\n##############################################################################################################################\n# How to connect to the database\npath_to_csv = 'data/churn.csv'\n\n# path for csv and file_path for pickle\ninitial_dataset_cfg = Config.configure_data_node(id=\"initial_dataset\",\n path=path_to_csv,\n storage_type=\"csv\",\n has_header=True)\n\ndate_cfg = Config.configure_data_node(id=\"date\", default_data=\"None\")\n\npreprocessed_dataset_cfg = Config.configure_data_node(id=\"preprocessed_dataset\")\n\n# the final datanode that contains the processed data\ntrain_dataset_cfg = Config.configure_data_node(id=\"train_dataset\")\n\n# the final datanode that contains the processed data\ntrained_model_ml_cfg = Config.configure_data_node(id=\"trained_model_ml\")\ntrained_model_baseline_cfg= Config.configure_data_node(id=\"trained_model_baseline\")\n\n\n# the final datanode that contains the processed data\ntest_dataset_cfg = Config.configure_data_node(id=\"test_dataset\")\n\nforecast_dataset_ml_cfg = Config.configure_data_node(id=\"forecast_dataset_ml\")\nforecast_dataset_baseline_cfg = Config.configure_data_node(id=\"forecast_dataset_baseline\")\n\nroc_data_ml_cfg = Config.configure_data_node(id=\"roc_data_ml\")\nroc_data_baseline_cfg = Config.configure_data_node(id=\"roc_data_baseline\")\n\nscore_auc_ml_cfg = Config.configure_data_node(id=\"score_auc_ml\")\nscore_auc_baseline_cfg = Config.configure_data_node(id=\"score_auc_baseline\")\n\n\nmetrics_ml_cfg = Config.configure_data_node(id=\"metrics_ml\")\nmetrics_baseline_cfg = Config.configure_data_node(id=\"metrics_baseline\")\n\nfeature_importance_ml_cfg = Config.configure_data_node(id=\"feature_importance_ml\")\nfeature_importance_baseline_cfg = Config.configure_data_node(id=\"feature_importance_baseline\")\n\nresults_ml_cfg = Config.configure_data_node(id=\"results_ml\")\nresults_baseline_cfg = Config.configure_data_node(id=\"results_baseline\")\n\n\n##############################################################################################################################\n# Creation of the tasks\n##############################################################################################################################\n\n# the task will make the link between the input data node \n# and the output data node while executing the function\n\n# initial_dataset --> preprocess dataset --> preprocessed_dataset\ntask_preprocess_dataset_cfg = Config.configure_task(id=\"preprocess_dataset\",\n input=[initial_dataset_cfg,date_cfg],\n function=preprocess_dataset,\n output=preprocessed_dataset_cfg)\n\n# preprocessed_dataset --> create train data --> train_dataset, test_dataset\ntask_create_train_test_cfg = Config.configure_task(id=\"create_train_and_test_data\",\n input=preprocessed_dataset_cfg,\n function=create_train_test_data,\n output=[train_dataset_cfg, test_dataset_cfg])\n\n\n# train_dataset --> create train_model data --> trained_model\ntask_train_model_baseline_cfg = Config.configure_task(id=\"train_model_baseline\",\n input=train_dataset_cfg,\n 
function=train_model_baseline,\n output=[trained_model_baseline_cfg,feature_importance_baseline_cfg])\n \n# train_dataset --> create train_model data --> trained_model\ntask_train_model_ml_cfg = Config.configure_task(id=\"train_model_ml\",\n input=train_dataset_cfg,\n function=train_model_ml,\n output=[trained_model_ml_cfg,feature_importance_ml_cfg])\n \n\n# test_dataset --> forecast --> forecast_dataset\ntask_forecast_baseline_cfg = Config.configure_task(id=\"predict_the_test_data_baseline\",\n input=[test_dataset_cfg, trained_model_baseline_cfg],\n function=forecast,\n output=forecast_dataset_baseline_cfg)\n# test_dataset --> forecast --> forecast_dataset\ntask_forecast_ml_cfg = Config.configure_task(id=\"predict_the_test_data_ml\",\n input=[test_dataset_cfg, trained_model_ml_cfg],\n function=forecast,\n output=forecast_dataset_ml_cfg)\n\n\ntask_roc_ml_cfg = Config.configure_task(id=\"task_roc_ml\",\n input=[forecast_dataset_ml_cfg, test_dataset_cfg],\n function=roc_from_scratch,\n output=[roc_data_ml_cfg,score_auc_ml_cfg])\n\ntask_roc_baseline_cfg = Config.configure_task(id=\"task_roc_baseline\",\n input=[forecast_dataset_baseline_cfg, test_dataset_cfg],\n function=roc_from_scratch,\n output=[roc_data_baseline_cfg,score_auc_baseline_cfg])\n\n\ntask_create_metrics_baseline_cfg = Config.configure_task(id=\"task_create_metrics_baseline\",\n input=[forecast_dataset_baseline_cfg,test_dataset_cfg],\n function=create_metrics,\n output=metrics_baseline_cfg)\n\ntask_create_metrics_ml_cfg = Config.configure_task(id=\"task_create_metrics\",\n input=[forecast_dataset_ml_cfg,test_dataset_cfg],\n function=create_metrics,\n output=metrics_ml_cfg)\n\ntask_create_results_baseline_cfg = Config.configure_task(id=\"task_create_results_baseline\",\n input=[forecast_dataset_baseline_cfg,test_dataset_cfg],\n function=create_results,\n output=results_baseline_cfg)\n\ntask_create_results_ml_cfg = Config.configure_task(id=\"task_create_results_ml\",\n input=[forecast_dataset_ml_cfg,test_dataset_cfg],\n function=create_results,\n output=results_ml_cfg)\n\n\n##############################################################################################################################\n# Creation of the scenario\n##############################################################################################################################\n\nscenario_cfg = Config.configure_scenario(id=\"churn_classification\",\n task_configs=[task_create_metrics_baseline_cfg,\n task_create_metrics_ml_cfg,\n task_create_results_baseline_cfg,\n task_create_results_ml_cfg,\n task_forecast_baseline_cfg,\n task_forecast_ml_cfg,\n task_roc_ml_cfg,\n task_roc_baseline_cfg,\n task_train_model_baseline_cfg,\n task_train_model_ml_cfg,\n task_preprocess_dataset_cfg,\n task_create_train_test_cfg])\n\nConfig.export('config/config.toml')"} {"text": "# Create app for demo-churn-classification algos.py\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score\n\nimport datetime as dt\n\nimport pandas as pd\nimport numpy as np\n\n##############################################################################################################################\n# Function used in the tasks\n##############################################################################################################################\n\ndef preprocess_dataset(initial_dataset: pd.DataFrame, date: dt.datetime=\"None\"):\n \"\"\"This 
function preprocesses the dataset to be used in the model\n\n Args:\n initial_dataset (pd.DataFrame): the raw format when we first read the data\n date (dt.datetime or str): optional cut-off date; the default \"None\" keeps every row\n\n Returns:\n pd.DataFrame: the preprocessed dataset for classification\n \"\"\"\n print(\"\\n Preprocessing the dataset...\")\n \n # We filter the dataframe on the date\n if date != \"None\":\n initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date'])\n processed_dataset = initial_dataset[initial_dataset['Date'] <= date]\n print(len(processed_dataset))\n else:\n processed_dataset = initial_dataset\n \n processed_dataset = processed_dataset[['CreditScore','Geography','Gender','Age','Tenure','Balance','NumOfProducts','HasCrCard','IsActiveMember','EstimatedSalary','Exited']]\n \n \n processed_dataset = pd.get_dummies(processed_dataset)\n\n if 'Gender_Female' in processed_dataset.columns:\n processed_dataset.drop('Gender_Female',axis=1,inplace=True)\n \n processed_dataset = processed_dataset.apply(pd.to_numeric)\n \n columns_to_select = ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'HasCrCard',\n 'IsActiveMember', 'EstimatedSalary', 'Geography_France', 'Geography_Germany',\n 'Geography_Spain', 'Gender_Male','Exited']\n \n processed_dataset = processed_dataset[[col for col in columns_to_select if col in processed_dataset.columns]]\n\n print(\" Preprocessing done!\\n\")\n return processed_dataset\n\n\ndef create_train_test_data(preprocessed_dataset: pd.DataFrame):\n \"\"\"This function creates the training and test data by splitting the preprocessed dataset\n\n Args:\n preprocessed_dataset (pd.DataFrame): the preprocessed dataset\n\n Returns:\n pd.DataFrame: the training dataset\n pd.DataFrame: the test dataset\n \"\"\"\n print(\"\\n Creating the training and testing dataset...\")\n \n X_train, X_test, y_train, y_test = train_test_split(preprocessed_dataset.iloc[:,:-1],preprocessed_dataset.iloc[:,-1],test_size=0.2,random_state=42)\n \n train_data = pd.concat([X_train,y_train],axis=1)\n test_data = pd.concat([X_test,y_test],axis=1)\n print(\" Creating done!\")\n return train_data, test_data\n\n\n\ndef train_model_baseline(train_dataset: pd.DataFrame):\n \"\"\"Function to train the Logistic Regression (baseline) model\n\n Args:\n train_dataset (pd.DataFrame): the training dataset\n\n Returns:\n model (LogisticRegression): the fitted model\n importance (pd.DataFrame): the feature importance\n \"\"\"\n print(\" Training the model...\\n\")\n X,y = train_dataset.iloc[:,:-1],train_dataset.iloc[:,-1]\n model_fitted = LogisticRegression().fit(X,y)\n print(\"\\n \",model_fitted,\" is trained!\")\n \n importance_dict = {'Features' : X.columns, 'Importance':model_fitted.coef_[0]}\n importance = pd.DataFrame(importance_dict).sort_values(by='Importance',ascending=True)\n return model_fitted, importance\n\ndef train_model_ml(train_dataset: pd.DataFrame):\n \"\"\"Function to train the Random Forest (ML) model\n\n Args:\n train_dataset (pd.DataFrame): the training dataset\n\n Returns:\n model (RandomForestClassifier): the fitted model\n importance (pd.DataFrame): the feature importance\n \"\"\"\n print(\" Training the model...\\n\")\n X,y = train_dataset.iloc[:,:-1],train_dataset.iloc[:,-1]\n model_fitted = RandomForestClassifier().fit(X,y)\n print(\"\\n \",model_fitted,\" is trained!\")\n \n importance_dict = {'Features' : X.columns, 'Importance':model_fitted.feature_importances_}\n importance = pd.DataFrame(importance_dict).sort_values(by='Importance',ascending=True)\n return model_fitted, importance\n\ndef forecast(test_dataset: pd.DataFrame, trained_model: RandomForestClassifier):\n \"\"\"Function to forecast the test dataset\n\n Args:\n test_dataset (pd.DataFrame): the test dataset\n trained_model 
(LogisticRegression or RandomForestClassifier): the fitted model\n\n Returns:\n predictions (np.array): the predicted churn probabilities for the test dataset\n \"\"\"\n print(\" Forecasting the test dataset...\")\n X,y = test_dataset.iloc[:,:-1],test_dataset.iloc[:,-1]\n #predictions = trained_model.predict(X)\n predictions = trained_model.predict_proba(X)[:, 1]\n print(\" Forecasting done!\")\n return predictions\n\n\ndef roc_from_scratch(probabilities, test_dataset, partitions=100):\n print(\" Calculation of the ROC curve...\")\n y_test = test_dataset.iloc[:,-1]\n \n roc = np.array([])\n for i in range(partitions + 1):\n threshold_vector = np.greater_equal(probabilities, i / partitions).astype(int)\n tpr, fpr = true_false_positive(threshold_vector, y_test)\n roc = np.append(roc, [fpr, tpr])\n \n roc_np = roc.reshape(-1, 2)\n roc_data = pd.DataFrame({\"False positive rate\": roc_np[:, 0], \"True positive rate\": roc_np[:, 1]})\n print(\" Calculation done\")\n print(\" Scoring...\")\n\n score_auc = roc_auc_score(y_test, probabilities)\n print(\" Scoring done\\n\")\n\n return roc_data, score_auc\n\n\ndef true_false_positive(threshold_vector:np.array, y_test:np.array):\n \"\"\"Function to calculate the true positive rate and the false positive rate\n \n Args:\n threshold_vector (np.array): the binary predictions obtained at the current threshold\n y_test (np.array): the true labels\n\n Returns:\n tpr (float): the true positive rate\n fpr (float): the false positive rate\n \"\"\"\n \n true_positive = np.equal(threshold_vector, 1) & np.equal(y_test, 1)\n true_negative = np.equal(threshold_vector, 0) & np.equal(y_test, 0)\n false_positive = np.equal(threshold_vector, 1) & np.equal(y_test, 0)\n false_negative = np.equal(threshold_vector, 0) & np.equal(y_test, 1)\n\n tpr = true_positive.sum() / (true_positive.sum() + false_negative.sum())\n fpr = false_positive.sum() / (false_positive.sum() + true_negative.sum())\n\n return tpr, fpr\n\ndef create_metrics(predictions:np.array, test_dataset:np.array):\n print(\" Creating the metrics...\")\n threshold = 0.5\n threshold_vector = np.greater_equal(predictions, threshold).astype(int)\n \n y_test = test_dataset.iloc[:,-1]\n \n true_positive = (np.equal(threshold_vector, 1) & np.equal(y_test, 1)).sum()\n true_negative = (np.equal(threshold_vector, 0) & np.equal(y_test, 0)).sum()\n false_positive = (np.equal(threshold_vector, 1) & np.equal(y_test, 0)).sum()\n false_negative = (np.equal(threshold_vector, 0) & np.equal(y_test, 1)).sum()\n\n\n f1_score = np.around(2*true_positive/(2*true_positive+false_positive+false_negative), decimals=2)\n accuracy = np.around((true_positive+true_negative)/(true_positive+true_negative+false_positive+false_negative), decimals=2)\n dict_ftpn = {\"tp\": true_positive, \"tn\": true_negative, \"fp\": false_positive, \"fn\": false_negative}\n \n \n number_of_good_predictions = true_positive + true_negative\n number_of_false_predictions = false_positive + false_negative\n \n metrics = {\"f1_score\": f1_score,\n \"accuracy\": accuracy,\n \"dict_ftpn\": dict_ftpn,\n 'number_of_predictions': len(predictions),\n 'number_of_good_predictions':number_of_good_predictions,\n 'number_of_false_predictions':number_of_false_predictions}\n \n return metrics\n\n \ndef create_results(forecast_values,test_dataset):\n forecast_series_proba = pd.Series(np.around(forecast_values,decimals=2), index=test_dataset.index, name='Probability')\n forecast_series = pd.Series((forecast_values>0.5).astype(int), index=test_dataset.index, name='Forecast')\n true_series = pd.Series(test_dataset.iloc[:,-1], 
name=\"Historical\",index=test_dataset.index)\n index_series = pd.Series(range(len(true_series)), index=test_dataset.index, name=\"Id\")\n \n results = pd.concat([index_series, forecast_series_proba, forecast_series, true_series], axis=1)\n return results"} {"text": "# Create app for demo-churn-classification main_dialog.py\nfrom pages.compare_models_md import *\nfrom pages.data_visualization_md import *\nfrom pages.databases_md import *\nfrom pages.model_manager_md import *\n\ndr_show_roc = False\n\ndialog_md = \"\"\"\n<|dialog|open={dr_show_roc}|title=ROC Curve|on_action={lambda s: s.assign(\"dr_show_roc\", False)}|labels=Close|width=1000px|\n<|{roc_dataset}|chart|x=False positive rate|y[1]=True positive rate|label[1]=True positive rate|height=500px|width=900px|type=scatter|>\n|>\n\"\"\""} {"text": "# Create app for demo-churn-classification databases_md.py\nimport pathlib\n\n# This path is used to create a temporary CSV file download the table\ntempdir = pathlib.Path(\".tmp\")\ntempdir.mkdir(exist_ok=True)\nPATH_TO_TABLE = str(tempdir / \"table.csv\")\n\n# Selector to select the table to show\ndb_table_selector = ['Training Dataset', 'Test Dataset', 'Forecast Dataset', 'Confusion Matrix']\ndb_table_selected = db_table_selector[0]\n\ndef handle_temp_csv_path(state):\n \"\"\"This function checks if the temporary csv file exists. If it does, it is deleted. Then, the temporary csv file\n is created for the right table\n\n Args:\n state: object containing all the variables used in the GUI\n \"\"\"\n if state.db_table_selected == 'Test Dataset':\n state.test_dataset.to_csv(PATH_TO_TABLE, sep=';')\n if state.db_table_selected == 'Confusion Matrix':\n state.score_table.to_csv(PATH_TO_TABLE, sep=';')\n if state.db_table_selected == \"Training Dataset\":\n state.train_dataset.to_csv(PATH_TO_TABLE, sep=';')\n if state.db_table_selected == \"Forecast Dataset\":\n state.values.to_csv(PATH_TO_TABLE, sep=';')\n\n\n# Aggregation of the strings to create the complete page\ndb_databases_md = \"\"\"\n# Data**bases**{: .color-primary}\n\n<|layout|columns=2 2 1|\n<|{mm_algorithm_selected}|selector|lov={mm_algorithm_selector}|dropdown|label=Algorithm|active=False|>\n\n<|{db_table_selected}|selector|lov={db_table_selector}|dropdown|label=Table|>\n\n<|{PATH_TO_TABLE}|file_download|name=table.csv|label=Download table|>\n|>\n\n\n|Confusion>\n\n\n|Training>\n\n\n|Forecast>\n\n\n|test_dataset>\n\"\"\" \n\n"} {"text": "# Create app for demo-churn-classification data_visualization_md.py\nimport pandas as pd\nimport numpy as np\n\n\ndv_graph_selector = ['Histogram','Scatter']\ndv_graph_selected = dv_graph_selector[0]\n\n# Histograms dialog\nproperties_histo_full = {}\nproperties_scatter_dataset = {}\n\ndef creation_scatter_dataset(test_dataset:pd.DataFrame):\n \"\"\"This function creates the dataset for the scatter plot. 
For every column (except Exited), scatter_dataset will have a positive and negative version.\n The positive column will have NaN when the Exited is zero and the negative column will have NaN when the Exited is one.\n\n Args:\n test_dataset (pd.DataFrame): the test dataset\n\n Returns:\n pd.DataFrame: the datafram\n \"\"\"\n scatter_dataset = test_dataset.copy()\n\n for column in scatter_dataset.columns:\n if column != 'EXITED' :\n column_neg = str(column)+'_neg'\n column_pos = str(column)+'_pos'\n \n scatter_dataset[column_neg] = scatter_dataset[column]\n scatter_dataset[column_pos] = scatter_dataset[column]\n \n scatter_dataset.loc[(scatter_dataset['EXITED'] == 1),column_neg] = np.NaN\n scatter_dataset.loc[(scatter_dataset['EXITED'] == 0),column_pos] = np.NaN\n \n return scatter_dataset\n\n\ndef creation_histo_full(test_dataset:pd.DataFrame):\n \"\"\"This function creates the dataset for the histogram plot. For every column (except Exited), histo_full will have a positive and negative version.\n The positive column will have NaN when the Exited is zero and the negative column will have NaN when the Exited is one. \n\n Args:\n test_dataset (pd.DataFrame): the test dataset\n\n Returns:\n pd.DataFrame: the Dataframe used to display the Histogram\n \"\"\"\n histo_full = test_dataset.copy()\n \n for column in histo_full.columns:\n column_neg = str(column)+'_neg'\n histo_full[column_neg] = histo_full[column]\n histo_full.loc[(histo_full['EXITED'] == 1),column_neg] = np.NaN\n histo_full.loc[(histo_full['EXITED'] == 0),column] = np.NaN\n \n return histo_full\n\n\ndef update_histogram_and_scatter(state):\n global x_selected, y_selected\n x_selected = state.x_selected\n y_selected = state.y_selected\n state.properties_scatter_dataset = {\"x\":x_selected,\n \"y[1]\":y_selected+'_pos',\n \"y[2]\":y_selected+'_neg'} \n state.scatter_dataset = state.scatter_dataset\n state.scatter_dataset_pred = state.scatter_dataset_pred\n\n state.properties_histo_full = {\"x[1]\":x_selected,\n \"x[2]\":x_selected+'_neg'} \n state.histo_full = state.histo_full\n state.histo_full_pred = state.histo_full_pred\n\n\ndv_data_visualization_md = \"\"\"\n# Data **Visualization**{: .color-primary}\n<|{dv_graph_selected}|toggle|lov={dv_graph_selector}|>\n\n--------------------------------------------------------------------\n\n<|part|render={dv_graph_selected == 'Histogram'}|\n### Histogram\n<|{x_selected}|selector|lov={select_x}|dropdown=True|label=Select x|>\n\n<|{histo_full}|chart|type=histogram|properties={properties_histo_full}|rebuild|y=EXITED|label=EXITED|color[1]=red|color[2]=green|name[1]=Exited|name[2]=Stayed|height=600px|>\n|>\n\n<|part|render={dv_graph_selected == 'Scatter'}|\n### Scatter\n<|layout|columns= 1 2|\n<|{x_selected}|selector|lov={select_x}|dropdown|label=Select x|>\n\n<|{y_selected}|selector|lov={select_y}|dropdown|label=Select y|>\n|>\n\n<|{scatter_dataset}|chart|properties={properties_scatter_dataset}|rebuild|color[1]=red|color[2]=green|name[1]=Exited|name[2]=Stayed|mode=markers|type=scatter|height=600px|>\n|>\n\n\"\"\"\n\n"} {"text": "# Create app for demo-churn-classification compare_models_md.py\nimport numpy as np\n\nfrom sklearn.metrics import f1_score\n\nimport pandas as pd\nimport numpy as np\n\ncm_height_histo = \"100%\"\ncm_dict_barmode = {\"barmode\": \"stack\",\"margin\":{\"t\":30}}\ncm_options_md = \"height={cm_height_histo}|width={cm_height_histo}|layout={cm_dict_barmode}\"\n\ncm_compare_models_md = \"\"\"\n# Model comparison\n\n----\n\n
\n
\n
\n\n<|layout|columns= 1 1 1|columns[mobile]=1|\n<|{accuracy_graph}|chart|type=bar|x=Model Type|y[1]=Accuracy Model|y[2]=Accuracy Baseline|title=Accuracy|\"\"\" + cm_options_md + \"\"\"|>\n\n<|{f1_score_graph}|chart|type=bar|x=Model Type|y[1]=F1 Score Model|y[2]=F1 Score Baseline|title=F1 Score|\"\"\" + cm_options_md + \"\"\"|>\n\n<|{score_auc_graph}|chart|type=bar|x=Model Type|y[1]=AUC Score Model|y[2]=AUC Score Baseline|title=AUC Score|\"\"\" + cm_options_md + \"\"\"|>\n\n|>\n\"\"\"\n\ndef compare_charts(accuracies, f1_scores, scores_auc, names):\n \"\"\"This function creates the pandas DataFrames (charts) used in the model comparison page\n\n Args:\n accuracies (list): list of accuracies\n f1_scores (list): list of f1 scores\n scores_auc (list): list of auc scores\n names (list): list of scenario names\n\n Returns:\n pd.DataFrame: the resulting three pd.DataFrame\n \"\"\"\n accuracy_graph = pd.DataFrame(create_metric_dict(accuracies, \"Accuracy\", names))\n f1_score_graph = pd.DataFrame(create_metric_dict(f1_scores, \"F1 Score\", names))\n score_auc_graph = pd.DataFrame(create_metric_dict(scores_auc, \"AUC Score\", names))\n\n return accuracy_graph, f1_score_graph, score_auc_graph\n\ndef compare_models_baseline(scenario,model_types):\n \"\"\"This function creates the objects for the model comparison\n\n Args:\n scenario (scenario): the selected scenario\n model_types (list): the names of the model types to compare\n\n Returns:\n pd.DataFrame: the resulting three pd.DataFrame\n \"\"\"\n accuracies = []\n f1_scores = []\n scores_auc = []\n names = []\n for model_type in model_types:\n (_,accuracy,f1_score,score_auc,_,_,_,_,_,_) = c_update_metrics(scenario, model_type)\n \n accuracies.append(accuracy)\n f1_scores.append(f1_score)\n scores_auc.append(score_auc)\n names.append('Model' if model_type != \"baseline\" else \"Baseline\") \n \n accuracy_graph,f1_score_graph, score_auc_graph = compare_charts(accuracies, f1_scores, scores_auc, names)\n return accuracy_graph, f1_score_graph, score_auc_graph\n \n\ndef create_metric_dict(metric, metric_name, names):\n \"\"\"This function creates a dictionary of metrics for multiple models that will be used in a Dataframe shown on the Gui\n\n Args:\n metric (list): the value of the metric\n metric_name (str): the name of the metric\n names (list): list of scenario names\n\n Returns:\n dict: dictionary used for a pandas DataFrame\n \"\"\"\n metric_dict = {}\n initial_list = [0]*len(names)\n \n metric_dict[\"Model Type\"] = names\n for i in range(len(names)):\n current_list = initial_list.copy()\n \n current_list[i] = metric[i]\n metric_dict[metric_name +\" \"+ names[i].capitalize()] = current_list\n \n return metric_dict\n\ndef c_update_metrics(scenario, model_type):\n \"\"\"This function updates the metrics of a scenario using a model\n\n Args:\n scenario (scenario): the selected scenario\n model_type (str): the name of the selected model_type\n\n Returns:\n tuple: the number of predictions, the scores (accuracy, F1, AUC) and the confusion matrix counts\n \"\"\"\n metrics = scenario.data_nodes[f'metrics_{model_type}'].read()\n\n number_of_predictions = metrics['number_of_predictions']\n number_of_good_predictions = metrics['number_of_good_predictions']\n number_of_false_predictions = metrics['number_of_false_predictions']\n\n accuracy = np.around(metrics['accuracy'], decimals=2)\n f1_score = np.around(metrics['f1_score'], decimals=2)\n score_auc = np.around(scenario.data_nodes[f'score_auc_{model_type}'].read(), decimals=2)\n \n dict_ftpn = metrics['dict_ftpn']\n \n fp_ = dict_ftpn['fp']\n tp_ = 
dict_ftpn['tp']\n fn_ = dict_ftpn['fn']\n tn_ = dict_ftpn['tn']\n return number_of_predictions, accuracy, f1_score, score_auc, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_\n\n"} {"text": "# Create app for demo-churn-classification model_manager_md.py\nimport pandas as pd\nimport numpy as np\n\n\nmm_graph_selector_scenario = ['Metrics', 'Features', 'Histogram','Scatter']\nmm_graph_selected_scenario = mm_graph_selector_scenario[0]\n\nmm_algorithm_selector = ['Baseline', 'ML']\nmm_algorithm_selected = 'ML'\n\nmm_pie_color_dict_2 = {\"piecolorway\":[\"#00D08A\",\"#FE913C\"]}\nmm_pie_color_dict_4 = {\"piecolorway\":[\"#00D08A\",\"#81F1A0\",\"#F3C178\",\"#FE913C\"]}\n\nmm_margin_features = {'margin': {'l': 150}}\n\ndef creation_scatter_dataset_pred(test_dataset:pd.DataFrame, forecast_series:pd.Series):\n \"\"\"This function creates the dataset for the scatter plot for the predictions. For every column (except EXITED) will have a positive and negative version.\n EXITED is here a binary indicating if the prediction is good or bad.\n The positive column will have NaN when the Exited is zero and the negative column will have NaN when the Exited is one. \n\n Args:\n test_dataset (pd.DataFrame): the test dataset\n forecast_series (pd.DataFrame): the forecast dataset\n\n Returns:\n pd.DataFrame: the Dataframe used to display the Histogram\n \"\"\"\n \n scatter_dataset = test_dataset.copy()\n scatter_dataset['EXITED'] = (scatter_dataset['EXITED']!=forecast_series.to_numpy()).astype(int)\n\n for column in scatter_dataset.columns:\n if column != 'EXITED' :\n column_neg = str(column)+'_neg'\n column_pos = str(column)+'_pos'\n \n scatter_dataset[column_neg] = scatter_dataset[column]\n scatter_dataset[column_pos] = scatter_dataset[column]\n \n scatter_dataset.loc[(scatter_dataset['EXITED'] == 1),column_neg] = np.NaN\n scatter_dataset.loc[(scatter_dataset['EXITED'] == 0),column_pos] = np.NaN\n \n return scatter_dataset\n\n\n\n\ndef creation_histo_full_pred(test_dataset:pd.DataFrame,forecast_series:pd.Series):\n \"\"\"This function creates the dataset for the histogram plot for the predictions. For every column (except PREDICTION) will have a positive and negative version.\n PREDICTION is a binary indicating if the prediction is good or bad.\n The positive column will have NaN when the PREDICTION is zero and the negative column will have NaN when the PREDICTION is one. \n\n Args:\n test_dataset (pd.DataFrame): the test dataset\n forecast_series (pd.DataFrame): the forecast dataset\n\n Returns:\n pd.DataFrame: the Dataframe used to display the Histogram\n \"\"\"\n histo_full = test_dataset.copy()\n histo_full['EXITED'] = (histo_full['EXITED']!=forecast_series.to_numpy()).astype(int)\n histo_full.columns = histo_full.columns.str.replace('EXITED', 'PREDICTION')\n \n for column in histo_full.columns:\n column_neg = str(column)+'_neg'\n histo_full[column_neg] = histo_full[column]\n histo_full.loc[(histo_full['PREDICTION'] == 1),column_neg] = np.NaN\n histo_full.loc[(histo_full['PREDICTION'] == 0),column] = np.NaN\n \n return histo_full\n\n\n\nmm_model_manager_md = \"\"\"\n# **Model**{: .color-primary} Manager\n\n<|layout|columns=3 2 2 2|\n<|{mm_graph_selected_scenario}|toggle|lov={mm_graph_selector_scenario}|>\n\n\n<|{mm_algorithm_selected}|selector|lov={mm_algorithm_selector}|dropdown|label=Algorithm|>\n\n<|show roc|button|on_action={lambda s: s.assign(\"dr_show_roc\", True)}|>\n\n
**Number of predictions:** <|{number_of_predictions}|>\n|>\n\n-----------------------------------------------------------------\n\n\n\n\n\n\n\n**Model accuracy**\n{: .text-center}\n\n<|{pie_plotly}|chart|title=Accuracy of predictions model|values=values|labels=labels|type=pie|layout={mm_pie_color_dict_2}|>\n|accuracy>\n\n\n\n**Model AUC**\n{: .text-center}\n\n<|{pie_confusion_matrix}|chart|title=Confusion Matrix|values=values|labels=labels|type=pie|layout={mm_pie_color_dict_4}|>\n|score_auc>\n\n\n\n**Model F1-score**\n{: .text-center}\n\n<|{distrib_class}|chart|title=Distribution between Exited and Stayed|values=values|labels=labels|type=pie|layout={mm_pie_color_dict_2}|>\n|f1_score>\n|>\n|Metrics>\n\n\n\n\n\n|Features>\n\n\n\n\n\n\n<|{histo_full_pred}|chart|type=histogram|properties={properties_histo_full}|rebuild|y=PREDICTION|label=PREDICTION|color[1]=red|color[2]=green|name[1]=Good Predictions|name[2]=Bad Predictions|height=600px|>\n|Histogram>\n\n\n\n\n\n\n\n\n<|{y_selected}|selector|lov={select_y}|dropdown=True|label=Select y|>\n|>\n\n<|{scatter_dataset_pred}|chart|properties={properties_scatter_dataset}|rebuild|color[1]=red|color[2]=green|name[1]=Bad prediction|name[2]=Good prediction|mode=markers|type=scatter|height=600px|>\n|Scatter>\n\"\"\"\n"} {"text": "# Create app for demo-stock-visualization main.py\nfrom taipy.gui import Gui, notify\nfrom datetime import date\nimport yfinance as yf\nfrom prophet import Prophet\nimport pandas as pd\n\n\n# Parameters for retrieving the stock data\nstart_date = \"2015-01-01\"\nend_date = date.today().strftime(\"%Y-%m-%d\")\nselected_stock = 'AAPL'\nn_years = 1\n\n\ndef get_stock_data(ticker, start, end):\n ticker_data = yf.download(ticker, start, end) # downloading the stock data from START to TODAY\n ticker_data.reset_index(inplace=True) # put date in the first column\n ticker_data['Date'] = pd.to_datetime(ticker_data['Date']).dt.tz_localize(None)\n return ticker_data\n\ndef get_data_from_range(state):\n print(\"GENERATING HIST DATA\")\n start_date = state.start_date if type(state.start_date)==str else state.start_date.strftime(\"%Y-%m-%d\")\n end_date = state.end_date if type(state.end_date)==str else state.end_date.strftime(\"%Y-%m-%d\")\n\n state.data = get_stock_data(state.selected_stock, start_date, end_date)\n if len(state.data) == 0:\n notify(state, \"error\", f\"Not able to download data {state.selected_stock} from {start_date} to {end_date}\")\n return\n notify(state, 's', 'Historical data has been updated!')\n notify(state, 'w', 'Deleting previous predictions...')\n state.forecast = pd.DataFrame(columns=['Date', 'Lower', 'Upper'])\n\n\n\ndef generate_forecast_data(data, n_years):\n # FORECASTING\n df_train = data[['Date', 'Close']]\n df_train = df_train.rename(columns={\"Date\": \"ds\", \"Close\": \"y\"}) # This is the format that Prophet accepts\n\n m = Prophet()\n m.fit(df_train)\n future = m.make_future_dataframe(periods=n_years * 365)\n fc = m.predict(future)[['ds', 'yhat_lower', 'yhat_upper']].rename(columns={\"ds\": \"Date\", \"yhat_lower\": \"Lower\", \"yhat_upper\": \"Upper\"})\n print(\"Process Completed!\")\n return fc\n\n\ndef forecast_display(state):\n notify(state, 'i', 'Predicting...')\n state.forecast = generate_forecast_data(state.data, state.n_years)\n notify(state, 's', 'Prediction done! 
Forecast data has been updated!')\n\n\n\n#### Get the data, make the initial forecast and build a front-end web app with Taipy GUI\ndata = get_stock_data(selected_stock, start_date, end_date)\nforecast = generate_forecast_data(data, n_years)\n\nshow_dialog = False\n\npartial_md = \"<|{forecast}|table|>\"\ndialog_md = \"<|{show_dialog}|dialog|partial={partial}|title=Forecast Data|on_action={lambda state: state.assign('show_dialog', False)}|>\"\n\npage = dialog_md + \"\"\"<|toggle|theme|>\n<|container|\n# Stock Price **Analysis**{: .color-primary} Dashboard\n\n<|layout|columns=1 2 1|gap=40px|class_name=card p2|\n\n \n\nTo:\n<|{end_date}|date|on_change=get_data_from_range|> \n|dates>\n\n \n\n\nor choose a popular one\n\n<|{selected_stock}|toggle|lov=MSFT;GOOG;AAPL;AMZN;META;COIN;AMC;PYPL|on_change=get_data_from_range|>\n|ticker>\n\n \n<|{n_years}|slider|min=1|max=5|> \n\n<|PREDICT|button|on_action=forecast_display|class_name={'plain' if len(forecast)==0 else ''}|>\n|years>\n\n|>\n\n\n<|Historical Data|expandable|expanded=False|\n<|layout|columns=1 1|\n<|\n### Historical **closing**{: .color-primary} price\n<|{data}|chart|mode=line|x=Date|y[1]=Open|y[2]=Close|>\n|>\n\n<|\n### Historical **daily**{: .color-primary} trading volume\n<|{data}|chart|mode=line|x=Date|y=Volume|>\n|>\n|>\n\n### **Whole**{: .color-primary} historical data: <|{selected_stock}|text|raw|>\n<|{data}|table|>\n\n
\n|>\n\n\n### **Forecast**{: .color-primary} Data\n<|{forecast}|chart|mode=line|x=Date|y[1]=Lower|y[2]=Upper|>\n\n
\n\n\n<|More info|button|on_action={lambda s: s.assign(\"show_dialog\", True)}|>\n{: .text-center}\n|>\n\n
\n\"\"\"\n\n\n# Run Taipy GUI\ngui = Gui(page)\npartial = gui.add_partial(partial_md)\ngui.run(dark_mode=False, title=\"Stock Visualization\")\n"} {"text": "# Create app for demo-movie-genre main.py\nimport taipy as tp\nimport pandas as pd\nfrom taipy import Config, Scope, Gui\n\n# Create a Taipy App that will output the 7 best movies for a genre\n\n# Taipy Core - backend definition\n\n# Filter function for Task\ndef filtering_genre(initial_dataset: pd.DataFrame, selected_genre):\n filtered_dataset = initial_dataset[initial_dataset['genres'].str.contains(selected_genre)]\n filtered_data = filtered_dataset.nlargest(7, 'Popularity %')\n return filtered_data\n\n\n# Input Data Nodes configuration\ninitial_dataset_cfg = Config.configure_data_node(id=\"initial_dataset\",\n storage_type=\"csv\",\n path=\"data.csv\",\n scope=Scope.GLOBAL)\n\nselected_genre_cfg = Config.configure_data_node(id=\"selected_genre_node\",\n default_data=\"ACTION\",\n scope=Scope.GLOBAL)\n\n# Output Data Node configuration\nfiltered_data_cfg = Config.configure_data_node(id=\"filtered_data\",\n scope=Scope.GLOBAL)\n\n\n# Task configuration\nfilter_task_cfg = Config.configure_task(id=\"filter_genre\",\n function=filtering_genre,\n input=[initial_dataset_cfg, selected_genre_cfg],\n output=filtered_data_cfg,\n skippable=True)\n\n# Pipeline configuration\npipeline_cfg = Config.configure_pipeline(id=\"pipeline\",\n task_configs=[filter_task_cfg])\n# Scenario configuration\nscenario_cfg = Config.configure_scenario(id=\"scenario\", pipeline_configs=[pipeline_cfg])\n\n# Run of the Taipy Core service\ntp.Core().run()\n\n# Creation of my scenario\nscenario = tp.create_scenario(scenario_cfg)\n\n\n\n# Taipy GUI- front end definition\n\n# Callback definition\ndef modify_df(state):\n scenario.selected_genre_node.write(state.selected_genre)\n tp.submit(scenario)\n state.df = scenario.filtered_data.read() \n\n# Get list of genres\nlist_genres = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Fantasy', 'IMAX', 'Romance',\n 'Sci-FI', 'Western', 'Crime', 'Mystery', 'Drama', 'Horror', 'Thriller', 'Film-Noir',\n 'War', 'Musical', 'Documentary']\n\n# Initialization of variables\ndf = pd.DataFrame(columns=['Title', 'Popularity %'])\nselected_genre = None\n\n# movie_genre_app\nmovie_genre_app = \"\"\"\n# Film recommendation\n\n## Choose your favorite genre\n<|{selected_genre}|selector|lov={list_genres}|on_change=modify_df|dropdown|>\n\n## Here are the top 7 picks\n<|{df}|chart|x=Title|y=Popularity %|type=bar|title=Film Popularity|>\n\"\"\"\n# run the app\nGui(page=movie_genre_app).run()\n"} {"text": "# Create app for demo-job-monitoring __init__.py\n"} {"text": "# Create app for demo-job-monitoring runtime.py\nfrom taipy import run\n\n\nclass App:\n \"\"\"A singleton class that provides the Taipy runtime objects.\"\"\"\n\n def __new__(cls):\n if not hasattr(cls, \"instance\"):\n cls.instance = super(App, cls).__new__(cls)\n return cls.instance\n\n @property\n def gui(self):\n return self.__gui\n\n @property\n def core(self):\n return self.__core\n\n @gui.setter\n def gui(self, gui):\n self.__gui = gui\n\n @core.setter\n def core(self, core):\n self.__core = core\n\n def start(self, **kwargs):\n # Starts the app by calling `taipy.run` on the core and gui objects:\n run(self.__gui, self.__core, **kwargs)\n"} {"text": "# Create app for demo-job-monitoring main.py\nfrom runtime import App\nfrom pages import root, monitoring\nimport taipy\nfrom taipy.config.config import Config\nfrom taipy.gui import Gui\nimport os\n\n# Variables for 
bindings\nall_jobs = [['','','','']]\nshow_dialog_run_pipeline = False\nselected_pipeline = None\nshow_details_pane = False\nselected_job = None\n\n\nif __name__ == \"__main__\":\n # Initialize Taipy objects\n Config.configure_job_executions(mode=\"standalone\", nb_of_workers=4)\n Config.load(\"app.config.toml\")\n App().core = taipy.Core()\n App().gui = Gui(pages={\"/\": root.page, \"monitoring\": monitoring.page})\n\n # Start the app\n App().start(\n title=\"Job Monitoring Demo\",\n port=os.environ.get(\"PORT\", \"8080\"),\n dark_mode=False,\n css_file=\"app\",\n )\n"} {"text": "# Create app for demo-job-monitoring __init__.py\n"} {"text": "# Create app for demo-job-monitoring ml.py\nfrom sklearn.linear_model import LogisticRegression\nimport pandas as pd\nimport numpy as np\n\n\n# Test prediction with a Female, 19 years old, earning 20000\nfixed_value = [1, 19, 20000]\n\n\ndef preprocess(df: pd.DataFrame) -> pd.DataFrame:\n def _gender_to_int(gender):\n if gender == \"Female\":\n return 1\n return 0\n\n df[\"GenderNum\"] = df[\"Gender\"].apply(_gender_to_int)\n\n return df\n\n\ndef train(dataset):\n # X (features) are \"GenderNum\", \"Age\", \"EstimatedSalary\"\n X = dataset[[\"GenderNum\", \"Age\", \"EstimatedSalary\"]]\n\n # Y is \"Purchased\"\n Y = dataset[[\"Purchased\"]]\n\n # Let's split the dataset: the first 50 will be used for training,\n # the rest will be for testing\n split = 50\n X_train, Y_train = X[:split], Y[:split]\n X_test, Y_test = X[split:], Y[split:]\n\n # Using scikit-learn default\n regression = LogisticRegression(random_state=0, max_iter=10000).fit(\n X_train.values, Y_train.values.ravel()\n )\n\n # Accuracy of our model:\n print(f\"intercept: {regression.intercept_} coefficients: {regression.coef_}\")\n print(f\"train accuracy: {regression.score(X_train, Y_train)}\")\n print(f\"test accuracy: {regression.score(X_test, Y_test)}\") # We aim for > 0.8...\n\n return regression\n\n\ndef predict(x, regression: LogisticRegression):\n variables = np.array(x).reshape(1, -1)\n result = regression.predict(variables)\n print(f\"for: {variables}, the prediction is {result}\")\n return result\n\n\nif __name__ == \"__main__\":\n # Testing\n df = pd.read_csv(\"data/data.csv\")\n df = preprocess(df)\n model = train(df)\n print(predict(fixed_value, model))\n"} {"text": "# Create app for demo-job-monitoring debug.py\nimport time\n\n\ndef long_running(anything):\n print(\"Waiting 20 seconds...\")\n time.sleep(20)\n print(\"Done!\")\n return anything\n\n\ndef raise_exception(anything):\n print(\"Waiting 5 seconds before raising an exception...\")\n time.sleep(5)\n raise Exception(\"A very expected error occured!\")\n"} {"text": "# Create app for demo-job-monitoring monitoring.py\nimport taipy as tp\nfrom taipy.gui import get_state_id, invoke_callback, Markdown\nfrom taipy.config.config import Config\nfrom taipy.core.job.job import Job\nfrom runtime import App\n\n\ndef get_all_jobs():\n \"\"\"Returns all the known jobs (as a array of fields).\"\"\"\n\n def _job_to_fields(job: Job) -> list[str]:\n return [\n job.submit_id,\n job.id,\n job.creation_date.strftime(\"%b %d %Y %H:%M:%S\"),\n str(job.status),\n ]\n\n return [_job_to_fields(job) for job in tp.get_jobs()]\n\n\ndef get_all_pipelines():\n \"\"\"Returns all pipelines (as an array of ids)\"\"\"\n return [\n pipeline.id\n for pipeline in Config.pipelines.values()\n if pipeline.id != \"default\" # we explicitely get rid of the \"default\" pipeline\n ]\n\n\ndef get_job_by_id(id):\n \"\"\"Return a job from its id\"\"\"\n found = [job 
for job in tp.get_jobs() if job.id == id]\n if found:\n return found[0]\n return None\n\n\ndef get_job_by_index(index):\n \"\"\"Return a job from its index\"\"\"\n all_jobs = tp.get_jobs()\n if len(all_jobs) > index:\n return all_jobs[index]\n return None\n\n\ndef get_status(job: Job):\n \"\"\"Get the status of the given job as string.\"\"\"\n if not job:\n return None\n return job.status.name.lower()\n\n\n# -----------------------------------------------------------------------------\n# Callbacks / UI function\n\ndef on_style(state, index, row):\n status_index = 3\n if 'RUNNING' in row[status_index]:\n return 'blue'\n if 'COMPLETED' in row[status_index]:\n return 'green'\n if 'BLOCKED' in row[status_index]:\n return 'orange'\n if 'FAILED' in row[status_index]:\n return 'red'\n\ndef refresh_job_list(state):\n \"\"\"Refresh the job list\"\"\"\n state.all_jobs = get_all_jobs()\n\n\ndef job_updated(state_id, pipeline, job):\n \"\"\"Callback called when a job has been updated.\"\"\"\n\n # invoke_callback allows to run a function with a GUI _state_.\n invoke_callback(App().gui, state_id, refresh_job_list, args=[])\n\n\ndef open_run_pipeline_dialog(state):\n \"\"\"Opens the 'Run pipeline...' dialog.\"\"\"\n state.show_dialog_run_pipeline = True\n\n\ndef close_run_pipeline_dialog(state):\n \"\"\"Closes the 'Run pipeline...' dialog.\"\"\"\n state.show_dialog_run_pipeline = False\n\n\ndef run_pipeline(state):\n \"\"\"Runs a pipeline action.\"\"\"\n\n # We need to pass the state ID so that it can be restored in the job_updated listener:\n state_id = get_state_id(state)\n\n # Get selected pipeline config:\n selected = state.selected_pipeline\n pipeline_config = Config.pipelines[selected]\n if not pipeline_config:\n raise Exception(f\"unknown pipeline config: {selected}\")\n\n # Close the dialog\n close_run_pipeline_dialog(state)\n\n pipeline = tp.create_pipeline(pipeline_config)\n tp.subscribe_pipeline(pipeline=pipeline, callback=job_updated, params=[state_id])\n tp.submit(pipeline)\n\n\ndef on_table_click(state, table, action, payload):\n job_index = payload[\"index\"]\n selected_job = get_job_by_index(job_index)\n state.selected_job = selected_job\n state.show_details_pane = True\n\n\ndef cancel_selected_job(state):\n job_id = state.selected_job.id\n tp.cancel_job(state.selected_job)\n state.show_details_pane = False\n refresh_job_list(state)\n state.selected_job = get_job_by_id(job_id)\n\n\ndef delete_selected_job(state):\n tp.delete_job(state.selected_job, force=True)\n state.show_details_pane = False\n refresh_job_list(state)\n\n\n# -----------------------------------------------------------------------------\n# UI Configuration\n\ncolumns = {\n \"0\": {\"title\": \"Submit ID\"},\n \"1\": {\"title\": \"Job ID\"},\n \"2\": {\"title\": \"Creation Date\"},\n \"3\": {\"title\": \"Status\"},\n}\n\n\n# -----------------------------------------------------------------------------\n# Page\n\n\npage = Markdown(\"job_monitoring/pages/monitoring.md\")\n"} {"text": "# Create app for demo-job-monitoring __init__.py\n"} {"text": "# Create app for demo-job-monitoring root.py\nfrom taipy.gui import Markdown\n\ncontent = \"\"\"\n# Job Monitoring Demo\n\"\"\"\n\npage = Markdown(content)\n"} {"text": "# Create app for demo-job-monitoring monitoring.md\n<|{all_jobs}|table|columns={columns}|width='100%'|on_action={on_table_click}|style=on_style|>\n<|Refresh List|button|on_action={refresh_job_list}|>\n<|Run Pipeline...|button|on_action={open_run_pipeline_dialog}|>\n\n<|{show_dialog_run_pipeline}|dialog|title=Run 
pipeline...|\n<|{selected_pipeline}|selector|lov={get_all_pipelines()}|>\n<|Run|button|on_action={run_pipeline}|>\n<|Cancel|button|on_action={close_run_pipeline_dialog}|>\n|>\n<|{show_details_pane}|pane|\n\n# Job Details <|Delete|button|on_action=delete_selected_job|> <|Cancel|button|on_action=cancel_selected_job|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n## Task\n<|{selected_job.task.config_id}|>\n|>\n\n<|part|class_name=card|\n## Status\n<|{get_status(selected_job)}|>\n|>\n|>\n\n<|part|class_name=card|\n## ID\n<|{selected_job.id}|>\n|>\n\n<|part|class_name=card|\n## Submission ID\n<|{selected_job.submit_id}|>\n|>\n\n<|part|class_name=card|\n## Creation Date\n<|{selected_job.creation_date.strftime(\"%b %d %y %H:%M:%S\")}|>\n|>\n\n<|part|class_name=card|\n## Stacktrace\n<|{\"\\n\".join(selected_job.stacktrace)}|class_name=code|>\n|>\n\n----\n\n\n|>\n"} {"text": "# Create app for demo-fraud-detection charts.py\n\"\"\" Prepare data for charts \"\"\"\n\nimport pandas as pd\n\n\ndef gen_amt_data(transactions: pd.DataFrame) -> list:\n \"\"\"\n Create a list of amt values for fraudulent and non-fraudulent transactions\n\n Args:\n - transactions: the transactions dataframe\n\n Returns:\n - a list of two dictionaries containing the data for the two histograms\n \"\"\"\n amt_fraud = transactions[transactions[\"fraud\"]][\"amt\"]\n amt_no_fraud = transactions[~transactions[\"fraud\"]][\"amt\"]\n amt_data = [\n {\"Amount ($)\": list(amt_no_fraud)},\n {\"Amount ($)\": list(amt_fraud)},\n ]\n return amt_data\n\n\ndef gen_gender_data(transactions: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Create a dataframe containing the percentage of fraudulent transactions\n per gender\n\n Args:\n - transactions: the transactions dataframe\n\n Returns:\n - the resulting dataframe\n \"\"\"\n male_fraud_percentage = len(\n transactions[transactions[\"fraud\"]].loc[transactions[\"gender\"] == \"M\"]\n ) / len(transactions[transactions[\"fraud\"]])\n female_fraud_percentage = 1 - male_fraud_percentage\n male_not_fraud_percentage = len(\n transactions[~transactions[\"fraud\"]].loc[transactions[\"gender\"] == \"M\"]\n ) / len(transactions[~transactions[\"fraud\"]])\n female_not_fraud_percentage = 1 - male_not_fraud_percentage\n\n gender_data = pd.DataFrame(\n {\n \"Fraudulence\": [\"Not Fraud\", \"Fraud\"],\n \"Male\": [male_not_fraud_percentage, male_fraud_percentage],\n \"Female\": [female_not_fraud_percentage, female_fraud_percentage],\n }\n )\n return gender_data\n\n\ndef gen_cat_data(transactions: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Generates a dataframe with the percentage difference\n between fraudulent and non-fraudulent transactions per category\n\n Args:\n - transactions: the transactions dataframe\n\n Returns:\n - the resulting dataframe\n \"\"\"\n categories = transactions[\"category\"].unique()\n fraud_categories = [\n len(\n transactions[transactions[\"fraud\"]].loc[\n transactions[\"category\"] == category\n ]\n )\n for category in categories\n ]\n fraud_categories_norm = [\n category / len(transactions[transactions[\"fraud\"]])\n for category in fraud_categories\n ]\n not_fraud_categories = [\n len(\n transactions[~transactions[\"fraud\"]].loc[\n transactions[\"category\"] == category\n ]\n )\n for category in categories\n ]\n not_fraud_categories_norm = [\n category / len(transactions[~transactions[\"fraud\"]])\n for category in not_fraud_categories\n ]\n diff_categories = [\n fraud_categories_norm[i] - not_fraud_categories_norm[i]\n for i in range(len(categories))\n ]\n cat_data = 
pd.DataFrame(\n {\n \"Category\": categories,\n \"Difference\": diff_categories,\n }\n )\n\n cat_data = cat_data.sort_values(by=\"Difference\", ascending=False)\n return cat_data\n\n\ndef gen_age_data(transactions: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Generates a dataframe with the percentage of fraudulent transactions\n per age\n\n Args:\n - transactions: the transactions dataframe\n\n Returns:\n - the resulting dataframe\n \"\"\"\n age = range(111)\n fraud_age = [\n len(transactions[transactions[\"fraud\"]].loc[transactions[\"age\"] == age])\n / len(transactions[transactions[\"fraud\"]])\n for age in age\n ]\n not_fraud_age = [\n len(transactions[~transactions[\"fraud\"]].loc[transactions[\"age\"] == age])\n / len(transactions[~transactions[\"fraud\"]])\n for age in age\n ]\n age_data = pd.DataFrame(\n {\n \"Age\": age,\n \"Fraud\": fraud_age,\n \"Not Fraud\": not_fraud_age,\n }\n )\n return age_data\n\n\ndef gen_hour_data(transactions: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Generates a dataframe with the percentage of fraudulent transactions\n per hour\n\n Args:\n - transactions: the transactions dataframe\n\n Returns:\n - the resulting dataframe\n \"\"\"\n hours = range(1, 25)\n fraud_hours = [\n len(transactions[transactions[\"fraud\"]].loc[transactions[\"hour\"] == hour])\n / len(transactions[transactions[\"fraud\"]])\n for hour in hours\n ]\n not_fraud_hours = [\n len(transactions[~transactions[\"fraud\"]].loc[transactions[\"hour\"] == hour])\n / len(transactions[~transactions[\"fraud\"]])\n for hour in hours\n ]\n hour_data = pd.DataFrame(\n {\n \"Hour\": hours,\n \"Fraud\": fraud_hours,\n \"Not Fraud\": not_fraud_hours,\n }\n )\n return hour_data\n\n\ndef gen_day_data(transactions: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Generates a dataframe with the percentage of fraudulent transactions\n per weekday\n\n Args:\n - transactions: the transactions dataframe\n\n Returns:\n - the resulting dataframe\n \"\"\"\n days = range(7)\n days_names = [\n \"Monday\",\n \"Tuesday\",\n \"Wednesday\",\n \"Thursday\",\n \"Friday\",\n \"Saturday\",\n \"Sunday\",\n ]\n fraud_days = [\n len(transactions[transactions[\"fraud\"]].loc[transactions[\"day\"] == day])\n / len(transactions[transactions[\"fraud\"]])\n for day in days\n ]\n not_fraud_days = [\n len(transactions[~transactions[\"fraud\"]].loc[transactions[\"day\"] == day])\n / len(transactions[~transactions[\"fraud\"]])\n for day in days\n ]\n day_data = pd.DataFrame(\n {\n \"Day\": days_names,\n \"Fraud\": fraud_days,\n \"Not Fraud\": not_fraud_days,\n }\n )\n return day_data\n\n\ndef gen_month_data(transactions: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Generates a dataframe with the percentage of fraudulent transactions\n per month\n\n Args:\n - transactions: the transactions dataframe\n\n Returns:\n - the resulting dataframe\n \"\"\"\n months = range(1, 13)\n months_names = [\n \"January\",\n \"February\",\n \"March\",\n \"April\",\n \"May\",\n \"June\",\n \"July\",\n \"August\",\n \"September\",\n \"October\",\n \"November\",\n \"December\",\n ]\n fraud_months = [\n len(transactions[transactions[\"fraud\"]].loc[transactions[\"month\"] == month])\n / len(transactions[transactions[\"fraud\"]])\n for month in months\n ]\n not_fraud_months = [\n len(transactions[~transactions[\"fraud\"]].loc[transactions[\"month\"] == month])\n / len(transactions[~transactions[\"fraud\"]])\n for month in months\n ]\n month_data = pd.DataFrame(\n {\n \"Month\": months_names,\n \"Fraud\": fraud_months,\n \"Not Fraud\": not_fraud_months,\n }\n )\n return 
month_data\n"} {"text": "# Create app for demo-fraud-detection utils.py\n\"\"\" Data Manipulation and Callbacks \"\"\"\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\nfrom taipy.gui import State, navigate, notify\nimport xgboost as xgb\nfrom shap import Explainer, Explanation\nfrom sklearn.metrics import confusion_matrix\n\ncolumn_names = [\n \"amt\",\n \"zip\",\n \"city_pop\",\n \"age\",\n \"hour\",\n \"day\",\n \"month\",\n \"category_food_dining\",\n \"category_gas_transport\",\n \"category_grocery_net\",\n \"category_grocery_pos\",\n \"category_health_fitness\",\n \"category_home\",\n \"category_kids_pets\",\n \"category_misc_net\",\n \"category_misc_pos\",\n \"category_personal_care\",\n \"category_shopping_net\",\n \"category_shopping_pos\",\n \"category_travel\",\n]\n\n\ndef explain_pred(state: State, _: str, payload: dict) -> None:\n \"\"\"\n When a transaction is selected in the table\n Explain the prediction using SHAP, update the waterfall chart\n\n Args:\n - state: the state of the app\n - payload: the payload of the event containing the index of the transaction\n \"\"\"\n idx = payload[\"index\"]\n exp = state.explaination[idx]\n\n feature_values = [-value for value in list(exp.values)]\n data_values = list(exp.data)\n\n for i, value in enumerate(data_values):\n if isinstance(value, float):\n value = round(value, 2)\n data_values[i] = value\n\n names = [f\"{name}: {value}\" for name, value in zip(column_names, data_values)]\n\n exp_data = pd.DataFrame({\"Feature\": names, \"Influence\": feature_values})\n exp_data[\"abs_importance\"] = exp_data[\"Influence\"].abs()\n exp_data = exp_data.sort_values(by=\"abs_importance\", ascending=False)\n exp_data = exp_data.drop(columns=[\"abs_importance\"])\n exp_data = exp_data[:5]\n state.exp_data = exp_data\n\n if state.transactions.iloc[idx][\"fraud\"]:\n state.fraud_text = \"Why is this transaction fraudulent?\"\n else:\n state.fraud_text = \"Why is this transaction not fraudulent?\"\n\n first = state.transactions.iloc[idx][\"first\"]\n last = state.transactions.iloc[idx][\"last\"]\n\n state.specific_transactions = state.transactions[\n (state.transactions[\"first\"] == first) & (state.transactions[\"last\"] == last)\n ]\n\n state.selected_transaction = state.transactions.loc[[idx]]\n\n state.selected_client = f\"{first} {last}\"\n\n navigate(state, \"Analysis\")\n\n\ndef generate_transactions(\n state: State,\n df: pd.DataFrame,\n model: xgb.XGBRegressor,\n threshold: float,\n start_date=\"2020-06-21\",\n end_date=\"2030-01-01\",\n) -> [pd.DataFrame, Explanation]:\n \"\"\"\n Generates a DataFrame of transactions with the fraud prediction\n\n Args:\n - state: the state of the app\n - df: the DataFrame containing the transactions\n - model: the model used to predict the fraud\n - threshold: the threshold used to determine if a transaction is fraudulent\n - start_date: the start date of the transactions\n - end_date: the end date of the transactions\n\n Returns:\n - a DataFrame of transactions with the fraud prediction\n \"\"\"\n start_date = str(start_date)\n end_date = str(end_date)\n start_date_dt = dt.datetime.strptime(start_date, \"%Y-%m-%d\")\n end_date_dt = dt.datetime.strptime(end_date, \"%Y-%m-%d\")\n # Make sure the dates are separated by at least one day\n if (end_date_dt - start_date_dt).days < 1:\n notify(state, \"error\", \"The start date must be before the end date\")\n raise Exception(\"The start date must be before the end date\")\n # Make sure that start_date is between 2020-06-21 and 2020-06-30\n if 
not (dt.datetime(2020, 6, 21) <= start_date_dt <= dt.datetime(2020, 6, 30)):\n notify(\n state, \"error\", \"The start date must be between 2020-06-21 and 2020-06-30\"\n )\n raise Exception(\"The start date must be between 2020-06-21 and 2020-06-30\")\n df[\"age\"] = dt.date.today().year - pd.to_datetime(df[\"dob\"]).dt.year\n df[\"hour\"] = pd.to_datetime(df[\"trans_date_trans_time\"]).dt.hour\n df[\"day\"] = pd.to_datetime(df[\"trans_date_trans_time\"]).dt.dayofweek\n df[\"month\"] = pd.to_datetime(df[\"trans_date_trans_time\"]).dt.month\n test = df[\n [\n \"category\",\n \"amt\",\n \"zip\",\n \"city_pop\",\n \"age\",\n \"hour\",\n \"day\",\n \"month\",\n \"is_fraud\",\n ]\n ]\n test = pd.get_dummies(test, drop_first=True)\n test = test[df[\"trans_date_trans_time\"].between(str(start_date), str(end_date))]\n\n X_test = test.drop(\"is_fraud\", axis=\"columns\")\n X_test_values = X_test.values\n\n transactions = df[\n df[\"trans_date_trans_time\"].between(str(start_date), str(end_date))\n ]\n raw_results = model.predict(X_test_values)\n results = [str(min(1, round(result, 2))) for result in raw_results]\n transactions.insert(0, \"fraud_value\", results)\n # Low if under 0.2, Medium if under 0.5, High if over 0.5\n results = [\"Low\" if float(result) < 0.2 else \"Medium\" for result in raw_results]\n for i, result in enumerate(results):\n if result == \"Medium\" and float(raw_results[i]) > 0.5:\n results[i] = \"High\"\n transactions.insert(0, \"fraud_confidence\", results)\n results = [float(result) > threshold for result in raw_results]\n transactions.insert(0, \"fraud\", results)\n\n explainer = Explainer(model)\n sv = explainer(X_test)\n explaination = Explanation(sv, sv.base_values, X_test, feature_names=X_test.columns)\n # Drop Unnamed: 0 column if it exists\n if \"Unnamed: 0\" in transactions.columns:\n transactions = transactions.drop(columns=[\"Unnamed: 0\"])\n return transactions, explaination\n\n\ndef update_threshold(state: State) -> None:\n \"\"\"\n Change the threshold used to determine if a transaction is fraudulent\n Generate the confusion matrix\n\n Args:\n - state: the state of the app\n \"\"\"\n threshold = float(state.threshold)\n results = [\n float(result) > threshold for result in state.transactions[\"fraud_value\"]\n ]\n state.transactions[\"fraud\"] = results\n state.transactions = state.transactions\n results = [\n float(result) > threshold\n for result in state.original_transactions[\"fraud_value\"]\n ]\n state.original_transactions[\"fraud\"] = results\n state.original_transactions = state.original_transactions\n y_pred = results\n y_true = state.original_transactions[\"is_fraud\"]\n cm = confusion_matrix(y_true, y_pred)\n cm = cm.astype(\"float\") / cm.sum(axis=1)[:, np.newaxis]\n tp, tn, fp, fn = cm[1][1], cm[0][0], cm[0][1], cm[1][0]\n\n dataset = state.original_transactions[:10000]\n state.true_positives = dataset[\n (dataset[\"is_fraud\"] == True) & (dataset[\"fraud\"] == True)\n ]\n state.true_negatives = dataset[\n (dataset[\"is_fraud\"] == False) & (dataset[\"fraud\"] == False)\n ]\n state.false_positives = dataset[\n (dataset[\"is_fraud\"] == False) & (dataset[\"fraud\"] == True)\n ]\n state.false_negatives = dataset[\n (dataset[\"is_fraud\"] == True) & (dataset[\"fraud\"] == False)\n ]\n\n data = {\n \"Values\": [\n [fn, tp],\n [tn, fp],\n ],\n \"Actual\": [\"Fraud\", \"Not Fraud\"],\n \"Predicted\": [\"Not Fraud\", \"Fraud\"],\n }\n\n layout = {\n \"annotations\": [],\n \"xaxis\": {\"ticks\": \"\", \"side\": \"top\"},\n \"yaxis\": {\"ticks\": \"\", 
\"ticksuffix\": \" \"},\n }\n\n predicted = data[\"Predicted\"]\n actuals = data[\"Actual\"]\n for actual, _ in enumerate(actuals):\n for pred, _ in enumerate(predicted):\n value = data[\"Values\"][actual][pred]\n annotation = {\n \"x\": predicted[pred],\n \"y\": actuals[actual],\n \"text\": f\"{str(round(value, 3)*100)[:4]}%\",\n \"font\": {\"color\": \"white\" if value < 0.5 else \"black\", \"size\": 30},\n \"showarrow\": False,\n }\n layout[\"annotations\"].append(annotation)\n\n state.confusion_data = data\n state.confusion_layout = layout\n update_table(state)\n return (\n state.true_positives,\n state.true_negatives,\n state.false_positives,\n state.false_negatives,\n )\n\n\ndef update_table(state: State) -> None:\n \"\"\"\n Updates the table of transactions displayed\n\n Args:\n - state: the state of the app\n \"\"\"\n if state.selected_table == \"True Positives\":\n state.displayed_table = state.true_positives\n elif state.selected_table == \"False Positives\":\n state.displayed_table = state.false_positives\n elif state.selected_table == \"True Negatives\":\n state.displayed_table = state.true_negatives\n elif state.selected_table == \"False Negatives\":\n state.displayed_table = state.false_negatives\n"} {"text": "# Create app for demo-fraud-detection main.py\n\"\"\" Fraud Detection App \"\"\"\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nfrom taipy.gui import Gui, Icon, State, navigate, notify\n\nfrom utils import (\n explain_pred,\n generate_transactions,\n update_threshold,\n update_table,\n)\nfrom charts import *\n\nDATA_POINTS = 30000\nthreshold = \"0.5\"\nthreshold_lov = np.arange(0, 1, 0.01)\nconfusion_text = \"Confusion Matrix\"\nfraud_text = \"No row selected\"\nexp_data = pd.DataFrame({\"Feature\": [], \"Influence\": []})\n\ndf = pd.read_csv(\"data/fraud_data.csv\")\ndf[\"merchant\"] = df[\"merchant\"].str[6:]\nmodel = pickle.load(open(\"model.pkl\", \"rb\"))\ntransactions, explaination = generate_transactions(None, df, model, float(threshold))\noriginal_transactions = transactions\noriginal_explaination = explaination\nspecific_transactions = transactions\nselected_client = \"No client selected\"\nstart_date = \"2020-06-21\"\nend_date = \"2020-06-22\"\nselected_table = \"True Positives\"\ntrue_positives = None\nfalse_positives = None\ntrue_negatives = None\nfalse_negatives = None\ndisplayed_table = None\nselected_transaction = None\n\n\ndef fraud_style(_: State, index: int, values: list) -> str:\n \"\"\"\n Style the transactions table: red if fraudulent\n\n Args:\n - state: the state of the app\n - index: the index of the row\n\n Returns:\n - the style of the row\n \"\"\"\n if values[\"fraud_confidence\"] == \"High\":\n return \"red-row\"\n elif values[\"fraud_confidence\"] == \"Medium\":\n return \"orange-row\"\n return \"\"\n\n\namt_data = gen_amt_data(transactions)\ngender_data = gen_gender_data(transactions)\ncat_data = gen_cat_data(transactions)\nage_data = gen_age_data(transactions)\nhour_data = gen_hour_data(transactions)\nday_data = gen_day_data(transactions)\nmonth_data = gen_month_data(transactions)\n\ndf = df[:DATA_POINTS]\ntransactions = transactions[:DATA_POINTS]\n\n\nwaterfall_layout = {\n \"margin\": {\"b\": 150},\n}\n\namt_options = [\n {\n \"marker\": {\"color\": \"#4A4\", \"opacity\": 0.8},\n \"xbins\": {\"start\": 0, \"end\": 2000, \"size\": 10},\n \"histnorm\": \"probability\",\n },\n {\n \"marker\": {\"color\": \"#A33\", \"opacity\": 0.8, \"text\": \"Compare Data\"},\n \"xbins\": {\"start\": 0, \"end\": 2000, \"size\": 10},\n 
\"histnorm\": \"probability\",\n },\n]\n\namt_layout = {\n \"barmode\": \"overlay\",\n \"showlegend\": True,\n}\n\nconfusion_data = pd.DataFrame({\"Predicted\": [], \"Actual\": [], \"Values\": []})\nconfusion_layout = None\nconfusion_options = {\"colorscale\": \"YlOrRd\", \"displayModeBar\": False}\nconfusion_config = {\"scrollZoom\": False, \"displayModeBar\": False}\n\ntransactions = df\ntransactions = transactions.drop(\"Unnamed: 0\", axis=\"columns\")\n\n\ndef on_init(state: State) -> None:\n \"\"\"\n Generate the confusion matrix on start\n\n Args:\n - state: the state of the app\n \"\"\"\n update_transactions(state)\n state.displayed_table = state.true_positives\n (\n state.true_positives,\n state.true_negatives,\n state.false_positives,\n state.false_negatives,\n ) = update_threshold(state)\n update_table(state)\n\n\ndef update_transactions(state: State) -> None:\n \"\"\"\n Detects frauds in the selected time period\n\n Args:\n - state: the state of the app\n \"\"\"\n notify(state, \"info\", \"Predicting fraud...\")\n state.transactions, state.explaination = generate_transactions(\n state, df, model, float(state.threshold), state.start_date, state.end_date\n )\n state.transactions.reset_index(inplace=True)\n number_of_fraud = len(state.transactions[state.transactions[\"fraud\"] == True])\n notify(state, \"success\", f\"Predicted {number_of_fraud} fraudulent transactions\")\n\n\nmenu_lov = [\n (\"Transactions\", Icon(\"images/transactions.png\", \"Transactions\")),\n (\"Analysis\", Icon(\"images/analysis.png\", \"Analysis\")),\n (\"Fraud Distribution\", Icon(\"images/distribution.png\", \"Fraud Distribution\")),\n (\"Threshold Selection\", Icon(\"images/threshold.png\", \"Threshold Selection\")),\n]\n\npage = \"Transactions\"\n\n\ndef menu_fct(state, var_name, var_value):\n \"\"\"Function that is called when there is a change in the menu control.\"\"\"\n state.page = var_value[\"args\"][0]\n navigate(state, state.page.replace(\" \", \"-\"))\n\n\nROOT = \"\"\"\n<|menu|label=Menu|lov={menu_lov}|on_action=menu_fct|>\n\"\"\"\n\nTRANSACTIONS_PAGE = \"\"\"\n# List of **Transactions**{: .color-primary}\n\n--------------------------------------------------------------------\n\n## Select start and end date for a prediction\n<|layout|columns=1 1 3|\nStart Date: <|{start_date}|date|>\n\nEnd Date (excluding): <|{end_date}|date|>\n|>\n\n<|Detect Frauds|button|on_action=update_transactions|>\n\n## Select a transaction to explain the prediction\n\n<|{transactions}|table|on_action=explain_pred|style=fraud_style|filter|rebuild|>\n\"\"\"\n\nANALYSIS_PAGE = \"\"\"\n# Prediction **Analysis**{: .color-primary}\n\n--------------------------------------------------------------------\n\n<|layout|columns=2 3|\n<|card|\n## <|{fraud_text}|text|>\n<|{exp_data}|chart|type=waterfall|x=Feature|y=Influence|layout={waterfall_layout}|>\n|>\n\n<|\n## Selected Transaction:\n<|{selected_transaction}|table|show_all=True|rebuild||style=fraud_style|>\n## Transactions of client: **<|{selected_client}|text|raw|>**{: .color-primary}\n<|{specific_transactions}|table|style=fraud_style|filter|on_action=explain_pred|>\n|>\n|>\n\"\"\"\n\nCHART_PAGE = \"\"\"\n# Fraud **Distribution**{: .color-primary}\n\n--------------------------------------------------------------------\n\n## Charts of fraud distribution by feature\n\n<|{amt_data}|chart|type=histogram|title=Transaction Amount Distribution|color[2]=red|color[1]=green|name[2]=Fraud|name[1]=Not Fraud|options={amt_options}|layout={amt_layout}|>\n
<|{gender_data}|chart|type=bar|x=Fraudulence|y[1]=Male|y[2]=Female|title=Distribution of Fraud by Gender|>\n
<|{cat_data}|chart|type=bar|x=Category|y=Difference|orientation=v|title=Difference in Fraudulence by Category (Positive = Fraudulent)|>\n
<|{hour_data}|chart|type=bar|x=Hour|y[1]=Not Fraud|y[2]=Fraud|title=Distribution of Fraud by Hour|>\n
<|{day_data}|chart|type=bar|x=Day|y[1]=Not Fraud|y[2]=Fraud|title=Distribution of Fraud by Day|>\n\"\"\"\n\nTHRESHOLD_PAGE = \"\"\"\n# Threshold **Selection**{: .color-primary}\n\n--------------------------------------------------------------------\n\n## Select a threshold of confidence to filter the transactions\n<|{threshold}|slider|on_change=update_threshold|lov=0.05;0.1;0.15;0.2;0.25;0.3;0.35;0.4;0.45;0.5;0.55;0.6;0.65;0.7;0.75;0.8;0.85;0.9;0.95|>\n<|layout|columns=1 2|\n<|{confusion_data}|chart|type=heatmap|z=Values|x=Predicted|y=Actual|layout={confusion_layout}|options={confusion_options}|plot_config={confusion_config}|height=70vh|>\n\n<|card\n<|{selected_table}|selector|lov=True Positives;False Positives;True Negatives;False Negatives|on_change=update_table|dropdown=True|>\n<|{displayed_table}|table|style=fraud_style|filter|rebuild|>\n|>\n|>\n\"\"\"\n\npages = {\n \"/\": ROOT,\n \"Transactions\": TRANSACTIONS_PAGE,\n \"Analysis\": ANALYSIS_PAGE,\n \"Fraud-Distribution\": CHART_PAGE,\n \"Threshold-Selection\": THRESHOLD_PAGE,\n}\n\nGui(pages=pages).run(title=\"Fraud Detection Demo\", dark_mode=False, debug=True)\n"} {"text": "# Create app for dask_taipy_bigdata_DEMO algo.py\nimport time\nimport pandas as pd\nimport dask.dataframe as dd\n\ndef task1(path_to_original_data: str):\n print(\"__________________________________________________________\")\n print(\"1. TASK 1: DATA PREPROCESSING AND CUSTOMER SCORING ...\")\n start_time = time.perf_counter() # Start the timer\n \n # Step 1: Read data using Dask\n df = dd.read_csv(path_to_original_data)\n \n # Step 2: Simplify the customer scoring formula\n df['CUSTOMER_SCORE'] = (\n 0.5 * df['TotalPurchaseAmount'] / 1000 +\n 0.3 * df['NumberOfPurchases'] / 10 +\n 0.2 * df['AverageReviewScore']\n )\n \n # Save all customers to a new CSV file\n scored_df = df[[\"CUSTOMER_SCORE\", \"TotalPurchaseAmount\", \"NumberOfPurchases\", \"TotalPurchaseTime\"]]\n \n pd_df = scored_df.compute()\n\n end_time = time.perf_counter() # Stop the timer\n execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds\n print(f\"Time of Execution: {execution_time:.4f} ms\")\n \n return pd_df\n\ndef task2(scored_df, payment_threshold, score_threshold):\n print(\"__________________________________________________________\")\n print(\"2. 
TASK 2: FEATURE ENGINEERING AND SEGMENTATION ...\")\n\n payment_threshold, score_threshold = float(payment_threshold), float(score_threshold)\n start_time = time.perf_counter() # Start the timer\n \n df = scored_df\n \n # Feature: Indicator if customer's total purchase is above the payment threshold\n df['HighSpender'] = (df['TotalPurchaseAmount'] > payment_threshold).astype(int)\n \n # Feature: Average time between purchases\n df['AverageTimeBetweenPurchases'] = df['TotalPurchaseTime'] / df['NumberOfPurchases']\n \n # Additional computationally intensive features\n df['Interaction1'] = df['TotalPurchaseAmount'] * df['NumberOfPurchases']\n df['Interaction2'] = df['TotalPurchaseTime'] * df['CUSTOMER_SCORE']\n df['PolynomialFeature'] = df['TotalPurchaseAmount'] ** 2\n \n # Segment customers based on the score_threshold\n df['ValueSegment'] = ['High Value' if score > score_threshold else 'Low Value' for score in df['CUSTOMER_SCORE']]\n \n end_time = time.perf_counter() # Stop the timer\n execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds\n print(f\"Time of Execution: {execution_time:.4f} ms\")\n \n return df\n\ndef task3(df: pd.DataFrame, metric):\n print(\"__________________________________________________________\")\n print(\"3. TASK 3: SEGMENT ANALYSIS ...\")\n start_time = time.perf_counter() # Start the timer\n \n # Detailed analysis for each segment: mean/median of various metrics\n segment_analysis = df.groupby('ValueSegment').agg({\n 'CUSTOMER_SCORE': metric,\n 'TotalPurchaseAmount': metric,\n 'NumberOfPurchases': metric,\n 'TotalPurchaseTime': metric,\n 'HighSpender': 'sum', # Total number of high spenders in each segment\n 'AverageTimeBetweenPurchases': metric\n }).reset_index()\n \n end_time = time.perf_counter() # Stop the timer\n execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds\n print(f\"Time of Execution: {execution_time:.4f} ms\")\n \n return segment_analysis\n\ndef task4(df: pd.DataFrame, segment_analysis: pd.DataFrame, summary_statistic_type: str):\n print(\"__________________________________________________________\")\n print(\"4. 
TASK 4: ADDITIONAL ANALYSIS BASED ON SEGMENT ANALYSIS ...\")\n start_time = time.perf_counter() # Start the timer\n\n # Filter out the High Value customers\n high_value_customers = df[df['ValueSegment'] == 'High Value']\n \n # Use summary_statistic_type to calculate different types of summary statistics\n if summary_statistic_type == 'mean':\n average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].mean()\n elif summary_statistic_type == 'median':\n average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].median()\n elif summary_statistic_type == 'max':\n average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].max()\n elif summary_statistic_type == 'min':\n average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].min()\n \n median_score_high_value = high_value_customers['CUSTOMER_SCORE'].median()\n \n # Fetch the summary statistic for 'TotalPurchaseAmount' for High Value customers from segment_analysis\n segment_statistic_high_value = segment_analysis.loc[segment_analysis['ValueSegment'] == 'High Value', 'TotalPurchaseAmount'].values[0]\n\n # Create a DataFrame to hold the results\n result_df = pd.DataFrame({\n 'SummaryStatisticType': [summary_statistic_type],\n 'AveragePurchaseHighValue': [average_purchase_high_value],\n 'MedianScoreHighValue': [median_score_high_value],\n 'SegmentAnalysisHighValue': [segment_statistic_high_value]\n })\n\n end_time = time.perf_counter() # Stop the timer\n execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds\n print(f\"Time of Execution: {execution_time:.4f} ms\")\n\n return result_df\n\n\n\n\nif __name__ == \"__main__\":\n\n t1 = task1(\"data/SMALL_amazon_customers_data.csv\")\n\n t2 = task2(t1, 1500, 1.5)\n\n t3 = task3(t2, \"mean\")\n\n t4 = task4(t2, t3, \"mean\")\n\n\n\n print(t4)\n\n"} {"text": "# Create app for demo-image-classification-part-2 readme.md\n# Image Classification Part 2 Using Taipy Core\n\n## Usage\n- [Usage](#usage)\n- [Image Classification Part 2](#what-is-image-classification-part-2)\n- [Directory Structure](#directory-structure)\n- [License](#license)\n- [Installation](#installation)\n- [Contributing](#contributing)\n- [Code of conduct](#code-of-conduct)\n\n## What is Image Classification Part 2\n\nTaipy is a Python library for creating Business Applications. More information on our\n[website](https://www.taipy.io).\n\n[Image Classification Part 2](https://github.com/Avaiga/image-classification-part-2) is about how to use Taipy Core and Taipy Studio to efficiently create and manage Data and ML pipelines. \n\n### Demo Type\n- **Level**: Intermediate\n- **Topic**: Taipy-CORE\n- **Components/Controls**: \n - Taipy CORE: configs, Taipy Studio\n\n## How to run\n\nThis demo works with Python 3.8 or newer. Install the dependencies from the *Pipfile* and run *main.py*.\n\n## Introduction\nThis demo is the second part of the Image Classification App built with Taipy and TensorFlow; it is recommended to watch the first part or go through its repo to understand the main functions and tasks. \n\nThe YouTube video covers using Taipy Core and Taipy Studio to build pipelines and manage different scenarios. The demo covers copying the necessary functions into a script file, configuring data nodes, specifying functions for tasks, configuring pipelines, and executing the scenario with Taipy Studio and/or Taipy Core. 
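As a rough illustration of that workflow, here is a minimal, self-contained sketch of the Taipy Core pattern: configure data nodes, wrap a function in a task, group tasks into a pipeline and a scenario, then submit it. The `double` function and the `input_nb`/`output_nb` node names are illustrative placeholders, not part of the demo; the real configurations live in the demo's `main.py`.

```python
import taipy as tp
from taipy import Config


def double(nb: int) -> int:
    # Placeholder task function; the demo wires in its own model-building,
    # training, and prediction functions instead.
    return nb * 2


# Data node configs: one input with a default value, one output produced by the task
input_cfg = Config.configure_data_node("input_nb", default_data=21)
output_cfg = Config.configure_data_node("output_nb")

# Task config: the function plus its input and output data nodes
double_task_cfg = Config.configure_task("double_task", double, input_cfg, output_cfg)

# Pipeline and scenario configs built from the task
pipeline_cfg = Config.configure_pipeline("sketch_pipeline", [double_task_cfg])
scenario_cfg = Config.configure_scenario("sketch_scenario", [pipeline_cfg])

if __name__ == "__main__":
    tp.Core().run()                              # start the Taipy Core service
    scenario = tp.create_scenario(scenario_cfg)  # instantiate the scenario
    tp.submit(scenario)                          # execute its pipelines
```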
\n\n\n## Directory Structure\n\n\n- `src/`: Contains the demo source code.\n- `docs/`: contains the images for the documentation\n- `CODE_OF_CONDUCT.md`: Code of conduct for members and contributors of _image-classification-part-2_.\n- `CONTRIBUTING.md`: Instructions to contribute to _image-classification-part-2_.\n- `INSTALLATION.md`: Instructions to install _image-classification-part-2_.\n- `LICENSE`: The Apache 2.0 License.\n- `Pipfile`: File used by the Pipenv virtual environment to manage project dependencies.\n- `README.md`: Current file.\n\n## License\nCopyright 2022 Avaiga Private Limited\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n[http://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\n## Installation\n\nWant to install _image-classification-part-2_? Check out our [`INSTALLATION.md`](INSTALLATION.md) file.\n\n## Contributing\n\nWant to help build _image-classification-part-2_? Check out our [`CONTRIBUTING.md`](CONTRIBUTING.md) file.\n\n## Code of conduct\n\nWant to be part of the _image-classification-part-2_ community? Check out our [`CODE_OF_CONDUCT.md`](CODE_OF_CONDUCT.md) file."} {"text": "# Create app for demo-image-classification-part-2 config_from_tp_studio.py\nfrom main_functions import *\nfrom taipy import Config\nimport taipy as tp \n\nConfig.load('built_with_tp_studio.toml')\nscenario_cfg = Config.scenarios['testing_scenario']\n\ntp.Core().run()\nmain_scenario = tp.create_scenario(scenario_cfg)\ntp.submit(main_scenario)\n\n\n\n\n\n\n\n"} {"text": "# Create app for demo-image-classification-part-2 main_functions.py\nimport tensorflow as tf\nfrom tensorflow.keras import layers, models \nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator \nfrom tensorflow.keras.utils import to_categorical \nimport pandas as pd \nimport numpy as np\n\nclass_names = ['AIRPLANE', 'AUTOMOBILE', 'BIRD', 'CAT', 'DEER', 'DOG', 'FROG', 'HORSE', 'SHIP', 'TRUCK']\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n\nx_train = x_train / 255.0\ny_train = to_categorical(y_train, len(class_names))\nx_test = x_test / 255.0\ny_test = to_categorical(y_test, len(class_names))\n\ndef tf_read(path: str): return tf.keras.models.load_model(path)\ndef tf_write(model, path: str):model.save(path)\n\n#Task 1.1: Building the base model\ndef initialize_model(loss_f):\n # Creating model base\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))\n model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))\n model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(128, activation='relu'))\n 
model.add(layers.Dense(10, activation='softmax'))\n\n model.compile(optimizer='adam',\n loss=loss_f,\n metrics=['accuracy'])\n\n return model\n\n#Task 1.2: Initial training witha fixed number of epochs\ndatagen = ImageDataGenerator(\n horizontal_flip=True,\n width_shift_range=3/32,\n height_shift_range=3/32\n)\n\ndef initial_model_training(n_epochs, model):\n print(\"INITIAL MODEL TRAINING STARTED: \")\n\n h = model.fit(\n datagen.flow(x_train, y_train, batch_size=64),\n epochs=n_epochs,\n validation_data=(x_test, y_test))\n\n training_result = pd.DataFrame.from_dict(h.history)\n training_result[\"N_Epochs\"] = range(1,len(training_result)+1)\n \n return training_result, model\n\n#Task 2.1: Merge train with a chosen number of epochs (training + validation set as training)\ndef merged_train(number_of_epochs,model):\n print(\"MERGED TRAIN STARTED: \")\n # merge the training and validation sets\n x_all = np.concatenate((x_train, x_test))\n y_all = np.concatenate((y_train, y_test))\n\n h = model.fit(\n datagen.flow(x_all, y_all, batch_size=64),\n epochs=number_of_epochs)\n \n training_result = pd.DataFrame.from_dict(h.history)\n training_result[\"N_Epochs\"] = range(1,len(training_result)+1)\n \n return training_result, model\n\n#Task 2.2: Predict image class\ndef predict_image(image_path, trained_model):\n print(\"PREDICTION TASK STARTED: \")\n img_array = tf.keras.utils.load_img(image_path, target_size=(32, 32))\n image = tf.keras.utils.img_to_array(img_array) \n image = np.expand_dims(image, axis=0) / 255. \n prediction_result = class_names[np.argmax(trained_model.predict(image))]\n print(\"Prediction result: {}\".format(prediction_result))\n return prediction_result\n\n"} {"text": "# Create app for demo-image-classification-part-2 main.py\nfrom main_functions import *\nfrom taipy import Config\nimport taipy as tp \n\n#######################################################################################################\n##############################################PIPELINE 1###############################################\n#######################################################################################################\n\n###TASK 1.1: Building the base model\n#input dn\nloss_fn_cfg = Config.configure_data_node(\"loss_fn\", default_data='categorical_crossentropy')\n#output dn\nbase_model_cfg = Config.configure_generic_data_node(\"base_model\", \n read_fct=tf_read, read_fct_params=('models/base_model',),\n write_fct=tf_write, write_fct_params=('models/base_model',))\n#task\nBUILD_CNN_BASE_cfg = Config.configure_task(\"BUILD_CNN_BASE\",\n initialize_model,\n loss_fn_cfg,\n base_model_cfg)\n\n###TASK 1.2: Initial training with a fixed number of epochs\n#input dn\ninitial_n_epochs_cfg = Config.configure_data_node(\"initial_n_epochs\", default_data=30)\n#output dn\ninitial_train_perf_cfg = Config.configure_data_node(\"initial_train_perf\")\ntrained_initial_model_cfg = Config.configure_generic_data_node(\"trained_initial_model\", \n read_fct=tf_read, read_fct_params=('models/trained_initial_model',),\n write_fct=tf_write, write_fct_params=('models/trained_initial_model',))\n#task\nINITIAL_TRAIN_cfg = Config.configure_task(\"INITIAL_TRAIN\",\n initial_model_training,\n [initial_n_epochs_cfg, base_model_cfg],\n [initial_train_perf_cfg, trained_initial_model_cfg])\n#pipeline\npipeline_1_cfg = Config.configure_pipeline(\"pipeline_1\",\n [BUILD_CNN_BASE_cfg,\n 
INITIAL_TRAIN_cfg])\n\n#######################################################################################################\n##############################################PIPELINE 2###############################################\n#######################################################################################################\n\n###TASK 2.1: Merge train with a chosen number of epochs (training + validation set as training)\n#input dn\noptimal_n_epochs_cfg = Config.configure_data_node(\"optimal_n_epochs\", default_data=13)\n#output dn\nmerged_train_perf_cfg = Config.configure_data_node(\"merged_train_perf\")\nmerged_trained_model_cfg = Config.configure_generic_data_node(\"merged_trained_model\", \n read_fct=tf_read, read_fct_params=('models/merged_trained_model',),\n write_fct=tf_write, write_fct_params=('models/merged_trained_model',))\n#task\nMERGED_TRAIN_cfg = Config.configure_task(\"MERGED_TRAIN\",\n merged_train,\n [optimal_n_epochs_cfg, base_model_cfg],\n [merged_train_perf_cfg, merged_trained_model_cfg])\n\n\n###TASK 2.2: Make a prediction from an image path\n#input dn: the trained model datanode, already set up\nimage_path_dn_cfg = Config.configure_data_node(\"image_path_dn\", default_data=\"test_images/dog.jpg\") \n#output dn\nprediction_cfg = Config.configure_data_node(\"image_prediction\")\n#task\nIMAGE_PREDICT_cfg = Config.configure_task(\"IMAGE_PREDICT\", predict_image,\n [image_path_dn_cfg, merged_trained_model_cfg],\n [prediction_cfg])\n#pipeline\npipeline_2_cfg = Config.configure_pipeline(\"pipeline_2\",\n [MERGED_TRAIN_cfg, \n IMAGE_PREDICT_cfg])\n\n\n#######################################################################################################\n##############################################Scenario#################################################\n#######################################################################################################\nscenario_cfg = Config.configure_scenario(\"testing_scenario\",\n [pipeline_1_cfg, pipeline_2_cfg])\n\ntp.Core().run()\nmain_scenario = tp.create_scenario(scenario_cfg)\ntp.submit(main_scenario)\nConfig.export(\"tpcore.toml\")\n\n\n\n\n\n"} {"text": "# Create app for demo-edit-log LICENSE.md\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2022 Avaiga Private Limited\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"} {"text": "# Create app for demo-edit-log main.py\nfrom taipy.gui import Gui\nimport taipy as tp\nfrom taipy.gui import notify\nfrom config.config import *\n\n\n# Variables for bindings\nall_scenarios = [] # List of scenarios\nall_scenarios_configs = [] # List of scenario configs\nall_data_nodes = [] # List of node IDs\n\ncurrent_scenario = None\ncurrent_data_node = None\ncurrent_scenario_config = None\n\nscenario_name = None\nedits = [[\"\",\"\",\"\"]]\n\nvalue = None\ncommit_message = \"\"\ncreate_scenario_dialog_visible = False\nset_value_dialog_visible = False\n\n\n# ====================================================================\n\n\ndef on_init(state):\n state.all_scenarios = [(sc.id, sc.name) for sc in tp.get_scenarios()]\n state.all_scenarios_configs = [sc.id for sc in Config.scenarios.values()]\n\n\ndef on_change_current_scenario(state):\n scenario = tp.get(state.current_scenario[0])\n # Propagate to list of nodes:\n state.all_data_nodes = [(dn.id, dn.config_id) for dn in scenario.data_nodes.values()]\n\ndef on_change(state, var_name: str, var_value):\n if var_name == \"all_data_nodes\":\n # Propagate to current data node (pick any...):\n if var_value and len(var_value) > 0:\n data_node = next(iter(var_value))\n state.current_data_node = data_node\n if var_name == \"current_data_node\":\n # Propagate to list of edits:\n refresh_edit_log(state)\n\n\ndef refresh_edit_log(state):\n # Forces a refresh of the edit log:\n if state.current_data_node:\n data_node_id = state.current_data_node[0]\n data_node = tp.get(data_node_id)\n state.edits = get_edit_log(data_node) if data_node else []\n\n\ndef create_scenario_clicked(state):\n state.scenario_name = None\n state.create_scenario_dialog_visible = True\n\n\ndef get_edit_log(data_node):\n def _get_edit_fields(edit):\n return [str(edit.get(\"timestamp\")), edit.get(\"job_id\"), edit.get(\"message\")]\n\n return [_get_edit_fields(edit) for edit in data_node.edits] if data_node else []\n\n\ndef on_submit_button_clicked(state):\n scenario_id = state.current_scenario[0]\n scenario = tp.get(scenario_id)\n tp.submit(scenario)\n # Force refresh of current data node:\n refresh_edit_log(state)\n notify(state, message=f\"Scenario {scenario.name} submitted!\")\n\n\ndef on_set_value_clicked(state):\n state.set_value_dialog_visible = True\n\n\ndef create_scenario_dialog_action(state, id, action, payload):\n state.create_scenario_dialog_visible = False\n btn_idx = payload[\"args\"][0]\n if btn_idx == 0: # OK button\n scenario_cfg = Config.scenarios[state.current_scenario_config]\n name = state.scenario_name\n scenario = tp.create_scenario(config=scenario_cfg, name=name)\n all_scenarios = state.all_scenarios\n all_scenarios.append((scenario.id, scenario.name))\n 
state.all_scenarios = all_scenarios\n notify(state, message=f\"Scenario {scenario.name} created!\")\n\n\ndef set_value_dialog_action(state, id, action, payload):\n btn_idx = payload[\"args\"][0]\n if btn_idx == 0: # OK button\n data_node_id = state.current_data_node[0]\n node = tp.get(data_node_id)\n node.write(state.value, message=state.commit_message)\n state.current_data_node = state.current_data_node\n\n state.set_value_dialog_visible = False\n\n\nhistory_table_columns = {\n \"0\": {\"title\": \"Date\"},\n \"1\": {\"title\": \"Job Id\"},\n \"2\": {\"title\": \"Comments\"},\n}\n\n\nscenario_manager_page = \"\"\"\n<|part|class_name=card|\n## Data Node Selection\n<|{current_scenario}|selector|lov={all_scenarios}|dropdown|label=|>\n\n<|Create New Scenario...|button|on_action=create_scenario_clicked|>\n<|Run Scenario|button|active={current_scenario is not None}|on_action=on_submit_button_clicked|>\n|>\n\n<|part|class_name=card|\n## Data Node Edit Log\n<|{edits}|table|columns={history_table_columns}|width=50vw|>\n<|Refresh|button|on_action=refresh_edit_log|>\n<|Set value...|button|active={len(edits) > 0}|on_action=on_set_value_clicked|>\n|>\n\n<|{create_scenario_dialog_visible}|dialog|title=Create Scenario|labels=OK;Cancel|on_action=create_scenario_dialog_action|\n\nSelect a scenario config:\n<|{current_scenario_config}|selector|dropdown|lov={all_scenarios_configs}|>\n\nEnter a name for your scenario:\n\n<|{scenario_name}|input|change_delay=10|>\n|>\n\n\n<|{set_value_dialog_visible}|dialog|title=Set value|labels=OK;Cancel|change_delay=10|on_action=set_value_dialog_action|\n<|{value}|input|label=Enter a value|>\n\n<|Optional commit message|expandable|expanded=False|\n<|{commit_message}|input|>\n|>\n|>\n\"\"\"\n\n\nif __name__ == \"__main__\":\n gui = Gui(page=scenario_manager_page)\n core = tp.Core()\n tp.run(core, gui, port=8080, dark_mode=False)\n"} {"text": "# Create app for demo-edit-log config.py\nfrom algos.algos import task_function \nfrom taipy import Config\n\nConfig.configure_job_executions(mode=\"standalone\", max_nb_of_workers=1)\n\nnode_start_cfg = Config.configure_data_node(\n id=\"node_start\", default_data=[1, 2], description=\"This is the initial data node.\"\n)\nnode_end_cfg = Config.configure_data_node(id=\"node_end\", description=\"This is the result data node.\")\ntask_cfg = Config.configure_task(id=\"task\", input=[node_start_cfg], output=node_end_cfg, function=task_function)\npipeline_cfg = Config.configure_pipeline(id=\"pipeline\", task_configs=[task_cfg])\nConfig.configure_scenario(\"My_super_scenario\", [pipeline_cfg])\n"} {"text": "# Create app for demo-edit-log algos.py\ndef task_function(data):\n \"\"\"A dummy task function\"\"\"\n print(f\"Executing function: {data}\")\n return data\n"} {"text": "# Create app for demo-face-recognition LICENSE.md\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2022 Avaiga Private Limited\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"} {"text": "# Create app for demo-face-recognition find_taipy_gui_dir.py\n# This Python script tries to locate the taipy.gui package, and\n# prints its absolute path if it finds it.\nimport importlib.util\nimport os\n\ntaipy_gui = importlib.util.find_spec(\"taipy.gui\")\nif taipy_gui is None:\n print(\"Cannot find 'taipy.gui'\\nPlease run 'pip install taipy-gui'.\")\nelse:\n print(f\"Taipy GUI location: {os.path.dirname(taipy_gui.origin)}\")\n"} {"text": "# Create app for demo-face-recognition GETTING_STARTED.md\n# Getting Started\n\n## Installation\nFirst you need to install the dependencies and build the front-end. Please refer to [INSTALLATION.md](INSTALLATION.md).\n\n\n## How to use the demo\n\nOnce you started the application, your default Web browser should open automatically. If not, open this URL: [http://127.0.0.1:9090](http://127.0.0.1:9090).\nThe browser should ask you for the authorization to use the camera. Press \"Allow\".\n

\n \"drawing\"\n

\n\n\nOnce allowed, your camera should activate and you will see a live view of the video. Notice that your face should already be detected and the label \"None\" is displayed. This is because the application does not know you yet.\n\n

\n \"drawing\"\n

\n\nTo train the app to recognize your face, press the \"Capture\" button. This will show a dialog with the captured image. Enter a name for that face and press \"Validate\".\nThe more training examples, the better, so add a few more captured images of your face.\n\nNote that the case of the given name is important, so always use the same name for each captured image.\nExample: \"Greg\" and \"greg\" will be considered two different names.\n\n

\n \"drawing\"\n

\n\nAfter, say, 6 different images, you can ask the system to learn from them by pressing the \"Re-train\" button.\nDepending on the number of images to process, this can take from a second to a dozen seconds.\n\nThe application will then be able to recognize the new face, and the name should be displayed on screen!\n\n

\n \"drawing\"\n

\n"} {"text": "# Create app for demo-face-recognition main.py\nfrom taipy.gui import Gui\nfrom webcam import Webcam\nimport cv2\n\nimport PIL.Image\nimport io\n\nimport logging\nimport uuid\nfrom pathlib import Path\nfrom demo.faces import detect_faces, recognize_face, train_face_recognizer\n\n\nlogging.basicConfig(level=logging.DEBUG)\n\ntraining_data_folder = Path(\"images\")\n\nshow_capture_dialog = False\ncapture_image = False\nshow_add_captured_images_dialog = False\n\nlabeled_faces = [] # Contains rect with label (for UI component)\n\ncaptured_image = None\ncaptured_label = \"\"\n\n\ndef on_action_captured_image(state, id, action, payload):\n print(\"Captured image\")\n choice = payload[\"args\"][0]\n if choice == 0:\n # Add image to training data:\n img = state.captured_image\n file_name = str(uuid.uuid4()) + \".jpg\"\n label = state.captured_label\n image_path = Path(training_data_folder, file_name)\n with image_path.open(\"wb\") as f:\n f.write(img)\n label_file_path = Path(training_data_folder, \"data.csv\")\n with label_file_path.open(\"a\") as f:\n f.write(f\"{file_name},{label}\\n\")\n\n state.captured_image = None\n state.captured_label = \"\"\n state.show_capture_dialog = False\n\n\ndef process_image(state, frame):\n print(\"Processing image...\")\n found = detect_faces(frame)\n\n labeled_images = []\n for rect, img in found:\n (label, _) = recognize_face(img)\n labeled_images.append((img, rect, label))\n\n # Return this to the UI component so that it can display a rect around recognized faces:\n state.labeled_faces = [str([*rect, label]) for (_, rect, label) in labeled_images]\n\n # Capture image (actually we consider only the first detected face)\n if state.capture_image and len(labeled_images) > 0:\n img = labeled_images[0][0]\n label = labeled_images[0][2]\n state.captured_image = cv2.imencode(\".jpg\", img)[1].tobytes()\n state.captured_label = label\n state.show_capture_dialog = True\n state.capture_image = False\n\n\ndef handle_image(state, action, args, value):\n print(\"Handling image...\")\n payload = value[\"args\"][0]\n bytes = payload[\"data\"]\n logging.debug(f\"Received data: {len(bytes)}\")\n\n temp_path = \"temp.png\"\n\n # Write Data into temp file (OpenCV is unable to load from memory)\n image = PIL.Image.open(io.BytesIO(bytes))\n image.save(temp_path)\n # Load image file\n try:\n img = cv2.imread(temp_path, cv2.IMREAD_UNCHANGED)\n except cv2.error as e:\n logging.error(f\"Failed to read image file: {e}\")\n return\n process_image(state, img)\n # Finish. 
Tempfile is removed.\n\n\ndef button_retrain_clicked(state):\n print(\"Retraining...\")\n train_face_recognizer(training_data_folder)\n\n\nwebcam_md = \"\"\"<|toggle|theme|>\n\n\n\n\n\n<|Capture|button|on_action={lambda s: s.assign(\"capture_image\", True)}|>\n<|RE-train|button|on_action=button_retrain_clicked|>\n>\n|card>\n|container>\n\n\n<|{show_capture_dialog}|dialog|labels=Validate;Cancel|on_action=on_action_captured_image|title=Add new training image|\n<|{captured_image}|image|width=300px|height=300px|>\n\n<|{captured_label}|input|>\n|>\n\"\"\"\n\nif __name__ == \"__main__\":\n # Create dir where the pictures will be stored\n if not training_data_folder.exists():\n training_data_folder.mkdir()\n\n train_face_recognizer(training_data_folder)\n\n gui = Gui(webcam_md)\n gui.add_library(Webcam())\n gui.run(port=9090)"} {"text": "# Create app for demo-face-recognition faces.py\nimport cv2\nfrom pathlib import Path\nimport os\nimport numpy as np\nimport logging\nfrom .image import crop_image\nimport pandas as pd\n\nlogging.basicConfig(level=logging.DEBUG)\n\n# Create our face detector. Both HAAR and LBP classifiers are somehow equivelent and both give good results.\n# Up to you to choose one or the other.\nface_detector = cv2.CascadeClassifier(\"classifiers/haarcascade_frontalface_default.xml\")\n# face_cascade = cv2.CascadeClassifier(\"classifiers/lbpcascade_frontalface_improved.xml\")\n\n# Create our face recognizer\nface_recognizer = cv2.face.LBPHFaceRecognizer_create()\n\n# The subjects that can be recognized\nsubjects = {}\n\nFACE_DETECTOR_SCALE_FACTOR = 1.1\nFACE_DETECTOR_MIN_NEIGHBORS = 5\n\n\ndef detect_faces(image):\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n detected_faces = face_detector.detectMultiScale(\n gray_image,\n scaleFactor=FACE_DETECTOR_SCALE_FACTOR,\n minNeighbors=FACE_DETECTOR_MIN_NEIGHBORS,\n )\n if len(detected_faces) == 0:\n return []\n\n return [(rect, crop_image(image, rect)) for rect in detected_faces]\n\n\ndef recognize_face(image):\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n if len(subjects) == 0:\n # No subject, the model hasn't been trained, let's do nothing.\n return (None, None)\n try:\n face = face_recognizer.predict(gray_image)\n except Exception:\n logging.warning(\"Could not run face recognizer\", exc_info=True)\n # Return the name of the recognize subject and the confident level\n return (subjects[face[0]], face[1])\n\n\ndef train_face_recognizer(training_data_directory=\"images\"):\n data_file_path = Path(training_data_directory, \"data.csv\")\n if not data_file_path.exists():\n # Create file\n with data_file_path.open(\"w\") as f:\n f.write(\"image,label\\n\")\n\n # Load file as CSV file\n data = pd.read_csv(data_file_path, delimiter=\",\", header=0).to_numpy()\n\n # Subjects that can be recognized from these data:\n identified_subjects = np.unique(data[:, 1])\n global subjects\n\n if len(identified_subjects) == 0:\n # No subject... 
We stop here\n subjects = {}\n return\n else:\n # Update subjects (persons who can be recognized)\n subjects = {e[0]: e[1] for e in enumerate(identified_subjects)}\n\n # Prepare training data\n faces, labels = [], []\n for row in data:\n file_name = row[0]\n label = np.where(identified_subjects == row[1])[0][0]\n file_path = Path(training_data_directory, file_name)\n if os.path.exists(file_path):\n img = cv2.imread(str(file_path), cv2.IMREAD_GRAYSCALE)\n faces.append(img)\n labels.append(label)\n\n # Run training!\n logging.debug(f\"Run training for {subjects}...\")\n face_recognizer.train(faces, np.array(labels))\n"} {"text": "# Create app for demo-face-recognition __init__.py\n"} {"text": "# Create app for demo-face-recognition image.py\ndef crop_image(img, rect):\n \"\"\"An utility function to crop an image to the given rect\"\"\"\n x, y, w, h = rect\n return img[y : y + h, x : x + w]\n"} {"text": "# Create app for demo-face-recognition __init__.py\nfrom .webcam import Webcam\n"} {"text": "# Create app for demo-face-recognition webcam.py\nfrom taipy.gui.extension import ElementLibrary, Element, ElementProperty, PropertyType\n\n\nclass Webcam(ElementLibrary):\n def get_name(self) -> str:\n return \"webcam\"\n\n def get_elements(self) -> dict:\n return {\n \"Webcam\": Element(\n \"faces\",\n {\n \"faces\": ElementProperty(PropertyType.dynamic_list),\n \"id\": ElementProperty(PropertyType.string),\n \"classname\": ElementProperty(PropertyType.dynamic_string),\n \"on_data_receive\": ElementProperty(PropertyType.string),\n \"sampling_rate\": ElementProperty(PropertyType.number),\n },\n react_component=\"Webcam\",\n )\n }\n\n def get_scripts(self) -> list[str]:\n return [\"webui/dist/webcam.js\"]\n"} {"text": "# Create app for demo-taipy-gui-starter-2 main.py\nfrom taipy.gui import Gui, notify\nimport pandas as pd\nimport webbrowser\nimport datetime\nimport os\n\nDOWNLOAD_PATH = \"data/download.csv\"\nupload_file = None\n\nsection_1 = \"\"\"\n
\n<|navbar|lov={[(\"page1\", \"This Page\"), (\"https://docs.taipy.io/en/latest/manuals/about/\", \"Taipy Docs\"), (\"https://docs.taipy.io/en/latest/getting_started/\", \"Getting Started\")]}|>\n
\n\nData Dashboard with Taipy\n=========================\n<|layout|columns=1 3|\n<|\n### Let's create a simple Data Dashboard!\n
\n
\n <|{upload_file}|file_selector|label=Upload Dataset|>\n
\n|>\n<|\n
\n <|{logo}|image|height=250px|width=250px|on_action=image_action|>\n
\n|>\n|>\n\"\"\"\n\nsection_2 = \"\"\"\n## Data Visualization\n<|{dataset}|chart|mode=lines|x=Date|y[1]=MinTemp|y[2]=MaxTemp|color[1]=blue|color[2]=red|>\n\"\"\"\n\nsection_3 = \"\"\"\n<|layout|columns= 1 5|\n<|\n## Custom Parameters\n**Starting Date**\\n\\n<|{start_date}|date|not with_time|on_change=start_date_onchange|>\n

\n**Ending Date**\\n\\n<|{end_date}|date|not with_time|on_change=end_date_onchange|>\n
\n
\n<|button|label=GO|on_action=button_action|>\n|>\n<|\n
\n

Dataset

<|{DOWNLOAD_PATH}|file_download|on_action=download|>\n <|{dataset}|table|page_size=10|height=500px|width=65%|>\n
\n|>\n|>\n\"\"\"\ndef image_action(state):\n webbrowser.open(\"https://taipy.io\")\n\ndef get_data(path: str):\n dataset = pd.read_csv(path)\n dataset[\"Date\"] = pd.to_datetime(dataset[\"Date\"]).dt.date\n return dataset\n\ndef start_date_onchange(state, var_name, value):\n state.start_date = value.date()\n\ndef end_date_onchange(state, var_name, value):\n state.end_date = value.date()\n\ndef filter_by_date_range(dataset, start_date, end_date):\n mask = (dataset['Date'] > start_date) & (dataset['Date'] <= end_date)\n return dataset.loc[mask]\n\ndef button_action(state):\n state.dataset = filter_by_date_range(dataset, state.start_date, state.end_date)\n notify(state, \"info\", \"Updated date range from {} to {}.\".format(state.start_date.strftime(\"%m/%d/%Y\"), state.end_date.strftime(\"%m/%d/%Y\")))\n\ndef download(state):\n state.dataset.to_csv(DOWNLOAD_PATH)\n\nlogo = \"images/taipy_logo.jpg\"\ndataset = get_data(\"data/weather.csv\")\nstart_date = datetime.date(2008, 12, 1)\nend_date = datetime.date(2017, 6, 25)\n\ngui = Gui(page=section_1+section_2+section_3)\n\nif __name__ == '__main__':\n # the options in the gui.run() are optional, try without them\n gui.run(title='Taipy Demo GUI 2',\n \t\thost='0.0.0.0',\n \t\tport=os.environ.get('PORT', '5050'),\n \t\tdark_mode=False)\nelse:\n app = gui.run(title='Taipy Demo GUI 2',\n dark_mode=False,\n run_server=False)"} {"text": "# Create app for demo-production-planning main.py\nfrom pages.compare_cycles_md import *\nimport taipy as tp\nimport os\nimport pandas as pd\nfrom taipy.gui import Gui\n\n\nif __name__ == \"__main__\":\n tp.Core().run()\n\n if len(tp.get_scenarios())==0:\n cc_create_scenarios_for_cycle()\n\n\nfrom pages.compare_scenario_md import *\nfrom pages.databases_md import *\nfrom pages.data_visualization_md import *\nfrom pages.shared import *\nfrom pages.scenario_manager.scenario_manager_md import *\n\n\ndef create_chart(sm_results: pd.DataFrame, var: str):\n \"\"\"Functions that create/update the chart table visible in the \"Databases\" page. This\n function is used in the \"on_change\" function to change the chart when the graph selected is changed.\n\n Args:\n sm_results (pd.DataFrame): the results database that comes from the state\n var (str): the string that has to be found in the columns that are going to be used to create the chart table\n\n Returns:\n pd.DataFrame: the chart with the proper columns\n \"\"\"\n if var == 'Cost':\n columns = ['index'] + [col for col in sm_results.columns if var in col]\n else:\n columns = ['index'] + [col for col in sm_results.columns if var in col and 'Cost' not in col]\n\n chart = sm_results[columns]\n return chart\n\n\ndef on_change(state, var_name, var_value):\n \"\"\"This function is called whener a change in the state variables is done. 
When a change is seen, operations can be created\n depending on the variable changed\n Args:\n state (State): the state object of Taipy\n var_name (str): the changed variable name\n var_value (obj): the changed variable value\n \"\"\"\n # if the changed variable is the scenario selected\n if var_name == \"selected_scenario\" and var_value:\n if state.selected_scenario.results.is_ready_for_reading:\n # I update all the other useful variables\n update_variables(state)\n\n \n if var_name == 'sm_graph_selected' or var_name == \"selected_scenario\":\n # Update the chart table\n str_to_select_chart = None\n chart_mapping = {\n 'Costs': 'Cost',\n 'Purchases': 'Purchase',\n 'Productions': 'Production',\n 'Stocks': 'Stock',\n 'Back Order': 'BO',\n 'Product FPA': 'FPA',\n 'Product FPB': 'FPB',\n 'Product RP1': 'RP1',\n 'Product RP2': 'RP2'\n }\n\n str_to_select_chart = chart_mapping.get(state.sm_graph_selected)\n state.chart = create_chart(state.sm_results, str_to_select_chart)\n\n # If we are on the 'Databases' page, we have to create a temp CSV file\n if state.page == 'Databases':\n state.d_chart_csv_path = PATH_TO_TABLE\n state.chart.to_csv(state.d_chart_csv_path, sep=',')\n\n\ndef on_init(state):\n state.state_id = str(os.urandom(32))\n update_scenario_selector(state)\n\n\npages = {\"/\": Markdown('pages/shared.md'),\n \"Data-Visualization\":da_data_visualisation_md,\n \"Scenario-Manager\":sm_scenario_manager_md,\n \"Compare-Scenarios\":cs_compare_scenario_md,\n \"Compare-Cycles\":cc_compare_cycles_md,\n 'Databases':da_databases_md\n }\n\nif __name__ == \"__main__\":\n gui = Gui(pages=pages)\n gui.run(title=\"Production planning\")\n"} {"text": "# Create app for demo-production-planning config.py\nfrom taipy import Config, Frequency\nimport json\nfrom algos.algos import *\n\n# This code produces scenario_cfg and configures our graph of execution\n\n###############################################################################\n# Data nodes\n###############################################################################\n\n# we create our first datanode, the source is csv file\npath_to_demand = 'data/time_series_demand.csv'\ndemand_cfg = Config.configure_data_node(id=\"demand\",\n storage_type=\"csv\", \n path=path_to_demand,\n has_header=True)\n\nwith open('data/fixed_variables_default.json') as f:\n fixed_variables_default = json.load(f)\n\n# creation of our second datanode that will have as a default data our fixed_variables_default\n# this is this datanode that we will write on when we submit other values for fixed_variable\nfixed_variables_cfg = Config.configure_data_node(id=\"fixed_variables\", default_data = fixed_variables_default)\n\nsolver_name_cfg = Config.configure_data_node(id=\"solver_name\", default_data=\"Default\")\n\n# here are the datanodes that keep track of the model : the model_created datanode, the model_solved datanode\nmodel_created_cfg = Config.configure_data_node(id=\"model_created\")\nmodel_solved_cfg = Config.configure_data_node(id=\"model_solved\")\n\n# and this is the datanode that will be used to get our results from the main code\nresults_cfg = Config.configure_data_node(id=\"results\")\n\n###############################################################################\n# Tasks\n###############################################################################\n\n# (demand_cfg,fixed_variables_cfg) -> |create_model| -> (model_created_cfg)\ncreate_model_task = Config.configure_task(id=\"create_model\",\n input=[demand_cfg,fixed_variables_cfg],\n 
function=create_model,\n output=[model_created_cfg])\n\n# (model_created_cfg, solver_name_cfg) -> |solve_model| -> (model_solved_cfg)\nsolve_model_cfg = Config.configure_task(id=\"solve_model\",\n input=[model_created_cfg, solver_name_cfg],\n function=solve_model,\n output=[model_solved_cfg])\n\n# (model_solved_cfg,fixed_variables_cfg,demand_cfg) -> |create_results| -> (results_cfg)\ncreate_results_cfg = Config.configure_task(id=\"create_results\",\n input=[model_solved_cfg,fixed_variables_cfg,demand_cfg],\n function=create_results,\n output=[results_cfg])\n\n\n###############################################################################\n# Scenario config\n###############################################################################\n\nscenario_cfg = Config.configure_scenario(id=\"scenario\",task_configs=[create_model_task,solve_model_cfg,create_results_cfg], frequency=Frequency.MONTHLY)\n\nConfig.export(\"config/config.toml\")\n"} {"text": "# Create app for demo-production-planning algos.py\nimport pandas as pd\nimport numpy as np\nfrom pulp import *\n\n\n# This code is used for config.py\n\n###############################################################################\n# Functions\n###############################################################################\n\ndef create_model(demand: pd.DataFrame, fixed_variables: dict):\n \"\"\"This function creates the model. It will creates all the variables and contraints of the problem.\n It will also create the objective function.\n\n Args:\n demand (pd.DataFrame): demand dataframe\n fixed_variables (dict): fixed variables dictionary\n\n Returns:\n dict: model_info (with the model created)\n \"\"\"\n print(\"Creating the model...\")\n\n monthly_demand_FPA = demand[\"Demand_A\"]\n monthly_demand_FPB = demand[\"Demand_B\"]\n\n nb_periods = len(monthly_demand_FPA)\n\n # creation of the model\n prob = LpProblem(\"Production_Planning\", LpMinimize)\n\n # creation of the variables\n # for product A\n monthly_production_FPA = [\n LpVariable(f\"Monthly_Production_FPA_{m}\", 0) for m in range(nb_periods)\n ]\n monthly_stock_FPA = [\n LpVariable(f\"Monthly_Stock_FPA_{m}\", 0) for m in range(nb_periods)\n ]\n\n monthly_back_order_FPA = [\n LpVariable(f\"Monthly_Back_Order_FPA_{m}\", 0) for m in range(nb_periods)\n ]\n\n # for product B\n monthly_production_FPB = [\n LpVariable(f\"Monthly_Production_FPB_{m}\", 0) for m in range(nb_periods)\n ]\n monthly_stock_FPB = [\n LpVariable(f\"Monthly_Stock_FPB_{m}\", 0) for m in range(nb_periods)\n ]\n monthly_back_order_FPB = [\n LpVariable(f\"Monthly_Back_Order_FPB_{m}\", 0) for m in range(nb_periods)\n ]\n\n # for product 1\n monthly_purchase_RPone = [\n LpVariable(f\"Monthly_Purchase_RPone_{m}\", 0) for m in range(nb_periods)\n ]\n monthly_stock_RPone = [\n LpVariable(f\"Monthly_Stock_RPone_{m}\", 0) for m in range(nb_periods)\n ]\n monthly_stock_not_used_RPone = [\n LpVariable(f\"Monthly_Stock_not_used_RPone{m}\", 0) for m in range(nb_periods)\n ]\n monthly_stock_RPone_for_FPA = [\n LpVariable(f\"Monthly_Stock_RPone_for_FPA{m}\", 0) for m in range(nb_periods)\n ]\n monthly_stock_RPone_for_FPB = [\n LpVariable(f\"Monthly_Stock_RPone_for_FPB{m}\", 0) for m in range(nb_periods)\n ]\n\n # for product 2\n monthly_purchase_RPtwo = [\n LpVariable(f\"Monthly_Purchase_RPtwo{m}\", 0) for m in range(nb_periods)\n ]\n monthly_stock_RPtwo = [\n LpVariable(f\"Monthly_Stock_RP{m}two\", 0) for m in range(nb_periods)\n ]\n monthly_stock_not_used_RPtwo = [\n LpVariable(f\"Monthly_Stock_not_used_RPtwo{m}\", 0) for m in 
range(nb_periods)\n ]\n monthly_stock_RPtwo_for_FPA = [\n LpVariable(f\"Monthly_Stock_RPtwo_for_FPA{m}\", 0) for m in range(nb_periods)\n ]\n monthly_stock_RPtwo_for_FPB = [\n LpVariable(f\"Monthly_Stock_RPtwo_for_FPB{m}\", 0) for m in range(nb_periods)\n ]\n\n # creation of the constraints\n\n # Kirchoff's law for product A\n for m in range(1, nb_periods):\n prob += (\n monthly_production_FPA[m]\n - monthly_back_order_FPA[m - 1]\n + monthly_stock_FPA[m - 1]\n == monthly_demand_FPA[m] + monthly_stock_FPA[m] - monthly_back_order_FPA[m]\n )\n # Kirchoff's law for product B\n for m in range(1, nb_periods):\n prob += (\n monthly_production_FPB[m]\n - monthly_back_order_FPB[m - 1]\n + monthly_stock_FPB[m - 1]\n == monthly_demand_FPB[m] + monthly_stock_FPB[m] - monthly_back_order_FPB[m]\n )\n\n # Kirchoff's law for product 1\n for m in range(1, nb_periods):\n prob += (\n monthly_purchase_RPone[m - 1] + monthly_stock_not_used_RPone[m - 1]\n == monthly_stock_RPone[m]\n )\n # MS Fix for None issue\n prob += monthly_purchase_RPone[nb_periods - 1] == 0\n\n for m in range(1, nb_periods):\n prob += (\n monthly_purchase_RPtwo[m - 1] + monthly_stock_not_used_RPtwo[m - 1]\n == monthly_stock_RPtwo[m]\n )\n # MS Fix for None issue\n prob += monthly_purchase_RPtwo[nb_periods - 1] == 0\n\n for m in range(nb_periods):\n prob += monthly_production_FPA[m] <= fixed_variables[\"Max_Capacity_FPA\"]\n\n prob += monthly_production_FPA[0] == fixed_variables[\"Initial_Production_FPA\"]\n\n prob += monthly_back_order_FPA[0] == fixed_variables[\"Initial_Back_Order_FPA\"]\n\n prob += monthly_stock_FPA[0] == fixed_variables[\"Initial_Stock_FPA\"]\n\n # constraints on bill of materials for product A\n\n for m in range(1, nb_periods):\n prob += (\n monthly_production_FPA[m]\n == fixed_variables[\"number_RPone_to_produce_FPA\"]\n * monthly_stock_RPone_for_FPA[m - 1]\n + fixed_variables[\"number_RPtwo_to_produce_FPA\"]\n * monthly_stock_RPtwo_for_FPA[m - 1]\n )\n\n for m in range(nb_periods):\n prob += (\n fixed_variables[\"number_RPone_to_produce_FPA\"]\n * monthly_stock_RPone_for_FPA[m]\n == fixed_variables[\"number_RPtwo_to_produce_FPA\"]\n * monthly_stock_RPtwo_for_FPA[m]\n )\n\n # constraints on the variables : max and initial value for product A\n for m in range(nb_periods):\n prob += monthly_production_FPB[m] <= fixed_variables[\"Max_Capacity_FPB\"]\n prob += monthly_production_FPB[0] == fixed_variables[\"Initial_Production_FPB\"]\n\n prob += monthly_back_order_FPB[0] == fixed_variables[\"Initial_Back_Order_FPB\"]\n prob += monthly_stock_FPB[0] == fixed_variables[\"Initial_Stock_FPB\"]\n\n # constraints on bill of materials for product B\n\n for m in range(1, nb_periods):\n prob += (\n monthly_production_FPB[m]\n == fixed_variables[\"number_RPone_to_produce_FPB\"]\n * monthly_stock_RPone_for_FPB[m - 1]\n + fixed_variables[\"number_RPtwo_to_produce_FPB\"]\n * monthly_stock_RPtwo_for_FPB[m - 1]\n )\n\n for m in range(nb_periods):\n prob += (\n fixed_variables[\"number_RPone_to_produce_FPB\"]\n * monthly_stock_RPone_for_FPB[m]\n == fixed_variables[\"number_RPtwo_to_produce_FPB\"]\n * monthly_stock_RPtwo_for_FPB[m]\n )\n\n for m in range(nb_periods):\n prob += monthly_stock_RPone[m] <= fixed_variables[\"Max_Stock_RPone\"]\n\n prob += monthly_stock_RPone[0] == fixed_variables[\"Initial_Stock_RPone\"]\n\n prob += monthly_purchase_RPone[0] == fixed_variables[\"Initial_Purchase_RPone\"]\n\n for m in range(nb_periods):\n prob += monthly_stock_RPone[m] == (\n monthly_stock_not_used_RPone[m]\n + monthly_stock_RPone_for_FPA[m]\n + 
monthly_stock_RPone_for_FPB[m]\n )\n # constraints on the variables : max and initial value for product 1\n\n for m in range(nb_periods):\n prob += monthly_stock_RPtwo[m] <= fixed_variables[\"Max_Stock_RPtwo\"]\n\n prob += monthly_stock_RPtwo[0] == fixed_variables[\"Initial_Stock_RPtwo\"]\n\n prob += monthly_purchase_RPtwo[0] == fixed_variables[\"Initial_Purchase_RPtwo\"]\n # constraints that define what is a stock for product 1\n\n for m in range(nb_periods):\n prob += monthly_stock_RPtwo[m] == (\n monthly_stock_not_used_RPtwo[m]\n + monthly_stock_RPtwo_for_FPA[m]\n + monthly_stock_RPtwo_for_FPB[m]\n )\n # constraints on the variables : max value for product A and B (cumulative)\n\n for m in range(nb_periods):\n prob += (\n monthly_production_FPA[m] + monthly_demand_FPB[m]\n <= fixed_variables[\"Max_Capacity_of_FPA_and_FPB\"]\n )\n\n # setting the objective function\n prob += lpSum(\n fixed_variables[\"Weight_of_Back_Order\"]\n / 100\n * (\n fixed_variables[\"cost_FPA_Back_Order\"] * monthly_back_order_FPA[m]\n + fixed_variables[\"cost_FPB_Back_Order\"] * monthly_back_order_FPB[m]\n )\n + fixed_variables[\"Weight_of_Stock\"]\n / 100\n * (\n fixed_variables[\"cost_FPA_Stock\"] * monthly_stock_FPA[m]\n + fixed_variables[\"cost_FPB_Stock\"] * monthly_stock_FPB[m]\n + fixed_variables[\"cost_RPone_Stock\"] * monthly_stock_RPone[m]\n + fixed_variables[\"cost_RPtwo_Stock\"] * monthly_stock_RPtwo[m]\n )\n for m in range(nb_periods)\n )\n\n # putting all the needed information in a dictionary\n model_info = {\n \"model_created\": prob,\n \"model_solved\": None,\n \"Monthly_Production_FPA\": monthly_production_FPA,\n \"Monthly_Stock_FPA\": monthly_stock_FPA,\n \"Monthly_Back_Order_FPA\": monthly_back_order_FPA,\n \"Monthly_Production_FPB\": monthly_production_FPB,\n \"Monthly_Stock_FPB\": monthly_stock_FPB,\n \"Monthly_Back_Order_FPB\": monthly_back_order_FPB,\n \"Monthly_Stock_RPone\": monthly_stock_RPone,\n \"Monthly_Stock_RPtwo\": monthly_stock_RPtwo,\n \"Monthly_Purchase_RPone\": monthly_purchase_RPone,\n \"Monthly_Purchase_RPtwo\": monthly_purchase_RPtwo,\n }\n\n print(\"Model created\")\n return model_info\n\n\ndef solve_model(model_info: dict, solver_name):\n \"\"\"This function solves the model and returns all the solutions in a dictionary.\n\n Args:\n model_info (dict): the model_info passed by the create_model function\n\n Returns:\n dict: the model solved and and the solutions\n \"\"\"\n print(\"Solving the model...\")\n prob = model_info[\"model_created\"]\n\n nb_periods = len(model_info[\"Monthly_Production_FPA\"])\n\n if solver_name != \"Default\":\n solver = getSolver(solver_name)\n # solving the model\n m_solved = prob.solve(solver)\n else:\n m_solved = prob.solve()\n \n # getting the solution in the right variables\n # for product A\n prod_sol_FPA = [\n value(model_info[\"Monthly_Production_FPA\"][p]) for p in range(nb_periods)\n ]\n stock_sol_FPA = [\n value(model_info[\"Monthly_Stock_FPA\"][p]) for p in range(nb_periods)\n ]\n bos_sol_FPA = [\n value(model_info[\"Monthly_Back_Order_FPA\"][p]) for p in range(nb_periods)\n ]\n\n # for product B\n prod_sol_FPB = [\n value(model_info[\"Monthly_Production_FPB\"][p]) for p in range(nb_periods)\n ]\n stock_sol_FPB = [\n value(model_info[\"Monthly_Stock_FPB\"][p]) for p in range(nb_periods)\n ]\n bos_sol_FPB = [\n value(model_info[\"Monthly_Back_Order_FPB\"][p]) for p in range(nb_periods)\n ]\n\n # for product 1\n stock_RPone_sol = [\n value(model_info[\"Monthly_Stock_RPone\"][p]) for p in range(nb_periods)\n ]\n stock_RPtwo_sol = [\n 
value(model_info[\"Monthly_Stock_RPtwo\"][p]) for p in range(nb_periods)\n ]\n\n # for product 2\n purchase_RPone_sol = [\n value(model_info[\"Monthly_Purchase_RPone\"][p]) for p in range(nb_periods)\n ]\n purchase_RPtwo_sol = [\n value(model_info[\"Monthly_Purchase_RPtwo\"][p]) for p in range(nb_periods)\n ]\n\n # put it in a dictionary\n model_info = {\n \"model_created\": prob,\n \"model_solved\": m_solved,\n \"Monthly_Production_FPA\": prod_sol_FPA,\n \"Monthly_Stock_FPA\": stock_sol_FPA,\n \"Monthly_Back_Order_FPA\": bos_sol_FPA,\n \"Monthly_Production_FPB\": prod_sol_FPB,\n \"Monthly_Stock_FPB\": stock_sol_FPB,\n \"Monthly_Back_Order_FPB\": bos_sol_FPB,\n \"Monthly_Stock_RPone\": stock_RPone_sol,\n \"Monthly_Purchase_RPone\": purchase_RPone_sol,\n \"Monthly_Stock_RPtwo\": stock_RPtwo_sol,\n \"Monthly_Purchase_RPtwo\": purchase_RPtwo_sol,\n }\n print(\"Model solved\")\n return model_info\n\n\ndef create_results(model_info: dict, fixed_variables: dict, demand: pd.DataFrame):\n \"\"\"This function creates the results of the model. The results dataframe is a concatenation of all the useful information.\n\n Args:\n model_info (dict): the dictionary created by the solve_model function\n fixed_variables (dict): the fixed variables of the problem\n demand (pd.DataFrame): the demand for A and B\n\n Returns:\n pd.DataFrame: dataframe that gathers all the useful information about the solution\n \"\"\"\n print(\"Creating the results...\")\n\n # getting the demand for A and B\n demand_series_FPA = demand[\"Demand_A\"]\n demand_series_FPB = demand[\"Demand_B\"]\n\n nb_periods = len(demand_series_FPA)\n\n # calculate the different costs\n cost_FPBO_FPA = fixed_variables[\"cost_FPA_Back_Order\"] * np.array(\n model_info[\"Monthly_Back_Order_FPA\"]\n )\n cost_stock_FPA = fixed_variables[\"cost_FPA_Stock\"] * np.array(\n model_info[\"Monthly_Stock_FPA\"]\n )\n cost_FPBO_FPB = fixed_variables[\"cost_FPB_Back_Order\"] * np.array(\n model_info[\"Monthly_Back_Order_FPB\"]\n )\n cost_stock_FPB = fixed_variables[\"cost_FPB_Stock\"] * np.array(\n model_info[\"Monthly_Stock_FPB\"]\n )\n cost_stock_RPone = fixed_variables[\"cost_RPone_Stock\"] * np.array(\n model_info[\"Monthly_Stock_RPone\"]\n )\n cost_stock_RPtwo = fixed_variables[\"cost_RPtwo_Stock\"] * np.array(\n model_info[\"Monthly_Stock_RPtwo\"]\n )\n cost_product_RPone = fixed_variables[\"cost_RPone_Purchase\"] * np.array(\n model_info[\"Monthly_Purchase_RPone\"]\n )\n cost_product_RPtwo = fixed_variables[\"cost_RPtwo_Purchase\"] * np.array(\n model_info[\"Monthly_Purchase_RPtwo\"]\n )\n\n # the total cost (sum of the costs)\n total_cost = (\n cost_FPBO_FPA\n + cost_stock_FPA\n + cost_FPBO_FPB\n + cost_stock_FPB\n + cost_stock_RPone\n + cost_product_RPone\n + cost_product_RPtwo\n + cost_stock_RPtwo\n )\n\n # creation of the dictionary that will be used to create the dataframe\n dict_for_dataframe = {\n \"Monthly Production FPA\": model_info[\"Monthly_Production_FPA\"],\n \"Monthly Stock FPA\": model_info[\"Monthly_Stock_FPA\"],\n \"Monthly BO FPA\": model_info[\"Monthly_Back_Order_FPA\"],\n \"Max Capacity FPA\": [fixed_variables[\"Max_Capacity_FPA\"]] * nb_periods,\n \"Monthly Production FPB\": model_info[\"Monthly_Production_FPB\"],\n \"Monthly Stock FPB\": model_info[\"Monthly_Stock_FPB\"],\n \"Monthly BO FPB\": model_info[\"Monthly_Back_Order_FPB\"],\n \"Max Capacity FPB\": [fixed_variables[\"Max_Capacity_FPB\"]] * nb_periods,\n \"Monthly Stock RP1\": model_info[\"Monthly_Stock_RPone\"],\n \"Monthly Stock RP2\": 
model_info[\"Monthly_Stock_RPtwo\"],\n \"Monthly Purchase RP1\": model_info[\"Monthly_Purchase_RPone\"],\n \"Monthly Purchase RP2\": model_info[\"Monthly_Purchase_RPtwo\"],\n \"Demand FPA\": demand_series_FPA,\n \"Demand FPB\": demand_series_FPB,\n \"Stock FPA Cost\": cost_stock_FPA,\n \"Stock FPB Cost\": cost_stock_FPB,\n \"Stock RP1 Cost\": cost_stock_RPone,\n \"Stock RP2 Cost\": cost_stock_RPtwo,\n \"Purchase RP1 Cost\": cost_product_RPone,\n \"Purchase RP2 Cost\": cost_product_RPtwo,\n \"BO FPA Cost\": cost_FPBO_FPA,\n \"BO FPB Cost\": cost_FPBO_FPB,\n \"Total Cost\": total_cost,\n \"index\": range(nb_periods),\n }\n\n results = pd.DataFrame(dict_for_dataframe).round()\n print(\"Results created\")\n\n # we erase the last two observations because of how the model is created,\n # their values don't have a meaning\n return results[:-2]\n"} {"text": "# Create app for demo-production-planning create_data.py\nimport numpy as np\nimport pandas as pd\n\n# this code is used to create the csv file for the demand, it is the source data for the problem\n\ndef create_time_series(nb_months=12,mean_A=840,mean_B=760,std_A=96,std_B=72, amplitude_A=108,amplitude_B=144):\n time_series_A = [mean_A] \n time_series_B = [mean_B]\n \n for i in range(1,nb_months):\n time_series_A.append(np.random.normal(mean_A + amplitude_A*np.sin(2*np.pi*i/12), std_A))\n time_series_B.append(np.random.normal(mean_B + amplitude_B*np.sin((2*np.pi*(i+6))/12), std_B))\n \n time_series_A = pd.Series(time_series_A)\n time_series_B = pd.Series(time_series_B)\n month = [i%12 for i in range(nb_months)]\n year = [i//12 + 2020 for i in range(nb_months)]\n df_time_series = pd.DataFrame({\"Year\":year,\n \"Month\":month,\n \"Demand_A\":time_series_A,\n \"Demand_B\":time_series_B})\n return df_time_series\n\n\ndef time_series_to_csv(nb_months=12,mean_A=840,mean_B=760,std_A=96,std_B=72, amplitude_A=108,amplitude_B=144):\n time_serie_data = create_time_series(nb_months,\n mean_A,\n mean_B,\n std_A,\n std_B,\n amplitude_A,\n amplitude_B)\n time_serie_data.to_csv('data/time_series_demand.csv')\n"} {"text": "# Create app for demo-production-planning databases_md.py\nimport pathlib\n\nd_chart_csv_path = None\n\n# this path is used to create a temporary file that will allow us to\n# download a table in the Datasouces page\ntempdir = pathlib.Path(\".tmp\")\ntempdir.mkdir(exist_ok=True)\nPATH_TO_TABLE = str(tempdir / \"table.csv\")\n\nda_databases_md = \"\"\"\n<|container|\n# Data**sources**{: .color-primary } \n\n<|layout|columns=3 2 1|columns[mobile]=1|class_name=align_columns_bottom|\n\n|year>\n\n\n|month>\n\n\n|scenario>\n|>\n|layout_scenario>\n\n<|\nTable\n\n<|{sm_graph_selected}|selector|lov={sm_graph_selector}|dropdown|>\n|>\n\n
\n<|{d_chart_csv_path}|file_download|name=table.csv|label=Download table|>\n|>\n\n<|part|render={len(scenario_selector)>0}|class_name=mt2|\n<|{chart}|table|width=100%|rebuild|>\n|>\n|>\n\"\"\"\n"} {"text": "# Create app for demo-production-planning data_visualization_md.py\nimport pandas as pd\nimport json\n\nwith open('data/fixed_variables_default.json', \"r\") as f:\n fixed_variables_default = json.load(f)\n\n# no code from Taipy Core has been executed yet, we read the csv file this way\nda_initial_demand = pd.read_csv('data/time_series_demand.csv')[['Year', 'Month', 'Demand_A', 'Demand_B']]\\\n .astype(int)\n\nda_initial_demand.columns = [col.replace('_', ' ') for col in da_initial_demand.columns]\n\nda_initial_variables = pd.DataFrame({key: [fixed_variables_default[key]]\n for key in fixed_variables_default.keys() if 'Initial' in key})\n\n# The code below is to correctly format the name of the columns\nda_initial_variables.columns = [col.replace('_', ' ').replace('one', '1').replace('two', '2').replace('initial ', '').replace('Initial ', '')\n for col in da_initial_variables.columns]\nda_initial_variables.columns = [col[0].upper() +\n col[1:] for col in da_initial_variables.columns]\n\n\nda_data_visualisation_md = \"\"\"\n<|container|\n# Data **Visualization**{: .color-primary } \n\n<|Expand here to see more data|expandable|expanded=False|\n\n <|layout|columns=5 3 3|columns[mobile]=1|\n### Initial **stock**{: .color-secondary } \\\n<|{da_initial_variables[[col for col in da_initial_variables.columns if 'Stock' in col]]}|table|show_all|width=100%|>\n\n### Incoming **purchases**{: .color-secondary } \\\n<|{da_initial_variables[[col for col in da_initial_variables.columns if 'Purchase' in col]]}|table|show_all|width=100%|>\n\n### Initial **production**{: .color-secondary } \\\n<|{da_initial_variables[[col for col in da_initial_variables.columns if 'Production' in col]]}|table|show_all|width=100%|>\n |>\n\n\n## **Demand**{: .color-secondary } of the upcoming months\n<|{da_initial_demand.round()}|table|width=fit-content|show_all|height=fit-content|>\n|>\n\n### **Evolution**{: .color-primary } of the demand\n<|{da_initial_demand}|chart|x=Month|y[1]=Demand A|y[2]=Demand B|>\n|>\n\"\"\"\n"} {"text": "# Create app for demo-production-planning shared.md\n<|toggle|theme|>\n\n<|menu|label=Menu|lov={menu_lov}|on_action=menu_fct|>\n\n\n\n\n<|Need any help?|button|on_action={lambda s: s.assign('dialog_help', True)}|id=help_button|>\n\n<|{dialog_help}|dialog|title=Walkthough|on_action=validate_help|labels=Go!|id=dialog_help|width=100%|\n\n<|container|\n## Page 1: Data Visualization\nUpon registering with a new account (name & password), the first page is displayed.\n\nThe primary chart depicts future demand for finished products A (FPA) \nand B (FPB) over the next 11 months, with the current month marked as month 0.\n\n\nJust above the chart, by clicking \"Expand here,\" you can access an expandable \nTaipy front-end containing initial production data at time 0 (current month): \nstock & production levels, incoming raw material orders, and demand, all presented in a table.\n\n\n\n## Page 2: Scenario Manager\n\nCreate, configure, and optimize production scenarios.\nThis is the application's main page, where users can create new scenarios, \nadjust scenario parameters (on the 'Scenario Configuration' side of the page), \nand re-submit scenarios for re-optimization based on modified parameters.\nInitially, no scenario is available, and the Year/Month corresponds to the current month.\n\n### Creating your 
first scenario\n\nThe purpose of the model is to generate a production plan (the level of production \nfor both products) for the next 11 months in order to:\n\n- Meet the demand for the finished products\n- Respect the capacity constraints\n- Minimize two cost functions:\n\n    - Back-order costs: the costs of not meeting the demand on time\n    - Stock costs: the costs of storing raw and finished products.\n\nIt is worth noting that these two cost functions pull in opposite directions:\nwith a large stock, the demand is easily met; conversely,\na low inventory may put the demand in jeopardy.\nWhen creating a first scenario, two key indicators, \"Back Order Cost\"\nand \"Stock Cost\", appear above an empty main chart (no plan generated yet).\n\n\nClick on \"New scenario\" to launch the optimization algorithm, which \nquickly finds the optimal production levels, respecting the capacity \nconstraints and minimizing costs. \nResults can be displayed as time series or pie charts, and different \ngraphs can be selected by choosing the data to display (costs, productions, etc.).\n\n\n### Modifying the Parameters\nOn the right-hand side of this panel, you can modify various parameters categorized into three sections:\n\n- **Capacity Constraints**: modify the capacity values for the different products (finished and raw).\n- **Objective Weights**: emphasize minimizing a specific cost (stock or back order).\n- **Initial Parameters**: modify other parameters such as Initial Stock and Unit Cost.\n\nBy playing with these parameters, you can create several scenarios.\n\n\n\n## Page 3: Compare Scenarios\nTo compare two scenarios, select them, then click on the \"Compare scenario\" button. \nYou can select different comparison metrics such as costs, purchases, production levels, etc.\n\n\n## Page 4: Compare Cycles\n\nThis demo also introduces the concept of \"Cycles\". \nIn this manufacturing context, the cycle is monthly, \nwhich implies that scenarios are created each month. \nOnly one of the generated scenarios is chosen as the \n\"official\" scenario; this scenario is referred to as the \"Primary\" scenario. \n\nThis demo already contains many scenarios generated from the \nprevious months. The \"Evolution of costs\" bar chart displays \nthe performance of every \"primary\" scenario generated \neach month for the past few years. Compare monthly stock and \nback-order costs from January 2021 to the present month using stacked bar charts.\n\n\n## Page 5: Datasources\n\nAccess and display the various tables associated with \na selected scenario. 
Conveniently download data tables in CSV format.\n|>\n|>"} {"text": "# Create app for demo-production-planning shared.py\nfrom taipy.gui import notify, navigate, Icon\nimport taipy as tp\nimport datetime as dt\n\n# User id\nstate_id = None\n\n# Metrics for scenario manager and comparison\nsum_costs = 0\nsum_costs_of_stock = 0\nsum_costs_of_BO = 0\nsum_costs_of_BO = 0\n\n# Navigation\npage = \"Data Visualization\"\n\nmenu_lov = [(\"Data-Visualization\", Icon('images/icons/visualize.svg', 'Data Visualization')),\n (\"Scenario-Manager\", Icon('images/icons/scenario.svg', 'Scenario Manager')),\n (\"Compare-Scenarios\", Icon('images/icons/compare.svg', 'Compare Scenarios')),\n (\"Compare-Cycles\", Icon('images/icons/cycle.svg', 'Compare Cycles')),\n ('Databases', Icon('images/icons/data_base.svg', 'Databases'))]\n\n\ndef menu_fct(state, var_name: str, var_value):\n \"\"\"Functions that is called when there is a change in the menu control\n\n Args:\n state (_type_): the state object of Taipy\n var_name (str): the changed variable name\n var_value (_type_): the changed variable value\n \"\"\"\n\n # change the value of the state.page variable in order to render the\n # correct page\n state.page = var_value['args'][0]\n navigate(state, to=state.page)\n\n\n # security on the 'All' option of sm_graph_selected that can be selected\n # only on the 'Databases' page\n if state.page != 'Databases' and state.sm_graph_selected == 'All':\n state.sm_graph_selected = 'Costs'\n\n\n# Functions for scenarios\ndef adapt_scenarios(scenario):\n return 'Primary ' + scenario.name if scenario.is_primary else scenario.name\n\n\ndef create_sm_tree_dict(scenarios, sm_tree_dict: dict = None):\n \"\"\"This function creates a tree dict from a list of scenarios. The levels of the tree are:\n year/month/scenario\n\n Args:\n scenarios (list): a list of scenarios\n sm_tree_dict (dict, optional): the tree gathering all the scenarios. 
Defaults to None.\n\n Returns:\n tree: the tree created to classify the scenarios\n \"\"\"\n print(\"Creating tree dict...\")\n if sm_tree_dict is None:\n # Initialize the tree dict if it is not already initialized\n sm_tree_dict = {}\n\n # Add all the scenarios that are in the list\n for scenario in scenarios:\n # Create a name for the cycle\n date = scenario.creation_date\n year = f\"{date.strftime('%Y')}\"\n period = f\"{date.strftime('%b')}\"\n\n # Add the cycle if it was not already added\n if year not in sm_tree_dict:\n sm_tree_dict[year] = {}\n if period not in sm_tree_dict[year]:\n sm_tree_dict[year][period] = []\n\n sm_tree_dict[year][period] += [scenario]\n\n return sm_tree_dict\n\n\n\ndef create_time_selectors():\n \"\"\"This function creates the time selectors that will be displayed on the GUI and it is also creating \n the tree dict gathering all the scenarios.\n\n Returns:\n dict: the tree dict gathering all the scenarios\n list: the list of years\n list: the list of months\n \"\"\"\n all_scenarios_ordered = sorted(tp.get_scenarios(), key=lambda x: x.creation_date.timestamp())\n\n sm_tree_dict = create_sm_tree_dict(all_scenarios_ordered)\n\n if sm_current_year not in list(sm_tree_dict.keys()):\n sm_tree_dict[sm_current_year] = {}\n if sm_current_month not in sm_tree_dict[sm_current_year]:\n sm_tree_dict[sm_current_year][sm_current_month] = []\n\n sm_year_selector = list(sm_tree_dict.keys())\n sm_month_selector = list(sm_tree_dict[sm_selected_year].keys())\n\n return sm_tree_dict, sm_year_selector, sm_month_selector\n\n\ndef change_sm_month_selector(state):\n \"\"\"\n This function is called when the user changes the year selector. It updates the selector shown on the GUI\n for the month selector and is calling the same function for the scenario selector.\n \n\n Args:\n state (State): all the GUI variables\n \"\"\"\n state.sm_month_selector = list(state.sm_tree_dict[state.sm_selected_year].keys())\n\n if state.sm_selected_month not in state.sm_month_selector:\n state.sm_selected_month = state.sm_month_selector[0]\n\n change_scenario_selector(state)\n\n\ndef change_scenario_selector(state):\n \"\"\"\n This function is called when the user changes the month selector. It updates the selector shown on the GUI\n for the scenario selector.\n\n Args:\n state (State): all the GUI variables\n \"\"\"\n state.scenario_selector = list(state.sm_tree_dict[state.sm_selected_year][state.sm_selected_month])\n state.scenario_selector_two = state.scenario_selector.copy()\n if len(state.scenario_selector) > 0:\n state.selected_scenario = state.scenario_selector[0]\n\n if (state.sm_selected_month != sm_current_month or\\\n state.sm_selected_year != sm_current_year) and\\\n state.sm_show_config_scenario:\n\n state.sm_show_config_scenario = False\n notify(state, \"info\", \"This scenario is historical, you can't modify it\")\n else:\n state.sm_show_config_scenario = True\n\ndef update_scenario_selector(state):\n \"\"\"\n This function will update the scenario selectors. It will be used when\n we create a new scenario. 
If there is a scenario that is created, we will\n add its (id,name) in this list.\n\n Args:\n scenarios (list): a list of tuples (scenario,properties)\n \"\"\"\n state.scenario_selector = [s for s in tp.get_scenarios() if 'user' in s.properties and\\\n state.state_id == s.properties['user']]\n state.scenario_selector_two = state.scenario_selector.copy()\n sm_tree_dict[state.sm_selected_year][state.sm_selected_month] = state.scenario_selector\n\nscenario_selector = []\nselected_scenario = None\n\nscenario_selector_two = []\nselected_scenario_two = None\n\n# Initialization of scenario tree\nsm_tree_dict = {}\n\nsm_current_month = dt.date.today().strftime('%b')\nsm_current_year = dt.date.today().strftime('%Y')\n\nsm_selected_year = sm_current_year\nsm_selected_month = sm_current_month\n\nsm_tree_dict, sm_year_selector, sm_month_selector = create_time_selectors()\n\n\n# Help\ndialog_help = False\n\ndef restore_state(state):\n state.cs_show_comparison = False\n update_scenario_selector(state)\n notify(state, 'info', 'Restoring your session')\n\n\ndef validate_help(state, action, payload):\n state.dialog_help = False\n"} {"text": "# Create app for demo-production-planning compare_cycles_md.py\nfrom data.create_data import time_series_to_csv\nfrom config.config import scenario_cfg\n\nfrom taipy.core import taipy as tp\n\nimport datetime as dt\nimport pandas as pd\n\ncc_data = pd.DataFrame(\n {\n 'Date': [dt.datetime(2021, 1, 1)],\n 'Cycle': [dt.date(2021, 1, 1)],\n 'Cost of Back Order': [0],\n 'Cost of Stock': [0]\n })\n\n\ncc_show_comparison = False\ncc_layout = {'barmode': 'stack'}\n\n\ndef cc_create_scenarios_for_cycle():\n \"\"\"This function creates scenarios for multiple cycles and submit them.\n \"\"\"\n date = dt.datetime.now() - dt.timedelta(days=365)\n month = date.strftime('%b')\n year = date.strftime('%Y')\n\n current_month = dt.date.today().strftime('%b')\n current_year = dt.date.today().strftime('%Y')\n\n while month != current_month or year != current_year:\n date += dt.timedelta(days=15)\n month = date.strftime('%b')\n year = date.strftime('%Y')\n\n if month != current_month or year != current_year:\n time_series_to_csv()\n\n scenario = tp.create_scenario(scenario_cfg, creation_date=date, name=date.strftime('%d-%b-%Y'))\n tp.submit(scenario)\n\n\ndef update_cc_data(state):\n \"\"\"This function creates the evolution of the cost of back order and stock for the primary scenario of all the cycles.\"\"\"\n\n dates = []\n cycles = []\n costs_of_back_orders = []\n costs_of_stock = []\n all_scenarios = tp.get_primary_scenarios()\n\n all_scenarios_ordered = sorted(\n all_scenarios,\n key=lambda x: x.creation_date.timestamp()) \n \n for scenario in all_scenarios_ordered:\n results = scenario.results.read()\n\n if results is not None:\n date_ = scenario.creation_date\n\n # creation of sum_costs_of_stock metrics\n bool_costs_of_stock = [c for c in results.columns if 'Cost' in c and\\\n 'Total' not in c and\\\n 'Stock' in c]\n sum_costs_of_stock = int(results[bool_costs_of_stock].sum(axis=1)\\\n .sum(axis=0))\n\n # creation of sum_costs_of_BO metrics\n bool_costs_of_BO = [c for c in results.columns if 'Cost' in c and\\\n 'Total' not in c and\\\n 'BO' in c]\n sum_costs_of_BO = int(results[bool_costs_of_BO].sum(axis=1)\\\n .sum(axis=0))\n\n dates.append(date_)\n cycles.append(dt.date(date_.year, date_.month, 1))\n costs_of_back_orders.append(sum_costs_of_BO)\n costs_of_stock.append(sum_costs_of_stock)\n\n state.cc_data = pd.DataFrame({'Date': dates,\n 'Cycle': cycles,\n 'Cost of Back Order': 
costs_of_back_orders,\n 'Cost of Stock': costs_of_stock})\n\n\ncc_compare_cycles_md = \"\"\"\n<|container|\n# **Compare**{: .color-primary} cycles\n<|Start cycles comparison|button|on_action=update_cc_data|class_name=mb2|>\n\n<|Table|expandable|expanded=False|\n<|{cc_data}|table|>\n|>\n\n## Evolution of costs\n<|{cc_data}|chart|type=bar|x=Cycle|y[1]=Cost of Back Order|y[2]=Cost of Stock|layout={cc_layout}|>\n|>\n\"\"\"\n"} {"text": "# Create app for demo-production-planning compare_scenario_md.py\nimport taipy as tp\nimport pandas as pd\n\n\ncs_compare_scenario_md = \"\"\"\n<|part|class_name=container|\n# **Compare**{: .color-primary} scenarios\n\nChoose two scenarios to compare.\n\n<|layout|columns=3 3 auto|columns[mobile]=1|gap=1.5rem|class_name=align_columns_bottom|\n\n\n\nMonth <|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|width=100%|on_change=change_scenario_selector|>\n\n**Scenario** <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|adapter=adapt_scenarios|width=18rem|>\n|>\n|layout_scenario>\n\n\n\n\n\nMonth <|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|width=100%|on_change=change_scenario_selector|>\n\n**Scenario** <|{selected_scenario_two}|selector|lov={scenario_selector_two}|dropdown|adapter=adapt_scenarios|>\n|>\n|layout_scenario>\n\n
\n
\n<|Compare scenario|button|on_action=compare_scenarios|active={len(scenario_selector)>1}|>\n|>\n\n<|part|render={cs_show_comparison and len(scenario_selector)>=2}|class_name=mt2 card p2|\n<|layout|columns=1 1 1|columns[mobile]=1|class_name=align_columns_bottom|\n**Representation**\n<|{cs_compar_graph_selected}|selector|lov={cs_compar_graph_selector}|dropdown|>\n\n\n**Total cost of scenario 1:** *<|{str(int(sum_costs/1000))+' K'}|>*\n{: .text-center}\n\n**Total cost of scenario 2:** *<|{str(int(cs_sum_costs_two/1000))+' K'}|>*\n{: .text-center}\n|>\n\n\n\n<|part|render={cs_compar_graph_selected=='Metrics'}|class_name=mt2|\n\n<|layout|columns=1 1|columns[mobile]=1|\n<|{cs_comparaison_metrics_df[cs_comparaison_metrics_df['Metrics']=='BO Cost']}|chart|type=bar|x=Metrics|y[1]=Scenario 1: BO Cost|y[2]=Scenario 2: BO Cost|color[2]=#2b93db|height={cs_height_bar_chart}|>\n\n<|{cs_comparaison_metrics_df[cs_comparaison_metrics_df['Metrics']=='Stock Cost']}|chart|type=bar|x=Metrics|y[1]=Scenario 1: Stock Cost|y[2]=Scenario 2: Stock Cost|color[1]=#ff7f0e|color[2]=#ff9a41|height={cs_height_bar_chart}|>\n|>\n|>\n\n<|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Cost|y[2]=Scenario 2 Cost|color[2]=#1f77b4|line[2]=dash|render={cs_compar_graph_selected=='Costs'}|>\n<|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Purchase|y[2]=Scenario 2 Purchase|color[2]=#1f77b4|line[2]=dash|render={cs_compar_graph_selected=='Purchases'}|>\n<|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Production|y[2]=Scenario 2 Production|color[2]=#1f77b4|line[2]=dash|render={cs_compar_graph_selected=='Productions'}|>\n<|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Stock|y[2]=Scenario 2 Stock|color[2]=#1f77b4|line[2]=dash|render={cs_compar_graph_selected=='Stocks'}|>\n<|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 BO|y[2]=Scenario 2 BO|color[2]=#1f77b4|line[2]=dash|render={cs_compar_graph_selected=='Back Order'}|>\n|>\n|>\n\"\"\"\n\n\ndef compare_scenarios(state):\n \"\"\"This function compares two scenarios chosen by the user on different metrics and populate dataframes for the comparison graphs.\n\n Args:\n state (State): All the GUI variables\n \"\"\"\n state.cs_show_comparison = True\n \n # get of the two scenarios chosen by the user\n results_1 = state.selected_scenario.results.read()\n results_2 = state.selected_scenario_two.results.read()\n state.cs_sum_costs_two = results_2['Total Cost'].sum()\n\n # calculate the partial costs of the two scenarios\n bool_costs_of_stock = [c for c in results_2.columns\n if 'Cost' in c and 'Total' not in c and 'Stock' in c]\n \n state.cs_sum_costs_of_stock_two = int(results_2[bool_costs_of_stock].sum(axis=1)\\\n .sum(axis=0))\n\n bool_costs_of_BO = [c for c in results_2.columns\n if 'Cost' in c and 'Total' not in c and 'BO' in c]\n state.cs_sum_costs_of_BO_two = int(results_2[bool_costs_of_BO].sum(axis=1)\\\n .sum(axis=0))\n\n # populate the dataframes for the comparison graphs\n new_result_1 = pd.DataFrame({\"index\": results_1.index})\n new_result_2 = pd.DataFrame({\"index\": results_2.index})\n\n columns_to_merge = ['Cost', 'Purchase', 'Production', 'Stock', 'BO']\n for col in columns_to_merge:\n if col == 'Cost':\n bool_col_1 = [c for c in results_1.columns\n if col in c and 'Total' not in c]\n bool_col_2 = [c for c in results_2.columns\n if col in c and 'Total' not in c]\n else:\n bool_col_1 = [c for c in results_1.columns\n if col in c and 'Total' not in c and 'Cost' not in c]\n bool_col_2 = [c for c in results_2.columns\n if col in c and 'Total' not in c and 'Cost' not 
in c]\n\n new_result_1[col] = results_1[bool_col_1].sum(axis=1)\n new_result_2[col] = results_2[bool_col_2].sum(axis=1)\n\n new_result_1.columns = ['Scenario 1 ' + column if column != 'index' else 'index'\n for column in new_result_1.columns]\n new_result_2.columns = ['Scenario 2 ' + column if column !='index' else 'index'\n for column in new_result_2.columns]\n\n state.cs_comparaison_metrics_df = pd.DataFrame(\n {\n \"Metrics\": [ \"Stock Cost\", \"BO Cost\"],\n \"Scenario 1: Stock Cost\": [state.sum_costs_of_stock, None],\n \"Scenario 2: Stock Cost\": [state.cs_sum_costs_of_stock_two, None],\n \"Scenario 1: BO Cost\": [None, state.sum_costs_of_BO],\n \"Scenario 2: BO Cost\": [None, state.cs_sum_costs_of_BO_two]\n })\n\n state.cs_comparaison_df = pd.merge(new_result_1, new_result_2, on=\"index\", how=\"inner\")\n print(\"Comparaison done\")\n pass\n\n\ncs_height_bar_chart = \"80%\"\n\ncs_show_comparison = False\n\ncs_compar_graph_selector = [\n 'Metrics',\n 'Costs',\n 'Purchases',\n 'Productions',\n 'Stocks',\n 'Back Order']\ncs_compar_graph_selected = cs_compar_graph_selector[0]\n\ncs_comparaison_df = pd.DataFrame({'index': [0],\n 'Scenario 1 Cost': [0],\n 'Scenario 1 Purchase': [0],\n 'Scenario 1 Production': [0],\n 'Scenario 1 Stock': [0],\n 'Scenario 1 BO': [0],\n 'Scenario 2 Cost': [0],\n 'Scenario 2 Purchase': [0],\n 'Scenario 2 Production': [0],\n 'Scenario 2 Stock': [0],\n 'Scenario 2 BO': [0]})\n\ncs_comparaison_metrics_df = pd.DataFrame({\"Metrics\": [\"Stock Cost\", \"BO Cost\"],\n \"Scenario 1: Stock Cost\": [0, 0],\n \"Scenario 2: Stock Cost\": [0, 0],\n \"Scenario 1: BO Cost\": [0, 0],\n \"Scenario 2: BO Cost\": [0, 0]})\n\ncs_sum_costs_of_stock_two = 0\ncs_sum_costs_of_BO_two = 0\ncs_sum_costs_two = 0\n"} {"text": "# Create app for demo-production-planning scenario_manager.md\n<|container|\n# **Scenario**{: .color-primary } Manager\n\n<|layout|columns=8 4 auto|\n \n\nMonth <|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|on_change=change_scenario_selector|>\n\n**Scenario** <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|adapter=adapt_scenarios|width=18rem|class_name=success|>\n |>\n |layout_scenario>\n\n\nGraph <|{sm_graph_selected}|selector|lov={sm_graph_selector}|dropdown|>\n\n\n|toggle_chart>\n|>\n\n---\n\n<|layout|columns=9 3|gap=1.5rem|\n\n<|part|render={len(scenario_selector)>0}|\n<|layout|columns=1 1|gap=1rem|\n <|part|class_name=card mb1 p2 text_center|\n<|{str(int(sum_costs_of_BO/1000))+' K'}|indicator|value={sum_costs_of_BO}|min=50_000|max=1_000|width=93%|>\n**Back Order Cost**\n |>\n\n <|part|class_name=card mb1 p2 text_center|\n<|{str(int(sum_costs_of_stock/1000))+' K'}|indicator|value={sum_costs_of_stock}|min=100_000|max=25_000|width=93%|> \n**Stock Cost**\n |>\n|>\n\n\n<|{pie_results.loc[['Stock FPA Cost', 'Stock FPB Cost', 'Stock RP1 Cost', 'Stock RP2 Cost', 'Purchase RP1 Cost', 'Purchase RP2 Cost', 'BO FPA Cost', 'BO FPB Cost']]}|chart|type=pie|values=values|labels=labels|render={sm_show_pie=='pie' and sm_graph_selected=='Costs'}|>\n\n<|{sm_results}|chart|x=index|y[1]=Stock FPA Cost|y[2]=Stock FPB Cost|y[3]=Stock RP1 Cost|y[4]=Stock RP2 Cost|y[5]=Purchase RP1 Cost|y[6]=Purchase RP2 Cost|y[7]=BO FPA Cost|y[8]=BO FPB Cost|y[9]=Total Cost|render={sm_show_pie=='chart' and sm_graph_selected=='Costs'}|>\n\n<|{pie_results.loc[['Monthly Purchase RP1', 'Monthly Purchase RP2']]}|chart|type=pie|values=values|labels=labels|render={sm_show_pie=='pie' and sm_graph_selected=='Purchases'}|>\n\n<|{sm_results}|chart|x=index|y[1]=Monthly Purchase 
RP1|y[2]=Monthly Purchase RP2|render={sm_show_pie=='chart' and sm_graph_selected=='Purchases'}|>\n\n<|{pie_results.loc[['Monthly Production FPA', 'Max Capacity FPA', 'Monthly Production FPB', 'Max Capacity FPB']]}|chart|type=pie|values=values|labels=labels|render={sm_show_pie=='pie' and sm_graph_selected=='Productions'}|>\n\n<|{sm_results}|chart|x=index|y[1]=Monthly Production FPA|y[2]=Max Capacity FPA|line[2]=dash|y[3]=Monthly Production FPB|y[4]=Max Capacity FPB|line[4]=dash|render={sm_show_pie=='chart' and sm_graph_selected=='Productions'}|>\n\n<|{pie_results.loc[['Monthly Stock FPA', 'Monthly Stock FPB', 'Monthly Stock RP1', 'Monthly Stock RP2']]}|chart|type=pie|values=values|labels=labels|render={sm_show_pie=='pie' and sm_graph_selected=='Stocks'}|>\n\n<|{sm_results}|chart|x=index|y[1]=Monthly Stock FPA|y[2]=Monthly Stock FPB|y[3]=Monthly Stock RP1|y[4]=Monthly Stock RP2|render={sm_show_pie=='chart' and sm_graph_selected=='Stocks'}|>\n\n<|{pie_results.loc[['Monthly BO FPA', 'Monthly BO FPB']]}|chart|type=pie|values=values|labels=labels|render={sm_show_pie=='pie' and sm_graph_selected=='Back Order'}|>\n\n<|{sm_results}|chart|x=index|y[1]=Monthly BO FPA|y[2]=Monthly BO FPB|render={sm_show_pie=='chart' and sm_graph_selected=='Back Order'}|>\n\n<|{sm_results}|chart|x=index|y[1]=Monthly Production FPA|y[2]=Monthly Stock FPA|y[3]=Monthly BO FPA|y[4]=Max Capacity FPA|line[4]=dash|y[5]=Demand FPA|render={sm_graph_selected=='Product FPA'}|>\n\n<|{sm_results}|chart|x=index|y[1]=Monthly Production FPB|y[2]=Monthly Stock FPB|y[3]=Monthly BO FPB|y[4]=Max Capacity FPB|line[4]=dash|y[5]=Demand FPB|render={sm_graph_selected=='Product FPB'}|>\n\n<|{sm_results}|chart|x=index|y[1]=Monthly Stock RP1|y[2]=Monthly Purchase RP1|render={sm_graph_selected=='Product RP1'}|>\n\n<|{sm_results}|chart|x=index|y[1]=Monthly Stock RP2|y[2]=Monthly Purchase RP2|render={sm_graph_selected=='Product RP2'}|>\n|>\n\n\n \n\n<|mt2|\n<|{sm_param_selected}|selector|lov={sm_param_selector}|class_name=fullwidth|>\n\n\n<|part|render={sm_param_selected == 'Capacity Constraints'}|\n\n<|{sm_product_param}|toggle|lov={sm_choice_product_param}|value_by_id|class_name=mb1 text_center|>\n\n\n<|part|render={sm_product_param == 'product_FPA'}|\nMax Capacity FPA : *<|{fixed_variables.Max_Capacity_FPA}|>*\n<|{fixed_variables.Max_Capacity_FPA}|slider|min=332|max=1567|active={sm_show_config_scenario}|>\n\nMax Capacity of FPA and FPB : *<|{fixed_variables.Max_Capacity_of_FPA_and_FPB}|>*\n<|{fixed_variables.Max_Capacity_of_FPA_and_FPB}|slider|min=598|max=2821|active={sm_show_config_scenario}|>\n|>\n\n<|part|render={sm_product_param == 'product_FPB'}|\nMax Capacity FPB : *<|{fixed_variables.Max_Capacity_FPB}|>*\n<|{fixed_variables.Max_Capacity_FPB}|slider|min=332|max=1567|active={sm_show_config_scenario}|>\n\nMax Capacity of FPA and FPB : *<|{fixed_variables.Max_Capacity_of_FPA_and_FPB}|>*\n<|{fixed_variables.Max_Capacity_of_FPA_and_FPB}|slider|min=598|max=2821|active={sm_show_config_scenario}|>\n|>\n<|part|render={sm_product_param == 'product_RPone'}|\n\nMax Stock RP1 : *<|{fixed_variables.Max_Stock_RPone}|>*\n<|{fixed_variables.Max_Stock_RPone}|slider|min=28|max=132|active={sm_show_config_scenario}|>\n|>\n<|part|render={sm_product_param == 'product_RPtwo'}|\n\nMax Stock RP2 : *<|{fixed_variables.Max_Stock_RPtwo}|>*\n<|{fixed_variables.Max_Stock_RPtwo}|slider|min=21|max=99|active={sm_show_config_scenario}|>\n|>\n|>\n\n<|part|render={sm_param_selected == 'Objective Weights'}|\nWeight of Stock : 
*<|{fixed_variables.Weight_of_Stock}|>*\n<|{fixed_variables.Weight_of_Stock}|slider|min=35|max=165|active={sm_show_config_scenario}|>\n\nWeight of Back Order : *<|{fixed_variables.Weight_of_Back_Order}|>*\n<|{fixed_variables.Weight_of_Back_Order}|slider|min=35|max=165|active={sm_show_config_scenario}|>\n|>\n\n<|part|render={sm_param_selected == 'Initial Parameters'}|\n<|{sm_product_param}|toggle|lov={sm_choice_product_param}|value_by_id|class_name=mb1 text_center|>\n\n\n<|part|render={sm_product_param == 'product_FPA'}|\nUnit Cost - FPA Back Order : *<|{fixed_variables.cost_FPA_Back_Order}|>*\n<|{fixed_variables.cost_FPA_Back_Order}|slider|min=70|max=330|active={sm_show_config_scenario}|>\n\nUnit Cost - FPA Stock : *<|{fixed_variables.cost_FPA_Stock}|>*\n<|{fixed_variables.cost_FPA_Stock}|slider|min=15|max=74|active={sm_show_config_scenario}|>\n\nInitial Back Order FPA : *<|{fixed_variables.Initial_Back_Order_FPA}|>*\n<|{fixed_variables.Initial_Back_Order_FPA}|slider|min=0|max=50|active={sm_show_config_scenario}|>\n\nInitial Stock FPA : *<|{fixed_variables.Initial_Stock_FPA}|>*\n<|{fixed_variables.Initial_Stock_FPA}|slider|min=10|max=49|active={sm_show_config_scenario}|>\n\nInitial Production FPA : *<|{fixed_variables.Initial_Production_FPA}|>*\n<|{fixed_variables.Initial_Production_FPA}|slider|min=297|max=1402|active={sm_show_config_scenario}|>\n|>\n\n<|part|render={sm_product_param == 'product_FPB'}|\nUnit Cost - FPB Back Order : *<|{fixed_variables.cost_FPB_Back_Order}|>*\n<|{fixed_variables.cost_FPB_Back_Order}|slider|min=87|max=412|active={sm_show_config_scenario}|>\n\nUnit Cost - FPB Stock : *<|{fixed_variables.cost_FPB_Stock}|>*\n<|{fixed_variables.cost_FPB_Stock}|slider|min=14|max=66|active={sm_show_config_scenario}|>\n\nInitial Back Order FPB : *<|{fixed_variables.Initial_Back_Order_FPB}|>*\n<|{fixed_variables.Initial_Back_Order_FPB}|slider|min=8|max=41|active={sm_show_config_scenario}|>\n\nInitial Stock FPB : *<|{fixed_variables.Initial_Stock_FPB}|>*\n<|{fixed_variables.Initial_Stock_FPB}|slider|min=0|max=50|active={sm_show_config_scenario}|>\n\nInitial Production FPB : *<|{fixed_variables.Initial_Production_FPB}|>*\n<|{fixed_variables.Initial_Production_FPB}|slider|min=280|max=1320|active={sm_show_config_scenario}|>\n|>\n\n<|part|render={sm_product_param == 'product_RPone'}|\nInitial Stock RP1 : *<|{fixed_variables.Initial_Stock_RPone}|>*\n<|{fixed_variables.Initial_Stock_RPone}|slider|min=10|max=49|active={sm_show_config_scenario}|>\n\nUnit Cost - RP1 Stock : *<|{fixed_variables.cost_RPone_Stock}|>*\n<|{fixed_variables.cost_RPone_Stock}|slider|min=10|max=49|active={sm_show_config_scenario}|>\n\nUnit Cost - RP1 Purchase : *<|{fixed_variables.cost_RPone_Purchase}|>*\n<|{fixed_variables.cost_RPone_Purchase}|slider|min=35|max=165|active={sm_show_config_scenario}|>\n\nInitial Purchase RP1 : *<|{fixed_variables.Initial_Purchase_RPone}|>*\n<|{fixed_variables.Initial_Purchase_RPone}|slider|min=12|max=57|active={sm_show_config_scenario}|>\n|>\n\n<|part|render={sm_product_param == 'product_RPtwo'}|\nInitial Stock RP2 : *<|{fixed_variables.Initial_Stock_RPtwo}|>*\n<|{fixed_variables.Initial_Stock_RPtwo}|slider|min=14|max=66|active={sm_show_config_scenario}|>\n\nUnit Cost - RP2 Stock : *<|{fixed_variables.cost_RPtwo_Stock}|>*\n<|{fixed_variables.cost_RPtwo_Stock}|slider|min=21|max=99|active={sm_show_config_scenario}|>\n\nUnit Cost - RP2 Purchase : 
*<|{fixed_variables.cost_RPtwo_Purchase}|>*\n<|{fixed_variables.cost_RPtwo_Purchase}|slider|min=52|max=247|active={sm_show_config_scenario}|>\n\nInitial Purchase RP2 : *<|{fixed_variables.Initial_Purchase_RPtwo}|>*\n<|{fixed_variables.Initial_Purchase_RPtwo}|slider|min=14|max=66|active={sm_show_config_scenario}|>\n|>\n|>\n\n\n<|Delete|button|on_action=delete_scenario_fct|active={len(scenario_selector)>0 and sm_show_config_scenario}|class_name=fullwidth error mb_half|>\n<|Make Primary|button|on_action=make_primary|active={len(scenario_selector)>0 and not selected_scenario.is_primary and sm_show_config_scenario}|class_name=fullwidth secondary mb_half|>\n<|Re-optimize|button|on_action=submit_scenario|active={len(scenario_selector)>0 and sm_show_config_scenario}|class_name=fullwidth secondary mb_half|>\n<|New scenario|button|on_action=create_new_scenario||active={sm_show_config_scenario}|class_name=fullwidth plain mb_half|>\n|>\n|>\n\n|>\n"} {"text": "# Create app for demo-production-planning scenario_manager_md.py\nfrom pages.shared import update_scenario_selector\n\nfrom taipy.gui import notify, invoke_long_callback, Markdown\nimport taipy as tp\nfrom config.config import scenario_cfg\nimport datetime as dt\nimport pandas as pd\n\nfrom config.config import fixed_variables_default\nfrom taipy.gui import Icon\nimport pandas as pd\n\n# Toggle for setting charts\nsm_choice_chart = [(\"pie\", Icon(\"images/icons/pie_chart.svg\", \"pie\")),\n (\"chart\", Icon(\"images/icons/bar_chart.svg\", \"chart\"))]\nsm_show_pie = sm_choice_chart[1][0]\n\nsm_results = pd.DataFrame({\"Monthly Production FPA\":[],\n \"Monthly Stock FPA\": [],\n \"Monthly BO FPA\": [],\n \"Max Capacity FPA\": [],\n \n \"Monthly Production FPB\": [],\n \"Monthly Stock FPB\": [],\n \"Monthly BO FPB\": [],\n \"Max Capacity FPB\": [],\n \n \"Monthly Stock RP1\":[],\n \"Monthly Stock RP2\":[],\n \n \"Monthly Purchase RP1\":[],\n \"Monthly Purchase RP2\":[],\n \n \"Demand FPA\": [],\n \"Demand FPB\": [],\n \n 'Stock FPA Cost': [],\n 'Stock FPB Cost': [],\n \n 'Stock RP1 Cost': [],\n 'Stock RP2 Cost': [],\n \n 'Purchase RP1 Cost': [],\n 'Purchase RP2 Cost': [],\n \n \"BO FPA Cost\":[],\n \"BO FPB Cost\":[],\n \n \"Total Cost\": [],\n \"index\": []})\n\n\npie_results = pd.DataFrame(\n {\n \"values\": [1] * len(list(sm_results.columns)),\n \"labels\": list(sm_results.columns)\n }, index=list(sm_results.columns)\n )\n\n\nchart = sm_results[['index',\n 'Purchase RP1 Cost',\n 'Stock RP1 Cost',\n 'Stock RP2 Cost',\n 'Purchase RP2 Cost',\n 'Stock FPA Cost',\n 'Stock FPB Cost',\n 'BO FPA Cost',\n 'BO FPB Cost',\n 'Total Cost']]\n\nsm_param_selector = ['Capacity Constraints','Objective Weights','Initial Parameters']\nsm_param_selected = sm_param_selector[0]\n\n\n# Toggle for choosing the sliders\nsm_choice_product_param = [(\"product_RPone\", Icon(\"images/P1.png\", \"product_RPone\")),\n (\"product_RPtwo\", Icon(\"images/P2.png\", \"product_RPtwo\")),\n (\"product_FPA\", Icon(\"images/PA.png\", \"product_FPA\")),\n (\"product_FPB\", Icon(\"images/PB.png\", \"product_FPB\"))]\nsm_product_param = 'Else'\n\n\n# Button for configuring scenario\nsm_show_config_scenario = True\n\n# Choose the graph to display\nsm_graph_selector = [\n 'Costs',\n 'Purchases',\n 'Productions',\n 'Stocks',\n 'Back Order',\n 'Product RP1',\n 'Product RP2',\n 'Product FPA',\n 'Product FPB']\nsm_graph_selected = sm_graph_selector[0]\n\n\nfixed_variables = fixed_variables_default\n\n\ndef make_primary(state):\n tp.set_primary(state.selected_scenario)\n 
update_scenario_selector(state)\n    state.selected_scenario = state.selected_scenario\n\n\ndef delete_scenario_fct(state):\n    if state.selected_scenario.is_primary:\n        notify(\n            state,\n            \"warning\",\n            \"You can't delete the primary scenario of the month\")\n    else:\n        tp.delete(state.selected_scenario.id)\n        update_scenario_selector(state)\n\n        if len(state.scenario_selector) != 0:\n            state.selected_scenario = state.scenario_selector[0]\n\n\ndef create_new_scenario(state):\n    \"\"\"\n    This function is used when the 'create' button is pressed in the scenario_manager_md page.\n    See the scenario_manager_md page for more information. It will configure another scenario,\n    create it and submit it.\n\n    Args:\n        state (_type_): the state object of Taipy\n    \"\"\"\n\n    name = f\"{dt.datetime.now().strftime('%d-%b-%Y')} Nb : {len(state.scenario_selector)}\"\n    scenario = tp.create_scenario(scenario_cfg, name=name)\n    scenario.properties['user'] = state.state_id\n\n    # update the scenario selector\n    update_scenario_selector(state)\n    state.selected_scenario = scenario\n\n    submit_scenario(state)\n\n\n\ndef catch_error_in_submit(state):\n    \"\"\"\n    This function is used to catch the errors that can occur when we submit a scenario. When an\n    error is caught, a notification will appear and variables will be changed to avoid any error.\n    The errors come from the solution of the CPLEX model, where infeasible or unbounded problems\n    can happen if the fixed variables are wrongly set.\n\n    Args:\n        state (_type_): the state object of Taipy\n    \"\"\"\n\n    # if our initial production is higher than our max capacity of production\n    if state.fixed_variables[\"Initial_Production_FPA\"] > state.fixed_variables[\"Max_Capacity_FPA\"]:\n        state.fixed_variables[\"Initial_Production_FPA\"] = state.fixed_variables[\"Max_Capacity_FPA\"]\n        notify(\n            state,\n            \"warning\",\n            \"Value of initial production FPA is greater than max production A\")\n\n    # if our initial production is higher than our max capacity of production\n    if state.fixed_variables[\"Initial_Production_FPB\"] > state.fixed_variables[\"Max_Capacity_FPB\"]:\n        state.fixed_variables[\"Initial_Production_FPB\"] = state.fixed_variables[\"Max_Capacity_FPB\"]\n        notify(\n            state,\n            \"warning\",\n            \"Value of initial production FPB is greater than max production B\")\n\n    # if our initial stock is higher than our max stock of RP1\n    if state.fixed_variables[\"Initial_Stock_RPone\"] > state.fixed_variables[\"Max_Stock_RPone\"]:\n        state.fixed_variables[\"Initial_Stock_RPone\"] = state.fixed_variables[\"Max_Stock_RPone\"]\n        notify(\n            state,\n            \"warning\",\n            \"Value of initial stock RP1 is greater than max stock 1\")\n\n    # if our initial stock is higher than our max stock of RP2\n    if state.fixed_variables[\"Initial_Stock_RPtwo\"] > state.fixed_variables[\"Max_Stock_RPtwo\"]:\n        state.fixed_variables[\"Initial_Stock_RPtwo\"] = state.fixed_variables[\"Max_Stock_RPtwo\"]\n        notify(\n            state,\n            \"warning\",\n            \"Value of initial stock RP2 is greater than max stock 2\")\n\n    # if our initial productions are higher than our max combined capacity of\n    # production\n    if state.fixed_variables[\"Initial_Production_FPA\"] + \\\n        state.fixed_variables[\"Initial_Production_FPB\"] > state.fixed_variables[\"Max_Capacity_of_FPA_and_FPB\"]:\n        \n        state.fixed_variables[\"Initial_Production_FPA\"] = int(state.fixed_variables[\"Max_Capacity_of_FPA_and_FPB\"] / 2)\n        state.fixed_variables[\"Initial_Production_FPB\"] = int(state.fixed_variables[\"Max_Capacity_of_FPA_and_FPB\"] / 2)\n        \n        notify(\n            state,\n            \"warning\",\n            \"Value of initial productions is greater than the max capacities\")\n\n\ndef submit_heavy(scenario):\n    tp.submit(scenario)\n\ndef submit_status(state, status):\n    update_variables(state)\n\n\ndef submit_scenario(state):\n    \"\"\"\n    This function will submit the selected scenario. It is used when the 'submit' button is pressed\n    or when we create a new scenario. It checks if there are any errors, then changes the parameters of the\n    problem and submits the scenario. At the end, we update all the variables that need to be refreshed.\n\n    Args:\n        state (_type_): the state object of Taipy\n    \"\"\"\n\n    # see if there are errors in the parameters that will be given to the\n    # scenario\n    catch_error_in_submit(state)\n\n    # setting the scenario with the right parameters\n    old_fixed_variables = state.selected_scenario.fixed_variables.read()\n    if old_fixed_variables != state.fixed_variables._dict:\n        state.selected_scenario.fixed_variables.write(state.fixed_variables._dict)\n    \n    # running the scenario in a long callback and updating the variables\n    invoke_long_callback(state, submit_heavy, [state.selected_scenario], submit_status)\n\n\ndef update_variables(state):\n    \"\"\"This function is only used in submit_scenario or when the selected_scenario changes. It updates all the useful variables of the page.\n\n    Args:\n        state (_type_): the state object of Taipy\n    \"\"\"\n    # it will set the sliders to the right values when a scenario is changed\n    state.fixed_variables = state.selected_scenario.fixed_variables.read()\n\n\n    # read the result\n    state.sm_results = state.selected_scenario.results.read()\n    state.pie_results = pd.DataFrame(\n        {\n            \"values\": state.sm_results.sum(axis=0),\n            \"labels\": list(state.sm_results.columns)\n        })\n\n    state.sum_costs = state.sm_results['Total Cost'].sum()\n\n    bool_costs_of_stock = [c for c in state.sm_results.columns if 'Cost' in c and\\\n                                                                  'Total' not in c and\\\n                                                                  'Stock' in c]\n    state.sum_costs_of_stock = int(state.sm_results[bool_costs_of_stock].sum(axis=1)\\\n                                                                        .sum(axis=0))\n\n    bool_costs_of_BO = [c for c in state.sm_results.columns if 'Cost' in c and\\\n                                                               'Total' not in c and\\\n                                                               'BO' in c]\n    state.sum_costs_of_BO = int(state.sm_results[bool_costs_of_BO].sum(axis=1)\\\n                                                                  .sum(axis=0))\n\n\nsm_scenario_manager_md = Markdown('pages/scenario_manager/scenario_manager.md')\n"} {"text": "# Create app for demo-movie-recommendation ReadMe.md\n# Demo Movie Recommendation\n\n## Usage\n- [Usage](#usage)\n- [Demo Movie Recommendation](#what-is-demo-movie-recommendation)\n- [Directory Structure](#directory-structure)\n- [License](#license)\n- [Installation](#installation)\n- [Contributing](#contributing)\n- [Code of conduct](#code-of-conduct)\n\n## What is Demo Movie Recommendation\n\nTaipy is a Python library for creating Business Applications. More information on our\n[website](https://www.taipy.io).\n\n[Demo Movie Recommendation](https://github.com/Avaiga/demo-movie-recommendation) is a \nfull application showing how Taipy Core and Taipy Gui can work together to build a simple \nbut powerful application. This demo shows the basics of search and recommendation \nalgorithms. The goal is to be able to search for films and recommend related/similar films. 
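\nAs a rough illustration of that goal, here is a minimal sketch (not code from this repository): it assumes the `find_similar_movies` helper defined later in *algos.py* and a hypothetical `liked_movies_id` list gathered from the user session, and pools the per-film recommendation tables in the spirit of the \"User page\" notes below.\n\n```python\nimport pandas as pd\n\n# Sketch only: combine the similar-film tables of every liked film and rank by mean score.\n# `find_similar_movies` comes from algos.py; `liked_movies_id` is an assumed session list.\ndef recommend_from_likes(liked_movies_id, top_n=10):\n    tables = [find_similar_movies(movie_id) for movie_id in liked_movies_id]\n    if not tables:\n        return pd.DataFrame(columns=[\"score\", \"title\", \"genres\"])\n    pooled = pd.concat(tables)\n    ranked = (pooled.groupby([\"title\", \"genres\"], as_index=False)[\"score\"]\n                    .mean()\n                    .sort_values(\"score\", ascending=False))\n    return ranked.head(top_n)\n```\n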
\nThese recommendations will use the user profile by tracking their session.\n\nGet data [here](https://files.grouplens.org/datasets/movielens/ml-25m.zip).\n\n### Demo Type\n- **Level**: Advanced\n- **Topic**: Taipy-GUI, Taipy-Core\n- **Components/Controls**: \n  - Taipy GUI: input, selector, chart, expandable, table\n  - Taipy Core: datanode, pipeline, scenario\n\n## How to run\n\nThis demo works with Python 3.8 or higher. Install the dependencies from \n*requirements.txt* and run *main.py*.\n\nGet data [here](https://files.grouplens.org/datasets/movielens/ml-25m.zip).\n\n## Introduction\n\nA user has a userID generated when the user opens the app. Two pages are created to search for and recommend films.\n\n### Search page\n\n- Be able to search for films\n- List of films appears after search (selector of movies)\n- Clicking on a movie will display a description of said movie: image, ratings, casting, date, ...\n  - Possibility to use the IMDb API in real time to provide this information\n- Recommendation on searched films (not based on syntax but on association with \n  this film/similar films, e.g. Avengers ~ Batman)\n- Recommendation depending on the liked films of the user\n\n### User page\n\n- Possibility to create Data Nodes for tracking/profiling:\n  - Selected films\n  - Viewed films\n  - Liked films\n- Possibility to recommend a list of films based on selected/viewed/liked films (constraints on genres)\n  - Example: `np.mean([find_similar_movies(movie_id) for movie_id in liked_movies_id])` (see the pooling sketch above)\n- Deduce the user profile (favourite genres, favourite period, ...)\n  - When a film is selected, add it to the set of the user's films (a data node?)\n\n\n## Directory Structure\n\n\n- `src/`: Contains the demo source code.\n  - `src/algos`: Contains the functions to be executed as tasks by Taipy.\n  - `src/config`: Contains the configuration files.\n  - `src/data`: Contains the application data files.\n  - `src/pages`: Contains the page definition files.\n- `CODE_OF_CONDUCT.md`: Code of conduct for members and contributors of _demo-movie-recommendation_.\n- `CONTRIBUTING.md`: Instructions to contribute to _demo-movie-recommendation_.\n- `INSTALLATION.md`: Instructions to install _demo-movie-recommendation_.\n- `LICENSE`: The Apache 2.0 License.\n- `Pipfile`: File used by the Pipenv virtual environment to manage project dependencies.\n- `README.md`: Current file.\n\n## License\nCopyright 2022 Avaiga Private Limited\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n[http://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\n## Installation\n\nWant to install _Demo Movie Recommendation_? Check out our \n[`INSTALLATION.md`](INSTALLATION.md) file.\n\n## Contributing\n\nWant to help build _Demo Movie Recommendation_? Check out our \n[`CONTRIBUTING.md`](CONTRIBUTING.md) file.\n\n## Code of conduct\n\nWant to be part of the _Demo Movie Recommendation_ community? 
Check out our \n[`CODE_OF_CONDUCT.md`](CODE_OF_CONDUCT.md) file.\n"} {"text": "# Create app for demo-movie-recommendation main.py\nfrom taipy.gui import Gui, Markdown, notify\nfrom pages.search import page_search\nfrom pages.user import page_user\n\n\npages = {\"/\":\"<|navbar|>\",\n \"search\":page_search,\n \"user\":page_user}\n\nif __name__ == \"__main__\":\n gui = Gui(pages=pages)\n gui.run(port=5006)"} {"text": "# Create app for demo-movie-recommendation algos.py\nimport pandas as pd\nimport re\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport numpy as np\n\n\ndef clean_title(title):\n title = re.sub(\"[^a-zA-Z0-9 ]\", \"\", title)\n return title\n\ndef search(title):\n title = clean_title(title)\n query_vec = vectorizer.transform([title])\n similarity = cosine_similarity(query_vec, tfidf).flatten()\n indices = np.argpartition(similarity, -5)[-5:]\n results = movies.iloc[indices].iloc[::-1]\n \n return results\n\ndef find_similar_movies(movie_id):\n similar_users = ratings[(ratings[\"movieId\"] == movie_id) & (ratings[\"rating\"] > 4)][\"userId\"].unique()\n similar_user_recs = ratings[(ratings[\"userId\"].isin(similar_users)) & (ratings[\"rating\"] > 4)][\"movieId\"]\n similar_user_recs = similar_user_recs.value_counts() / len(similar_users)\n\n similar_user_recs = similar_user_recs[similar_user_recs > .10]\n all_users = ratings[(ratings[\"movieId\"].isin(similar_user_recs.index)) & (ratings[\"rating\"] > 4)]\n all_user_recs = all_users[\"movieId\"].value_counts() / len(all_users[\"userId\"].unique())\n rec_percentages = pd.concat([similar_user_recs, all_user_recs], axis=1)\n rec_percentages.columns = [\"similar\", \"all\"]\n \n rec_percentages[\"score\"] = rec_percentages[\"similar\"] / rec_percentages[\"all\"]\n rec_percentages = rec_percentages.sort_values(\"score\", ascending=False)\n return rec_percentages.head(10).merge(movies, left_index=True, right_on=\"movieId\")[[\"score\", \"title\", \"genres\"]]\n\n\nvectorizer = TfidfVectorizer(ngram_range=(1,2))\n#movie_id = 89745\n\nratings = pd.read_csv('data/ratings.csv')\n\nmovies = pd.read_csv(\"data/movies.csv\")\nmovies[\"clean_title\"] = movies[\"title\"].apply(clean_title)\ntfidf = vectorizer.fit_transform(movies[\"clean_title\"])\n\n#print(search('Avengers'))\n#start=time.time()\n#print(find_similar_movies(movie_id))\n#print(time.time()-start)\n"} {"text": "# Create app for demo-movie-recommendation user.py\nfrom taipy.gui import Gui, notify, Markdown\n\n\npage_user = Markdown(\"\"\"\n\n\"\"\")"} {"text": "# Create app for demo-movie-recommendation search.py\nfrom taipy.gui import Gui, notify, Markdown\n\nfrom algos.algos import clean_title, search\n\nimport pandas as pd\n\nsearched_film = \"\"\nselected_film = \"\"\nfilm_selector = [('','')]\n\npage_search = Markdown(\"\"\"\n\"\"\")\n\n"} {"text": "# Create app for demo-sales-dashboard main.py\nimport pandas as pd\nfrom taipy.gui import Gui, notify\n\n# ---- READ EXCEL ----\n\ndf = pd.read_excel(\n io=\"data/supermarkt_sales.xlsx\",\n engine=\"openpyxl\",\n sheet_name=\"Sales\",\n skiprows=3,\n usecols=\"B:R\",\n nrows=1000,\n)\n# Add 'Hour' column to dataframe\ndf[\"Hour\"] = pd.to_datetime(df[\"Time\"], format=\"%H:%M:%S\").dt.hour\n\ncity = cities = list(df[\"City\"].unique())\ncustomer_type = types = list(df[\"Customer_type\"].unique())\ngender = genders = list(df[\"Gender\"].unique())\n\nlayout = {\"margin\": {\"l\": 220}}\n\npage = \"\"\"\n<|toggle|theme|>\n\n<|25 
75|layout|gap=30px|\n<|sidebar|\n## Please **filter**{: .color-primary} here:\n\n<|{city}|selector|lov={cities}|multiple|label=Select the City|dropdown|on_change=on_filter|class_name=fullwidth|>\n\n<|{customer_type}|selector|lov={types}|multiple|label=Select the Customer Type|dropdown|on_change=on_filter|class_name=fullwidth|>\n\n<|{gender}|selector|lov={genders}|multiple|label=Select the Gender|dropdown|on_change=on_filter|class_name=fullwidth|>\n|>\n\n\n|total_sales>\n\n <|{\"\u2b50\" * int(round(round(df_selection[\"Rating\"].mean(), 1), 0))}|>\n|average_rating>\n\n\n|average_sale>\n|>\n\n
\n\n<|Sales Table|expandable|not expanded|\n<|{df_selection}|table|page_size=5|>\n|>\n\n<|card p2|\n<|{sales_by_hour}|chart|x=Hour|y=Total|type=bar|title=Sales by Hour|>\n\n<|{sales_by_product_line}|chart|x=Total|y=Product line|type=bar|orientation=h|title=Sales by Product|layout={layout}|>\n|>\n\nGet the Taipy Code [here](https://github.com/Avaiga/demo-sales-dashboard) and the original code [here](https://github.com/Sven-Bo/streamlit-sales-dashboard)\n|main_page>\n|>\n\n\"\"\"\n\n\ndef filter(city, customer_type, gender):\n df_selection = df[\n df[\"City\"].isin(city)\n & df[\"Customer_type\"].isin(customer_type)\n & df[\"Gender\"].isin(gender)\n ]\n\n # SALES BY PRODUCT LINE [BAR CHART]\n sales_by_product_line = (\n df_selection[[\"Product line\", \"Total\"]]\n .groupby(by=[\"Product line\"])\n .sum()[[\"Total\"]]\n .sort_values(by=\"Total\")\n )\n sales_by_product_line[\"Product line\"] = sales_by_product_line.index\n\n # SALES BY HOUR [BAR CHART]\n sales_by_hour = (\n df_selection[[\"Hour\", \"Total\"]].groupby(by=[\"Hour\"]).sum()[[\"Total\"]]\n )\n sales_by_hour[\"Hour\"] = sales_by_hour.index\n return df_selection, sales_by_product_line, sales_by_hour\n\n\ndef on_filter(state):\n if len(state.city) == 0 or len(state.customer_type) == 0 or len(state.gender) == 0:\n notify(state, \"Error\", \"No results found. Check the filters.\")\n return\n \n state.df_selection, state.sales_by_product_line, state.sales_by_hour = filter(\n state.city, state.customer_type, state.gender\n )\n \n\n\nif __name__ == \"__main__\":\n df_selection, sales_by_product_line, sales_by_hour = filter(\n city, customer_type, gender\n )\n Gui(page).run(margin=\"0em\", title=\"Sales Dashboard\")\n"} {"text": "# Create app for demo-sentiment-analysis main.py\n\"\"\" Creates a sentiment analysis App using Taipy\"\"\"\nfrom transformers import AutoTokenizer\nfrom transformers import AutoModelForSequenceClassification\nfrom scipy.special import softmax\n\nimport numpy as np\nimport pandas as pd\nfrom taipy.gui import Gui, notify\n\ntext = \"Original text\"\n\npage = \"\"\"\n# Getting started with **Taipy**{: .color-primary} **GUI**{: .color-primary}\n\n<|layout|columns=1 1|\n<|\n**My text:** <|{text}|>\n\n**Enter a word:**\n<|{text}|input|>\n<|Analyze|button|on_action=local_callback|>\n|>\n\n\n<|Table|expandable|\n<|{dataframe}|table|width=100%|number_format=%.2f|>\n|>\n|>\n\n<|layout|columns=1 1 1|\n## Positive <|{np.mean(dataframe['Score Pos'])}|text|format=%.2f|raw|>\n\n## Neutral <|{np.mean(dataframe['Score Neu'])}|text|format=%.2f|raw|>\n\n## Negative <|{np.mean(dataframe['Score Neg'])}|text|format=%.2f|raw|>\n|>\n\n<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>\n\"\"\"\n\nMODEL = \"sbcBI/sentiment_analysis_model\"\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModelForSequenceClassification.from_pretrained(MODEL)\n\ndataframe = pd.DataFrame(\n {\n \"Text\": [\"\"],\n \"Score Pos\": [0.33],\n \"Score Neu\": [0.33],\n \"Score Neg\": [0.33],\n \"Overall\": [0],\n }\n)\n\ndataframe2 = dataframe.copy()\n\n\ndef analyze_text(input_text: str) -> dict:\n \"\"\"\n Runs the sentiment analysis model on the text\n\n Args:\n - text (str): text to be analyzed\n\n Returns:\n - dict: dictionary with the scores\n \"\"\"\n encoded_text = tokenizer(input_text, return_tensors=\"pt\")\n output = model(**encoded_text)\n scores = output[0][0].detach().numpy()\n scores = softmax(scores)\n\n return {\n \"Text\": 
input_text[:50],\n        \"Score Pos\": scores[2],\n        \"Score Neu\": scores[1],\n        \"Score Neg\": scores[0],\n        \"Overall\": scores[2] - scores[0],\n    }\n\n\ndef local_callback(state) -> None:\n    \"\"\"\n    Analyzes the text and updates the dataframe\n\n    Args:\n        - state: state of the Taipy App\n    \"\"\"\n    notify(state, \"Info\", f\"The text is: {state.text}\", True)\n    temp = state.dataframe.copy()\n    scores = analyze_text(state.text)\n    # DataFrame.append was removed in recent pandas; build the new row with concat instead\n    state.dataframe = pd.concat([temp, pd.DataFrame([scores])], ignore_index=True)\n    state.text = \"\"\n\n\npath = \"\"\ntreatment = 0\n\npage_file = \"\"\"\n<|{path}|file_selector|extensions=.txt|label=Upload .txt file|on_action=analyze_file|> <|{f'Downloading {treatment}%...'}|>\n\n
\n\n<|Table|expandable|\n<|{dataframe2}|table|width=100%|number_format=%.2f|>\n|>\n\n
\n\n<|{dataframe2}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|height=600px|>\n\n\"\"\"\n\n\ndef analyze_file(state) -> None:\n    \"\"\"\n    Analyzes the lines in a text file\n\n    Args:\n        - state: state of the Taipy App\n    \"\"\"\n    state.dataframe2 = dataframe2\n    state.treatment = 0\n    with open(state.path, \"r\", encoding=\"utf-8\") as f:\n        data = f.read()\n        print(data)\n\n    file_list = list(data.split(\"\\n\"))\n\n    for i, input_text in enumerate(file_list):\n        state.treatment = int((i + 1) * 100 / len(file_list))\n        temp = state.dataframe2.copy()\n        scores = analyze_text(input_text)\n        print(scores)\n        # DataFrame.append was removed in recent pandas; build the new row with concat instead\n        state.dataframe2 = pd.concat([temp, pd.DataFrame([scores])], ignore_index=True)\n\n    state.path = None\n\n\npages = {\n    \"/\": \"<|toggle|theme|>\\n
\\n<|navbar|>\\n
\",\n \"line\": page,\n \"text\": page_file,\n}\n\n\nGui(pages=pages).run(title=\"Sentiment Analysis\")\n"} {"text": "# Create app for demo-template test_main.py\n"} {"text": "# Create app for demo-template test_config.py\n# Please insert your unit tests of config code here."} {"text": "# Create app for demo-template test_algo1.py\n# Please insert your unit tests of algos code here.\n#\n# Here is an example:\n#\n#\n# from .algos.algo1 import algo1_first_function\n#\n# def test_algo1():\n# assert algo1_first_function() == 10"} {"text": "# Create app for demo-template main.py\n# Please insert your main code here.\n"} {"text": "# Create app for demo-template config.py\nfrom taipy import Config\n\n# Please insert your configuration here.\n#\n# Here is an example:\n#\n# conf_path = Path('my', 'config', 'path', 'taipy-config.toml')\n# Config.load(str(conf_path))\n#\n# first_data_node_config = Config.configure_data_node(...)\n# second_data_node_config = Config.configure_data_node(...)\n#\n# task_config = Config.configure_task(...)\n#\n# scenario_config = Config.configure_scenario(...)\n"} {"text": "# Create app for demo-template algo2.py\n# Please insert your algo as python functions to be translated into taipy tasks/pipelines here.\n#\n# Here is an example:\n#\n# def algo2_(params):\n# print(\"this is my algo 2 function\")\n# #\n"} {"text": "# Create app for demo-template algo1.py\n# Please insert your python functions to be translated into taipy tasks here.\n#\n# Here is an example:\n#\n# def algo1_first_function(params):\n# print(\"this is my first function\")\n# if __sub_function(params):\n# return 10\n# else:\n# return 20\n#\n#\n# def __sub_function(params):\n# print(\"this is an internal sub_function\")\n# return True\n#\n#\n# def algo1_second_function(other_param: int):\n# print(\"this is my second function\")\n# return other_param + 1\n"} {"text": "# Create app for demo-template page1.py\n# Please insert your code dedicated to pages here.\n#\n"} {"text": "# Create app for demo-image-classification demo-image_classifcation-taipy-cloud.py\nimport tensorflow as tf\nfrom tensorflow.keras import layers, models \nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator \nfrom tensorflow.keras.utils import to_categorical \nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport numpy as np\nfrom PIL import Image \n\nclass_names = ['AIRPLANE', 'AUTOMOBILE', 'BIRD', 'CAT', 'DEER', 'DOG', 'FROG', 'HORSE', 'SHIP', 'TRUCK']\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n\nx_train = x_train / 255.0\ny_train = to_categorical(y_train, len(class_names))\n\nx_test = x_test / 255.0\ny_test = to_categorical(y_test, len(class_names))\n\n#########################################################################################################\ndef create_model():\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))\n model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))\n model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(128, activation='relu'))\n 
model.add(layers.Dense(10, activation='softmax'))\n\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n return model\n\ndf = pd.read_csv(\"saved_models/df.csv\")\ndf[\"N_Epochs\"] = range(1,len(df)+1)\n\n\n\n#STATE VARIABLES \n\nmodel = None\n\n# Parameters for models & training\nepochs = 1\ninput_model_name = \"model\" \n\n# Parameters for trained model\ntrained_model_path = \"\"\n\n# Parameters for CIFAR dataset\ncifar_image_index = 10\ncifar_image_path = \"images/sample/taipy.jpg\"\ncifar_predicted_label = 'NA'\ncifar_true_label = 'NA'\n\n# Parameters for online image\nonline_image_url = \"URL\"\nonline_image_path = \"images/sample/airplane.jpg\" \nonline_image_count = 0\nonline_image_predicted_label = 'NA' # predicted label for the online image\n\n\n\n#P1\nfrom taipy import Gui\nfrom taipy.gui import invoke_long_callback, notify\nimport urllib\n\np1 = \"\"\"\n

# Image Classification CNN

\n\n<|layout|columns=1 3|\n<|\n## PARAMETERS\nEnter the chosen optimal number of epochs: \n<|{epochs}|input|> \n\n\nRegister model name: \n<|{input_model_name}|input|>\n\nTrain the model with the Training + Validation sets: \n<|START TRAINING|button|on_action=train_button|> \n\n### Upload Trained Model\n<|{trained_model_path}|file_selector|label=Upload trained model|on_action=load_trained_model|extensions=.h5|>\n|>\n\n<|\n

### Val_loss and Accuracy

\n<|{df}|chart|x=N_Epochs|y[1]=accuracy|y[2]=val_accuracy|>\n|>\n|>\n___\n\"\"\"\n\ndef merged_train(model,number_of_epochs,name):\n # merge the training and validation sets\n #x_all = np.concatenate((x_train, x_test))\n #y_all = np.concatenate((y_train, y_test))\n\n # train with the merged dataset\n #history = model.fit(\n # datagen.flow(x_all, y_all, batch_size=64),\n # epochs=number_of_epochs)\n\n #model.save(\"saved_models/{}.h5\".format(name),save_format='h5')\n print(\"TRAINING & SAVING COMPLETED!\")\n\ndef train_button(state):\n notify(state, \"info\", \"Started training model with {} epochs\".format(state.epochs), True, 1000)\n #model = create_model()\n invoke_long_callback(state,merged_train,[model, int(state.epochs), state.input_model_name])\n\ndef load_trained_model(state):\n loaded_model = tf.keras.models.load_model(state.trained_model_path)\n state.model = loaded_model\n\n\n#Second half of the applications\np2 = \"\"\" \n<|layout|columns=1 3|\n<|\n### CIFAR10 Images Prediction\nEnter CIFAR10 image index: |\n\n<|{cifar_image_index}|input|> \n<|PREDICT CIFAR IMAGE|button|on_action=predict_cifar_image|>\n\n<|{cifar_image_path}|image|height=100px|width=100px|>\n\n##Predicted label: <|{cifar_predicted_label}|> \n##True label: <|{cifar_true_label}|>\n\n|>\n\n<|\n###Paste an online image link here for prediction: \n\n<|{online_image_url}|input|on_action=load_online_image|> \n\n
<|{online_image_path}|image|height=300px|width=300px|>
\n\n<|PREDICT ONLINE IMAGE|button|on_action=predict_online_image|>\n\n## Predicted label: <|{online_image_predicted_label }|>\n|>\n|>\n\"\"\"\n\ndef predict_cifar_image(state):\n #Retrieve the cifar image at the specified index and save as PIL Image obj\n cifar_img_idx = int(state.cifar_image_index )\n cifar_img_data = x_test[cifar_img_idx]\n cifar_img = Image.fromarray(np.uint8(cifar_img_data*255))\n cifar_img.save(\"images/cifar10_saved/{}.jpg\".format(cifar_img_idx))\n\n #Predict the label of the CIFAR image\n img_for_pred = np.expand_dims(x_test[cifar_img_idx], axis=0)\n cifar_img_pred_label = np.argmax(state.model.predict(img_for_pred))\n cifar_img_true_label = y_test[cifar_img_idx].argmax() \n \n #Update the GUI\n state.cifar_image_path = \"images/cifar10_saved/{}.jpg\".format(cifar_img_idx)\n state.cifar_predicted_label = str(class_names[cifar_img_pred_label])\n state.cifar_true_label = str(class_names[cifar_img_true_label])\n\ndef load_online_image(state):\n urllib.request.urlretrieve(state.online_image_url, \"images/online_image.jpg\")\n state.online_image_path = \"images/online_image.jpg\"\n\ndef predict_online_image(state):\n #Retrieve & save online image in order to show on the image box\n urllib.request.urlretrieve(state.online_image_url , \"images/saved_images/{}.jpg\".format(state.online_image_count))\n state.online_image_path = \"images/saved_images/{}.jpg\".format(state.online_image_count)\n\n #Predict the label of the online image\n img_array = tf.keras.utils.load_img(state.online_image_path, target_size=(32, 32))\n image = tf.keras.utils.img_to_array(img_array) # (height, width, channels)\n image = np.expand_dims(image, axis=0) / 255. # (1, height, width, channels) + normalize\n\n #Update the GUI\n state.online_image_predicted_label = class_names[np.argmax(state.model.predict(image))]\n state.online_image_count += 1\n\nGui(page=p1+p2).run(dark_mode=False)\n"} {"text": "# Create app for demo-image-classification main.py\nimport tensorflow as tf\nfrom tensorflow.keras import layers, models \nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator \nfrom tensorflow.keras.utils import to_categorical \nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport numpy as np\nfrom PIL import Image \n\nclass_names = ['AIRPLANE', 'AUTOMOBILE', 'BIRD', 'CAT', 'DEER', 'DOG', 'FROG', 'HORSE', 'SHIP', 'TRUCK']\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n\nx_train = x_train / 255.0\ny_train = to_categorical(y_train, len(class_names))\n\nx_test = x_test / 255.0\ny_test = to_categorical(y_test, len(class_names))\n\n#########################################################################################################\ndef create_model():\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))\n model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))\n model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(128, activation='relu'))\n model.add(layers.Dense(10, activation='softmax'))\n\n model.compile(optimizer='adam',\n 
loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n return model\n\ndf = pd.read_csv(\"saved_models/df.csv\")\ndf[\"N_Epochs\"] = range(1,len(df)+1)\n\n\n\n#STATE VARIABLES \n\nmodel = None\n\n# Parameters for models & training\nepochs = 1\ninput_model_name = \"model\" \n\n# Parameters for trained model\ntrained_model_path = \"\"\n\n# Parameters for CIFAR dataset\ncifar_image_index = 10\ncifar_image_path = \"images/sample/taipy.jpg\"\ncifar_predicted_label = 'NA'\ncifar_true_label = 'NA'\n\n# Parameters for online image\nonline_image_url = \"URL\"\nonline_image_path = \"images/sample/airplane.jpg\" \nonline_image_count = 0\nonline_image_predicted_label = 'NA' # predicted label for the online image\n\n\n\n#P1\nfrom taipy import Gui\nfrom taipy.gui import invoke_long_callback, notify\nimport urllib\n\np1 = \"\"\"\n

# Image Classification CNN

\n\n<|layout|columns=1 3|\n<|\n## PARAMETERS\nEnter the chosen optimal number of epochs: \n<|{epochs}|input|> \n\n\nRegister model name: \n<|{input_model_name}|input|>\n\nTrain the model with the Training + Validation sets: \n<|START TRAINING|button|on_action=train_button|> \n\n### Upload Trained Model\n<|{trained_model_path}|file_selector|label=Upload trained model|on_action=load_trained_model|extensions=.h5|>\n|>\n\n<|\n

### Val_loss and Accuracy

\n<|{df}|chart|x=N_Epochs|y[1]=accuracy|y[2]=val_accuracy|>\n|>\n|>\n___\n\"\"\"\n\ndef merged_train(model,number_of_epochs,name):\n # merge the training and validation sets\n #x_all = np.concatenate((x_train, x_test))\n #y_all = np.concatenate((y_train, y_test))\n\n # train with the merged dataset\n #history = model.fit(\n # datagen.flow(x_all, y_all, batch_size=64),\n # epochs=number_of_epochs)\n\n #model.save(\"saved_models/{}.h5\".format(name),save_format='h5')\n print(\"TRAINING & SAVING COMPLETED!\")\n\ndef train_button(state):\n notify(state, \"info\", \"Started training model with {} epochs\".format(state.epochs), True, 1000)\n #model = create_model()\n invoke_long_callback(state,merged_train,[model, int(state.epochs), state.input_model_name])\n\ndef load_trained_model(state):\n loaded_model = tf.keras.models.load_model(state.trained_model_path)\n state.model = loaded_model\n\n\n#Second half of the applications\np2 = \"\"\" \n<|layout|columns=1 3|\n<|\n### CIFAR10 Images Prediction\nEnter CIFAR10 image index: |\n\n<|{cifar_image_index}|input|> \n<|PREDICT CIFAR IMAGE|button|on_action=predict_cifar_image|>\n\n<|{cifar_image_path}|image|height=100px|width=100px|>\n\n##Predicted label: <|{cifar_predicted_label}|> \n##True label: <|{cifar_true_label}|>\n\n|>\n\n<|\n###Paste an online image link here for prediction: \n\n<|{online_image_url}|input|on_action=load_online_image|> \n\n
<|{online_image_path}|image|height=300px|width=300px|>
\n\n<|PREDICT ONLINE IMAGE|button|on_action=predict_online_image|>\n\n## Predicted label: <|{online_image_predicted_label }|>\n|>\n|>\n\"\"\"\n\ndef predict_cifar_image(state):\n #Retrieve the cifar image at the specified index and save as PIL Image obj\n cifar_img_idx = int(state.cifar_image_index )\n cifar_img_data = x_test[cifar_img_idx]\n cifar_img = Image.fromarray(np.uint8(cifar_img_data*255))\n cifar_img.save(\"images/cifar10_saved/{}.jpg\".format(cifar_img_idx))\n\n #Predict the label of the CIFAR image\n img_for_pred = np.expand_dims(x_test[cifar_img_idx], axis=0)\n cifar_img_pred_label = np.argmax(state.model.predict(img_for_pred))\n cifar_img_true_label = y_test[cifar_img_idx].argmax() \n \n #Update the GUI\n state.cifar_image_path = \"images/cifar10_saved/{}.jpg\".format(cifar_img_idx)\n state.cifar_predicted_label = str(class_names[cifar_img_pred_label])\n state.cifar_true_label = str(class_names[cifar_img_true_label])\n\ndef load_online_image(state):\n urllib.request.urlretrieve(state.online_image_url, \"images/online_image.jpg\")\n state.online_image_path = \"images/online_image.jpg\"\n\ndef predict_online_image(state):\n #Retrieve & save online image in order to show on the image box\n urllib.request.urlretrieve(state.online_image_url , \"images/saved_images/{}.jpg\".format(state.online_image_count))\n state.online_image_path = \"images/saved_images/{}.jpg\".format(state.online_image_count)\n\n #Predict the label of the online image\n img_array = tf.keras.utils.load_img(state.online_image_path, target_size=(32, 32))\n image = tf.keras.utils.img_to_array(img_array) # (height, width, channels)\n image = np.expand_dims(image, axis=0) / 255. # (1, height, width, channels) + normalize\n\n #Update the GUI\n state.online_image_predicted_label = class_names[np.argmax(state.model.predict(image))]\n state.online_image_count += 1\n\nGui(page=p1+p2).run(dark_mode=False)\n"} {"text": "# Create app for demo-drift-detection main.py\nimport taipy as tp\nfrom taipy.gui import Gui\nimport pandas as pd\n\nfrom configuration.config import scenario_cfg\nfrom pages import *\nfrom pages.Drift.Drift import merge_data\n\n\nif __name__ == \"__main__\":\n ref_data = pd.read_csv(\"data/data_ref.csv\")\n\n tp.Core().run()\n scenario = tp.create_scenario(scenario_cfg)\n\n ref_selected = \"data_ref\"\n compare_selected = \"data_noisy\"\n\n ref_data = pd.read_csv(\"data/\" + ref_selected + \".csv\")\n scenario.reference_data.write(ref_data)\n\n compare_data = pd.read_csv(\"data/\" + compare_selected + \".csv\")\n scenario.compare_data.write(compare_data)\n\n bp_data, sex_data = merge_data(ref_data, compare_data)\n\n gui = Gui(page=Drift)\n gui.run(title=\"Drift Detection\")\n"} {"text": "# Create app for demo-drift-detection config.py\n\"\"\"\nContain the application's configuration including the scenario configurations.\n\nThe configuration is run by the Core service.\n\"\"\"\n\nfrom algorithms.algorithms import *\n\nfrom taipy import Config\n\nreference_data_cfg = Config.configure_data_node(\"reference_data\", \"csv\")\ncompare_data_cfg = Config.configure_data_node(\"compare_data\", \"csv\")\nnum_cols_cfg = Config.configure_data_node(\"num_cols\")\ncat_cols_cfg = Config.configure_data_node(\"cat_cols\")\nnum_results_cfg = Config.configure_data_node(\"num_results\")\ncat_results_cfg = Config.configure_data_node(\"cat_results\")\ndrift_results_cfg = Config.configure_data_node(\"drift_results\")\n\ndetect_numerical_cfg = Config.configure_task(\n id=\"detect_numerical\",\n function=detect_numerical,\n 
input=[reference_data_cfg],\n output=num_cols_cfg,\n)\ndetect_categorical_cfg = Config.configure_task(\n id=\"detect_categorical\",\n function=detect_categorical,\n input=[reference_data_cfg],\n output=cat_cols_cfg,\n)\nkolmogorov_cfg = Config.configure_task(\n id=\"kolmogorov\",\n function=kolmogorov,\n input=[compare_data_cfg, reference_data_cfg, num_cols_cfg],\n output=num_results_cfg,\n)\nchi_squared_cfg = Config.configure_task(\n id=\"chi_squared\",\n function=chi_squared,\n input=[compare_data_cfg, reference_data_cfg, cat_cols_cfg],\n output=cat_results_cfg,\n)\ncollect_results_cfg = Config.configure_task(\n id=\"collect_results\",\n function=collect_results,\n input=[num_results_cfg, cat_results_cfg],\n output=drift_results_cfg,\n)\n\nscenario_cfg = Config.configure_scenario(\n id=\"drift_detection\",\n task_configs=[\n detect_numerical_cfg,\n detect_categorical_cfg,\n kolmogorov_cfg,\n chi_squared_cfg,\n collect_results_cfg,\n ],\n)\n"} {"text": "# Create app for demo-drift-detection __init__.py\nfrom .config import *\n"} {"text": "# Create app for demo-drift-detection algorithms.py\n\"\"\"\nThis file is designed to contain the various Python functions used to configure tasks.\n\nThe functions will be imported by the __init__.py file in this folder.\n\"\"\"\n\nimport pandas as pd\nimport scipy.stats as stats\n\n\ndef detect_categorical(dataset: pd.DataFrame) -> list:\n \"\"\"\n Detect the names of categorical columns in a dataframe.\n\n Args:\n dataset: The dataframe to detect categorical columns from.\n\n Returns:\n A list of categorical column names.\n \"\"\"\n categorical = []\n for col in dataset.columns:\n if dataset[col].dtype == \"object\":\n categorical.append(col)\n return categorical\n\n\ndef detect_numerical(dataset: pd.DataFrame) -> list:\n \"\"\"\n Detect the names of numerical columns in a dataframe.\n\n Args:\n dataset: The dataframe to detect numerical columns from.\n\n Returns:\n A list of numerical column names.\n \"\"\"\n numerical = []\n for col in dataset.columns:\n if dataset[col].dtype != \"object\":\n numerical.append(col)\n return numerical\n\n\ndef ks_2samp(series_1: pd.Series, series_2: pd.Series) -> float:\n \"\"\"\n Runs the two-sample Kolmogorov-Smirnov test on two series.\n\n Args:\n series_1: The first series.\n series_2: The second series.\n\n Returns:\n The p-value of the test.\n \"\"\"\n analysis = stats.ks_2samp(series_1, series_2)\n return int(analysis[1] * 100) / 100\n\n\ndef kolmogorov(\n dataset: pd.DataFrame, ref_dataset: pd.DataFrame, num_cols: list\n) -> dict:\n \"\"\"\n Runs the two-sample Kolmogorov-Smirnov test on all numerical columns in a dataframe.\n\n Args:\n dataset: The dataframe to run the test on.\n ref_dataset: The reference dataframe to compare against.\n num_cols: The list of numerical column names.\n\n Returns:\n A dictionary of test statistics.\n \"\"\"\n ks_dict = {}\n for col in num_cols:\n ks_dict[col] = ks_2samp(dataset[col], ref_dataset[col])\n return ks_dict\n\n\ndef chi_squared_2samp(series_1: pd.Series, series_2: pd.Series) -> float:\n \"\"\"\n Runs the two-sample chi-squared test on two series.\n\n Args:\n series_1: The first series.\n series_2: The second series.\n\n Returns:\n The p-value of the test.\n \"\"\"\n # Get the unique values\n series_1_unique = series_1.unique()\n series_2_unique = series_2.unique()\n # Get the frequencies\n series_1_freq = series_1.value_counts()\n series_2_freq = series_2.value_counts()\n # Get the expected frequencies\n series_1_exp_freq = []\n series_2_exp_freq = []\n for i, _ in 
enumerate(series_1_unique):\n series_1_exp_freq.append(\n series_1_freq[series_1_unique[i]] * len(series_2) / len(series_1)\n )\n for i, _ in enumerate(series_2_unique):\n series_2_exp_freq.append(\n series_2_freq[series_2_unique[i]] * len(series_1) / len(series_2)\n )\n analysis = stats.chisquare(series_1_exp_freq, series_2_exp_freq)\n return int(analysis[1] * 100) / 100\n\n\ndef chi_squared(\n dataset: pd.DataFrame, ref_dataset: pd.DataFrame, cat_cols: list\n) -> dict:\n \"\"\"\n Runs the chi-squared test on all categorical columns in a dataframe.\n\n Args:\n dataset: The dataframe to run the test on.\n ref_dataset: The reference dataframe to compare against.\n cat_cols: The list of categorical column names.\n\n Returns:\n A dictionary of test statistics.\n \"\"\"\n chi_dict = {}\n for col in cat_cols:\n chi_dict[col] = chi_squared_2samp(dataset[col], ref_dataset[col])\n return chi_dict\n\n\ndef collect_results(num_results: dict, cat_results: dict) -> pd.DataFrame:\n \"\"\"\n Collects the results of the two tests into a single dictionary.\n\n Args:\n num_results: The dictionary of numerical test results.\n cat_results: The dictionary of categorical test results.\n\n Returns:\n A dataframe of the results.\n \"\"\"\n columns = []\n tests = []\n values = []\n detected = []\n for col in cat_results:\n columns.append(col)\n tests.append(\"Chi-Squared\")\n values.append(cat_results[col])\n if cat_results[col] < 0.05:\n detected.append(True)\n else:\n detected.append(False)\n for col in num_results:\n columns.append(col)\n tests.append(\"Kolmogorov-Smirnov\")\n values.append(num_results[col])\n if num_results[col] < 0.05:\n detected.append(True)\n else:\n detected.append(False)\n results = pd.DataFrame(\n {\"Column\": columns, \"Test\": tests, \"p-value\": values, \"Drift\": detected}\n )\n return results\n\n\ndef merge_data(ref_data: pd.DataFrame, compare_data: pd.DataFrame):\n \"\"\"\n Merges the reference and comparison data into a single dataframe.\n The Dataframe is prepared for plotting.\n\n Args:\n ref_data: The reference data.\n compare_data: The comparison data.\n\n Returns:\n plot_data: The dataset for other columns.\n sex_data: The dataset for sex distribution.\n \"\"\"\n bp_data = [\n {\"Blood Pressure\": list(ref_data[\"blood_pressure\"])},\n {\"Blood Pressure\": list(compare_data[\"blood_pressure\"])},\n ]\n # Count the Male and Female rows in ref and compare\n male_ref = ref_data[ref_data[\"sex\"] == \"Male\"].shape[0]\n male_compare = compare_data[compare_data[\"sex\"] == \"Male\"].shape[0]\n female_ref = ref_data[ref_data[\"sex\"] == \"Female\"].shape[0]\n female_compare = compare_data[compare_data[\"sex\"] == \"Female\"].shape[0]\n sex_data = pd.DataFrame(\n {\n \"Dataset\": [\"Ref\", \"Compare\"],\n \"Male\": [male_ref, male_compare],\n \"Female\": [female_ref, female_compare],\n }\n )\n return bp_data, sex_data\n"} {"text": "# Create app for demo-drift-detection __init__.py\nfrom algorithms import *\n"} {"text": "# Create app for demo-drift-detection root.md\n
\n<|navbar|>\n
\n"} {"text": "# Create app for demo-drift-detection __init__.py\nfrom .root import root_page\nfrom .Drift.Drift import Drift\n"} {"text": "# Create app for demo-drift-detection root.py\n\"\"\"\nThe rootpage of the application.\nPage content is imported from the root.md file.\n\nPlease refer to https://docs.taipy.io/en/latest/manuals/gui/pages for more details.\n\"\"\"\n\nfrom taipy.gui import Markdown\n\nroot_page = Markdown(\"pages/root.md\")\n"} {"text": "# Create app for demo-drift-detection Drift.py\n\"\"\"\nA page of the application.\nPage content is imported from the Drift.md file.\n\nPlease refer to https://docs.taipy.io/en/latest/manuals/gui/pages for more details.\n\"\"\"\n\nimport taipy as tp\nfrom taipy.gui import Markdown\nimport pandas as pd\nfrom taipy.gui import notify\n\nfrom configuration.config import scenario_cfg\n\nDrift = Markdown(\"pages/Drift/Drift.md\")\n\n\ndef merge_data(ref_data: pd.DataFrame, compare_data: pd.DataFrame):\n \"\"\"\n Merges the reference and comparison data into a single dataframe.\n The Dataframe is prepared for plotting.\n\n Args:\n ref_data: The reference data.\n compare_data: The comparison data.\n\n Returns:\n plot_data: The dataset for other columns.\n sex_data: The dataset for sex distribution.\n \"\"\"\n bp_data = [\n {\"Blood Pressure\": list(ref_data[\"blood_pressure\"])},\n {\"Blood Pressure\": list(compare_data[\"blood_pressure\"])},\n ]\n # Count the Male and Female rows in ref and compare\n male_ref = ref_data[ref_data[\"sex\"] == \"Male\"].shape[0]\n male_compare = compare_data[compare_data[\"sex\"] == \"Male\"].shape[0]\n female_ref = ref_data[ref_data[\"sex\"] == \"Female\"].shape[0]\n female_compare = compare_data[compare_data[\"sex\"] == \"Female\"].shape[0]\n sex_data = pd.DataFrame(\n {\n \"Dataset\": [\"Ref\", \"Compare\"],\n \"Male\": [male_ref, male_compare],\n \"Female\": [female_ref, female_compare],\n }\n )\n return bp_data, sex_data\n\n\ndef on_ref_change(state):\n state.ref_data = pd.read_csv(\"data/\" + state.ref_selected + \".csv\")\n state.scenario.reference_data.write(state.ref_data)\n state.bp_data, state.sex_data = merge_data(state.ref_data, state.compare_data)\n\n\ndef on_compare_change(state):\n state.compare_data = pd.read_csv(\"data/\" + state.compare_selected + \".csv\")\n state.scenario.compare_data.write(state.compare_data)\n state.bp_data, state.sex_data = merge_data(state.ref_data, state.compare_data)\n\n\nbp_options = [\n # First data set displayed as green-ish, and 5 bins\n {\n \"marker\": {\"color\": \"#4A4\", \"opacity\": 0.8},\n \"nbinsx\": 10,\n },\n # Second data set displayed as red-ish, and 25 bins\n {\n \"marker\": {\"color\": \"#A33\", \"opacity\": 0.8, \"text\": \"Compare Data\"},\n \"nbinsx\": 10,\n },\n]\n\nbp_layout = {\n # Overlay the two histograms\n \"barmode\": \"overlay\",\n \"title\": \"Blood Pressure Distribution (Green = Reference, Red = Compare)\",\n \"showlegend\": False,\n}\n\n\ndef on_submission_status_change(state, submittable, details):\n submission_status = details.get(\"submission_status\")\n\n if submission_status == \"COMPLETED\":\n notify(state, \"success\", \"Drift Detection Completed\")\n state.refresh(\"scenario\")\n"} {"text": "# Create app for demo-drift-detection Drift.md\n<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Reference Data
\n<|{ref_selected}|selector|lov=data_ref;data_noisy;data_female;data_big|dropdown|on_change=on_ref_change|>\n|>\n\n<|part|class_name=card|\n### Select Comparison Data
\n<|{compare_selected}|selector|lov=data_ref;data_noisy;data_female;data_big|dropdown|on_change=on_compare_change|>\n|>\n\n\n|>\n\n<|Reference Dataset and Compare Dataset|expandable|expanded=True|\n<|layout|columns=1 1|\n<|{ref_data}|table|page_size=5|>\n\n<|{compare_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{sex_data}|chart|type=bar|x=Dataset|y[1]=Male|y[2]=Female|title=Sex Distribution|>\n|>\n\n<|part|class_name=card|\n<|{bp_data}|chart|type=histogram|options={bp_options}|layout={bp_layout}|>\n|>\n|>\n\n
\n### Run the scenario:\n<|{scenario}|scenario|on_submission_change=on_submission_status_change|expandable=False|expanded=False|>\n\n<|{scenario}|scenario_dag|>\n\n
\n### View the results:\n<|{scenario.drift_results if scenario else None}|data_node|>"} {"text": "# Create app for demo-covid-dashboard main.py\nfrom taipy.gui import Gui\nimport taipy as tp\n\nfrom pages.country.country import country_md\nfrom pages.world.world import world_md\nfrom pages.map.map import map_md\nfrom pages.predictions.predictions import predictions_md, selected_scenario\nfrom pages.root import root, selected_country, selector_country\n\nfrom config.config import Config\n\npages = {\n '/':root,\n \"Country\":country_md,\n \"World\":world_md,\n \"Map\":map_md,\n \"Predictions\":predictions_md\n}\n\n\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n gui_multi_pages.run(title=\"Covid Dashboard\")\n"} {"text": "# Create app for demo-covid-dashboard config.py\nfrom taipy.config import Config, Scope\nimport datetime as dt\n\nfrom algos.algos import add_features, create_train_data, preprocess,\\\n train_arima, train_linear_regression,\\\n forecast, forecast_linear_regression,\\\n concat\n\n#Config.configure_job_executions(mode=\"standalone\", nb_of_workers=2)\n\npath_to_data = \"data/covid-19-all.csv\"\n\ninitial_data_cfg = Config.configure_data_node(id=\"initial_data\",\n storage_type=\"csv\",\n path=path_to_data,\n cacheable=True,\n validity_period=dt.timedelta(days=5),\n scope=Scope.GLOBAL)\n\ncountry_cfg = Config.configure_data_node(id=\"country\", default_data=\"France\", \n validity_period=dt.timedelta(days=5))\n\n\ndate_cfg = Config.configure_data_node(id=\"date\", default_data=dt.datetime(2020,10,1),\n validity_period=dt.timedelta(days=5))\n\nfinal_data_cfg = Config.configure_data_node(id=\"final_data\",\n validity_period=dt.timedelta(days=5))\n\ntrain_data_cfg = Config.configure_data_node(id=\"train_data\", \n validity_period=dt.timedelta(days=5))\n\nmodel_arima_cfg = Config.configure_data_node(id=\"model_arima\", validity_period=dt.timedelta(days=5))\nmodel_linear_regression_cfg = Config.configure_data_node(id=\"model_linear_regression\", validity_period=dt.timedelta(days=5))\n\npredictions_arima_cfg = Config.configure_data_node(id=\"predictions_arima\")\npredictions_linear_regression_cfg = Config.configure_data_node(id=\"predictions_linear_regression\")\n\nresult_cfg = Config.configure_data_node(id=\"result\")\n\n\ntask_preprocess_cfg = Config.configure_task(id=\"task_preprocess_data\",\n function=preprocess,\n input=[initial_data_cfg, country_cfg, date_cfg],\n output=[final_data_cfg,train_data_cfg])\n\n\ntask_train_arima_cfg = Config.configure_task(id=\"task_train\",\n function=train_arima,\n input=train_data_cfg,\n output=model_arima_cfg) \n\ntask_forecast_arima_cfg = Config.configure_task(id=\"task_forecast\",\n function=forecast,\n input=model_arima_cfg,\n output=predictions_arima_cfg)\n\n\ntask_train_linear_regression_cfg = Config.configure_task(id=\"task_train_linear_regression\",\n function=train_linear_regression,\n input=train_data_cfg,\n output=model_linear_regression_cfg)\n\ntask_forecast_linear_regression_cfg = Config.configure_task(id=\"task_forecast_linear_regression\",\n function=forecast_linear_regression,\n input=[model_linear_regression_cfg, date_cfg],\n output=predictions_linear_regression_cfg)\n\ntask_result_cfg = Config.configure_task(id=\"task_result\",\n function=concat,\n input=[final_data_cfg, predictions_arima_cfg, predictions_linear_regression_cfg, date_cfg],\n output=result_cfg)\n\n\nscenario_cfg = Config.configure_scenario(id='scenario', task_configs=[task_preprocess_cfg,\n task_train_arima_cfg,\n 
task_forecast_arima_cfg,\n task_train_linear_regression_cfg,\n task_forecast_linear_regression_cfg,\n task_result_cfg])\n\nConfig.export('config/config.toml')"} {"text": "# Create app for demo-covid-dashboard algos.py\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nimport datetime as dt\nimport numpy as np\nfrom pmdarima import auto_arima\n\n\n\ndef add_features(data):\n dates = pd.to_datetime(data[\"Date\"])\n data[\"Months\"] = dates.dt.month\n data[\"Days\"] = dates.dt.isocalendar().day\n data[\"Week\"] = dates.dt.isocalendar().week\n data[\"Day of week\"] = dates.dt.dayofweek\n return data\n\ndef create_train_data(final_data, date:dt.datetime):\n date = date.date() if type(date) == dt.datetime else date\n bool_index = pd.to_datetime(final_data['Date']).dt.date <= date\n train_data = final_data[bool_index]\n return train_data\n\ndef preprocess(initial_data, country, date):\n data = initial_data.groupby([\"Country/Region\",'Date'])\\\n .sum()\\\n .dropna()\\\n .reset_index()\n\n final_data = data.loc[data['Country/Region']==country].reset_index(drop=True)\n final_data = final_data[['Date','Deaths']]\n final_data = add_features(final_data)\n \n train_data = create_train_data(final_data, date)\n return final_data, train_data\n\n\ndef train_arima(train_data):\n model = auto_arima(train_data['Deaths'],\n start_p=1, start_q=1,\n max_p=5, max_q=5,\n start_P=0, seasonal=False,\n d=1, D=1, trace=True,\n error_action='ignore', \n suppress_warnings=True)\n model.fit(train_data['Deaths'])\n return model\n\n\ndef forecast(model):\n predictions = model.predict(n_periods=60)\n return np.array(predictions)\n\n\ndef concat(final_data, predictions_arima, predictions_linear_regression, date):\n def _convert_predictions(final_data, predictions, date, label='Predictions'):\n dates = pd.to_datetime([date + dt.timedelta(days=i)\n for i in range(len(predictions))])\n final_data['Date'] = pd.to_datetime(final_data['Date'])\n final_data = final_data[['Date','Deaths']]\n predictions = pd.concat([pd.Series(dates, name=\"Date\"),\n pd.Series(predictions, name=label)], axis=1)\n return final_data.merge(predictions, on=\"Date\", how=\"outer\")\n\n\n result_arima = _convert_predictions(final_data, predictions_arima, date, label='ARIMA')\n result_linear_regression = _convert_predictions(final_data, predictions_linear_regression, date, label='Linear Regression')\n\n return result_arima.merge(result_linear_regression, on=[\"Date\", 'Deaths'], how=\"outer\").sort_values(by='Date')\n\n\ndef train_linear_regression(train_data): \n y = train_data['Deaths']\n X = train_data.drop(['Deaths','Date'], axis=1)\n \n model = LinearRegression()\n model.fit(X,y)\n return model\n\ndef forecast_linear_regression(model, date):\n dates = pd.to_datetime([date + dt.timedelta(days=i)\n for i in range(60)])\n X = add_features(pd.DataFrame({\"Date\":dates}))\n X.drop('Date', axis=1, inplace=True)\n predictions = model.predict(X)\n return predictions"} {"text": "# Create app for demo-covid-dashboard data.py\nimport pandas as pd\n\npath_to_data = \"data/covid-19-all.csv\"\ndata = pd.read_csv(path_to_data, low_memory=False)"} {"text": "# Create app for demo-covid-dashboard root.md\n<|toggle|theme|>\n\n
\n<|navbar|>\n
\n"} {"text": "# Create app for demo-covid-dashboard root.py\nfrom taipy.gui import Markdown \n\nimport numpy as np\n\nfrom data.data import data\n\nselector_country = list(np.sort(data['Country/Region'].astype(str).unique()))\nselected_country = 'France'\n\nroot = Markdown(\"pages/root.md\")"} {"text": "# Create app for demo-covid-dashboard world.py\nfrom taipy.gui import Markdown\nimport numpy as np\n\nimport json\n\nfrom data.data import data\n\n\ntype_selector = ['Absolute', 'Relative']\nselected_type = type_selector[0]\n\n\ndef initialize_world(data):\n data_world = data.groupby([\"Country/Region\",\n 'Date'])\\\n .sum()\\\n .reset_index()\n\n with open(\"data/pop.json\",\"r\") as f: \n pop = json.load(f)\n \n data_world['Population'] = data_world['Country/Region'].map(lambda x: pop.get(x, [None, 0])[1])\n\n data_world = data_world.dropna()\\\n .reset_index()\n data_world['Deaths/100k'] = data_world.loc[:,'Deaths']/data_world.loc[:,'Population']*100000\n \n data_world_pie_absolute = data_world[['Country/Region', 'Deaths', 'Recovered', 'Confirmed']].groupby([\"Country/Region\"])\\\n .max()\\\n .sort_values(by='Deaths', ascending=False)[:20]\\\n .reset_index()\n \n data_world_pie_relative = data_world[['Country/Region', 'Deaths/100k']].groupby([\"Country/Region\"])\\\n .max()\\\n .sort_values(by='Deaths/100k', ascending=False)[:20]\\\n .reset_index()\n \n country_absolute = data_world_pie_absolute['Country/Region'].unique().tolist()\n country_relative = data_world_pie_relative.loc[:,'Country/Region'].unique().tolist()\n \n \n data_world_evolution_absolute = data_world[data_world['Country/Region'].str.contains('|'.join(country_absolute),regex=True)]\n data_world_evolution_absolute = data_world_evolution_absolute.pivot(index='Date', columns='Country/Region', values='Deaths')\\\n .reset_index()\n \n data_world_evolution_relative = data_world[data_world['Country/Region'].str.contains('|'.join(country_relative),regex=True)]\n data_world_evolution_relative = data_world_evolution_relative.pivot(index='Date', columns='Country/Region', values='Deaths/100k')\\\n .reset_index()\n return data_world, data_world_pie_absolute, data_world_pie_relative, data_world_evolution_absolute, data_world_evolution_relative\n\n\n\ndata_world,\\\ndata_world_pie_absolute, data_world_pie_relative,\\\ndata_world_evolution_absolute, data_world_evolution_relative = initialize_world(data)\n\n\n\n\ndata_world_evolution_absolute_properties = {\"x\":\"Date\"}\ncols = [col for col in data_world_evolution_absolute.columns if col != \"Date\"]\nfor i in range(len(cols)):\n data_world_evolution_absolute_properties[f'y[{i}]'] = cols[i]\n\n\ndata_world_evolution_relative_properties = {\"x\":\"Date\"}\ncols = [col for col in data_world_evolution_relative.columns if col != \"Date\"]\nfor i in range(len(cols)):\n data_world_evolution_relative_properties[f'y[{i}]'] = cols[i]\n \n \nworld_md = Markdown(\"pages/world/world.md\")"} {"text": "# Create app for demo-covid-dashboard world.md\n# **World**{: .color-primary} Statistics\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Deaths**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_pie_absolute['Deaths']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Recovered**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_pie_absolute['Recovered']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Confirmed**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_pie_absolute['Confirmed']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_type}|toggle|lov={type_selector}|>\n\n<|part|render={selected_type=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_pie_absolute}|chart|type=pie|labels=Country/Region|values=Deaths|title=Distribution around the World|>\n\n<|{data_world_evolution_absolute}|chart|properties={data_world_evolution_absolute_properties}|title=Evolution around the World|>\n|>\n|>\n\n<|part|render={selected_type=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_pie_relative}|chart|type=pie|labels=Country/Region|values=Deaths/100k|>\n\n<|{data_world_evolution_relative}|chart|properties={data_world_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "# Create app for demo-covid-dashboard map.md\n# **Map**{: .color-primary} Statistics\n\n<|{data_province_displayed}|chart|type=scattermapbox|lat=Latitude|lon=Longitude|marker={marker_map}|layout={layout_map}|text=Text|mode=markers|height=800px|options={options}|>\n"} {"text": "# Create app for demo-covid-dashboard map.py\n\nimport numpy as np\nfrom taipy.gui import Markdown\n\nfrom data.data import data\n\nmarker_map = {\"color\":\"Deaths\", \"size\": \"Size\", \"showscale\":True, \"colorscale\":\"Viridis\"}\nlayout_map = {\n \"dragmode\": \"zoom\",\n \"mapbox\": { \"style\": \"open-street-map\", \"center\": { \"lat\": 38, \"lon\": -90 }, \"zoom\": 3}\n }\noptions = {\"unselected\":{\"marker\":{\"opacity\":0.5}}}\n\ndef initialize_map(data):\n data['Province/State'] = data['Province/State'].fillna(data[\"Country/Region\"])\n data_province = data.groupby([\"Country/Region\",\n 'Province/State',\n 'Longitude',\n 'Latitude'])\\\n .max()\n \n\n data_province_displayed = data_province[data_province['Deaths']>10].reset_index()\n\n data_province_displayed['Size'] = np.sqrt(data_province_displayed.loc[:,'Deaths']/data_province_displayed.loc[:,'Deaths'].max())*80 + 3\n data_province_displayed['Text'] = data_province_displayed.loc[:,'Deaths'].astype(str) + ' deaths
' + data_province_displayed.loc[:,'Province/State']\n return data_province_displayed\n\n\ndata_province_displayed = initialize_map(data)\n\nmap_md = Markdown(\"pages/map/map.md\")\n"} {"text": "# Create app for demo-covid-dashboard country.md\n# **Country**{: .color-primary} Statistics\n\n<|layout|columns=1 1 1|\n<|{selected_country}|selector|lov={selector_country}|on_change=on_change_country|dropdown|label=Country|>\n\n<|{selected_representation}|toggle|lov={representation_selector}|on_change=convert_density|>\n|>\n\n
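<!-- on_change_country (country.py) rebuilds data_country_date and pie_chart for the newly selected country; convert_density switches the displayed series between cumulative totals and day-to-day differences. -->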
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Deaths**{: .color-primary}\n<|{'{:,}'.format(int(data_country_date.iloc[-1]['Deaths'])).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Recovered**{: .color-primary}\n<|{'{:,}'.format(int(data_country_date.iloc[-1]['Recovered'])).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Confirmed**{: .color-primary}\n<|{'{:,}'.format(int(data_country_date.iloc[-1]['Confirmed'])).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
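<!-- Each card reads the last row of data_country_date, i.e. the most recent figures for the selected country. -->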
\n\n<|layout|columns=2 1|\n<|{data_country_date}|chart|type=bar|x=Date|y[3]=Deaths|y[2]=Recovered|y[1]=Confirmed|layout={layout}|options={options}|title=Covid Evolution|>\n\n<|{pie_chart}|chart|type=pie|values=values|labels=labels|title=Distribution between cases|>\n|>"} {"text": "# Create app for demo-covid-dashboard country.py\nimport numpy as np\nimport pandas as pd\n\nfrom taipy.gui import Markdown\n\nfrom data.data import data\n\nselected_country = 'France'\ndata_country_date = None\n\nrepresentation_selector = ['Cumulative', 'Density']\nselected_representation = representation_selector[0]\n\nlayout = {'barmode':'stack', \"hovermode\":\"x\"}\noptions = {\"unselected\":{\"marker\":{\"opacity\":0.5}}}\n\n\ndef initialize_case_evolution(data, selected_country='France'):\n # Aggregation of the dataframe to erase the regions that will not be used here\n data_country_date = data.groupby([\"Country/Region\",'Date'])\\\n .sum()\\\n .reset_index()\n \n # a country is selected, here France by default\n data_country_date = data_country_date.loc[data_country_date['Country/Region']==selected_country]\n return data_country_date\n\ndata_country_date = initialize_case_evolution(data)\npie_chart = pd.DataFrame({\"labels\": [\"Deaths\", \"Recovered\", \"Confirmed\"],\"values\": [data_country_date.iloc[-1, 6], data_country_date.iloc[-1, 5], data_country_date.iloc[-1, 4]]})\n\n\n\ndef convert_density(state):\n if state.selected_representation == 'Density':\n df_temp = state.data_country_date.copy()\n df_temp['Deaths'] = df_temp['Deaths'].diff().fillna(0)\n df_temp['Recovered'] = df_temp['Recovered'].diff().fillna(0)\n df_temp['Confirmed'] = df_temp['Confirmed'].diff().fillna(0)\n state.data_country_date = df_temp\n else:\n state.data_country_date = initialize_case_evolution(data, state.selected_country)\n\ndef on_change_country(state):\n # state contains all the Gui variables and this is through this state variable that we can update the Gui\n # state.selected_country, state.data_country_date, ...\n # update data_country_date with the right country (use initialize_case_evolution)\n print(\"Chosen country: \", state.selected_country)\n state.data_country_date = initialize_case_evolution(data, state.selected_country)\n state.pie_chart = pd.DataFrame({\"labels\": [\"Deaths\", \"Recovered\", \"Confirmed\"],\n \"values\": [state.data_country_date.iloc[-1, 6], state.data_country_date.iloc[-1, 5], state.data_country_date.iloc[-1, 4]]})\n \n convert_density(state)\n\n\ncountry_md = Markdown(\"pages/country/country.md\")\n"} {"text": "# Create app for demo-covid-dashboard predictions.py\nfrom taipy.gui import Markdown, notify\nimport datetime as dt\n\n\nselected_data_node = None\nselected_scenario = None\nselected_date = None\ndefault_result = {\"Date\": [dt.datetime(2020,10,1)], \"Deaths\": [0], \"ARIMA\": [0], \"Linear Regression\": [0]}\n\n\ndef on_submission_change(state, submitable, details):\n if details['submission_status'] == 'COMPLETED':\n state.refresh('selected_scenario')\n notify(state, \"success\", \"Predictions ready!\")\n print(\"Predictions ready!\")\n elif details['submission_status'] == 'FAILED':\n notify(state, \"error\", \"Submission failed!\")\n print(\"Submission failed!\")\n else:\n notify(state, \"info\", \"In progress...\")\n print(\"In progress...\")\n\n\ndef on_change_params(state):\n if state.selected_date.year < 2020 or state.selected_date.year > 2021:\n notify(state, \"error\", \"Invalid date! 
Must be between 2020 and 2021\")\n state.selected_date = dt.datetime(2020,10,1)\n return\n \n state.selected_scenario.date.write(state.selected_date.replace(tzinfo=None))\n state.selected_scenario.country.write(state.selected_country)\n notify(state, \"success\", \"Scenario parameters changed!\")\n\n state['Country'].on_change_country(state)\n\n\ndef on_change(state, var_name, var_value):\n if var_name == 'selected_scenario' and var_value:\n state.selected_date = state.selected_scenario.date.read()\n state.selected_country = state.selected_scenario.country.read()\n\n\npredictions_md = Markdown(\"pages/predictions/predictions.md\")"} {"text": "# Create app for demo-covid-dashboard predictions.md\n \n \n<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|country>\n|>\n\n<|{selected_scenario}|scenario|on_submission_change=on_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Predictions**{: .color-primary} and explorer of data nodes\n\n<|{selected_scenario.result.read() if selected_scenario and selected_scenario.result.read() is not None else default_result}|chart|x=Date|y[1]=Deaths|y[2]=Linear Regression|y[3]=ARIMA|type[1]=bar|title=Predictions|>\n\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|scenario>\n|>\n"} {"text": "# Create app for demo-yearly-prediction main.py\nfrom config.config import configure\nfrom pages import scenario_page\nfrom pages.root import root, selected_scenario, selected_data_node, content\n\nimport taipy as tp\nfrom taipy import Core, Gui, Config\n\n\ndef on_init(state):\n ...\n\n\ndef on_change(state, var, val):\n if var == \"selected_scenario\" and val:\n state.selected_scenario = val # BUG\n state.selected_data_node = None\n if var == \"selected_data_node\" and val:\n state.selected_data_node = val # BUG\n state[\"scenario\"].manage_data_node_partial(state)\n\n\npages = {\n \"/\": root,\n \"scenario\": scenario_page,\n}\n\n\nif __name__ == \"__main__\":\n # Instantiate, configure and run the Core\n scenario_cfg = configure()\n tp.Core().run()\n scenario = tp.create_scenario(scenario_cfg)\n tp.submit(scenario)\n print(scenario.prediction.read())\n\n # Instantiate, configure and run the GUI\n gui = Gui(pages=pages)\n data_node_partial = gui.add_partial(\"\")\n gui.run(title=\"Yearly Sales Prediction\")\n"} {"text": "# Create app for demo-yearly-prediction config.py\nfrom taipy import Config\nfrom taipy.config import Frequency, Scope\nfrom algos import clean_data, filter_data, predict\n\n\ndef configure():\n historical_data_cfg = Config.configure_data_node(\n \"historical_data\",\n storage_type=\"csv\",\n default_path=\"historical_data.csv\",\n scope=Scope.GLOBAL,\n )\n model_cfg = Config.configure_data_node(\n \"model\", default_data=\"linear\", scope=Scope.SCENARIO\n )\n prediction_year_cfg = Config.configure_data_node(\n \"prediction_year\", default_data=\"2016\", scope=Scope.CYCLE\n )\n last_two_years_cfg = Config.configure_data_node(\"last_two_years\", scope=Scope.CYCLE)\n prediction_cfg = Config.configure_data_node(\"prediction\", scope=Scope.SCENARIO)\n cleaned_data_cfg = Config.configure_data_node(\"cleaned_data\", scope=Scope.GLOBAL)\n\n clean_data_cfg = Config.configure_task(\n id=\"clean_data\",\n function=clean_data,\n input=[historical_data_cfg],\n output=[cleaned_data_cfg],\n )\n filter_data_cfg = Config.configure_task(\n id=\"filter_data\",\n function=filter_data,\n input=[cleaned_data_cfg, prediction_year_cfg],\n 
output=[last_two_years_cfg],\n )\n predict_cfg = Config.configure_task(\n id=\"predict\",\n function=predict,\n input=[last_two_years_cfg, model_cfg, historical_data_cfg, prediction_year_cfg],\n output=[prediction_cfg],\n )\n\n scenario_cfg = Config.configure_scenario(\n id=\"prediction_scenario\",\n task_configs=[clean_data_cfg, filter_data_cfg, predict_cfg],\n frequency=Frequency.YEARLY,\n )\n\n return scenario_cfg\n"} {"text": "# Create app for demo-yearly-prediction __init__.py\n"} {"text": "# Create app for demo-yearly-prediction algos.py\nimport pandas as pd\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LinearRegression\n\n\ndef clean_data(historical_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Transforms sales data into total sales per month\n\n Args:\n historical_data: historical sales dataframe (date, store, item, sales)\n\n Returns:\n monthly sales dataframe (date, sales)\n \"\"\"\n historical_data[\"date\"] = pd.to_datetime(historical_data[\"date\"])\n historical_data[\"date\"] = (\n historical_data[\"date\"].dt.year.astype(\"str\")\n + \"-\"\n + historical_data[\"date\"].dt.month.astype(\"str\")\n + \"-01\"\n )\n historical_data[\"date\"] = pd.to_datetime(historical_data[\"date\"])\n historical_data = historical_data.groupby(\"date\").sales.sum().reset_index()\n return historical_data\n\n\ndef filter_data(cleaned_data: pd.DataFrame, prediction_year: str) -> pd.DataFrame:\n \"\"\"\n Filters data to include only data from the two years before the prediction year\n\n Args:\n cleaned_data: monthly sales dataframe (date, sales)\n prediction_year: year to predict\n\n Returns:\n filtered dataframe (date, sales)\n \"\"\"\n start_date = str(int(prediction_year) - 2) + \"-01-01\"\n end_date = str(int(prediction_year) - 1) + \"-12-01\"\n filtered_data = cleaned_data[\n (cleaned_data[\"date\"] >= start_date) & (cleaned_data[\"date\"] <= end_date)\n ]\n return filtered_data\n\n\ndef predict(\n last_two_years: pd.DataFrame,\n model: str,\n historical_data: pd.DataFrame,\n prediction_year: str,\n) -> pd.DataFrame:\n \"\"\"\n Predicts sales for the prediction year according to the model\n\n Args:\n last_two_years: filtered dataframe (date, sales)\n model: model to use for prediction (linear, arima)\n\n Returns:\n predicted sales for the prediction year (date, sales)\n \"\"\"\n predicted_data = pd.DataFrame()\n\n if model == \"linear\":\n X = last_two_years.index.values.reshape(-1, 1)\n y = last_two_years[\"sales\"].values.reshape(-1, 1)\n model = LinearRegression()\n model.fit(X, y)\n predicted_data[\"date\"] = pd.date_range(\n start=f\"{prediction_year}-01-01\", end=f\"{prediction_year}-12-01\", freq=\"MS\"\n )\n predicted_data[\"sales\"] = model.predict(\n pd.DataFrame(\n {\"date\": predicted_data[\"date\"].astype(\"str\")}\n ).index.values.reshape(-1, 1)\n )\n elif model == \"arima\":\n train_data = last_two_years.copy()\n train_data.set_index(\"date\", inplace=True)\n model = sm.tsa.statespace.SARIMAX(\n last_two_years[\"sales\"], order=(1, 1, 1), seasonal_order=(1, 1, 1, 12)\n )\n results = model.fit()\n predicted_data = results.predict(12)\n predicted_data = predicted_data[1:]\n predicted_data = pd.DataFrame(\n {\n \"date\": pd.date_range(\n start=f\"{prediction_year}-02-01\",\n end=f\"{prediction_year}-12-01\",\n freq=\"MS\",\n ),\n \"sales\": predicted_data.values,\n }\n )\n else:\n raise ValueError(\"Model not supported\")\n\n # Combine last_two_years and predicted_data with columns: date, actual, predicted\n combined_data = pd.DataFrame()\n 
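    # Note: on the first merge below, combined_data has no 'sales' column yet, so the actuals keep the name 'sales'; the second merge then adds the model output as 'sales_predicted' (NaN outside the prediction year).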
combined_data[\"date\"] = pd.date_range(\n start=f\"{int(prediction_year)-2}-01-01\",\n end=f\"{prediction_year}-12-01\",\n freq=\"MS\",\n )\n combined_data = combined_data.merge(\n last_two_years, how=\"left\", on=\"date\", suffixes=(\"\", \"_actual\")\n )\n combined_data = combined_data.merge(\n predicted_data, how=\"left\", on=\"date\", suffixes=(\"\", \"_predicted\")\n )\n return combined_data\n"} {"text": "# Create app for demo-yearly-prediction __init__.py\nfrom .algos import clean_data, filter_data, predict\n"} {"text": "# Create app for demo-yearly-prediction root.md\n<|layout|columns=1 5|\n\n<|sidebar|\n\n<|{selected_scenario}|scenario_selector|>\n\n<|part|render={selected_scenario}|\n<|{selected_data_node}|data_node_selector|not display_cycles|>\n|>\n|>\n\n<|part|class_name=main|render={selected_scenario}|\n<|content|>\n|>\n\n|>\n"} {"text": "# Create app for demo-yearly-prediction __init__.py\nfrom .scenario_page import scenario_page\n"} {"text": "# Create app for demo-yearly-prediction root.py\nfrom taipy.gui import Markdown\n\nselected_scenario = None\nselected_data_node = None\ncontent = \"\"\n\nroot = Markdown(\"pages/root.md\")\n"} {"text": "# Create app for demo-yearly-prediction scenario_page.py\nfrom taipy.gui import Markdown\n\nfrom .data_node_management import manage_partial\n\n\ndef manage_data_node_partial(state):\n manage_partial(state)\n\nscenario_page = Markdown(\"pages/scenario_page/scenario_page.md\")\n"} {"text": "# Create app for demo-yearly-prediction __init__.py\nfrom .scenario_page import scenario_page\n"} {"text": "# Create app for demo-yearly-prediction data_node_management.py\n# build partial content for a specific data node\ndef build_dn_partial(dn, dn_label):\n partial_content = \"<|part|render={selected_scenario}|\\n\\n\"\n\n # ##################################################################################################################\n # PLACEHOLDER: data node specific content before automatic content #\n # #\n # Example: #\n if dn_label == \"replacement_type\":\n partial_content += \"All missing values will be replaced by the data node value.\"\n # Comment, remove or replace the previous lines with your own use case #\n # ##################################################################################################################\n\n # Automatic data node content\n partial_content += \"<|{selected_scenario.data_nodes['\" + dn.config_id + \"']}|data_node|scenario={selected_scenario}|>\\n\\n\"\n\n # ##################################################################################################################\n # PLACEHOLDER: data node specific content after automatic content #\n # #\n # Example: #\n if dn_label == \"initial_dataset\":\n partial_content += \"Select your CSV file: <|{selected_data_node.path}|file_selector|extensions=.csv|>\\n\\n\"\n # Comment, remove or replace the previous lines with your own use case #\n # ##################################################################################################################\n\n partial_content += \"|>\\n\\n\"\n return partial_content\n\n\ndef manage_partial(state):\n dn = state.selected_data_node\n dn_label = dn.get_simple_label()\n partial_content = build_dn_partial(dn, dn_label)\n state.data_node_partial.update_content(state, partial_content)\n"} {"text": "# Create app for demo-yearly-prediction scenario_page.md\n<|layout|columns=1 1|\n\n<|part|render={selected_scenario}|\n\n<|{selected_scenario}|scenario|not 
expandable|expanded|>\n\n<|{selected_scenario}|scenario_dag|>\n|>\n\n<|part|partial={data_node_partial}|render={selected_data_node}|>\n\n|>\n"} {"text": "# Create app for demo-image-classification-part-1 demo-image_classifcation-taipy-cloud.py\nimport tensorflow as tf\nfrom tensorflow.keras import layers, models \nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator \nfrom tensorflow.keras.utils import to_categorical \nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport numpy as np\nfrom PIL import Image \n\nclass_names = ['AIRPLANE', 'AUTOMOBILE', 'BIRD', 'CAT', 'DEER', 'DOG', 'FROG', 'HORSE', 'SHIP', 'TRUCK']\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n\nx_train = x_train / 255.0\ny_train = to_categorical(y_train, len(class_names))\n\nx_test = x_test / 255.0\ny_test = to_categorical(y_test, len(class_names))\n\n#########################################################################################################\ndef create_model():\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))\n model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))\n model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))\n model.add(layers.MaxPool2D((2,2)))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(128, activation='relu'))\n model.add(layers.Dense(10, activation='softmax'))\n\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n return model\n\ndf = pd.read_csv(\"saved_models/df.csv\")\ndf[\"N_Epochs\"] = range(1,len(df)+1)\n\n\n\n#STATE VARIABLES \n\nmodel = None\n\n# Parameters for models & training\nepochs = 1\ninput_model_name = \"model\" \n\n# Parameters for trained model\ntrained_model_path = \"\"\n\n# Parameters for CIFAR dataset\ncifar_image_index = 10\ncifar_image_path = \"images/sample/taipy.jpg\"\ncifar_predicted_label = 'NA'\ncifar_true_label = 'NA'\n\n# Parameters for online image\nonline_image_url = \"URL\"\nonline_image_path = \"images/sample/airplane.jpg\" \nonline_image_count = 0\nonline_image_predicted_label = 'NA' # predicted label for the online image\n\n\n\n#P1\nfrom taipy import Gui\nfrom taipy.gui import invoke_long_callback, notify\nimport urllib\n\np1 = \"\"\"\n

# Image Classification CNN
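<!-- Left column: training parameters, start-training button and trained-model upload; right column: training-history chart. -->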

\n\n<|layout|columns=1 3|\n<|\n## PARAMETERS\nEnter the chosen optimal number of epochs: \n<|{epochs}|input|> \n\n\nRegister model name: \n<|{input_model_name}|input|>\n\nTrain the model with the Training + Validation sets: \n<|START TRAINING|button|on_action=train_button|> \n\n### Upload Trained Model\n<|{trained_model_path}|file_selector|label=Upload trained model|on_action=load_trained_model|extensions=.h5|>\n|>\n\n<|\n

### Val_loss and Accuracy
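<!-- Plots the accuracy and val_accuracy columns of df (read from saved_models/df.csv) against N_Epochs. -->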

\n<|{df}|chart|x=N_Epochs|y[1]=accuracy|y[2]=val_accuracy|>\n|>\n|>\n___\n\"\"\"\n\ndef merged_train(model,number_of_epochs,name):\n # merge the training and validation sets\n #x_all = np.concatenate((x_train, x_test))\n #y_all = np.concatenate((y_train, y_test))\n\n # train with the merged dataset\n #history = model.fit(\n # datagen.flow(x_all, y_all, batch_size=64),\n # epochs=number_of_epochs)\n\n #model.save(\"saved_models/{}.h5\".format(name),save_format='h5')\n print(\"TRAINING & SAVING COMPLETED!\")\n\ndef train_button(state):\n notify(state, \"info\", \"Started training model with {} epochs\".format(state.epochs), True, 1000)\n #model = create_model()\n invoke_long_callback(state,merged_train,[model, int(state.epochs), state.input_model_name])\n\ndef load_trained_model(state):\n loaded_model = tf.keras.models.load_model(state.trained_model_path)\n state.model = loaded_model\n\n\n#Second half of the applications\np2 = \"\"\" \n<|layout|columns=1 3|\n<|\n### CIFAR10 Images Prediction\nEnter CIFAR10 image index: |\n\n<|{cifar_image_index}|input|> \n<|PREDICT CIFAR IMAGE|button|on_action=predict_cifar_image|>\n\n<|{cifar_image_path}|image|height=100px|width=100px|>\n\n##Predicted label: <|{cifar_predicted_label}|> \n##True label: <|{cifar_true_label}|>\n\n|>\n\n<|\n###Paste an online image link here for prediction: \n\n<|{online_image_url}|input|on_action=load_online_image|> \n\n
<|{online_image_path}|image|height=300px|width=300px|>
\n\n<|PREDICT ONLINE IMAGE|button|on_action=predict_online_image|>\n\n## Predicted label: <|{online_image_predicted_label }|>\n|>\n|>\n\"\"\"\n\ndef predict_cifar_image(state):\n #Retrieve the cifar image at the specified index and save as PIL Image obj\n cifar_img_idx = int(state.cifar_image_index )\n cifar_img_data = x_test[cifar_img_idx]\n cifar_img = Image.fromarray(np.uint8(cifar_img_data*255))\n cifar_img.save(\"images/cifar10_saved/{}.jpg\".format(cifar_img_idx))\n\n #Predict the label of the CIFAR image\n img_for_pred = np.expand_dims(x_test[cifar_img_idx], axis=0)\n cifar_img_pred_label = np.argmax(state.model.predict(img_for_pred))\n cifar_img_true_label = y_test[cifar_img_idx].argmax() \n \n #Update the GUI\n state.cifar_image_path = \"images/cifar10_saved/{}.jpg\".format(cifar_img_idx)\n state.cifar_predicted_label = str(class_names[cifar_img_pred_label])\n state.cifar_true_label = str(class_names[cifar_img_true_label])\n\ndef load_online_image(state):\n urllib.request.urlretrieve(state.online_image_url, \"images/online_image.jpg\")\n state.online_image_path = \"images/online_image.jpg\"\n\ndef predict_online_image(state):\n #Retrieve & save online image in order to show on the image box\n urllib.request.urlretrieve(state.online_image_url , \"images/saved_images/{}.jpg\".format(state.online_image_count))\n state.online_image_path = \"images/saved_images/{}.jpg\".format(state.online_image_count)\n\n #Predict the label of the online image\n img_array = tf.keras.utils.load_img(state.online_image_path, target_size=(32, 32))\n image = tf.keras.utils.img_to_array(img_array) # (height, width, channels)\n image = np.expand_dims(image, axis=0) / 255. # (1, height, width, channels) + normalize\n\n #Update the GUI\n state.online_image_predicted_label = class_names[np.argmax(state.model.predict(image))]\n state.online_image_count += 1\n\nGui(page=p1+p2).run(dark_mode=False)\n"}