{"text": "# Display df_selection in an expandable\n<|Sales Table|expandable|expanded=False|"} {"text": "# Create a Generate text button\n<|Generate text|button|on_action=generate_text|label=Generate text|>"} {"text": "# Create a text input for the tweet\n<|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|>"} {"text": "# Break line\n
"} {"text": "# Display image\n
<|{image}|image|height=400px|>
"} {"text": "# Display image from path\n<|{path}|image|width=500px|height=500px|class_name=img|>"} {"text": "# Create a slider to select iterations\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>"} {"text": "# Slider dx_start\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>"} {"text": "# Create a toggle button\n<|{value}|toggle|lov=Item 1;Item 2;Item 3|>"} {"text": "# Create a dropdown to select between A B and C\n<|{value}|selector|lov=A;B;C|dropdown|>"} {"text": "# Create a date selector\n<|{dt}|date|>"} {"text": "# Date selection\n<|{dt}|date|>"} {"text": "# Create a gauge to visualize progress\n<|message|indicator|value={progress}|min=0|max=100|>"} {"text": "# Create and register a page:\nfrom taipy import Gui\nGui(\"# This is my page title\")"} {"text": "# Create a page variable and register it:\nmd = \"# This is my page title\"\nGui(md)"} {"text": "# Create 2 pages and register them:\npages = {\n 'page1': Markdown(\"# My first page\"),\n 'page2': Markdown(\"# My second page\")\n}\nGui(pages=pages)"} {"text": "# Create a multi-page application:\nfrom taipy import Gui\n\n root_md=\"# Multi-page application\"\n page1_md=\"# This is page 1\"\n page2_md=\"# This is page 2\"\n\n pages = {\n \"/\": root_md,\n \"page1\": page1_md,\n \"page2\": page2_md\n }\n Gui(pages=pages).run()"} {"text": "# Create a multi-page application with content placeholder:\n from taipy import Gui\n\n root_md=\"\"\"#\n Multi-page application\n\n\n This application was created with [Taipy](http://taipy.avaiga.com).\n \"\"\"\n page1_md=\"# This is page 1\"\n page2_md=\"# This is page 2\"\n\n pages = {\n \"/\": root_md,\n \"page1\": page1_md,\n \"page2\": page2_md\n }\n Gui(pages=pages).run()"} {"text": "# Create a dialog in Markdown content:\n page=\"\"\"\n <|{dialog_is_visible}|dialog|\n Enter a name:\n <|{name}|input|>\n |>\"\"\"\n Gui(page).run()"} {"text": "# Create a partial page:\n gui = Gui()\n prompt_user = gui.add_partial(\n \"\"\"\n Enter a name:\n <|{name}|input|>\n \"\"\"\n )\n gui.run()"} {"text": "# Display a value:\n<|{value}|>"} {"text": "# Format the value with 2 decimal places:\n<|{value}|text|format=%.2f|>"} {"text": "# Create a button with Button Label:\n<|Button Label|button|>"} {"text": "# Create a Save button:\n<|Button Label|button|>"} {"text": "# Create a Cancel button and button_action_function_name:\n<|Cancel|button|on_action=button_action_function_name|>"} {"text": "# Create a Cancel button with cancel function:\n<|Cancel|button|on_action=cancel|>"} {"text": "# Create input field for name:\n<|{name}|input|>"} {"text": "# Create input field for address:\n<|{address}|input|>"} {"text": "# Create number field for age:\n<|{age}|number|>"} {"text": "# Create a slider for value between 1 and 10:\n<|{value}|slider|min=1|max=10|>"} {"text": "# Create a list of toggle buttons for Item 1, Item 2, Item 3:\n<|{value}|toggle|lov=Item 1;Item 2;Item 3|>"} {"text": "# Create a toggle control that lets you pick a specific user:\n<|{user_sel}|toggle|lov={users}|type=User|adapter={lambda u: (u.id, u.name)}|>"} {"text": "# Create a date selector:\n<|{dt}|date|>"} {"text": "# Create a date selector without time:\n<|{dt}|date|not with_time|>"} {"text": "# Create a date selector with only date:\n<|{dt}|date|not with_time|>"} {"text": "# Create a file download:\n<|{content}|file_download|>"} {"text": "# Create a file download with action:\n<|{content}|file_download|label=Download File|on_action=function_name|name=filename|>"} {"text": "# Create a file download 
with no review:\n<|{content}|file_download|bypass_preview=False|>"} {"text": "# Create an auto download file download:\n<|{content}|file_download|auto|>"} {"text": "# Create a file selector:\n<|{content}|file_selector|>"} {"text": "# Create a file selector with label and action:\n<|{content}|file_selector|label=Download File|on_action=function_name|extensions=.csv,.xlsx|drop_message=Drop Message|>"} {"text": "# Create a multiple file uploader:\n<|{content}|file_selector|multiple|>"} {"text": "# Show an image:\n<|{content}|image|>"} {"text": "# Show an image with label and callback:\n<|{content}|image|label=this is an image|on_action=function_name|>"} {"text": "# Defining a simple static menu:\n<|menu|lov=menu 1;menu 2|>"} {"text": "# Show a table:\n<|{data}|table|>"} {"text": "# Display a list of string:\n<|{value}|tree|lov=Item 1;Item 2;Item 3|>"} {"text": "# Display df_display in an expandable\n<|Display Table|expandable|expanded=False|"} {"text": "# Create a text input for the note\n<|{note}|input|multiline|label=Note|class_name=fullwidth|>"} {"text": "# Insert a line break\n
"} {"text": "# Display plot\n
<|{plot}|image|height=400px|>
"} {"text": "# Display plot from file location\n<|{file_location}|image|width=500px|height=500px|class_name=img|>"} {"text": "# Create a slider to adjust volume\n<|{volume}|slider|min=0|max=100|continuous=False|on_change=adjust_volume|>"} {"text": "# Slider dx_threshold\n<|{dx_threshold}|slider|min=0|max=10|continuous=False|on_change=update_threshold|>"} {"text": "# Create a toggle button\n<|{is_enabled}|toggle|lov=Off;On|>"} {"text": "# Create a dropdown to select between Option X, Option Y, and Option Z\n<|{option}|selector|lov=Option X;Option Y;Option Z|dropdown|>"} {"text": "# Create a date picker\n<|{selected_date}|date|>"} {"text": "# Date picker\n<|{selected_date}|date|>"} {"text": "# Create a Cancel button with cancel_handler function:\n<|Cancel|button|on_action=cancel_handler|>"} {"text": "# Create an input field for username:\n<|{username}|input|>"} {"text": "# Create an input field for location:\n<|{location}|input|>"} {"text": "# Create a numeric field for age:\n<|{age}|number|>"} {"text": "# Create a slider for value between 1 and 10:\n<|{rating}|slider|min=1|max=10|>"} {"text": "# Create a set of toggle buttons for Option 1, Option 2, Option 3:\n<|{choice}|toggle|lov=Option 1;Option 2;Option 3|>"} {"text": "# Create a toggle control to select a specific category:\n<|{category_sel}|toggle|lov={categories}|type=Category|adapter={lambda c: (c.id, c.name)}|>"} {"text": "# Create a date picker:\n<|{event_date}|date|>"} {"text": "# Create a date picker without time:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a date picker with only date:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a file download link:\n<|{document}|file_download|>"} {"text": "# Create a file download link with action:\n<|{document}|file_download|label=Download Document|on_action=download_file|name=file_name|>"} {"text": "# Create a file download link without preview:\n<|{document}|file_download|bypass_preview=False|>"} {"text": "# Create an auto download file link:\n<|{document}|file_download|auto|>"} {"text": "# Create a file selector:\n<|{selected_file}|file_selector|>"} {"text": "# Create a file selector with label and action:\n<|{selected_file}|file_selector|label=Select File|on_action=file_selected|extensions=.csv,.xlsx|drop_message=Drop file here|>"} {"text": "# Create a multiple file uploader:\n<|{selected_files}|file_selector|multiple|>"} {"text": "# Show an illustration:\n<|{illustration}|image|>"} {"text": "# Show an image with description and callback:\n<|{picture}|image|label=This is a picture|on_action=image_clicked|>"} {"text": "# Display a message at a specified position between min and max:\n<|status|indicator|value={percentage}|min=0|max=100|>"} {"text": "# Define a basic static menu:\n<|menu|lov=menu_item1;menu_item2|>"} {"text": "# Display df_results in an expandable\n<|Results Table|expandable|expanded=False|"} {"text": "# Create a Generate summary button\n<|Generate summary|button|on_action=generate_summary|label=Generate summary|>"} {"text": "# Create a text input for the article\n<|{article}|input|multiline|label=Resulting article|class_name=fullwidth|>"} {"text": "# Insert a line break\n
"} {"text": "# Display chart\n
<|{chart}|image|height=400px|>
"} {"text": "# Display chart from file path\n<|{file_path}|image|width=500px|height=500px|class_name=img|>"} {"text": "# Create a slider to adjust zoom level\n<|{zoom_level}|slider|min=1|max=5|continuous=False|on_change=adjust_zoom|>"} {"text": "# Slider dx_threshold\n<|{dx_threshold}|slider|min=0|max=10|continuous=False|on_change=update_threshold|>"} {"text": "# Create a toggle switch\n<|{state}|toggle|lov=Off;On;Auto|>"} {"text": "# Create a dropdown to select between X, Y, and Z\n<|{axis}|selector|lov=X;Y;Z|dropdown|>"} {"text": "# Date picker\n<|{selected_date}|date|>"} {"text": "# Create a gauge to visualize value\n<|status|indicator|value={progress}|min=0|max=100|>"} {"text": "# Define a page variable and initialize it:\nmd_content = \"# My Page Title\"\nGui(md_content)"} {"text": "# Define 2 pages and set them up:\ncontent_pages = {\n 'first_page': Markdown(\"# Introduction\"),\n 'second_page': Markdown(\"# Advanced Topics\")\n}\nGui(pages=content_pages)"} {"text": "# Set up a multi-page application:\nfrom taipy import Gui\n\n root_content=\"# Welcome to the App\"\n page1_content=\"# This is Page 1\"\n page2_content=\"# This is Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Set up a multi-page application with content placeholder:\n from taipy import Gui\n\n root_content=\"\"\"#\n Multi-page application\n\n\n This app was built using Taipy.\n \"\"\"\n page1_content=\"# Page 1\"\n page2_content=\"# Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Create a dialog with Markdown content:\n dialog_content=\"\"\"\n <|{is_visible}|dialog|\n Please enter your name:\n <|{user_name}|input|>\n |>\"\"\"\n Gui(dialog_content).run()"} {"text": "# Set up a partial page:\n gui_instance = Gui()\n user_prompt = gui_instance.add_partial(\n \"\"\"\n Please enter your name:\n <|{user_name}|input|>\n \"\"\"\n )\n gui_instance.run()"} {"text": "# Display a calculated result:\n<|{result}|>"} {"text": "# Format the value with 2 decimal points:\n<|{value}|text|format=%.2f|>"} {"text": "# Create a button with the label 'Click Me':\n<|Click Me|button|>"} {"text": "# Create a Save button:\n<|Save|button|>"} {"text": "# Create a Cancel button with cancel_handler function:\n<|Cancel|button|on_action=cancel_handler|>"} {"text": "# Create an input field for username:\n<|{username}|input|>"} {"text": "# Create an input field for location:\n<|{location}|input|>"} {"text": "# Create a numeric field for age:\n<|{age}|number|>"} {"text": "# Create a slider for value between 1 and 10:\n<|{rating}|slider|min=1|max=10|>"} {"text": "# Create a set of toggle buttons for Option 1, Option 2, Option 3:\n<|{choice}|toggle|lov=Option 1;Option 2;Option 3|>"} {"text": "# Create a toggle control to select a specific category:\n<|{category_sel}|toggle|lov={categories}|type=Category|adapter={lambda c: (c.id, c.name)}|>"} {"text": "# Create a date picker:\n<|{event_date}|date|>"} {"text": "# Create a date picker without time:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a date picker with only date:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a file download link:\n<|{document}|file_download|>"} {"text": "# Create a file download link with action:\n<|{document}|file_download|label=Download Document|on_action=download_file|name=file_name|>"} {"text": "# Create a file download link without 
preview:\n<|{document}|file_download|bypass_preview=False|>"} {"text": "# Create an auto download file link:\n<|{document}|file_download|auto|>"} {"text": "# Create a file selector:\n<|{selected_file}|file_selector|>"} {"text": "# Create a file selector with label and action:\n<|{selected_file}|file_selector|label=Select File|on_action=file_selected|extensions=.csv,.xlsx|drop_message=Drop file here|>"} {"text": "# Show an illustration:\n<|{illustration}|image|>"} {"text": "# Show an image with description and callback:\n<|{picture}|image|label=This is a picture|on_action=image_clicked|>"} {"text": "# Display a message at a specified position between min and max:\n<|status|indicator|value={percentage}|min=0|max=100|>"} {"text": "# Define a basic static menu:\n<|menu|lov=menu_item1;menu_item2|>"} {"text": "# Display df_output in an expandable\n<|Output Table|expandable|expanded=False|"} {"text": "# Create a Visualize button\n<|Visualize|button|on_action=visualize_data|label=Visualize|>"} {"text": "# Create a text input for the description\n<|{description}|input|multiline|label=Description|class_name=fullwidth|>"} {"text": "# Insert a line break\n
"} {"text": "# Display diagram\n
<|{diagram}|image|height=400px|>
"} {"text": "# Display diagram from file path\n<|{diagram_path}|image|width=500px|height=500px|class_name=img|>"} {"text": "# Create a slider to adjust brightness\n<|{brightness}|slider|min=0|max=100|continuous=False|on_change=adjust_brightness|>"} {"text": "# Slider threshold\n<|{threshold}|slider|min=0|max=10|continuous=False|on_change=update_threshold|>"} {"text": "# Create a toggle button\n<|{is_enabled}|toggle|lov=Off;On|>"} {"text": "# Create a dropdown to select between Choice A, Choice B, and Choice C\n<|{choice}|selector|lov=Choice A;Choice B;Choice C|dropdown|>"} {"text": "# Create a date picker\n<|{selected_date}|date|>"} {"text": "# Date picker\n<|{selected_date}|date|>"} {"text": "# Create a gauge to visualize value\n<|status|indicator|value={progress}|min=0|max=100|>"} {"text": "# Create and initialize a page:\nfrom taipy import Gui\nGui(\"# Dashboard\")"} {"text": "# Define a page variable and initialize it:\nmd_content = \"# Dashboard\"\nGui(md_content)"} {"text": "# Define 2 pages and set them up:\ncontent_pages = {\n 'overview': Markdown(\"# Overview\"),\n 'details': Markdown(\"# Details\")\n}\nGui(pages=content_pages)"} {"text": "# Set up a multi-page application:\nfrom taipy import Gui\n\n root_content=\"# Welcome to the App\"\n page1_content=\"# Page 1\"\n page2_content=\"# Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Set up a multi-page application with content placeholder:\n from taipy import Gui\n\n root_content=\"\"\"#\n Multi-page application\n\n\n This app was built using Taipy.\n \"\"\"\n page1_content=\"# Page 1\"\n page2_content=\"# Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Create a dialog with Markdown content:\n dialog_content=\"\"\"\n <|{is_visible}|dialog|\n Please enter your name:\n <|{user_name}|input|>\n |>\"\"\"\n Gui(dialog_content).run()"} {"text": "# Set up a partial page:\n gui_instance = Gui()\n user_prompt = gui_instance.add_partial(\n \"\"\"\n Please enter your name:\n <|{user_name}|input|>\n \"\"\"\n )\n gui_instance.run()"} {"text": "# Display a calculated result:\n<|{result}|>"} {"text": "# Format the value with 2 decimal points:\n<|{value}|text|format=%.2f|>"} {"text": "# Create a button with the label 'Click Me':\n<|Click Me|button|>"} {"text": "# Create a Save button:\n<|Save|button|>"} {"text": "# Create a Cancel button and set the action function name:\n<|Cancel|button|on_action=cancel_action_function|>"} {"text": "# Create a Cancel button with cancel_handler function:\n<|Cancel|button|on_action=cancel_handler|>"} {"text": "# Create an input field for username:\n<|{username}|input|>"} {"text": "# Create an input field for location:\n<|{location}|input|>"} {"text": "# Create a numeric field for age:\n<|{age}|number|>"} {"text": "# Create a slider for value between 1 and 10:\n<|{rating}|slider|min=1|max=10|>"} {"text": "# Create a set of toggle buttons for Option 1, Option 2, Option 3:\n<|{choice}|toggle|lov=Option 1;Option 2;Option 3|>"} {"text": "# Create a toggle control to select a specific category:\n<|{category_sel}|toggle|lov={categories}|type=Category|adapter={lambda c: (c.id, c.name)}|>"} {"text": "# Create a date picker:\n<|{event_date}|date|>"} {"text": "# Create a date picker without time:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a date picker with only date:\n<|{event_date}|date|not 
with_time|>"} {"text": "# Create a file download link:\n<|{document}|file_download|>"} {"text": "# Create a file download link with action:\n<|{document}|file_download|label=Download Document|on_action=download_file|name=file_name|>"} {"text": "# Create a file download link without preview:\n<|{document}|file_download|bypass_preview=False|>"} {"text": "# Create an auto download file link:\n<|{document}|file_download|auto|>"} {"text": "# Create a file selector:\n<|{selected_file}|file_selector|>"} {"text": "# Create a file selector with label and action:\n<|{selected_file}|file_selector|label=Select File|on_action=file_selected|extensions=.csv,.xlsx|drop_message=Drop file here|>"} {"text": "# Create a multiple file uploader:\n<|{selected_files}|file_selector|multiple|>"} {"text": "# Show an illustration:\n<|{illustration}|image|>"} {"text": "# Show an image with description and callback:\n<|{picture}|image|label=This is a picture|on_action=image_clicked|>"} {"text": "# Display a message at a specified position between min and max:\n<|status|indicator|value={percentage}|min=0|max=100|>"} {"text": "# Define a basic static menu:\n<|menu|lov=menu_item1;menu_item2|>"} {"text": "# Display df_display in an expandable\n<|Display Data|expandable|expanded=False|"} {"text": "# Create an Explore button\n<|Explore|button|on_action=explore_data|label=Explore|>"} {"text": "# Create a text input for the notes\n<|{notes}|input|multiline|label=Notes|class_name=fullwidth|>"} {"text": "# Insert a line break\n
"} {"text": "# Display visualization\n
<|{visualization}|image|height=400px|>
"} {"text": "# Display visualization from file path\n<|{file_path}|image|width=500px|height=500px|class_name=img|>"} {"text": "# Create a slider to adjust contrast\n<|{contrast}|slider|min=0|max=100|continuous=False|on_change=adjust_contrast|>"} {"text": "# Slider threshold\n<|{threshold}|slider|min=0|max=10|continuous=False|on_change=update_threshold|>"} {"text": "# Create and initialize a page:\nfrom taipy import Gui\nGui(\"# Data Overview\")"} {"text": "# Define a page variable and initialize it:\nmd_content = \"# Data Overview\"\nGui(md_content)"} {"text": "# Define 2 pages and set them up:\ncontent_pages = {\n 'overview': Markdown(\"# Overview\"),\n 'details': Markdown(\"# Details\")\n}\nGui(pages=content_pages)"} {"text": "# Set up a multi-page application:\nfrom taipy import Gui\n\n root_content=\"# Welcome to the App\"\n page1_content=\"# Page 1\"\n page2_content=\"# Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Set up a multi-page application with content placeholder:\n from taipy import Gui\n\n root_content=\"\"\"#\n Multi-page application\n\n\n This app was built using Taipy.\n \"\"\"\n page1_content=\"# Page 1\"\n page2_content=\"# Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Create a dialog with Markdown content:\n dialog_content=\"\"\"\n <|{is_visible}|dialog|\n Please enter your name:\n <|{user_name}|input|>\n |>\"\"\"\n Gui(dialog_content).run()"} {"text": "# Set up a partial page:\n gui_instance = Gui()\n user_prompt = gui_instance.add_partial(\n \"\"\"\n Please enter your name:\n <|{user_name}|input|>\n \"\"\"\n )\n gui_instance.run()"} {"text": "# Display a calculated result:\n<|{result}|>"} {"text": "# Format the value with 2 decimal points:\n<|{value}|text|format=%.2f|>"} {"text": "# Create a button with the label 'Click Me':\n<|Click Me|button|>"} {"text": "# Create a Save button:\n<|Save|button|>"} {"text": "# Create a Cancel button and set the action function name:\n<|Cancel|button|on_action=cancel_action_function|>"} {"text": "# Create a Cancel button with cancel_handler function:\n<|Cancel|button|on_action=cancel_handler|>"} {"text": "# Create an input field for username:\n<|{username}|input|>"} {"text": "# Create an input field for location:\n<|{location}|input|>"} {"text": "# Create a numeric field for age:\n<|{age}|number|>"} {"text": "# Create a slider for value between 1 and 10:\n<|{rating}|slider|min=1|max=10|>"} {"text": "# Create a set of toggle buttons for Option 1, Option 2, Option 3:\n<|{choice}|toggle|lov=Option 1;Option 2;Option 3|>"} {"text": "# Create a toggle control to select a specific category:\n<|{category_sel}|toggle|lov={categories}|type=Category|adapter={lambda c: (c.id, c.name)}|>"} {"text": "# Create a date picker:\n<|{event_date}|date|>"} {"text": "# Create a date picker without time:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a date picker with only date:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a file download link:\n<|{document}|file_download|>"} {"text": "# Create a file download link with action:\n<|{document}|file_download|label=Download Document|on_action=download_file|name=file_name|>"} {"text": "# Create a file download link without preview:\n<|{document}|file_download|bypass_preview=False|>"} {"text": "# Create an auto download file link:\n<|{document}|file_download|auto|>"} {"text": 
"# Create a file selector:\n<|{selected_file}|file_selector|>"} {"text": "# Create a file selector with label and action:\n<|{selected_file}|file_selector|label=Select File|on_action=file_selected|extensions=.csv,.xlsx|drop_message=Drop file here|>"} {"text": "# Create a multiple file uploader:\n<|{selected_files}|file_selector|multiple|>"} {"text": "# Show an illustration:\n<|{illustration}|image|>"} {"text": "# Show an image with description and callback:\n<|{picture}|image|label=This is a picture|on_action=image_clicked|>"} {"text": "# Display a message at a specified position between min and max:\n<|status|indicator|value={percentage}|min=0|max=100|>"} {"text": "# Define a basic static menu:\n<|menu|lov=menu_item1;menu_item2|>"} {"text": "# Plot Sales according to Date in a line chart:\n<|{data}|chart|type=lines|x=DATE|y=SALES|>"} {"text": "# Plot Sales according to Date in a line chart titled \"Sales according to Revenue\":\n<|{data}|chart|type=lines|x=DATE|x=SALES|title=SALES according to Revenue|>"} {"text": "# Plot Sales and Revenue according to Date:\n<|{data}|chart|type=lines|x=DATE|y[1]=SALES|y[2]=REVENUE|>"} {"text": "# Plot Sales according to Date on a Dashed line:\n<|{data}|chart|type=lines|x=DATE|x=SALES|line=dash|>"} {"text": "# Plot Revenue by Date on a dotted line:\n<|{data}|chart|type=lines|x=DATE|x=SALES|line=dot|>"} {"text": "# Plot Sales by Date in Red:\n<|{data}|chart|type=lines|x=DATE|x=SALES|color=Red|>"} {"text": "# Plot Revenue according to Date in yellow:\n<|{data}|chart|type=lines|x=DATE|x=SALES|color=Yellow|>"} {"text": "# Plot Revenue according to Date in yellow titled Revenue Plot:\n<|{data}|chart|type=lines|x=DATE|x=SALES|color=Yellow|title=REVENUE Plot>"} {"text": "# Plot Sales in blue and Revenue in green according to Date:\n<|{data}|chart|type=lines|x=DATE|y[1]=SALES|y[2]=REVENUE|color[1]=blue|color[2]=green|>"} {"text": "# Plot Revenue by Date in a red dashed line and Sales in a yellow Dotted line:\n<|{data}|chart|type=lines|x=DATE|y[1]=REVENUE|y[2]=SALES|line[1]=dash|line[2]=dot|color[1]=red|color[2]=yellow|>"} {"text": "# Display Date according to Sales:\n<|{data}|chart|type=lines|x=DATE|x=SALES|>"} {"text": "# Plot in a bar chart the Sales according to Date:\n<|{data}|chart|type=bar|x=DATE|x=SALES|>"} {"text": "# Plot in a bar chart the Sales according to Date and Revenue according to Date:\n<|{data}|chart|type=bar|x=DATE|y[1]=SALES|y[2]=REVENUE|>"} {"text": "# Plot Sales and Revenue by Date in a bar chart:\n<|{data}|chart|type=bar|x=DATE|y[1]=SALES|y[2]=REVENUE|>"} {"text": "# Plot in a bar chart the Sales according to Date and Revenue according to Date titled Finance:\n<|{data}|chart|type=bar|x=DATE|y[1]=SALES|y[2]=REVENUE|title=Finance|>"} {"text": "# Plot in a scatter plot Sales according to Date:\n<|{data}|chart|type=scatter|mode=markers|x=DATE|x=SALES|>"} {"text": "# Draw Sales and Revenue by Date in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=DATE|y[1]=SALES|y[2]=REVENUE|>"} {"text": "# Plot Revenue in green points and Sales in yellow points by Date:\n<|{data}|chart|type=scatter|mode=markers|x=DATE|y[1]=REVENUE|y[2]=SALES|color[1]=green|color[2]=yellow|>"} {"text": "# Plot a histogram of Sales:\n<|{data}|chart|type=histogram|x=SALES|>"} {"text": "# Display a horizontal histogram of Sales:\n<|{data}|chart|type=histogram|x=SALES|>"} {"text": "# Plot the distribution of Sales and Revenue:\n<|{data}|chart|type=histogram|x[1]=SALES|x[2]=REVENUE|>"} {"text": "# Plot the distribution of Sales and Revenue titled \"Sales and Revenue 
Distribution\":\n<|{data}|chart|type=histogram|x[1]=SALES|x[2]=REVENUE|title=SALES and Revenue Distribution|>"} {"text": "# Display a horizontal distribution of Sales and Revenue titled \"Sales and Revenue Distribution\":\n<|{data}|chart|type=histogram|y[1]=SALES|y[2]=REVENUE|title=SALES and Revenue Distribution|>"} {"text": "# Plot a pie chart of Sales by Date:\n<|{data}|chart|type=pie|values=SALES|labels=Date|>"} {"text": "# Draw a pie chart of Sales by Date titled \"Sales Pie Chart\":\n<|{data}|chart|type=pie|values=SALES|labels=Date|title=SALES Pie Chart|>"} {"text": "# Plot a pie chart of Revenue by Date:\n<|{data}|chart|type=pie|values=REVENUE|labels=Date|>"} {"text": "# Visualize Profit over Time in a line chart:\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|>"} {"text": "# Showcase Profit over Time in a line chart titled \"Profit Trend\":\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|title=Profit Trend|>"} {"text": "# Depict Profit and Loss over Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=PROFIT|y[2]=LOSS|>"} {"text": "# Illustrate Profit over Time with a Dashed line:\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|line=dash|>"} {"text": "# Present Loss by Time on a Dotted line:\n<|{data}|chart|type=lines|x=TIME|y=LOSS|line=dot|>"} {"text": "# Plot Profit over Time in Red:\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|color=Red|>"} {"text": "# Exhibit Loss over Time in yellow:\n<|{data}|chart|type=lines|x=TIME|y=LOSS|color=Yellow|>"} {"text": "# Show Profit over Time in yellow titled Profit Overview:\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|color=Yellow|title=Profit Overview|>"} {"text": "# Display Profit in blue and Loss in green over Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=PROFIT|y[2]=LOSS|color[1]=blue|color[2]=green|>"} {"text": "# Visualize Loss by Time in a red dashed line and Profit in a yellow Dotted line:\n<|{data}|chart|type=lines|x=TIME|y[1]=LOSS|y[2]=PROFIT|line[1]=dash|line[2]=dot|color[1]=red|color[2]=yellow|>"} {"text": "# Highlight Time according to Profit:\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|>"} {"text": "# Depict in a bar chart the Profit over Time:\n<|{data}|chart|type=bar|x=TIME|y=PROFIT|>"} {"text": "# Depict in a bar chart the Profit over Time and Loss over Time:\n<|{data}|chart|type=bar|x=TIME|y[1]=PROFIT|y[2]=LOSS|>"} {"text": "# Showcase Profit and Loss by Time in a bar chart:\n<|{data}|chart|type=bar|x=TIME|y[1]=PROFIT|y[2]=LOSS|>"} {"text": "# Depict in a bar chart the Profit over Time and Loss over Time titled Financial Overview:\n<|{data}|chart|type=bar|x=TIME|y[1]=PROFIT|y[2]=LOSS|title=Financial Overview|>"} {"text": "# Depict in a scatter plot Profit over Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y=PROFIT|>"} {"text": "# Illustrate Profit and Loss by Time in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=PROFIT|y[2]=LOSS|>"} {"text": "# Plot Loss in green points and Profit in yellow points by Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=LOSS|y[2]=PROFIT|color[1]=green|color[2]=yellow|>"} {"text": "# Display a histogram of Profit:\n<|{data}|chart|type=histogram|x=PROFIT|>"} {"text": "# Showcase a horizontal histogram of Profit:\n<|{data}|chart|type=histogram|x=PROFIT|>"} {"text": "# Illustrate the distribution of Profit and Loss:\n<|{data}|chart|type=histogram|x[1]=PROFIT|x[2]=LOSS|>"} {"text": "# Illustrate the distribution of Profit and Loss titled \"Profit and Loss Distribution\":\n<|{data}|chart|type=histogram|x[1]=PROFIT|x[2]=LOSS|title=Profit and Loss Distribution|>"} {"text": "# Present a 
horizontal distribution of Profit and Loss titled \"Profit and Loss Distribution\":\n<|{data}|chart|type=histogram|y[1]=PROFIT|y[2]=LOSS|title=Profit and Loss Distribution|>"} {"text": "# Depict a pie chart of Profit by Time:\n<|{data}|chart|type=pie|values=PROFIT|labels=Time|>"} {"text": "# Illustrate a pie chart of Profit by Time titled \"Profit Pie Chart\":\n<|{data}|chart|type=pie|values=PROFIT|labels=Time|title=Profit Pie Chart|>"} {"text": "# Depict a pie chart of Loss by Time:\n<|{data}|chart|type=pie|values=LOSS|labels=Time|>"} {"text": "# Visualize Quantity over Time in a line chart:\n<|{data}|chart|type=lines|x=TIME|y=QUANTITY|>"} {"text": "# Showcase Quantity over Time in a line chart titled \"Quantity Trend\":\n<|{data}|chart|type=lines|x=TIME|y=QUANTITY|title=Quantity Trend|>"} {"text": "# Depict Quantity and Price over Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=QUANTITY|y[2]=PRICE|>"} {"text": "# Illustrate Quantity over Time with a Dashed line:\n<|{data}|chart|type=lines|x=TIME|y=QUANTITY|line=dash|>"} {"text": "# Present Price by Time on a Dotted line:\n<|{data}|chart|type=lines|x=TIME|y=PRICE|line=dot|>"} {"text": "# Plot Quantity over Time in Green:\n<|{data}|chart|type=lines|x=TIME|y=QUANTITY|color=Green|>"} {"text": "# Exhibit Price over Time in Blue:\n<|{data}|chart|type=lines|x=TIME|y=PRICE|color=Blue|>"} {"text": "# Show Price over Time in Blue titled Price Overview:\n<|{data}|chart|type=lines|x=TIME|y=PRICE|color=Blue|title=Price Overview|>"} {"text": "# Display Quantity in Red and Price in Yellow over Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=QUANTITY|y[2]=PRICE|color[1]=Red|color[2]=Yellow|>"} {"text": "# Visualize Price by Time in a Green dashed line and Quantity in a Yellow Dotted line:\n<|{data}|chart|type=lines|x=TIME|y[1]=PRICE|y[2]=QUANTITY|line[1]=dash|line[2]=dot|color[1]=Green|color[2]=Yellow|>"} {"text": "# Highlight Time according to Quantity:\n<|{data}|chart|type=lines|x=TIME|y=QUANTITY|>"} {"text": "# Depict in a bar chart the Quantity over Time:\n<|{data}|chart|type=bar|x=TIME|y=QUANTITY|>"} {"text": "# Depict in a bar chart the Quantity over Time and Price over Time:\n<|{data}|chart|type=bar|x=TIME|y[1]=QUANTITY|y[2]=PRICE|>"} {"text": "# Showcase Quantity and Price by Time in a bar chart:\n<|{data}|chart|type=bar|x=TIME|y[1]=QUANTITY|y[2]=PRICE|>"} {"text": "# Depict in a bar chart the Quantity over Time and Price over Time titled Product Overview:\n<|{data}|chart|type=bar|x=TIME|y[1]=QUANTITY|y[2]=PRICE|title=Product Overview|>"} {"text": "# Depict in a scatter plot Quantity over Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y=QUANTITY|>"} {"text": "# Illustrate Quantity and Price by Time in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=QUANTITY|y[2]=PRICE|>"} {"text": "# Plot Price in Green points and Quantity in Yellow points by Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=PRICE|y[2]=QUANTITY|color[1]=Green|color[2]=Yellow|>"} {"text": "# Display a histogram of Quantity:\n<|{data}|chart|type=histogram|x=QUANTITY|>"} {"text": "# Showcase a horizontal histogram of Quantity:\n<|{data}|chart|type=histogram|x=QUANTITY|>"} {"text": "# Illustrate the distribution of Quantity and Price:\n<|{data}|chart|type=histogram|x[1]=QUANTITY|x[2]=PRICE|>"} {"text": "# Illustrate the distribution of Quantity and Price titled \"Quantity and Price Distribution\":\n<|{data}|chart|type=histogram|x[1]=QUANTITY|x[2]=PRICE|title=Quantity and Price Distribution|>"} {"text": "# Present a horizontal distribution of 
Quantity and Price titled \"Quantity and Price Distribution\":\n<|{data}|chart|type=histogram|y[1]=QUANTITY|y[2]=PRICE|title=Quantity and Price Distribution|>"} {"text": "# Depict a pie chart of Quantity by Time:\n<|{data}|chart|type=pie|values=QUANTITY|labels=Time|>"} {"text": "# Illustrate a pie chart of Quantity by Time titled \"Quantity Pie Chart\":\n<|{data}|chart|type=pie|values=QUANTITY|labels=Time|title=Quantity Pie Chart|>"} {"text": "# Depict a pie chart of Price by Time:\n<|{data}|chart|type=pie|values=PRICE|labels=Time|>"} {"text": "# Plot Temperature against Time in a line chart:\n<|{data}|chart|type=lines|x=TIME|y=TEMPERATURE|>"} {"text": "# Showcase Temperature against Time in a line chart titled \"Temperature Trend\":\n<|{data}|chart|type=lines|x=TIME|y=TEMPERATURE|title=Temperature Trend|>"} {"text": "# Depict Temperature and Humidity against Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|>"} {"text": "# Illustrate Temperature against Time with a Dashed line:\n<|{data}|chart|type=lines|x=TIME|y=TEMPERATURE|line=dash|>"} {"text": "# Present Humidity by Time on a Dotted line:\n<|{data}|chart|type=lines|x=TIME|y=HUMIDITY|line=dot|>"} {"text": "# Plot Temperature against Time in Blue:\n<|{data}|chart|type=lines|x=TIME|y=TEMPERATURE|color=Blue|>"} {"text": "# Exhibit Humidity against Time in Green:\n<|{data}|chart|type=lines|x=TIME|y=HUMIDITY|color=Green|>"} {"text": "# Show Humidity against Time in Green titled Humidity Overview:\n<|{data}|chart|type=lines|x=TIME|y=HUMIDITY|color=Green|title=Humidity Overview|>"} {"text": "# Display Temperature in Red and Humidity in Yellow against Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|color[1]=Red|color[2]=Yellow|>"} {"text": "# Visualize Humidity against Time in a Red dashed line and Temperature in a Yellow Dotted line:\n<|{data}|chart|type=lines|x=TIME|y[1]=HUMIDITY|y[2]=TEMPERATURE|line[1]=dash|line[2]=dot|color[1]=Red|color[2]=Yellow|>"} {"text": "# Highlight Time according to Temperature:\n<|{data}|chart|type=lines|x=TIME|y=TEMPERATURE|>"} {"text": "# Depict in a bar chart the Temperature against Time:\n<|{data}|chart|type=bar|x=TIME|y=TEMPERATURE|>"} {"text": "# Depict in a bar chart the Temperature against Time and Humidity against Time:\n<|{data}|chart|type=bar|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|>"} {"text": "# Showcase Temperature and Humidity against Time in a bar chart:\n<|{data}|chart|type=bar|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|>"} {"text": "# Depict in a bar chart the Temperature against Time and Humidity against Time titled Climate Overview:\n<|{data}|chart|type=bar|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|title=Climate Overview|>"} {"text": "# Depict in a scatter plot Temperature against Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y=TEMPERATURE|>"} {"text": "# Illustrate Temperature and Humidity against Time in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|>"} {"text": "# Plot Humidity in Green points and Temperature in Yellow points against Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=HUMIDITY|y[2]=TEMPERATURE|color[1]=Green|color[2]=Yellow|>"} {"text": "# Display a histogram of Temperature:\n<|{data}|chart|type=histogram|x=TEMPERATURE|>"} {"text": "# Showcase a horizontal histogram of Temperature:\n<|{data}|chart|type=histogram|x=TEMPERATURE|>"} {"text": "# Illustrate the distribution of Temperature and Humidity:\n<|{data}|chart|type=histogram|x[1]=TEMPERATURE|x[2]=HUMIDITY|>"} {"text": "# 
Illustrate the distribution of Temperature and Humidity titled \"Temperature and Humidity Distribution\":\n<|{data}|chart|type=histogram|x[1]=TEMPERATURE|x[2]=HUMIDITY|title=Temperature and Humidity Distribution|>"} {"text": "# Present a horizontal distribution of Temperature and Humidity titled \"Temperature and Humidity Distribution\":\n<|{data}|chart|type=histogram|y[1]=TEMPERATURE|y[2]=HUMIDITY|title=Temperature and Humidity Distribution|>"} {"text": "# Depict a pie chart of Temperature against Time:\n<|{data}|chart|type=pie|values=TEMPERATURE|labels=Time|>"} {"text": "# Illustrate a pie chart of Temperature against Time titled \"Temperature Pie Chart\":\n<|{data}|chart|type=pie|values=TEMPERATURE|labels=Time|title=Temperature Pie Chart|>"} {"text": "# Depict a pie chart of Humidity against Time:\n<|{data}|chart|type=pie|values=HUMIDITY|labels=Time|>"} {"text": "# Plot Sales against Region in a line chart:\n<|{data}|chart|type=lines|x=REGION|y=SALES|>"} {"text": "# Showcase Sales against Region in a line chart titled \"Sales by Region\":\n<|{data}|chart|type=lines|x=REGION|y=SALES|title=Sales by Region|>"} {"text": "# Depict Sales and Profit against Region:\n<|{data}|chart|type=lines|x=REGION|y[1]=SALES|y[2]=PROFIT|>"} {"text": "# Illustrate Sales against Region with a Dashed line:\n<|{data}|chart|type=lines|x=REGION|y=SALES|line=dash|>"} {"text": "# Present Profit by Region on a Dotted line:\n<|{data}|chart|type=lines|x=REGION|y=PROFIT|line=dot|>"} {"text": "# Plot Sales against Region in Blue:\n<|{data}|chart|type=lines|x=REGION|y=SALES|color=Blue|>"} {"text": "# Exhibit Profit against Region in Green:\n<|{data}|chart|type=lines|x=REGION|y=PROFIT|color=Green|>"} {"text": "# Show Profit against Region in Green titled Profit Overview:\n<|{data}|chart|type=lines|x=REGION|y=PROFIT|color=Green|title=Profit Overview|>"} {"text": "# Display Sales in Red and Profit in Yellow against Region:\n<|{data}|chart|type=lines|x=REGION|y[1]=SALES|y[2]=PROFIT|color[1]=Red|color[2]=Yellow|>"} {"text": "# Visualize Profit by Region in a Red dashed line and Sales in a Yellow Dotted line:\n<|{data}|chart|type=lines|x=REGION|y[1]=PROFIT|y[2]=SALES|line[1]=dash|line[2]=dot|color[1]=Red|color[2]=Yellow|>"} {"text": "# Highlight Region according to Sales:\n<|{data}|chart|type=lines|x=REGION|y=SALES|>"} {"text": "# Depict in a bar chart the Sales against Region:\n<|{data}|chart|type=bar|x=REGION|y=SALES|>"} {"text": "# Depict in a bar chart the Sales against Region and Profit against Region:\n<|{data}|chart|type=bar|x=REGION|y[1]=SALES|y[2]=PROFIT|>"} {"text": "# Showcase Sales and Profit against Region in a bar chart:\n<|{data}|chart|type=bar|x=REGION|y[1]=SALES|y[2]=PROFIT|>"} {"text": "# Depict in a bar chart the Sales against Region and Profit against Region titled Financial Overview:\n<|{data}|chart|type=bar|x=REGION|y[1]=SALES|y[2]=PROFIT|title=Financial Overview|>"} {"text": "# Depict in a scatter plot Sales against Region:\n<|{data}|chart|type=scatter|mode=markers|x=REGION|y=SALES|>"} {"text": "# Illustrate Sales and Profit against Region in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=REGION|y[1]=SALES|y[2]=PROFIT|>"} {"text": "# Plot Profit in Green points and Sales in Yellow points against Region:\n<|{data}|chart|type=scatter|mode=markers|x=REGION|y[1]=PROFIT|y[2]=SALES|color[1]=Green|color[2]=Yellow|>"} {"text": "# Display a histogram of Sales against Region:\n<|{data}|chart|type=histogram|x=SALES|>"} {"text": "# Showcase a horizontal histogram of Sales against 
Region:\n<|{data}|chart|type=histogram|x=SALES|>"} {"text": "# Illustrate the distribution of Sales and Profit against Region:\n<|{data}|chart|type=histogram|x[1]=SALES|x[2]=PROFIT|>"} {"text": "# Illustrate the distribution of Sales and Profit against Region titled \"Sales and Profit Distribution\":\n<|{data}|chart|type=histogram|x[1]=SALES|x[2]=PROFIT|title=Sales and Profit Distribution|>"} {"text": "# Present a horizontal distribution of Sales and Profit against Region titled \"Sales and Profit Distribution\":\n<|{data}|chart|type=histogram|y[1]=SALES|y[2]=PROFIT|title=Sales and Profit Distribution|>"} {"text": "# Depict a pie chart of Sales against Region:\n<|{data}|chart|type=pie|values=SALES|labels=Region|>"} {"text": "# Illustrate a pie chart of Sales against Region titled \"Sales Pie Chart\":\n<|{data}|chart|type=pie|values=SALES|labels=Region|title=Sales Pie Chart|>"} {"text": "# Depict a pie chart of Profit against Region:\n<|{data}|chart|type=pie|values=PROFIT|labels=Region|>"} {"text": "# Visualize Productivity against Employee in a line chart:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=PRODUCTIVITY|>"} {"text": "# Showcase Productivity against Employee in a line chart titled \"Employee Productivity Trend\":\n<|{data}|chart|type=lines|x=EMPLOYEE|y=PRODUCTIVITY|title=Employee Productivity Trend|>"} {"text": "# Depict Productivity and Satisfaction against Employee:\n<|{data}|chart|type=lines|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|>"} {"text": "# Illustrate Productivity against Employee with a Dashed line:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=PRODUCTIVITY|line=dash|>"} {"text": "# Present Satisfaction by Employee on a Dotted line:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=SATISFACTION|line=dot|>"} {"text": "# Plot Productivity against Employee in Blue:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=PRODUCTIVITY|color=Blue|>"} {"text": "# Exhibit Satisfaction against Employee in Green:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=SATISFACTION|color=Green|>"} {"text": "# Show Satisfaction against Employee in Green titled Satisfaction Overview:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=SATISFACTION|color=Green|title=Satisfaction Overview|>"} {"text": "# Display Productivity in Red and Satisfaction in Yellow against Employee:\n<|{data}|chart|type=lines|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|color[1]=Red|color[2]=Yellow|>"} {"text": "# Visualize Satisfaction by Employee in a Red dashed line and Productivity in a Yellow Dotted line:\n<|{data}|chart|type=lines|x=EMPLOYEE|y[1]=SATISFACTION|y[2]=PRODUCTIVITY|line[1]=dash|line[2]=dot|color[1]=Red|color[2]=Yellow|>"} {"text": "# Highlight Employee according to Productivity:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=PRODUCTIVITY|>"} {"text": "# Depict in a bar chart the Productivity against Employee:\n<|{data}|chart|type=bar|x=EMPLOYEE|y=PRODUCTIVITY|>"} {"text": "# Depict in a bar chart the Productivity against Employee and Satisfaction against Employee:\n<|{data}|chart|type=bar|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|>"} {"text": "# Showcase Productivity and Satisfaction against Employee in a bar chart:\n<|{data}|chart|type=bar|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|>"} {"text": "# Depict in a bar chart the Productivity against Employee and Satisfaction against Employee titled Work Overview:\n<|{data}|chart|type=bar|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|title=Work Overview|>"} {"text": "# Depict in a scatter plot Productivity against Employee:\n<|{data}|chart|type=scatter|mode=markers|x=EMPLOYEE|y=PRODUCTIVITY|>"} 
{"text": "# Illustrate Productivity and Satisfaction against Employee in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|>"} {"text": "# Plot Satisfaction in Green points and Productivity in Yellow points against Employee:\n<|{data}|chart|type=scatter|mode=markers|x=EMPLOYEE|y[1]=SATISFACTION|y[2]=PRODUCTIVITY|color[1]=Green|color[2]=Yellow|>"} {"text": "# Display a histogram of Productivity against Employee:\n<|{data}|chart|type=histogram|x=PRODUCTIVITY|>"} {"text": "# Showcase a horizontal histogram of Productivity against Employee:\n<|{data}|chart|type=histogram|x=PRODUCTIVITY|>"} {"text": "# Illustrate the distribution of Productivity and Satisfaction against Employee:\n<|{data}|chart|type=histogram|x[1]=PRODUCTIVITY|x[2]=SATISFACTION|>"} {"text": "# Illustrate the distribution of Productivity and Satisfaction against Employee titled \"Productivity and Satisfaction Distribution\":\n<|{data}|chart|type=histogram|x[1]=PRODUCTIVITY|x[2]=SATISFACTION|title=Productivity and Satisfaction Distribution|>"} {"text": "# Present a horizontal distribution of Productivity and Satisfaction against Employee titled \"Productivity and Satisfaction Distribution\":\n<|{data}|chart|type=histogram|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|title=Productivity and Satisfaction Distribution|>"} {"text": "# Depict a pie chart of Productivity against Employee:\n<|{data}|chart|type=pie|values=PRODUCTIVITY|labels=Employee|>"} {"text": "# Illustrate a pie chart of Productivity against Employee titled \"Productivity Pie Chart\":\n<|{data}|chart|type=pie|values=PRODUCTIVITY|labels=Employee|title=Productivity Pie Chart|>"} {"text": "# Depict a pie chart of Satisfaction against Employee:\n<|{data}|chart|type=pie|values=SATISFACTION|labels=Employee|>"} {"text": "# Plot Population against Country in a line chart:\n<|{data}|chart|type=lines|x=COUNTRY|y=POPULATION|>"} {"text": "# Showcase Population against Country in a line chart titled \"Population Trends\":\n<|{data}|chart|type=lines|x=COUNTRY|y=POPULATION|title=Population Trends|>"} {"text": "# Depict Population and GDP against Country:\n<|{data}|chart|type=lines|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|>"} {"text": "# Illustrate Population against Country with a Dashed line:\n<|{data}|chart|type=lines|x=COUNTRY|y=POPULATION|line=dash|>"} {"text": "# Present GDP by Country on a Dotted line:\n<|{data}|chart|type=lines|x=COUNTRY|y=GDP|line=dot|>"} {"text": "# Plot Population against Country in Blue:\n<|{data}|chart|type=lines|x=COUNTRY|y=POPULATION|color=Blue|>"} {"text": "# Exhibit GDP against Country in Green:\n<|{data}|chart|type=lines|x=COUNTRY|y=GDP|color=Green|>"} {"text": "# Show GDP against Country in Green titled GDP Overview:\n<|{data}|chart|type=lines|x=COUNTRY|y=GDP|color=Green|title=GDP Overview|>"} {"text": "# Display Population in Red and GDP in Yellow against Country:\n<|{data}|chart|type=lines|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|color[1]=Red|color[2]=Yellow|>"} {"text": "# Visualize GDP by Country in a Red dashed line and Population in a Yellow Dotted line:\n<|{data}|chart|type=lines|x=COUNTRY|y[1]=GDP|y[2]=POPULATION|line[1]=dash|line[2]=dot|color[1]=Red|color[2]=Yellow|>"} {"text": "# Highlight Country according to Population:\n<|{data}|chart|type=lines|x=COUNTRY|y=POPULATION|>"} {"text": "# Depict in a bar chart the Population against Country:\n<|{data}|chart|type=bar|x=COUNTRY|y=POPULATION|>"} {"text": "# Depict in a bar chart the Population against Country and GDP against 
Country:\n<|{data}|chart|type=bar|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|>"} {"text": "# Showcase Population and GDP against Country in a bar chart:\n<|{data}|chart|type=bar|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|>"} {"text": "# Depict in a bar chart the Population against Country and GDP against Country titled Economic Overview:\n<|{data}|chart|type=bar|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|title=Economic Overview|>"} {"text": "# Depict in a scatter plot Population against Country:\n<|{data}|chart|type=scatter|mode=markers|x=COUNTRY|y=POPULATION|>"} {"text": "# Illustrate Population and GDP against Country in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|>"} {"text": "# Plot GDP in Green points and Population in Yellow points against Country:\n<|{data}|chart|type=scatter|mode=markers|x=COUNTRY|y[1]=GDP|y[2]=POPULATION|color[1]=Green|color[2]=Yellow|>"} {"text": "# Display a histogram of Population against Country:\n<|{data}|chart|type=histogram|x=POPULATION|>"} {"text": "# Showcase a horizontal histogram of Population against Country:\n<|{data}|chart|type=histogram|x=POPULATION|>"} {"text": "# Illustrate the distribution of Population and GDP against Country:\n<|{data}|chart|type=histogram|x[1]=POPULATION|x[2]=GDP|>"} {"text": "# Illustrate the distribution of Population and GDP against Country titled \"Population and GDP Distribution\":\n<|{data}|chart|type=histogram|x[1]=POPULATION|x[2]=GDP|title=Population and GDP Distribution|>"} {"text": "# Present a horizontal distribution of Population and GDP against Country titled \"Population and GDP Distribution\":\n<|{data}|chart|type=histogram|y[1]=POPULATION|y[2]=GDP|title=Population and GDP Distribution|>"} {"text": "# Depict a pie chart of Population against Country:\n<|{data}|chart|type=pie|values=POPULATION|labels=Country|>"} {"text": "# Illustrate a pie chart of Population against Country titled \"Population Pie Chart\":\n<|{data}|chart|type=pie|values=POPULATION|labels=Country|title=Population Pie Chart|>"} {"text": "# Depict a pie chart of GDP against Country:\n<|{data}|chart|type=pie|values=GDP|labels=Country|>"} {"text": "# **Worldwide**{: .color-primary} Health and Fitness Trends\n\n
<br/>\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Average Life Expectancy**{: .color-primary}\n<|{'{:.1f}'.format(np.average(data_world_health['Life Expectancy']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Obesity Rate**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_health['Obesity Rate']))}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Gym Memberships**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_health['Gym Memberships']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n<br/>
\n\n<|{selected_health_metric}|toggle|lov={health_metric_selector}|>\n\n<|part|render={selected_health_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_health_pie_absolute}|chart|type=pie|labels=Country|values=Life Expectancy|title=Global Life Expectancy Distribution|>\n\n<|{data_world_health_evolution_absolute}|chart|properties={data_world_health_evolution_properties}|title=Health and Fitness Evolution Worldwide|>\n|>\n|>\n\n<|part|render={selected_health_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_health_pie_relative}|chart|type=pie|labels=Country|values=Obesity Rate|>\n\n<|{data_world_health_evolution_relative}|chart|properties={data_world_health_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load task data\ntask_data = pd.read_csv(\"task_data.csv\")\n\n# Initialize variables\npriorities = list(task_data[\"Priority\"].unique())\ncategories = list(task_data[\"Category\"].unique())\npriority = priorities\ncategory = categories\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{category}|selector|lov={categories}|multiple|label=Select Category|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_tasks>\n\n hours\n|average_completion_time>\n\n\n|task_table>\n|main_page>\n|>\n\nCode adapted from [Task Management](https://github.com/task_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/task-management-app)\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\nfrom PIL import Image\nimport io\n\nimage_data = None\nimage_path = \"\"\n\ndef image_upload(state):\n if state.image_path:\n with open(state.image_path, \"rb\") as file:\n state.image_data = Image.open(io.BytesIO(file.read()))\n\nimage_page = \"\"\"\n<|{image_path}|file_selector|accept=image/*|on_action=image_upload|>\n<|{image_data}|image|>\n\"\"\"\n\nGui(image_page).run()\n"} {"text": "<|{all_reservations}|table|columns={reservation_columns}|width='100%'|on_action={on_reservation_select}|style=reservation_style|>\n<|Create Reservation|button|on_action={open_create_reservation_dialog}|>\n<|Refresh Reservations|button|on_action={refresh_reservation_list}|>\n\n<|{show_create_reservation_dialog}|dialog|title=Create Reservation|\n<|{customer_name}|input|placeholder='Customer Name'|\n<|{reservation_date}|datetime_picker|>\n<|{table_number}|number_input|min=1|placeholder='Table Number'|\n<|Create|button|on_action={create_reservation}|>\n<|Cancel|button|on_action={close_create_reservation_dialog}|>\n|>\n\n<|{show_reservation_details}|pane|\n\n# Reservation Details <|Edit|button|on_action=edit_selected_reservation|> <|Cancel|button|on_action=cancel_selected_reservation|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Customer Name\n<|{selected_reservation.customer_name}|>\n|>\n\n<|part|class_name=card|\n## Date and Time\n<|{selected_reservation.date.strftime(\"%b %d, %Y at %H:%M\")}|>\n|>\n\n<|part|class_name=card|\n## Table Number\n<|{selected_reservation.table_number}|>\n|>\n\n----\n|>\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Product Category
<br/>\n<|{product_category_selected}|selector|lov=category_electronics;category_clothing;category_food|dropdown|on_change=on_product_category_change|>\n|>\n\n<|part|class_name=card|\n### Select Store Location<br/>
\n<|{store_location_selected}|selector|lov=location_downtown;location_suburb;location_rural|dropdown|on_change=on_store_location_change|>\n|>\n\n|>\n\n<|Inventory Data Overview|expandable|expanded=True|\nDisplay category_data and location_data\n<|layout|columns=1 1|\n<|{category_data}|table|page_size=5|>\n\n<|{location_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{stock_levels_chart}|chart|type=bar|x=Product|y=Stock Level|title=Stock Levels by Category|>\n|>\n\n<|part|class_name=card|\n<|{sales_by_location_chart}|chart|type=pie|options={sales_options}|layout={sales_layout}|title=Sales by Location|>\n|>\n|>\n\n
<br/>\n### Analyze Inventory Efficiency:\n<|{inventory_efficiency_analysis}|scenario|on_submission_change=on_inventory_efficiency_status_change|expandable=False|expanded=False|>\n\n<|{inventory_efficiency_analysis}|scenario_dag|>\n\n
<br/>\n### View inventory efficiency results:\n<|{inventory_efficiency_analysis.results if inventory_efficiency_analysis else None}|data_node|>\n"} {"text": "from taipy import Gui\n\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n    center: float = WINDOW_SIZE / 2,\n    dx_range: int = 1000,\n    dx_start: float = -0.12,\n    dy_range: float = 1000,\n    dy_start: float = -0.82,\n    iterations: int = 50,\n    max_value: int = 200,\n    i: int = 0,\n) -> str:\n    mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n    for y in range(WINDOW_SIZE):\n        for x in range(WINDOW_SIZE):\n            dx = (x - center) / dx_range + dx_start\n            dy = (y - center) / dy_range + dy_start\n            a = dx\n            b = dy\n            for t in range(iterations):\n                d = (a * a) - (b * b) + dx\n                b = 2 * (a * b) + dy\n                a = d\n                if d > max_value:\n                    # Point escaped: record the iteration count and stop early\n                    mat[x, y] = t\n                    break\n\n    colored_mat = cm(mat / mat.max())\n    im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n    path = f\"mandelbrot_{i}.png\"\n    im.save(path)\n\n    return path\n\n\ndef generate(state):\n    state.i = state.i + 1\n    state.path = generate_mandelbrot(\n        dx_start=-state.dx_start / 100,\n        dy_start=(state.dy_start - 100) / 100,\n        iterations=state.iterations,\n        i=state.i,\n    )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n    dx_start=-dx_start / 100,\n    dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Art Generator\n\n<|layout|columns=35 65|\nDisplay Mandelbrot Art from path\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
<br/>\nCreate a slider to select iterations\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
<br/>\nX Position:
<br/>\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
<br/>\nY Position:
<br/>\n\nSlider dx_start\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br/>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Art Generator\")\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Stock
<br/>\n<|{stock_selected}|selector|lov=stock_apple;stock_google;stock_amazon|dropdown|on_change=on_stock_change|>\n|>\n\n<|part|class_name=card|\n### Select Comparison Market Index<br/>
\n<|{market_index_selected}|selector|lov=index_nasdaq;index_s&p500;index_dowjones|dropdown|on_change=on_market_index_change|>\n|>\n\n|>\n\n<|Stock and Market Data|expandable|expanded=True|\nDisplay stock_data and market_index_data\n<|layout|columns=1 1|\n<|{stock_data}|table|page_size=5|>\n\n<|{market_index_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{stock_price_chart}|chart|type=line|x=Date|y=Price|title=Stock Price Trend|>\n|>\n\n<|part|class_name=card|\n<|{market_index_chart}|chart|type=line|x=Date|y=Index Value|title=Market Index Trend|>\n|>\n|>\n\n
<br/>\n### Run Financial Analysis:\n<|{financial_analysis}|scenario|on_submission_change=on_financial_analysis_status_change|expandable=False|expanded=False|>\n\n<|{financial_analysis}|scenario_dag|>\n\n<br/>
\n### View financial analysis results:\n<|{financial_analysis.results if financial_analysis else None}|data_node|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load sales data\nsales_data = pd.read_csv(\"sales_data.csv\")\n\n# Initialize variables\nregions = list(sales_data[\"Region\"].unique())\nproducts = list(sales_data[\"Product\"].unique())\nregion = regions\nproduct = products\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{product}|selector|lov={products}|multiple|label=Select Product|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_sales>\n\n\n|average_profit>\n\n\n\n<|{profit_chart}|chart|x=Month|y=Profit|type=line|title=Profit by Month|color=#ff462b|width=100%|>\n|sales_chart>\n|main_page>\n|>\n\nCode adapted from [Sales Analysis](https://github.com/sales_analysis_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/sales-analysis-app)\n"} {"text": "<|{all_events}|table|columns={event_columns}|width='100%'|on_action={on_event_click}|style=event_style|>\n<|Create Event|button|on_action={open_create_event_dialog}|>\n<|Refresh Events|button|on_action={refresh_event_list}|>\n\n<|{show_create_event_dialog}|dialog|title=Create New Event|\n<|{event_title}|input|placeholder='Event Title'|\n<|{event_date}|date_picker|>\n<|Create Event|button|on_action={create_event}|>\n<|Cancel|button|on_action={close_create_event_dialog}|>\n|>\n\n<|{show_event_details}|pane|\n\n# Event Details <|Edit|button|on_action=edit_selected_event|> <|Cancel|button|on_action=cancel_selected_event|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Title\n<|{selected_event.title}|>\n|>\n\n<|part|class_name=card|\n## Date\n<|{selected_event.date.strftime(\"%b %d, %Y\")}|>\n|>\n\n<|part|class_name=card|\n## Description\n<|{selected_event.description}|textarea|disabled=True|>\n|>\n\n----\n|>\n"} {"text": "# **Country**{: .color-primary} Energy Consumption\n\n<|layout|columns=1 1 1|\n<|{selected_country_energy}|selector|lov={selector_country_energy}|on_change=on_change_country_energy|dropdown|label=Country|>\n\n<|{selected_energy_source}|toggle|lov={energy_source_selector}|on_change=update_energy_source_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Consumption**{: .color-primary}\n<|{'{:,}'.format(int(energy_data.iloc[-1]['Total']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Renewable Sources**{: .color-primary}\n<|{'{:,}'.format(int(energy_data.iloc[-1]['Renewable']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Non-Renewable Sources**{: .color-primary}\n<|{'{:,}'.format(int(energy_data.iloc[-1]['Non-Renewable']))}|text|class_name=h2|>\n|>\n|>\n\n<br/>
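{"text": "# Illustrative sketch: possible layout and options objects for the energy chart below; Taipy passes both through to Plotly, so any Plotly layout/trace keys work (values here are assumptions)\nlayout = {\n \"xaxis\": {\"title\": \"Year\"},\n \"yaxis\": {\"title\": \"Consumption\"},\n \"legend\": {\"orientation\": \"h\"},\n}\noptions = {\"mode\": \"lines+markers\"}"}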
\n\n<|layout|columns=2 1|\n<|{energy_data}|chart|type=line|x=Year|y[3]=Total|y[2]=Renewable|y[1]=Non-Renewable|layout={layout}|options={options}|title=Energy Consumption Trends|>\n\n<|{energy_source_distribution_chart}|chart|type=pie|values=energy_source_values|labels=energy_source_labels|title=Energy Source Distribution|>\n|>\n"} {"text": "<|{inventory_items}|table|columns={inventory_columns}|width='100%'|on_action={on_inventory_item_select}|style=inventory_style|>\n<|Add Item|button|on_action={open_add_item_dialog}|>\n<|Refresh Inventory|button|on_action={refresh_inventory}|>\n\n<|{show_add_item_dialog}|dialog|title=Add Inventory Item|\n<|{item_name}|input|placeholder='Item Name'|\n<|{item_quantity}|number_input|min=0|>\n<|Add Item|button|on_action={add_inventory_item}|>\n<|Cancel|button|on_action={close_add_item_dialog}|>\n|>\n\n<|{show_item_details}|pane|\n\n# Item Details <|Remove|button|on_action=remove_selected_item|> <|Update|button|on_action=update_selected_item|>\n\n<|layout|columns=2|\n<|part|class_name=card|\n## Name\n<|{selected_item.name}|>\n|>\n\n<|part|class_name=card|\n## Quantity\n<|{selected_item.quantity}|>\n|>\n\n<|part|class_name=card|\n## ID\n<|{selected_item.id}|>\n|>\n\n<|part|class_name=card|\n## Last Updated\n<|{selected_item.last_updated.strftime(\"%b %d, %Y at %H:%M:%S\")}|>\n|>\n\n----\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Technology\"\n mood = \"tech\"\n style = \"techexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Technology Tweets\n\n This mini-app generates Tweets related to Technology using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n \n |topic>\n\n \n |mood>\n\n \n |style>\n\n Create a Generate text button\n <|Generate Tech Tweet|button|on_action=generate_text|label=Generate text|>\n\n <|{image}|image|height=400px|>\n |image>\n\n Break line\n<br/>
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load event registration data\nevent_data = pd.read_csv(\"event_registration_data.csv\")\n\n# Initialize variables\nevents = list(event_data[\"Event\"].unique())\nregistrants = list(event_data[\"Registrant\"].unique())\nevent = events\nregistrant = registrants\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{registrant}|selector|lov={registrants}|multiple|label=Select Registrant|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_registrations>\n\n attendees\n|average_attendance>\n\n\n|event_registration_table>\n|main_page>\n|>\n\nCode adapted from [Event Registration](https://github.com/event_registration_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/event-registration-app)\n"} {"text": "<|{all_courses}|table|columns={course_columns}|width='100%'|on_action={on_course_select}|style=course_style|>\n<|Add Course|button|on_action={open_add_course_dialog}|>\n<|Refresh Courses|button|on_action={refresh_course_list}|>\n\n<|{show_add_course_dialog}|dialog|title=Add New Course|\n<|{course_title}|input|placeholder='Course Title'|\n<|{course_instructor}|input|placeholder='Instructor Name'|\n<|{course_duration}|number_input|placeholder='Duration in Hours'|\n<|{course_category}|selector|lov={get_all_categories()}|>\n<|Add Course|button|on_action={add_course}|>\n<|Cancel|button|on_action={close_add_course_dialog}|>\n|>\n\n<|{show_course_details}|pane|\n\n# Course Details <|Edit|button|on_action=edit_selected_course|> <|Remove|button|on_action=remove_selected_course|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Title\n<|{selected_course.title}|>\n|>\n\n<|part|class_name=card|\n## Instructor\n<|{selected_course.instructor}|>\n|>\n\n<|part|class_name=card|\n## Duration\n<|{selected_course.duration}|>\n|>\n\n<|part|class_name=card|\n## Category\n<|{selected_course.category}|>\n|>\n\n----\n|>\n"} {"text": "<|{all_itineraries}|table|columns={itinerary_columns}|width='100%'|on_action={on_itinerary_select}|style=itinerary_style|>\n<|Create Itinerary|button|on_action={open_create_itinerary_dialog}|>\n<|Refresh Itineraries|button|on_action={refresh_itinerary_list}|>\n\n<|{show_create_itinerary_dialog}|dialog|title=Create Travel Itinerary|\n<|{destination}|input|placeholder='Destination'|\n<|{start_date}|date_picker|>\n<|{end_date}|date_picker|>\n<|Create Itinerary|button|on_action={create_itinerary}|>\n<|Cancel|button|on_action={close_create_itinerary_dialog}|>\n|>\n\n<|{show_itinerary_details}|pane|\n\n# Itinerary Details <|Edit|button|on_action=edit_selected_itinerary|> <|Remove|button|on_action=remove_selected_itinerary|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Destination\n<|{selected_itinerary.destination}|>\n|>\n\n<|part|class_name=card|\n## Start Date\n<|{selected_itinerary.start_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n<|part|class_name=card|\n## End Date\n<|{selected_itinerary.end_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n----\n|>\n"} {"text": "<|{all_exhibits}|table|columns={exhibit_columns}|width='100%'|on_action={on_exhibit_select}|style=exhibit_style|>\n<|Add Exhibit|button|on_action={open_add_exhibit_dialog}|>\n<|Refresh Exhibits|button|on_action={refresh_exhibit_list}|>\n\n<|{show_add_exhibit_dialog}|dialog|title=Add New 
Exhibit|\n<|{exhibit_name}|input|placeholder='Exhibit Name'|\n<|{exhibit_artist}|input|placeholder='Artist Name'|\n<|{exhibit_start_date}|date_picker|>\n<|{exhibit_end_date}|date_picker|>\n<|Add Exhibit|button|on_action={add_exhibit}|>\n<|Cancel|button|on_action={close_add_exhibit_dialog}|>\n|>\n\n<|{show_exhibit_details}|pane|\n\n# Exhibit Details <|Edit|button|on_action=edit_selected_exhibit|> <|Remove|button|on_action=remove_selected_exhibit|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Name\n<|{selected_exhibit.name}|>\n|>\n\n<|part|class_name=card|\n## Artist\n<|{selected_exhibit.artist}|>\n|>\n\n<|part|class_name=card|\n## Start Date\n<|{selected_exhibit.start_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n<|part|class_name=card|\n## End Date\n<|{selected_exhibit.end_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n----\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load employee data\nemployee_data = pd.read_csv(\"employee_data.csv\")\n\n# Initialize variables\ndepartments = list(employee_data[\"Department\"].unique())\npositions = list(employee_data[\"Position\"].unique())\ndepartment = departments\nposition = positions\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{position}|selector|lov={positions}|multiple|label=Select Position|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_employees>\n\n\n|average_salary>\n\n\n|employee_table>\n|main_page>\n|>\n\nCode adapted from [Employee Management](https://github.com/employee_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/employee-management-app)\n\"\"\"\n\ndef filter(department, position):\n df_selection = employee_data[\n employee_data[\"Department\"].isin(department)\n & employee_data[\"Position\"].isin(position)\n ]\n return df_selection\n\ndef on_filter(state):\n state.df_selection = filter(state.department, state.position)\n\nif __name__ == \"__main__\":\n # Initialize dataframe\n df_selection = filter(department, position)\n\n # Run the app\n Gui(page).run()\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load customer feedback data\nfeedback_data = pd.read_csv(\"customer_feedback_data.csv\")\n\n# Initialize variables\nproducts = list(feedback_data[\"Product\"].unique())\nratings = list(feedback_data[\"Rating\"].unique())\nproduct = products\nrating = ratings\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{rating}|selector|lov={ratings}|multiple|label=Select Rating|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_feedback>\n\n\n|average_rating>\n\n\n|feedback_table>\n|main_page>\n|>\n\nCode adapted from [Customer Feedback](https://github.com/customer_feedback_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/customer-feedback-app)\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages related to the finance sector (replace with actual imports)\nfrom pages.finance.finance import finance_md\n\n# Define the pages\npages = {\n '/': root, # Replace with the root page if you have one\n \"Finance\": finance_md,\n}\n\n# Create a Gui instance with the pages\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n\n # Run the multi-page app\n gui_multi_pages.run(title=\"Finance Dashboard\")\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load inventory data\ninventory_data = pd.read_csv(\"inventory_data.csv\")\n\n# Initialize variables\ncategories = 
list(inventory_data[\"Category\"].unique())\nlocations = list(inventory_data[\"Location\"].unique())\ncategory = categories\nlocation = locations\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{location}|selector|lov={locations}|multiple|label=Select Location|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_items>\n\n units\n|average_quantity>\n\n\n|inventory_table>\n|main_page>\n|>\n\nCode adapted from [Inventory Management](https://github.com/inventory_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/inventory-management-app)\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|region>\n|>\n\n<|{selected_market_analysis}|market_analysis|on_submission_change=on_submission_change_market_analysis|not expanded|>\n\n---------------------------------------\n\n## **Market Predictions**{: .color-primary} and Data Exploration\n\n<|{selected_market_analysis.result.read() if selected_market_analysis and selected_market_analysis.result.read() is not None else default_market_result}|chart|x=Date|y[1]=Average Price|y[2]=Volume|y[3]=Trend Analysis|type[1]=line|title=Real Estate Market Trends|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|market_analysis>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load product inventory data\nproduct_inventory = pd.read_csv(\"product_inventory.csv\")\n\n# Initialize variables\ncategories = list(product_inventory[\"Category\"].unique())\nbrands = list(product_inventory[\"Brand\"].unique())\ncategory = categories\nbrand = brands\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{brand}|selector|lov={brands}|multiple|label=Select Brand|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_products>\n\n\n|average_price>\n\n\n|product_table>\n|main_page>\n|>\n\nCode adapted from [Product Inventory](https://github.com/product_inventory_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/product-inventory-app)\n"} {"text": "# **Country**{: .color-primary} Agricultural Production\n\n<|layout|columns=1 1 1|\n<|{selected_country_agriculture}|selector|lov={selector_country_agriculture}|on_change=on_change_country_agriculture|dropdown|label=Country|>\n\n<|{selected_crop}|toggle|lov={crop_selector}|on_change=update_crop_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Production**{: .color-primary}\n<|{'{:,}'.format(int(agriculture_data.iloc[-1]['Total']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Crop Yield**{: .color-primary}\n<|{'{:,}'.format(int(agriculture_data.iloc[-1]['Yield']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Export Volume**{: .color-primary}\n<|{'{:,}'.format(int(agriculture_data.iloc[-1]['Export']))}|text|class_name=h2|>\n|>\n|>\n\n<br/>
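{"text": "# Illustrative sketch: the crop distribution pie chart below only needs a frame holding the labels and values columns it names (placeholder figures)\nimport pandas as pd\n\ncrop_distribution_chart = pd.DataFrame({\n \"crop_labels\": [\"Wheat\", \"Corn\", \"Rice\"],\n \"crop_values\": [40, 35, 25],\n})"}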
\n\n<|layout|columns=2 1|\n<|{agriculture_data}|chart|type=area|x=Year|y[3]=Total|y[2]=Yield|y[1]=Export|layout={layout}|options={options}|title=Agricultural Trends|>\n\n<|{crop_distribution_chart}|chart|type=pie|values=crop_values|labels=crop_labels|title=Crop Distribution|>\n|>\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages related to the e-commerce sector (replace with actual imports)\nfrom pages.ecommerce.ecommerce import ecommerce_md\n\n# Define the pages\npages = {\n '/': root, # Replace with the root page if you have one\n \"E-commerce\": ecommerce_md,\n}\n\n# Create a Gui instance with the pages\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n\n # Run the multi-page app\n gui_multi_pages.run(title=\"E-commerce Dashboard\")\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load employee data\nemployee_data = pd.read_csv(\"employee_data.csv\")\n\n# Initialize variables\ndepartments = list(employee_data[\"Department\"].unique())\npositions = list(employee_data[\"Position\"].unique())\ndepartment = departments\nposition = positions\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{position}|selector|lov={positions}|multiple|label=Select Position|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_employees>\n\n\n|average_salary>\n\n\n|employee_table>\n|main_page>\n|>\n\nCode adapted from [Employee Management](https://github.com/employee_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/employee-management-app)\n\"\"\"\n\ndef filter(department, position):\n df_selection = employee_data[\n employee_data[\"Department\"].isin(department)\n & employee_data[\"Position\"].isin(position)\n ]\n return df_selection\n\ndef on_filter(state):\n state.df_selection = filter(state.department, state.position)\n\nif __name__ == \"__main__\":\n # Initialize dataframe\n df_selection = filter(department, position)\n\n # Run the app\n Gui(page).run()\n"} {"text": "<|{all_users}|table|columns={user_columns}|width='100%'|on_action={on_user_table_click}|style=user_style|>\n<|Add User|button|on_action={open_add_user_dialog}|>\n<|Refresh Users|button|on_action={refresh_user_list}|>\n\n<|{show_dialog_add_user}|dialog|title=Add new user|\n<|{new_user_name}|input|placeholder='Enter user name'|\n<|{new_user_role}|selector|lov={get_all_roles()}|>\n<|Add|button|on_action={add_user}|>\n<|Cancel|button|on_action={close_add_user_dialog}|>\n|>\n\n<|{show_user_details}|pane|\n\n# User Details <|Delete|button|on_action=delete_selected_user|> <|Disable|button|on_action=disable_selected_user|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n## Name\n<|{selected_user.name}|>\n|>\n\n<|part|class_name=card|\n## Role\n<|{selected_user.role}|>\n|>\n\n<|part|class_name=card|\n## ID\n<|{selected_user.id}|>\n|>\n\n<|part|class_name=card|\n## Creation Date\n<|{selected_user.creation_date.strftime(\"%b %d %y %H:%M:%S\")}|>\n|>\n\n<|part|class_name=card|\n## Status\n<|{get_status(selected_user)}|>\n|>\n\n----\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load sales data\nsales_data = pd.read_csv(\"sales_data.csv\")\n\n# Initialize variables\nregions = list(sales_data[\"Region\"].unique())\ncategories = list(sales_data[\"Category\"].unique())\nregion = regions\ncategory = categories\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 
80|gap=30px|\n\n\n<|{category}|selector|lov={categories}|multiple|label=Select Category|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_sales>\n\n\n|average_profit>\n\n\n|sales_dashboard>\n|main_page>\n|>\n\nCode adapted from [Sales Performance Dashboard](https://github.com/sales_performance_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/sales-performance-dashboard-app)\n"} {"text": "<|{all_resources}|table|columns={resource_columns}|width='100%'|on_action={on_resource_select}|style=resource_style|>\n<|Allocate Resource|button|on_action={open_allocate_resource_dialog}|>\n<|Refresh Resources|button|on_action={refresh_resource_list}|>\n\n<|{show_allocate_resource_dialog}|dialog|title=Allocate Resource|\n<|{resource_name}|selector|lov={get_all_resources()}|>\n<|{resource_quantity}|number_input|min=0|placeholder='Quantity'|\n<|Allocate|button|on_action={allocate_resource}|>\n<|Cancel|button|on_action={close_allocate_resource_dialog}|>\n|>\n\n<|{show_resource_details}|pane|\n\n# Resource Details <|Release|button|on_action=release_selected_resource|> <|Update|button|on_action=update_selected_resource|>\n\n<|layout|columns=2|\n<|part|class_name=card|\n## Name\n<|{selected_resource.name}|>\n|>\n\n<|part|class_name=card|\n## Allocated Quantity\n<|{selected_resource.allocated_quantity}|>\n|>\n\n<|part|class_name=card|\n## Total Quantity\n<|{selected_resource.total_quantity}|>\n|>\n\n<|part|class_name=card|\n## Last Allocation Date\n<|{selected_resource.last_allocation_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n----\n|>\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal Education\n\n<|layout|columns=35 65|\nDisplay Mandelbrot Fractal for Educational Purposes\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nCreate a slider to select iterations\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:<br/>
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:<br/>
\n\nSlider dy_start\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal Education\")\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Economics\"\n mood = \"economicanalysis\"\n style = \"economicspro\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Economics Tweets\n\n This mini-app generates Tweets related to Economics using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n \n |topic>\n\n \n |mood>\n\n \n |style>\n\n Create a Generate text button\n <|Generate Economics Tweet|button|on_action=generate_text|label=Generate text|>\n\n <|{image}|image|height=400px|>\n |image>\n\n Break line\n<br/>
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Traffic Zone
\n<|{traffic_zone_selected}|selector|lov=zone_downtown;zone_suburbs;zone_industrial|dropdown|on_change=on_traffic_zone_change|>\n|>\n\n<|part|class_name=card|\n### Select Time of Day
\n<|{time_of_day_selected}|selector|lov=time_morning;time_afternoon;time_evening|dropdown|on_change=on_time_of_day_change|>\n|>\n\n|>\n\n<|Traffic Data Overview|expandable|expanded=True|\nDisplay traffic_zone_data and time_of_day_data\n<|layout|columns=1 1|\n<|{traffic_zone_data}|table|page_size=5|>\n\n<|{time_of_day_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{traffic_volume_chart}|chart|type=bar|x=Zone|y=Traffic Volume|title=Traffic Volume by Zone|>\n|>\n\n<|part|class_name=card|\n<|{peak_hours_chart}|chart|type=line|x=Time|y=Vehicles|title=Peak Traffic Hours|>\n|>\n|>\n\n
\n### Analyze Traffic Patterns:\n<|{traffic_pattern_analysis}|scenario|on_submission_change=on_traffic_pattern_status_change|expandable=False|expanded=False|>\n\n<|{traffic_pattern_analysis}|scenario_dag|>\n\n<br/>
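{"text": "# Illustrative sketch: a minimal on_submission_change callback for the scenario control above; Taipy invokes it as (state, submittable, details), where details carries the new submission status\nfrom taipy.gui import notify\n\ndef on_traffic_pattern_status_change(state, submittable, details):\n # Tell the user when the submitted scenario finishes\n if details.get(\"submission_status\") == \"COMPLETED\":\n notify(state, \"success\", \"Traffic pattern analysis finished\")"}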
\n### View traffic pattern analysis results:\n<|{traffic_pattern_analysis.results if traffic_pattern_analysis else None}|data_node|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\ntext_data = \"\"\ntext_path = \"\"\n\ndef text_upload(state):\n if state.text_path:\n with open(state.text_path, \"r\") as file:\n state.text_data = file.read()\n\ntext_page = \"\"\"\n<|{text_path}|file_selector|accept=.txt|on_action=text_upload|>\n<|{text_data}|textarea|rows=10|>\n\"\"\"\n\nGui(text_page).run()\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Agriculture Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:<br/>
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:<br/>
\n\nSlider dy_start\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Agriculture Visualization\")\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|energy_type>\n|>\n\n<|{selected_demand_forecast}|demand_forecast|on_submission_change=on_submission_change_demand_forecast|not expanded|>\n\n---------------------------------------\n\n## **Energy Demand Projections**{: .color-primary} and Data Analysis\n\n<|{selected_demand_forecast.result.read() if selected_demand_forecast and selected_demand_forecast.result.read() is not None else default_demand_result}|chart|x=Date|y[1]=Projected Demand|y[2]=Historical Demand|type[1]=bar|title=Energy Demand Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|demand_forecast>\n|>\n"} {"text": "# **Country**{: .color-primary} Economic Indicators\n\n<|layout|columns=1 1 1|\n<|{selected_country_economy}|selector|lov={selector_country_economy}|on_change=on_change_country_economy|dropdown|label=Country|>\n\n<|{selected_economic_indicator}|toggle|lov={economic_indicator_selector}|on_change=update_economic_indicator|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**GDP**{: .color-primary}\n<|{'${:,.2f}'.format(economy_data.iloc[-1]['GDP'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Inflation Rate**{: .color-primary}\n<|{'{:.2f}%'.format(economy_data.iloc[-1]['Inflation Rate'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Unemployment Rate**{: .color-primary}\n<|{'{:.2f}%'.format(economy_data.iloc[-1]['Unemployment Rate'])}|text|class_name=h2|>\n|>\n|>\n\n<br/>
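{"text": "# Illustrative sketch: shape of the economy_data frame that the cards above read with iloc[-1] (placeholder figures)\nimport pandas as pd\n\neconomy_data = pd.DataFrame({\n \"Year\": [2021, 2022, 2023],\n \"GDP\": [2.1e12, 2.2e12, 2.3e12],\n \"Inflation Rate\": [4.7, 8.0, 5.6],\n \"Unemployment Rate\": [5.4, 3.6, 3.9],\n})"}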
\n\n<|layout|columns=2 1|\n<|{economy_data}|chart|type=line|x=Year|y[3]=GDP|y[2]=Inflation Rate|y[1]=Unemployment Rate|>\n|>\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Energy Source
\n<|{energy_source_selected}|selector|lov=source_solar;source_wind;source_hydro|dropdown|on_change=on_energy_source_change|>\n|>\n\n<|part|class_name=card|\n### Select Region
\n<|{region_selected}|selector|lov=region_north;region_south;region_east;region_west|dropdown|on_change=on_region_change|>\n|>\n\n|>\n\n<|Energy Data Overview|expandable|expanded=True|\nDisplay energy_data and region_data\n<|layout|columns=1 1|\n<|{energy_data}|table|page_size=5|>\n\n<|{region_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{consumption_chart}|chart|type=bar|x=Month|y=Consumption|title=Energy Consumption by Source|>\n|>\n\n<|part|class_name=card|\n<|{region_comparison_chart}|chart|type=line|x=Month|y=Region Consumption|title=Regional Energy Consumption|>\n|>\n|>\n\n
\n### Analyze Energy Trends:\n<|{energy_trend_analysis}|scenario|on_submission_change=on_energy_trend_status_change|expandable=False|expanded=False|>\n\n<|{energy_trend_analysis}|scenario_dag|>\n\n<br/>
\n### View energy trend analysis results:\n<|{energy_trend_analysis.results if energy_trend_analysis else None}|data_node|>\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\nfrom pages.country.country import country_md\nfrom pages.world.world import world_md\nfrom pages.map.map import map_md\nfrom pages.predictions.predictions import predictions_md, selected_scenario\nfrom pages.root import root, selected_country, selector_country\n\nfrom config.config import Config\n\n# Define the pages\npages = {\n '/': root,\n \"Country\": country_md,\n \"World\": world_md,\n \"Map\": map_md,\n \"Predictions\": predictions_md\n}\n\n# Create a Gui instance with the pages\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n\n # Run the multi-page app\n gui_multi_pages.run(title=\"Covid Dashboard\")\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Research Area
\n<|{research_area_selected}|selector|lov=area_physics;area_chemistry;area_biology|dropdown|on_change=on_research_area_change|>\n|>\n\n<|part|class_name=card|\n### Select Year
\n<|{publication_year_selected}|selector|lov=year_2020;year_2021;year_2022|dropdown|on_change=on_publication_year_change|>\n|>\n\n|>\n\n<|Research Publications Overview|expandable|expanded=True|\nDisplay research_data and year_data\n<|layout|columns=1 1|\n<|{research_data}|table|page_size=5|>\n\n<|{year_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{publication_count_chart}|chart|type=bar|x=Research Area|y=Publications|title=Publications Count|>\n|>\n\n<|part|class_name=card|\n<|{citation_chart}|chart|type=line|x=Year|y=Citations|title=Citation Trends|>\n|>\n|>\n\n
\n### Analyze Research Impact:\n<|{research_impact_analysis}|scenario|on_submission_change=on_research_impact_status_change|expandable=False|expanded=False|>\n\n<|{research_impact_analysis}|scenario_dag|>\n\n<br/>
\n### View research impact results:\n<|{research_impact_analysis.results if research_impact_analysis else None}|data_node|>\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Department
\n<|{department_selected}|selector|lov=dept_sales;dept_marketing;dept_technical|dropdown|on_change=on_department_change|>\n|>\n\n<|part|class_name=card|\n### Select Time Period
\n<|{time_period_selected}|selector|lov=period_this_month;period_last_month;period_this_quarter|dropdown|on_change=on_time_period_change|>\n|>\n\n|>\n\n<|Employee Performance Overview|expandable|expanded=True|\nDisplay department_data and time_period_data\n<|layout|columns=1 1|\n<|{department_data}|table|page_size=5|>\n\n<|{time_period_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{performance_chart}|chart|type=bar|x=Employee|y=Performance Score|title=Department Performance|>\n|>\n\n<|part|class_name=card|\n<|{attendance_chart}|chart|type=line|options={attendance_options}|layout={attendance_layout}|>\n|>\n|>\n\n<br/>
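{"text": "# Illustrative sketch: possible attendance_options and attendance_layout for the attendance chart above; both dicts are forwarded to Plotly (keys and values here are assumptions)\nattendance_options = {\"fill\": \"tozeroy\"}\nattendance_layout = {\n \"title\": \"Attendance Over Time\",\n \"yaxis\": {\"title\": \"Days Present\"},\n}"}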
\n### Run Employee Analysis:\n<|{employee_analysis}|scenario|on_submission_change=on_employee_analysis_status_change|expandable=False|expanded=False|>\n\n<|{employee_analysis}|scenario_dag|>\n\n
\n### View the employee analysis results:\n<|{employee_analysis.results if employee_analysis else None}|data_node|>\n"} {"text": "from taipy.gui import Gui, notify\nimport random\nimport re\nimport logging\n\n# Import OpenAI module\nimport oai\n\n# Configure logger\nlogging.basicConfig(format=\"\\n%(asctime)s\\n%(message)s\", level=logging.INFO, force=True)\n\n\n# Define functions\ndef error_prompt_flagged(state, prompt):\n \"\"\"Notify user that a prompt has been flagged.\"\"\"\n notify(state, \"error\", \"Prompt flagged as inappropriate.\")\n logging.info(f\"Prompt flagged as inappropriate: {prompt}\")\n\n\ndef error_too_many_requests(state):\n \"\"\"Notify user that too many requests have been made.\"\"\"\n notify(\n state,\n \"error\",\n \"Too many requests. Please wait a few seconds before generating another text or image.\",\n )\n logging.info(f\"Session request limit reached: {state.n_requests}\")\n state.n_requests = 1\n\n\ndef generate_text(state):\n \"\"\"Generate Tweet text.\"\"\"\n state.tweet = \"\"\n state.image = None\n\n # Check the number of requests done by the user\n if state.n_requests >= 5:\n error_too_many_requests(state)\n return\n\n # Check if the user has put a topic\n if state.topic == \"\":\n notify(state, \"error\", \"Please enter a topic\")\n return\n\n # Create the prompt and add a style or not\n if state.style == \"\":\n state.prompt = f\"Write a {state.mood} Tweet about {state.topic} in less than 120 characters:\\n\\n\"\n else:\n state.prompt = (\n f\"Write a {state.mood} Tweet about {state.topic} in less than 120 characters \"\n f\"and with the style of {state.style}:\\n\\n\"\n )\n\n # Configure OpenAI and check whether the prompt is flagged\n openai = oai.Openai()\n flagged = openai.moderate(state.prompt)\n\n if flagged:\n error_prompt_flagged(state, f\"Prompt: {state.prompt}\\n\")\n return\n else:\n # Generate the tweet\n state.n_requests += 1\n state.tweet = openai.complete(state.prompt).strip().replace('\"', \"\")\n\n # Notify the user in console and in the GUI\n logging.info(\n f\"Topic: {state.topic} {state.mood} {state.style}\\n\" f\"Tweet: {state.tweet}\"\n )\n notify(state, \"success\", \"Tweet created!\")\n\n\ndef generate_image(state):\n \"\"\"Generate Tweet image.\"\"\"\n notify(state, \"info\", \"Generating image...\")\n\n # Check the number of requests done by the user\n if state.n_requests >= 5:\n error_too_many_requests(state)\n return\n\n state.image = None\n\n # Creates the prompt\n prompt_wo_hashtags = re.sub(\"#[A-Za-z0-9_]+\", \"\", state.prompt)\n processing_prompt = (\n \"Create a detailed but brief description of an image that captures \"\n f\"the essence of the following text:\\n{prompt_wo_hashtags}\\n\\n\"\n )\n\n # Configure OpenAI and check whether the prompt is flagged\n openai = oai.Openai()\n flagged = openai.moderate(processing_prompt)\n\n if flagged:\n error_prompt_flagged(state, processing_prompt)\n return\n else:\n state.n_requests += 1\n # Generate the prompt that will create the image\n processed_prompt = (\n openai.complete(prompt=processing_prompt, temperature=0.5, max_tokens=40)\n .strip()\n .replace('\"', \"\")\n .split(\".\")[0]\n + \".\"\n )\n\n # Generate the image\n state.image = openai.image(processed_prompt)\n\n # Notify the user in console and in the GUI\n logging.info(f\"Tweet: {state.prompt}\\nImage prompt: {processed_prompt}\")\n notify(state, \"success\", \"Image created!\")\n\n\ndef feeling_lucky(state):\n \"\"\"Generate a feeling-lucky tweet.\"\"\"\n with open(\"moods.txt\") as f:\n sample_moods =
f.read().splitlines()\n state.topic = \"an interesting topic\"\n state.mood = random.choice(sample_moods)\n state.style = \"\"\n generate_text(state)\n\n\n# Variables\ntweet = \"\"\nprompt = \"\"\nn_requests = 0\n\ntopic = \"AI\"\nmood = \"inspirational\"\nstyle = \"elonmusk\"\n\nimage = None\n\n\n# Called whenever there is a problem\ndef on_exception(state, function_name: str, ex: Exception):\n logging.error(f\"Problem {ex} \\nin {function_name}\")\n notify(state, \"error\", f\"Problem {ex} \\nin {function_name}\")\n\n\n# Markdown for the entire page\npage = \"\"\"\n<|container|\n# **Generate**{: .color-primary} Tweets\n\nThis mini-app generates Tweets using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n<|layout|columns=1 1 1|gap=30px|class_name=card|\n\n|topic>\n\n\n|mood>\n\n\n|style>\n\nCreate a Generate text button\n<|Generate text|button|on_action=generate_text|label=Generate text|>\n\n<|Feeling lucky|button|on_action=feeling_lucky|label=Feeling Lucky|>\n|>\n\n<br/>
\n\n---\n\n<br/>
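{"text": "# Illustrative sketch of the interface the code above expects from the local oai module; the method names mirror the calls made (moderate, complete, image), while signatures and defaults are assumptions\nclass Openai:\n def moderate(self, prompt: str) -> bool:\n \"\"\"Return True if the moderation endpoint flags the prompt.\"\"\"\n raise NotImplementedError\n\n def complete(self, prompt: str, temperature: float = 0.9, max_tokens: int = 50) -> str:\n \"\"\"Return a text completion for the prompt.\"\"\"\n raise NotImplementedError\n\n def image(self, prompt: str) -> str:\n \"\"\"Return the URL of an image generated from the prompt.\"\"\"\n raise NotImplementedError"}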
\n\n### Generated **Tweet**{: .color-primary}\n\nCreate a text input for the tweet\n<|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Scientific Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:<br/>
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:<br/>
\n\nSlider dy_start\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Scientific Visualization\")\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages related to the automotive sector (replace with actual imports)\nfrom pages.automotive.automotive import automotive_md\n\n# Define the pages\npages = {\n '/': root, # Replace with the root page if you have one\n \"Automotive\": automotive_md,\n}\n\n# Create a Gui instance with the pages\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n\n # Run the multi-page app\n gui_multi_pages.run(title=\"Automotive Dashboard\")\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\nimport json\n\njson_data = []\njson_path = \"\"\n\ndef json_upload(state):\n with open(state.json_path, 'r') as file:\n state.json_data = pd.read_json(file)\n\njson_page = \"\"\"\n<|{json_path}|file_selector|accept=.json|on_action=json_upload|>\n<|{json_data}|table|>\n\"\"\"\n\nGui(json_page).run()\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Primary City for Weather Data
\n<|{primary_city_selected}|selector|lov=city_new_york;city_london;city_tokyo|dropdown|on_change=on_primary_city_change|>\n|>\n\n<|part|class_name=card|\n### Select City to Compare
\n<|{compare_city_selected}|selector|lov=city_new_york;city_london;city_tokyo|dropdown|on_change=on_compare_city_change|>\n|>\n\n|>\n\n<|Weather Data Overview|expandable|expanded=True|\nDisplay primary_city_data and compare_city_data\n<|layout|columns=1 1|\n<|{primary_city_data}|table|page_size=5|>\n\n<|{compare_city_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{temperature_chart}|chart|type=line|x=Day|y[1]=Primary City Temp|y[2]=Compare City Temp|title=Temperature Comparison|>\n|>\n\n<|part|class_name=card|\n<|{humidity_chart}|chart|type=bar|x=Day|y[1]=Primary City Humidity|y[2]=Compare City Humidity|title=Humidity Comparison|>\n|>\n|>\n\n
\n### Analyze Weather Patterns:\n<|{weather_pattern_analysis}|scenario|on_submission_change=on_weather_pattern_status_change|expandable=False|expanded=False|>\n\n<|{weather_pattern_analysis}|scenario_dag|>\n\n<br/>
\n### View weather analysis results:\n<|{weather_pattern_analysis.results if weather_pattern_analysis else None}|data_node|>\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages for the logistics sector\nfrom pages.logistics.dashboard import dashboard_md\nfrom pages.logistics.orders import orders_md\nfrom pages.logistics.inventory import inventory_md\nfrom pages.logistics.shipping import shipping_md\n\n# Define your pages dictionary\npages = {\n '/dashboard': dashboard_md,\n '/orders': orders_md,\n '/inventory': inventory_md,\n '/shipping': shipping_md\n}\n\n# Create a Gui with your pages\ngui_logistics = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n # Run the multi-page app\n gui_logistics.run(title=\"Logistics Dashboard\")\n"} {"text": "<|{all_projects}|table|columns={project_columns}|width='100%'|on_action={on_project_table_click}|style=project_style|>\n<|Create Project|button|on_action={open_create_project_dialog}|>\n<|Refresh Projects|button|on_action={refresh_project_list}|>\n\n<|{show_dialog_create_project}|dialog|title=Create new project|\n<|{project_name}|input|placeholder='Enter project name'|\n<|Create|button|on_action={create_project}|>\n<|Cancel|button|on_action={close_create_project_dialog}|>\n|>\n\n<|{show_project_details}|pane|\n\n# Project Details <|Delete|button|on_action=delete_selected_project|> <|Archive|button|on_action=archive_selected_project|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n## Project Name\n<|{selected_project.name}|>\n|>\n\n<|part|class_name=card|\n## Project Manager\n<|{selected_project.manager}|>\n|>\n\n<|part|class_name=card|\n## ID\n<|{selected_project.id}|>\n|>\n\n<|part|class_name=card|\n## Start Date\n<|{selected_project.start_date.strftime(\"%b %d %y\")}|>\n|>\n\n<|part|class_name=card|\n## Status\n<|{get_project_status(selected_project)}|>\n|>\n\n----\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load employee time tracking data\ntime_tracking_data = pd.read_csv(\"time_tracking_data.csv\")\n\n# Initialize variables\nemployees = list(time_tracking_data[\"Employee\"].unique())\nprojects = list(time_tracking_data[\"Project\"].unique())\nemployee = employees\nproject = projects\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{project}|selector|lov={projects}|multiple|label=Select Project|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_entries>\n\n hours\n|average_hours>\n\n\n|time_tracking_table>\n|main_page>\n|>\n\nCode adapted from [Employee Time Tracking](https://github.com/time_tracking_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/employee-time-tracking-app)\n"} {"text": "# **Worldwide**{: .color-primary} Education Statistics\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Literacy Rate**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_education['Literacy Rate']))}|text|class_name=h2|>\n|>\n\n<|card|\n**School Enrollment**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_education['School Enrollment']))}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Education Spending**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_education['Education Spending']))}|text|class_name=h2|>\n|>\n|>\n\n<br/>
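{"text": "# Illustrative sketch: the worldwide averages above assume a data_world_education frame with one row per country (placeholder figures)\nimport numpy as np\nimport pandas as pd\n\ndata_world_education = pd.DataFrame({\n \"Country\": [\"France\", \"India\", \"Brazil\"],\n \"Literacy Rate\": [99.0, 74.4, 93.2],\n \"School Enrollment\": [98.0, 89.7, 95.1],\n \"Education Spending\": [5.4, 4.5, 6.1],\n})"}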
\n\n<|{selected_education_metric}|toggle|lov={education_metric_selector}|>\n\n<|part|render={selected_education_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_education_pie_absolute}|chart|type=pie|labels=Country|values=Literacy Rate|title=Global Literacy Rate Distribution|>\n\n<|{data_world_education_evolution_absolute}|chart|properties={data_world_education_evolution_properties}|title=Education Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_education_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_education_pie_relative}|chart|type=pie|labels=Country|values=School Enrollment|>\n\n<|{data_world_education_evolution_relative}|chart|properties={data_world_education_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|route>\n|>\n\n<|{selected_transport_scenario}|transport_scenario|on_submission_change=on_transport_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Transport Efficiency Metrics**{: .color-primary} and Trends\n\n<|{selected_transport_scenario.result.read() if selected_transport_scenario and selected_transport_scenario.result.read() is not None else default_transport_result}|chart|x=Time|y[1]=Passenger Count|y[2]=Average Delay|type[1]=bar|title=Route Efficiency Analysis|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|transport_scenario>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\naudio_data = None\naudio_path = \"\"\n\ndef audio_upload(state):\n if state.audio_path:\n audio_data = state.audio_path # Directly use the path for audio elements\n\naudio_page = \"\"\"\n<|{audio_path}|file_selector|accept=audio/*|on_action=audio_upload|>\n<|{audio_data}|audio|controls=True|>\n\"\"\"\n\nGui(audio_page).run()\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport io\n\ncsv_data = None\ncsv_path = \"\"\n\ndef csv_upload_analyze(state):\n if state.csv_path:\n state.csv_data = pd.read_csv(state.csv_path)\n plt.figure()\n state.csv_data.hist()\n plt.xlabel(\"Values\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Data Distribution\")\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n state.plot_image = buf.read()\n\ncsv_analyze_page = \"\"\"\n<|{csv_path}|file_selector|accept=.csv|on_action=csv_upload_analyze|>\n<|{csv_data}|table|>\n<|{plot_image}|image|>\n\"\"\"\n\nGui(csv_analyze_page).run()\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load project task data\ntask_data = pd.read_csv(\"project_task_data.csv\")\n\n# Initialize variables\nprojects = list(task_data[\"Project\"].unique())\npriorities = list(task_data[\"Priority\"].unique())\nproject = projects\npriority = priorities\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{priority}|selector|lov={priorities}|multiple|label=Select Priority|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_tasks>\n\n days\n|average_completion>\n\n\n|task_management_table>\n|main_page>\n|>\n\nCode adapted from [Project Task Management](https://github.com/task_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/project-task-management-app)\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages for the homecare sector\nfrom pages.homecare.home import home_md\nfrom pages.homecare.services import services_md\nfrom 
pages.homecare.appointments import appointments_md\nfrom pages.homecare.contacts import contacts_md\n\n# Define your pages dictionary\npages = {\n '/home': home_md,\n '/services': services_md,\n '/appointments': appointments_md,\n '/contacts': contacts_md\n}\n\n# Create a Gui with your pages\ngui_homecare = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n # Run the multi-page app\n gui_homecare.run(title=\"Homecare Dashboard\")\n"} {"text": "# **Global**{: .color-primary} Technology Adoption\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Internet Users**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_tech['Internet Users']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Smartphone Penetration**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_tech['Smartphone Penetration']))}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**AI Adoption**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_tech['AI Adoption']))}|text|class_name=h2|>\n|>\n|>\n\n<br/>
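{"text": "# Illustrative sketch: the Absolute/Relative toggle pattern used below; the render= expressions compare the selected value against these exact strings\ntech_metric_selector = [\"Absolute\", \"Relative\"]\nselected_tech_metric = tech_metric_selector[0]"}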
\n\n<|{selected_tech_metric}|toggle|lov={tech_metric_selector}|>\n\n<|part|render={selected_tech_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_tech_pie_absolute}|chart|type=pie|labels=Country|values=Internet Users|title=Global Internet Usage|>\n\n<|{data_world_tech_evolution_absolute}|chart|properties={data_world_tech_evolution_properties}|title=Technology Evolution Worldwide|>\n|>\n|>\n\n<|part|render={selected_tech_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_tech_pie_relative}|chart|type=pie|labels=Country|values=Smartphone Penetration|>\n\n<|{data_world_tech_evolution_relative}|chart|properties={data_world_tech_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|product>\n|>\n\n<|{selected_supply_chain_scenario}|supply_chain_scenario|on_submission_change=on_supply_chain_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Supply Chain Dynamics**{: .color-primary} and Forecast\n\n<|{selected_supply_chain_scenario.result.read() if selected_supply_chain_scenario and selected_supply_chain_scenario.result.read() is not None else default_supply_chain_result}|chart|x=Date|y[1]=Demand|y[2]=Supply|type[1]=line|title=Supply vs. Demand Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|supply_chain_scenario>\n|>\n"} {"text": "# **Global**{: .color-primary} Tourism Statistics\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**International Tourist Arrivals**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_tourism['Tourist Arrivals']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Tourism Revenue**{: .color-primary}\n<|{'${:,.2f}'.format(np.sum(data_world_tourism['Revenue']))}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Hotel Occupancy Rates**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_tourism['Occupancy']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_tourism_metric}|toggle|lov={tourism_metric_selector}|>\n\n<|part|render={selected_tourism_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_tourism_pie_absolute}|chart|type=pie|labels=Country|values=Tourist Arrivals|title=Global Tourist Arrivals|>\n\n<|{data_world_tourism_evolution_absolute}|chart|properties={data_world_tourism_evolution_properties}|title=Tourism Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_tourism_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_tourism_pie_relative}|chart|type=pie|labels=Country|values=Occupancy|>\n\n<|{data_world_tourism_evolution_relative}|chart|properties={data_world_tourism_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "# **Global**{: .color-primary} E-Commerce Trends\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Online Sales Volume**{: .color-primary}\n<|{'${:,.2f}'.format(np.sum(data_world_ecommerce['Sales Volume']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Active Online Users**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_ecommerce['Active Users']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Average Purchase Value**{: .color-primary}\n<|{'${:,.2f}'.format(np.average(data_world_ecommerce['Purchase Value']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_ecommerce_metric}|toggle|lov={ecommerce_metric_selector}|>\n\n<|part|render={selected_ecommerce_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_ecommerce_pie_absolute}|chart|type=pie|labels=Country|values=Sales Volume|title=Global Online Sales Volume|>\n\n<|{data_world_ecommerce_evolution_absolute}|chart|properties={data_world_ecommerce_evolution_properties}|title=E-Commerce Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_ecommerce_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_ecommerce_pie_relative}|chart|type=pie|labels=Country|values=Active Users|>\n\n<|{data_world_ecommerce_evolution_relative}|chart|properties={data_world_ecommerce_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "# **Global**{: .color-primary} Automotive Industry\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Vehicle Production**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_automotive['Vehicle Production']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Electric Vehicle Sales**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_automotive['EV Sales']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Market Share**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_automotive['Market Share']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_automotive_metric}|toggle|lov={automotive_metric_selector}|>\n\n<|part|render={selected_automotive_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_automotive_pie_absolute}|chart|type=pie|labels=Country|values=Vehicle Production|title=Global Vehicle Production|>\n\n<|{data_world_automotive_evolution_absolute}|chart|properties={data_world_automotive_evolution_properties}|title=Automotive Industry Trends|>\n|>\n|>\n\n<|part|render={selected_automotive_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_automotive_pie_relative}|chart|type=pie|labels=Country|values=EV Sales|>\n\n<|{data_world_automotive_evolution_relative}|chart|properties={data_world_automotive_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "from taipy import Gui\n\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Generator\n\n<|layout|columns=35 65|\nDisplay image from path\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nCreate a slider to select iterations\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:<br/>
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:<br/>
\n\nSlider dy_start\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Generator\")\n"} {"text": "<|{all_appointments}|table|columns={appointment_columns}|width='100%'|on_action={on_appointment_select}|style=appointment_style|>\n<|Book Appointment|button|on_action={open_book_appointment_dialog}|>\n<|Refresh Appointments|button|on_action={refresh_appointment_list}|>\n\n<|{show_book_appointment_dialog}|dialog|title=Book New Appointment|\n<|{patient_name}|input|placeholder='Patient Name'|\n<|{appointment_date}|datetime_picker|>\n<|{doctor_selector}|selector|lov={get_all_doctors()}|>\n<|Book|button|on_action={book_appointment}|>\n<|Cancel|button|on_action={close_book_appointment_dialog}|>\n|>\n\n<|{show_appointment_details}|pane|\n\n# Appointment Details <|Edit|button|on_action=edit_selected_appointment|> <|Cancel|button|on_action=cancel_selected_appointment|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Patient Name\n<|{selected_appointment.patient_name}|>\n|>\n\n<|part|class_name=card|\n## Date and Time\n<|{selected_appointment.date.strftime(\"%b %d, %Y at %H:%M\")}|>\n|>\n\n<|part|class_name=card|\n## Doctor\n<|{selected_appointment.doctor}|>\n|>\n\n<|part|class_name=card|\n## Status\n<|{get_appointment_status(selected_appointment)}|>\n|>\n\n----\n|>\n"} {"text": "# **Global**{: .color-primary} Food Security and Agriculture\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Cereal Production**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_agri['Cereal Production']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Agricultural Land**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_agri['Agricultural Land']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Undernourishment Rate**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_agri['Undernourishment Rate']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_agri_metric}|toggle|lov={agri_metric_selector}|>\n\n<|part|render={selected_agri_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_agri_pie_absolute}|chart|type=pie|labels=Country|values=Cereal Production|title=Global Cereal Production|>\n\n<|{data_world_agri_evolution_absolute}|chart|properties={data_world_agri_evolution_properties}|title=Agricultural Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_agri_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_agri_pie_relative}|chart|type=pie|labels=Country|values=Undernourishment Rate|>\n\n<|{data_world_agri_evolution_relative}|chart|properties={data_world_agri_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"E-commerce\"\n mood = \"shopping\"\n style = \"ecommerceexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} E-commerce Tweets\n\n This mini-app generates Tweets related to E-commerce using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n <topic|\n <|{topic}|input|label=Topic|>\n |topic>\n\n <mood|\n <|{mood}|input|label=Mood|>\n |mood>\n\n <style|\n <|{style}|input|label=Style|>\n |style>\n |>\n\n <|Generate E-commerce Tweet|button|on_action=generate_text|label=Generate text|>\n\n <image|\n <|{image}|image|height=400px|>\n |image>\n\n <br/
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages related to education and healthcare (replace with actual imports)\nfrom pages.education.education import education_md\nfrom pages.healthcare.healthcare import healthcare_md\n\n# Define the pages\npages = {\n '/': root, # Replace with the root page if you have one\n \"Education\": education_md,\n \"Healthcare\": healthcare_md,\n}\n\n# Create a Gui instance with the pages\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n\n # Run the multi-page app\n gui_multi_pages.run(title=\"Education & Healthcare Dashboard\")\n"} {"text": "<|{all_properties}|table|columns={property_columns}|width='100%'|on_action={on_property_select}|style=property_style|>\n<|Add Property|button|on_action={open_add_property_dialog}|>\n<|Refresh Properties|button|on_action={refresh_property_list}|>\n\n<|{show_add_property_dialog}|dialog|title=Add New Property|\n<|{property_address}|input|placeholder='Address'|\n<|{property_price}|number_input|placeholder='Price'|\n<|Add Property|button|on_action={add_property}|>\n<|Cancel|button|on_action={close_add_property_dialog}|>\n|>\n\n<|{show_property_details}|pane|\n\n# Property Details <|Edit|button|on_action=edit_selected_property|> <|Remove|button|on_action=remove_selected_property|>\n\n<|layout|columns=2|\n<|part|class_name=card|\n## Address\n<|{selected_property.address}|>\n|>\n\n<|part|class_name=card|\n## Price\n<|{selected_property.price}|>\n|>\n\n<|part|class_name=card|\n## Listed Date\n<|{selected_property.listed_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n----\n|>\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages for the education sector\nfrom pages.education.home import home_md\nfrom pages.education.courses import courses_md\nfrom pages.education.students import students_md\nfrom pages.education.teachers import teachers_md\n\n# Define your pages dictionary\npages = {\n '/home': home_md,\n '/courses': courses_md,\n '/students': students_md,\n '/teachers': teachers_md\n}\n\n# Create a Gui with your pages\ngui_education = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n # Run the multi-page app\n gui_education.run(title=\"Education Dashboard\")\n"} {"text": "# **Country**{: .color-primary} Education Statistics\n\n<|layout|columns=1 1 1|\n<|{selected_country_education}|selector|lov={selector_country_education}|on_change=on_change_country_education|dropdown|label=Country|>\n\n<|{selected_education_level}|toggle|lov={education_level_selector}|on_change=update_education_level_display|>\n|>\n\n
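Every tweet-generation page in this section wires its button to a `generate_text` callback that is not shown. A hedged sketch of what such a callback could look like; the prompt wording is an assumption, and the model call is stubbed out so the sketch runs without an API key:

```python
def generate_text(state):
    # Build a prompt from the three bound inputs.
    prompt = f"Write a {state.mood} tweet about {state.topic} in the style of {state.style}"
    # The real demo calls a text-generation API here; call_model would be a
    # hypothetical wrapper around it.
    # state.tweet = call_model(prompt)
    state.tweet = prompt  # placeholder so the sketch stays self-contained
```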
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Literacy Rate**{: .color-primary}\n<|{'{:.2f}%'.format(education_data.iloc[-1]['Literacy Rate'])}|text|class_name=h2|>\n|>\n\n<|card|\n**School Enrollment**{: .color-primary}\n<|{'{:.2f}%'.format(education_data.iloc[-1]['School Enrollment'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Average Education Years**{: .color-primary}\n<|{'{:.1f}'.format(education_data.iloc[-1]['Average Education Years'])}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{education_data}|chart|type=bar|x=Year|y[3]=Literacy Rate|y[2]=School Enrollment|y[1]=Average Education Years|layout={layout}|options={options}|title=Education Progress|>\n\n<|{education_level_chart}|chart|type=pie|values=education_level_values|labels=education_level_labels|title=Education Level Distribution|>\n|>\n"} {"text": "# **Worldwide**{: .color-primary} Renewable Energy Usage\n\n
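The education charts above use Taipy's indexed trace syntax, where each `y[n]=Column` binds one DataFrame column to one trace of the same chart. A small sketch with stand-in numbers (illustrative only):

```python
import pandas as pd
from taipy.gui import Gui

# Dummy frame whose columns match the y[1]/y[2]/y[3] bindings.
education_data = pd.DataFrame({
    "Year": [2019, 2020, 2021],
    "Literacy Rate": [86.0, 86.5, 87.1],
    "School Enrollment": [89.8, 90.2, 91.0],
    "Average Education Years": [8.9, 9.0, 9.2],
})

page = """
<|{education_data}|chart|type=bar|x=Year|y[1]=Literacy Rate|y[2]=School Enrollment|y[3]=Average Education Years|>
"""

if __name__ == "__main__":
    Gui(page).run()
```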
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Solar Energy Production**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy['Solar Energy']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Wind Energy Production**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy['Wind Energy']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Hydropower Energy Production**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy['Hydropower']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_energy_metric}|toggle|lov={energy_metric_selector}|>\n\n<|part|render={selected_energy_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_energy_pie_absolute}|chart|type=pie|labels=Country|values=Solar Energy|title=Global Solar Energy Production|>\n\n<|{data_world_energy_evolution_absolute}|chart|properties={data_world_energy_evolution_properties}|title=Renewable Energy Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_energy_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_energy_pie_relative}|chart|type=pie|labels=Country|values=Wind Energy|>\n\n<|{data_world_energy_evolution_relative}|chart|properties={data_world_energy_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\nexcel_data = []\nexcel_path = \"\"\n\ndef excel_upload(state):\n state.excel_data = pd.read_excel(state.excel_path)\n\nexcel_page = \"\"\"\n<|{excel_path}|file_selector|accept=.xlsx|on_action=excel_upload|>\n<|{excel_data}|table|>\n\"\"\"\n\nGui(excel_page).run()\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages for the retail sector\nfrom pages.retail.home import home_md\nfrom pages.retail.products import products_md\nfrom pages.retail.customers import customers_md\nfrom pages.retail.sales import sales_md\n\n# Define your pages dictionary\npages = {\n '/home': home_md,\n '/products': products_md,\n '/customers': customers_md,\n '/sales': sales_md\n}\n\n# Create a Gui with your pages\ngui_retail = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n # Run the multi-page app\n gui_retail.run(title=\"Retail Dashboard\")\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load customer feedback data\nfeedback_data = pd.read_csv(\"customer_feedback_data.csv\")\n\n# Initialize variables\nproducts = list(feedback_data[\"Product\"].unique())\nsentiments = list(feedback_data[\"Sentiment\"].unique())\nproduct = products\nsentiment = sentiments\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{sentiment}|selector|lov={sentiments}|multiple|label=Select Sentiment|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_feedback>\n\n out of 5\n|average_rating>\n\n\n|feedback_table>\n|main_page>\n|>\n\nCode adapted from [Customer Feedback Analysis](https://github.com/feedback_analysis_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/customer-feedback-analysis-app)\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|market>\n|>\n\n<|{selected_forecast}|forecast|on_submission_change=on_submission_change_forecast|not expanded|>\n\n---------------------------------------\n\n## **Market Predictions**{: .color-primary} and Data Explorer\n\n<|{selected_forecast.result.read() if selected_forecast and selected_forecast.result.read() is not None else default_market_result}|chart|x=Date|y[1]=Market Value|y[2]=Trend Prediction|type[1]=line|title=Market Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|forecast>\n|>\n"} {"text": "<|{all_classes}|table|columns={class_columns}|width='100%'|on_action={on_class_select}|style=class_style|>\n<|Schedule Class|button|on_action={open_schedule_class_dialog}|>\n<|Refresh Classes|button|on_action={refresh_class_list}|>\n\n<|{show_schedule_class_dialog}|dialog|title=Schedule New Class|\n<|{class_subject}|input|placeholder='Class 
Subject'|>\n<|{class_date}|date|with_time|>\n<|Schedule|button|on_action={schedule_class}|>\n<|Cancel|button|on_action={close_schedule_class_dialog}|>\n|>\n\n<|{show_class_details}|pane|\n\n# Class Details <|Edit|button|on_action=edit_selected_class|> <|Cancel|button|on_action=cancel_selected_class|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Subject\n<|{selected_class.subject}|>\n|>\n\n<|part|class_name=card|\n## Date\n<|{selected_class.date.strftime(\"%b %d, %Y at %H:%M\")}|>\n|>\n\n<|part|class_name=card|\n## Instructor\n<|{selected_class.instructor}|>\n|>\n\n<|part|class_name=card|\n## Enrollment\n<|{selected_class.enrollment}|number|active=False|>\n|>\n\n----\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Healthcare\"\n mood = \"medical\"\n style = \"healthexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Healthcare Tweets\n\n This mini-app generates Tweets related to Healthcare using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n <br/
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n <topic|\n <|{topic}|input|label=Topic|>\n |topic>\n\n <mood|\n <|{mood}|input|label=Mood|>\n |mood>\n\n <style|\n <|{style}|input|label=Style|>\n |style>\n |>\n\n <|Generate Healthcare Tweet|button|on_action=generate_text|label=Generate text|>\n\n <image|\n <|{image}|image|height=400px|>\n |image>\n\n <br/
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\nvideo_data = None\nvideo_path = \"\"\n\ndef video_upload(state):\n if state.video_path:\n video_data = state.video_path # Directly use the path for video elements\n\nvideo_page = \"\"\"\n<|{video_path}|file_selector|accept=video/*|on_action=video_upload|>\n<|{video_data}|video|controls=True|>\n\"\"\"\n\nGui(video_page).run()\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|region>\n|>\n\n<|{selected_environmental_scenario}|environmental_scenario|on_submission_change=on_environmental_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Environmental Metrics**{: .color-primary} and Trends\n\n<|{selected_environmental_scenario.result.read() if selected_environmental_scenario and selected_environmental_scenario.result.read() is not None else default_environmental_result}|chart|x=Date|y[1]=Air Quality Index|y[2]=Water Purity Level|type[1]=line|title=Environmental Conditions|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|environmental_scenario>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Healthcare\"\n mood = \"healthtech\"\n style = \"healthcarepro\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Healthcare Tweets\n\n This mini-app generates Tweets related to Healthcare using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n <topic|\n <|{topic}|input|label=Topic|>\n |topic>\n\n <mood|\n <|{mood}|input|label=Mood|>\n |mood>\n\n <style|\n <|{style}|input|label=Style|>\n |style>\n |>\n\n <|Generate Healthcare Tweet|button|on_action=generate_text|label=Generate text|>\n\n <image|\n <|{image}|image|height=400px|>\n |image>\n\n <br/
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport io\n\ncsv_data = pd.DataFrame()\ncsv_path = \"\"\n\ndef csv_upload_plot(state):\n if state.csv_path:\n state.csv_data = pd.read_csv(state.csv_path)\n plt.figure()\n state.csv_data.plot(kind='line')\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n state.plot_image = buf.read()\n\ncsv_plot_page = \"\"\"\n<|{csv_path}|file_selector|accept=.csv|on_action=csv_upload_plot|>\n<|{plot_image}|image|>\n\"\"\"\n\nGui(csv_plot_page).run()\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|location>\n|>\n\n<|{selected_weather_forecast}|weather_forecast|on_submission_change=on_submission_change_weather|not expanded|>\n\n---------------------------------------\n\n## **Weather Predictions**{: .color-primary} and Data Analysis\n\n<|{selected_weather_forecast.result.read() if selected_weather_forecast and selected_weather_forecast.result.read() is not None else default_weather_result}|chart|x=Date|y[1]=Temperature|y[2]=Humidity|y[3]=Precipitation|type[1]=line|title=Weather Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|weather_forecast>\n|>\n"} {"text": "<|{all_books}|table|columns={book_columns}|width='100%'|on_action={on_book_select}|style=book_style|>\n<|Add Book|button|on_action={open_add_book_dialog}|>\n<|Refresh Books|button|on_action={refresh_book_list}|>\n\n<|{show_add_book_dialog}|dialog|title=Add New Book|\n<|{book_title}|input|placeholder='Title'|\n<|{book_author}|input|placeholder='Author'|\n<|{book_genre}|selector|lov={get_all_genres()}|>\n<|Add Book|button|on_action={add_book}|>\n<|Cancel|button|on_action={close_add_book_dialog}|>\n|>\n\n<|{show_book_details}|pane|\n\n# Book Details <|Edit|button|on_action=edit_selected_book|> <|Remove|button|on_action=remove_selected_book|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Title\n<|{selected_book.title}|>\n|>\n\n<|part|class_name=card|\n## Author\n<|{selected_book.author}|>\n|>\n\n<|part|class_name=card|\n## Genre\n<|{selected_book.genre}|>\n|>\n\n<|part|class_name=card|\n## ISBN\n<|{selected_book.isbn}|>\n|>\n\n----\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Transportation\"\n mood = \"travel\"\n style = \"transitexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Transportation Tweets\n\n This mini-app generates Tweets related to Transportation using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n <topic|\n <|{topic}|input|label=Topic|>\n |topic>\n\n <mood|\n <|{mood}|input|label=Mood|>\n |mood>\n\n <style|\n <|{style}|input|label=Style|>\n |style>\n |>\n\n <|Generate Transportation Tweet|button|on_action=generate_text|label=Generate text|>\n\n <image|\n <|{image}|image|height=400px|>\n |image>\n\n <br/
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "# **Worldwide**{: .color-primary} Energy Consumption\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Energy Consumption**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy_consumption['Total Energy']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Renewable Energy Consumption**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy_consumption['Renewable Energy']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Fossil Fuel Consumption**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy_consumption['Fossil Fuels']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_energy_consumption_metric}|toggle|lov={energy_consumption_metric_selector}|>\n\n<|part|render={selected_energy_consumption_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_energy_consumption_pie_absolute}|chart|type=pie|labels=Country|values=Total Energy|title=Global Energy Consumption|>\n\n<|{data_world_energy_consumption_evolution_absolute}|chart|properties={data_world_energy_consumption_evolution_properties}|title=Energy Consumption Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_energy_consumption_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_energy_consumption_pie_relative}|chart|type=pie|labels=Country|values=Renewable Energy|>\n\n<|{data_world_energy_consumption_evolution_relative}|chart|properties={data_world_energy_consumption_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni =\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Automotive Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:<br/
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br/
\nY Position:<br/
\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br/
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Automotive Visualization\")\n"} {"text": "<|{all_fitness_classes}|table|columns={fitness_class_columns}|width='100%'|on_action={on_fitness_class_select}|style=fitness_class_style|>\n<|Schedule Fitness Class|button|on_action={open_schedule_fitness_class_dialog}|>\n<|Refresh Classes|button|on_action={refresh_fitness_classes}|>\n\n<|{show_schedule_fitness_class_dialog}|dialog|title=Schedule Fitness Class|\n<|{class_type}|selector|lov={get_all_class_types()}|>\n<|{class_instructor}|input|placeholder='Instructor Name'|\n<|{class_time}|time_picker|>\n<|Schedule Class|button|on_action={schedule_fitness_class}|>\n<|Cancel|button|on_action={close_schedule_fitness_class_dialog}|>\n|>\n\n<|{show_fitness_class_details}|pane|\n\n# Class Details <|Edit|button|on_action=edit_selected_class|> <|Cancel|button|on_action=cancel_selected_class|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Class Type\n<|{selected_fitness_class.type}|>\n|>\n\n<|part|class_name=card|\n## Instructor\n<|{selected_fitness_class.instructor}|>\n|>\n\n<|part|class_name=card|\n## Time\n<|{selected_fitness_class.time.strftime(\"%H:%M\")}|>\n|>\n\n----\n|>\n"} {"text": "# **Global**{: .color-primary} Digital Connectivity\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Internet Connectivity Rate**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_digital['Connectivity Rate']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Smart Device Usage**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_digital['Smart Devices']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Broadband Subscriptions**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_digital['Broadband Subscriptions']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_digital_metric}|toggle|lov={digital_metric_selector}|>\n\n<|part|render={selected_digital_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_digital_pie_absolute}|chart|type=pie|labels=Country|values=Connectivity Rate|title=Global Internet Connectivity|>\n\n<|{data_world_digital_evolution_absolute}|chart|properties={data_world_digital_evolution_properties}|title=Digital Connectivity Trends|>\n|>\n|>\n\n<|part|render={selected_digital_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_digital_pie_relative}|chart|type=pie|labels=Country|values=Smart Devices|>\n\n<|{data_world_digital_evolution_relative}|chart|properties={data_world_digital_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Automotive\"\n mood = \"automotivenews\"\n style = \"automotivepro\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Automotive Tweets\n\n This mini-app generates Tweets related to Automotive using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n <topic|\n <|{topic}|input|label=Topic|>\n |topic>\n\n <mood|\n <|{mood}|input|label=Mood|>\n |mood>\n\n <style|\n <|{style}|input|label=Style|>\n |style>\n |>\n\n <|Generate Automotive Tweet|button|on_action=generate_text|label=Generate text|>\n\n <image|\n <|{image}|image|height=400px|>\n |image>\n\n <br/
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Logistics Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:<br/
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br/
\nY Position:<br/
\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br/
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Logistics Visualization\")\n"} {"text": "# **Country**{: .color-primary} Population Growth\n\n<|layout|columns=1 1 1|\n<|{selected_country_population}|selector|lov={selector_country_population}|on_change=on_change_country_population|dropdown|label=Country|>\n\n<|{selected_population_metric}|toggle|lov={population_metric_selector}|on_change=update_population_metric|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Population**{: .color-primary}\n<|{'{:,}'.format(int(population_data.iloc[-1]['Total Population']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Urban Population**{: .color-primary}\n<|{'{:,}'.format(int(population_data.iloc[-1]['Urban Population']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Rural Population**{: .color-primary}\n<|{'{:,}'.format(int(population_data.iloc[-1]['Rural Population']))}|text|class_name=h2|>\n|>\n|>\n\n
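The KPI cards in these dashboards repeat one formatting idiom: group digits with `'{:,}'.format(...)`, then swap the commas for spaces. Outside the page string the idiom reduces to:

```python
# Space-grouped thousands separator used by the cards above.
value = 1234567
formatted = "{:,}".format(int(value)).replace(",", " ")
print(formatted)  # 1 234 567
```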
\n\n<|layout|columns=2 1|\n<|{population_data}|chart|type=bar|x=Year|y[3]=Total Population|y[2]=Urban Population|y[1]=Rural Population|layout={layout}|options={options}|title=Population Trends|>\n\n<|{population_distribution_chart}|chart|type=pie|values=distribution_values|labels=distribution_labels|title=Urban vs Rural Population|>\n|>\n"} {"text": "# **Stock**{: .color-primary} Market Overview\n\n<|layout|columns=1 1 1|\n<|{selected_stock}|selector|lov={selector_stock}|on_change=on_change_stock|dropdown|label=Stock|>\n\n<|{selected_indicator}|toggle|lov={indicator_selector}|on_change=update_indicator_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Market Value**{: .color-primary}\n<|{'${:,.2f}'.format(stock_data.iloc[-1]['Market Value'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Volume**{: .color-primary}\n<|{'{:,}'.format(stock_data.iloc[-1]['Volume'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Change (%)**{: .color-primary}\n<|{'{:+.2f}%'.format(stock_data.iloc[-1]['Change'])}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{stock_data}|chart|type=line|x=Date|y[3]=Market Value|y[2]=Volume|y[1]=Change|layout={layout}|options={options}|title=Stock Performance|>\n\n<|{sector_distribution_chart}|chart|type=pie|values=sector_values|labels=sector_labels|title=Market Sector Distribution|>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load the sales data\ndf = pd.read_excel(\n io=\"data/supermarkt_sales.xlsx\",\n engine=\"openpyxl\",\n sheet_name=\"Sales\",\n skiprows=3,\n usecols=\"B:R\",\n nrows=1000,\n)\n\n# Add 'hour' column to the dataframe\ndf[\"hour\"] = pd.to_datetime(df[\"Time\"], format=\"%H:%M:%S\").dt.hour\n\n# Initialize variables\ncities = list(df[\"City\"].unique())\ntypes = list(df[\"Customer_type\"].unique())\ngenders = list(df[\"Gender\"].unique())\ncity = cities\ncustomer_type = types\ngender = genders\n\nlayout = {\"margin\": {\"l\": 220}}\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{customer_type}|selector|lov={types}|multiple|label=Select the Customer Type|dropdown|on_change=on_filter|width=100%|>\n\n<|{gender}|selector|lov={genders}|multiple|label=Select the Gender|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_sales>\n\n <|{\"\u2b50\" * int(round(round(df_selection[\"Rating\"].mean(), 1), 0))}|>\n|average_rating>\n\n\n|average_sale>\n|>\n\n
\n\nDisplay df_selection in an expandable\n<|Sales Table|expandable|expanded=False|\n<|{df_selection}|table|width=100%|page_size=5|rebuild|class_name=table|>\n|>\n\n\n\n<|{sales_by_product_line}|chart|x=Total|y=Product|type=bar|orientation=h|title=Sales by Product|layout={layout}|color=#ff462b|>\n|charts>\n|main_page>\n|>\n\nCode from [Coding is Fun](https://github.com/Sven-Bo)\n\nGet the Taipy Code [here](https://github.com/Avaiga/demo-sales-dashboard) and the original code [here](https://github.com/Sven-Bo/streamlit-sales-dashboard)\n\"\"\"\n\n\ndef filter(city, customer_type, gender):\n df_selection = df[\n df[\"City\"].isin(city)\n & df[\"Customer_type\"].isin(customer_type)\n & df[\"Gender\"].isin(gender)\n ]\n\n # SALES BY PRODUCT LINE [BAR CHART]\n sales_by_product_line = (\n df_selection[[\"Product line\", \"Total\"]]\n .groupby(by=[\"Product line\"])\n .sum()[[\"Total\"]]\n .sort_values(by=\"Total\")\n )\n sales_by_product_line[\"Product\"] = sales_by_product_line.index\n\n # SALES BY HOUR [BAR CHART]\n sales_by_hour = (\n df_selection[[\"hour\", \"Total\"]].groupby(by=[\"hour\"]).sum()[[\"Total\"]]\n )\n sales_by_hour[\"Hour\"] = sales_by_hour.index\n return df_selection, sales_by_product_line, sales_by_hour\n\n\ndef on_filter(state):\n state.df_selection, state.sales_by_product_line, state.sales_by_hour = filter(\n state.city, state.customer_type, state.gender\n )\n\n\nif __name__ == \"__main__\":\n # Initialize dataframes\n df_selection, sales_by_product_line, sales_by_hour = filter(\n city, customer_type, gender\n )\n\n # Run the app\n Gui(page).run()\n"} {"text": "# **Country**{: .color-primary} Health Indicators\n\n<|layout|columns=1 1 1|\n<|{selected_country_health}|selector|lov={selector_country_health}|on_change=on_change_country_health|dropdown|label=Country|>\n\n<|{selected_health_metric}|toggle|lov={health_metric_selector}|on_change=update_health_metric_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Life Expectancy**{: .color-primary}\n<|{'{:.2f}'.format(health_data.iloc[-1]['Life Expectancy'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Infant Mortality Rate**{: .color-primary}\n<|{'{:.2f}'.format(health_data.iloc[-1]['Infant Mortality'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Healthcare Expenditure**{: .color-primary}\n<|{'${:,.2f}'.format(health_data.iloc[-1]['Expenditure'])}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{health_data}|chart|type=line|x=Year|y[3]=Life Expectancy|y[2]=Infant Mortality|y[1]=Expenditure|layout={layout}|options={options}|title=Healthcare Trends|>\n\n<|{healthcare_distribution_chart}|chart|type=pie|values=healthcare_values|labels=healthcare_labels|title=Healthcare Distribution|>\n|>\n"} {"text": "# **Worldwide**{: .color-primary} Space Exploration\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Space Missions**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_space['Missions']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Satellites Launched**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_space['Satellites']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Budget for Space Programs**{: .color-primary}\n<|{'${:,.2f}'.format(np.sum(data_world_space['Budget']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_space_metric}|toggle|lov={space_metric_selector}|>\n\n<|part|render={selected_space_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_space_pie_absolute}|chart|type=pie|labels=Country|values=Missions|title=Global Space Missions|>\n\n<|{data_world_space_evolution_absolute}|chart|properties={data_world_space_evolution_properties}|title=Space Exploration Trends|>\n|>\n|>\n\n<|part|render={selected_space_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_space_pie_relative}|chart|type=pie|labels=Country|values=Satellites|>\n\n<|{data_world_space_evolution_relative}|chart|properties={data_world_space_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Education\"\n mood = \"educational\"\n style = \"educationexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Education Tweets\n\n This mini-app generates Tweets related to Education using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n <topic|\n <|{topic}|input|label=Topic|>\n |topic>\n\n <mood|\n <|{mood}|input|label=Mood|>\n |mood>\n\n <style|\n <|{style}|input|label=Style|>\n |style>\n |>\n\n <|Generate Education Tweet|button|on_action=generate_text|label=Generate text|>\n\n <image|\n <|{image}|image|height=400px|>\n |image>\n\n <br/
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages for the energy sector\nfrom pages.energy.dashboard import dashboard_md\nfrom pages.energy.consumption import consumption_md\nfrom pages.energy.production import production_md\nfrom pages.energy.renewables import renewables_md\n\n# Define your pages dictionary\npages = {\n '/dashboard': dashboard_md,\n '/consumption': consumption_md,\n '/production': production_md,\n '/renewables': renewables_md\n}\n\n# Create a Gui with your pages\ngui_energy = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n # Run the multi-page app\n gui_energy.run(title=\"Energy Dashboard\")\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|area>\n|>\n\n<|{selected_traffic_scenario}|traffic_scenario|on_submission_change=on_traffic_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Traffic Predictions**{: .color-primary} and Data Visualization\n\n<|{selected_traffic_scenario.result.read() if selected_traffic_scenario and selected_traffic_scenario.result.read() is not None else default_traffic_result}|chart|x=Time|y[1]=Vehicle Count|y[2]=Congestion Level|type[1]=line|title=Traffic Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|traffic_scenario>\n|>\n"} {"text": "# **City**{: .color-primary} Urban Development Index\n\n<|layout|columns=1 1 1|\n<|{selected_city_development}|selector|lov={selector_city_development}|on_change=on_change_city_development|dropdown|label=City|>\n\n<|{selected_development_aspect}|toggle|lov={development_aspect_selector}|on_change=update_development_aspect_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Population Growth**{: .color-primary}\n<|{'{:.2f}%'.format(development_data.iloc[-1]['Population Growth'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Infrastructure Rating**{: .color-primary}\n<|{'{:.2f}'.format(development_data.iloc[-1]['Infrastructure'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Economic Activity**{: .color-primary}\n<|{'{:.2f}'.format(development_data.iloc[-1]['Economic Activity'])}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{development_data}|chart|type=bar|x=Year|y[3]=Population Growth|y[2]=Infrastructure|y[1]=Economic Activity|layout={layout}|options={options}|title=Urban Development Trends|>\n\n<|{development_aspect_chart}|chart|type=pie|values=development_aspect_values|labels=development_aspect_labels|title=Aspect Distribution|>\n|>\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|school>\n|>\n\n<|{selected_educational_scenario}|educational_scenario|on_submission_change=on_educational_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Student Performance Trends**{: .color-primary} and Insights\n\n<|{selected_educational_scenario.result.read() if selected_educational_scenario and selected_educational_scenario.result.read() is not None else default_educational_result}|chart|x=Subject|y[1]=Average Score|y[2]=Grade Level|type[1]=bar|title=Academic Performance|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|educational_scenario>\n|>\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Patient Group
\n<|{patient_group_selected}|selector|lov=group_diabetes;group_cardio;group_respiratory|dropdown|on_change=on_patient_group_change|>\n|>\n\n<|part|class_name=card|\n### Select Comparison Group
\n<|{comparison_group_selected}|selector|lov=group_diabetes;group_cardio;group_respiratory|dropdown|on_change=on_comparison_group_change|>\n|>\n\n|>\n\n<|Patient Data Overview|expandable|expanded=True|\n<|layout|columns=1 1|\n<|{patient_group_data}|table|page_size=5|>\n\n<|{comparison_group_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{age_distribution_chart}|chart|type=bar|x=Age Group|y=Patients|title=Age Distribution|>\n|>\n\n<|part|class_name=card|\n<|{disease_prevalence_chart}|chart|type=pie|options={disease_options}|>\n|>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Logistics\"\n mood = \"supplychain\"\n style = \"logisticsexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Logistics Tweets\n\n This mini-app generates Tweets related to Logistics using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n <br/
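The comparison page above leaves its `on_..._change` callbacks to the reader. A sketch of the usual shape of such a callback, with an in-memory dict standing in for real storage; only the names are taken from the markup, everything else is assumed:

```python
import pandas as pd

# Stand-in data source keyed by the selector's lov values.
GROUPS = {
    "group_diabetes": pd.DataFrame({"patient_id": [1, 2], "age": [54, 61]}),
    "group_cardio": pd.DataFrame({"patient_id": [3], "age": [70]}),
    "group_respiratory": pd.DataFrame({"patient_id": [4], "age": [48]}),
}

def on_patient_group_change(state):
    # Reassigning the bound DataFrame refreshes the table that displays it.
    state.patient_group_data = GROUPS.get(state.patient_group_selected, pd.DataFrame())
```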
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n <topic|\n <|{topic}|input|label=Topic|>\n |topic>\n\n <mood|\n <|{mood}|input|label=Mood|>\n |mood>\n\n <style|\n <|{style}|input|label=Style|>\n |style>\n |>\n\n <|Generate Logistics Tweet|button|on_action=generate_text|label=Generate text|>\n\n <image|\n <|{image}|image|height=400px|>\n |image>\n\n <br/
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Team
\n<|{team_selected}|selector|lov=team_lakers;team_warriors;team_celtics|dropdown|on_change=on_team_change|>\n|>\n\n<|part|class_name=card|\n### Select Season
\n<|{season_selected}|selector|lov=season_2020;season_2021;season_2022|dropdown|on_change=on_season_change|>\n|>\n\n|>\n\n<|Team Performance Overview|expandable|expanded=True|\n<|layout|columns=1 1|\n<|{team_data}|table|page_size=5|>\n\n<|{season_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{player_stats_chart}|chart|type=bar|x=Player|y=Points|title=Player Performance|>\n|>\n\n<|part|class_name=card|\n<|{win_loss_chart}|chart|type=line|x=Game|y=Win/Loss|title=Win-Loss Record|>\n|>\n|>\n\n<br/
\n### Analyze Team Dynamics:\n<|{team_dynamics_analysis}|scenario|on_submission_change=on_team_dynamics_status_change|expandable=False|expanded=False|>\n\n<|{team_dynamics_analysis}|scenario_dag|>\n\n
\n### View team dynamics results:\n<|{team_dynamics_analysis.results if team_dynamics_analysis else None}|data_node|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Technology\"\n mood = \"innovative\"\n style = \"techgiant\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Technology Tweets\n\n This mini-app generates Tweets related to Technology using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n <topic|\n <|{topic}|input|label=Topic|>\n |topic>\n\n <mood|\n <|{mood}|input|label=Mood|>\n |mood>\n\n <style|\n <|{style}|input|label=Style|>\n |style>\n |>\n\n <|Generate Technology Tweet|button|on_action=generate_text|label=Generate text|>\n\n <image|\n <|{image}|image|height=400px|>\n |image>\n\n <br/
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "# **Global**{: .color-primary} Environmental Statistics\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Carbon Emissions**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_global_environment['Emissions']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Deforestation**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_global_environment['Deforestation']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Renewable Energy Usage**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_global_environment['Renewable Energy']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_metric}|toggle|lov={metric_selector}|>\n\n<|part|render={selected_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_global_pie_absolute}|chart|type=pie|labels=Country|values=Emissions|title=Emissions Distribution Worldwide|>\n\n<|{data_global_environment_evolution_absolute}|chart|properties={data_global_environment_evolution_properties}|title=Environmental Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_global_pie_relative}|chart|type=pie|labels=Country|values=Deforestation|>\n\n<|{data_global_environment_evolution_relative}|chart|properties={data_global_environment_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "from taipy.gui import Gui\nimport open3d as o3d\nimport numpy as np\n\nmodel_data = None\nmodel_path = \"\"\n\ndef model_upload_view(state):\n if state.model_path:\n model = o3d.io.read_triangle_mesh(state.model_path)\n state.model_data = np.asarray(model.vertices)\n\nmodel_view_page = \"\"\"\n<|{model_path}|file_selector|accept=.ply,.stl|on_action=model_upload_view|>\n<|{model_data}|viewer3d|>\n\"\"\"\n\nGui(model_view_page).run()\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Financial Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:<br/
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br/
\nY Position:<br/
\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br/
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Financial Visualization\")\n"} {"text": "<|{all_feedback}|table|columns={feedback_columns}|width='100%'|on_action={on_feedback_select}|style=feedback_style|>\n<|Add Feedback|button|on_action={open_add_feedback_dialog}|>\n<|Refresh Feedback|button|on_action={refresh_feedback_list}|>\n\n<|{show_add_feedback_dialog}|dialog|title=Submit New Feedback|\n<|{customer_name}|input|placeholder='Customer Name'|\n<|{feedback_content}|textarea|placeholder='Enter feedback here...'|\n<|Submit|button|on_action={submit_feedback}|>\n<|Cancel|button|on_action={close_add_feedback_dialog}|>\n|>\n\n<|{show_feedback_details}|pane|\n\n# Feedback Details <|Archive|button|on_action=archive_selected_feedback|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Customer Name\n<|{selected_feedback.customer_name}|>\n|>\n\n<|part|class_name=card|\n## Feedback\n<|{selected_feedback.content}|textarea|disabled=True|>\n|>\n\n<|part|class_name=card|\n## Submission Date\n<|{selected_feedback.date.strftime(\"%b %d, %Y at %H:%M:%S\")}|>\n|>\n\n----\n|>\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Sports Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:<br/
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br/
\nY Position:<br/
\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br/
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Sports Visualization\")\n"} {"text": "<|{all_vehicles}|table|columns={vehicle_columns}|width='100%'|on_action={on_vehicle_select}|style=vehicle_style|>\n<|Log Maintenance|button|on_action={open_log_maintenance_dialog}|>\n<|Refresh Vehicles|button|on_action={refresh_vehicle_list}|>\n\n<|{show_log_maintenance_dialog}|dialog|title=Log Vehicle Maintenance|\n<|{vehicle_id}|selector|lov={get_all_vehicle_ids()}|>\n<|{maintenance_type}|input|placeholder='Maintenance Type'|\n<|{maintenance_date}|date_picker|>\n<|Log Maintenance|button|on_action={log_maintenance}|>\n<|Cancel|button|on_action={close_log_maintenance_dialog}|>\n|>\n\n<|{show_vehicle_details}|pane|\n\n# Vehicle Details <|Edit|button|on_action=edit_selected_vehicle|> <|Remove|button|on_action=remove_selected_vehicle|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Vehicle ID\n<|{selected_vehicle.id}|>\n|>\n\n<|part|class_name=card|\n## Maintenance Type\n<|{selected_vehicle.maintenance_type}|>\n|>\n\n<|part|class_name=card|\n## Maintenance Date\n<|{selected_vehicle.maintenance_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n----\n|>\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|hospital>\n|>\n\n<|{selected_healthcare_scenario}|healthcare_scenario|on_submission_change=on_healthcare_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Resource Allocation**{: .color-primary} and Analysis\n\n<|{selected_healthcare_scenario.result.read() if selected_healthcare_scenario and selected_healthcare_scenario.result.read() is not None else default_resource_allocation}|chart|x=Resource|y[1]=Allocated|y[2]=Required|type[1]=bar|title=Healthcare Resource Allocation|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|healthcare_scenario>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load inventory data\ninventory_data = pd.read_csv(\"inventory_data.csv\")\n\n# Initialize variables\ncategories = list(inventory_data[\"Category\"].unique())\nlocations = list(inventory_data[\"Location\"].unique())\ncategory = categories\nlocation = locations\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{location}|selector|lov={locations}|multiple|label=Select Location|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_items>\n\n\n|average_price>\n\n\n|inventory_table>\n|main_page>\n|>\n\nCode adapted from [Inventory Management](https://github.com/inventory_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/inventory-management-app)\n"} {"text": "from taipy.gui import Gui\nimport fitz # PyMuPDF\nimport io\n\npdf_data = None\npdf_path = \"\"\n\ndef pdf_upload(state):\n if state.pdf_path:\n pdf_doc = fitz.open(state.pdf_path)\n page = pdf_doc.load_page(0) # Display the first page\n state.pdf_data = io.BytesIO(page.get_pixmap().tobytes(\"png\"))\n\npdf_page = \"\"\"\n<|{pdf_path}|file_selector|accept=.pdf|on_action=pdf_upload|>\n<|{pdf_data}|image|>\n\"\"\"\n\nGui(pdf_page).run()\n"} {"text": "# **City**{: .color-primary} Environmental Data\n\n<|layout|columns=1 1 1|\n<|{selected_city}|selector|lov={selector_city}|on_change=on_change_city|dropdown|label=City|>\n\n<|{selected_pollutant}|toggle|lov={pollutant_selector}|on_change=update_pollutant_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Air Quality Index**{: .color-primary}\n<|{'{:,}'.format(int(city_data.iloc[-1]['AQI'])).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Pollution Level**{: .color-primary}\n<|{city_data.iloc[-1]['Pollution Level']}|text|class_name=h2|>\n|>\n\n<|card|\n**Temperature**{: .color-primary}\n<|{'{:.1f}\u00b0C'.format(city_data.iloc[-1]['Temperature'])}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{city_data}|chart|type=line|x=Date|y[3]=AQI|y[2]=Pollution Level|y[1]=Temperature|layout={layout}|options={options}|title=Environmental Trends|>\n\n<|{pollution_distribution_chart}|chart|type=pie|values=pollution_values|labels=pollution_labels|title=Pollution Source Distribution|>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Finance\"\n mood = \"financial\"\n style = \"financialexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Finance Tweets\n\n This mini-app generates Tweets related to Finance using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n <topic|\n <|{topic}|input|label=Topic|>\n |topic>\n\n <mood|\n <|{mood}|input|label=Mood|>\n |mood>\n\n <style|\n <|{style}|input|label=Style|>\n |style>\n |>\n\n <|Generate Finance Tweet|button|on_action=generate_text|label=Generate text|>\n\n <image|\n <|{image}|image|height=400px|>\n |image>\n\n <br/
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "# **City**{: .color-primary} Crime Statistics\n\n<|layout|columns=1 1 1|\n<|{selected_city_crime}|selector|lov={selector_city_crime}|on_change=on_change_city_crime|dropdown|label=City|>\n\n<|{selected_crime_type}|toggle|lov={crime_type_selector}|on_change=update_crime_type_display|>\n|>\n\n<br/
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Crimes**{: .color-primary}\n<|{'{:,}'.format(int(crime_data.iloc[-1]['Total']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Violent Crimes**{: .color-primary}\n<|{'{:,}'.format(int(crime_data.iloc[-1]['Violent']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Property Crimes**{: .color-primary}\n<|{'{:,}'.format(int(crime_data.iloc[-1]['Property']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{crime_data}|chart|type=bar|x=Year|y[3]=Total|y[2]=Violent|y[1]=Property|layout={layout}|options={options}|title=Crime Trends|>\n\n<|{crime_type_distribution_chart}|chart|type=pie|values=crime_type_values|labels=crime_type_labels|title=Crime Type Distribution|>\n|>\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|crop>\n|>\n\n<|{selected_yield_forecast}|yield_forecast|on_submission_change=on_yield_forecast_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Crop Yield Predictions**{: .color-primary} and Data Analysis\n\n<|{selected_yield_forecast.result.read() if selected_yield_forecast and selected_yield_forecast.result.read() is not None else default_yield_result}|chart|x=Date|y[1]=Predicted Yield|y[2]=Historical Yield|type[1]=bar|title=Crop Yield Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|yield_forecast>\n|>\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Sales Period
\n<|{sales_period_selected}|selector|lov=period_last_month;period_last_quarter;period_last_year|dropdown|on_change=on_sales_period_change|>\n|>\n\n<|part|class_name=card|\n### Select Product Category
\n<|{product_category_selected}|selector|lov=category_electronics;category_clothing;category_home_goods|dropdown|on_change=on_product_category_change|>\n|>\n\n|>\n\n<|Sales Data Overview|expandable|expanded=True|\n<|layout|columns=1 1|\n<|{sales_data}|table|page_size=5|>\n\n<|{category_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{sales_volume_chart}|chart|type=line|x=Month|y=Sales Volume|title=Monthly Sales Volume|>\n|>\n\n<|part|class_name=card|\n<|{product_category_chart}|chart|type=pie|options={category_options}|layout={category_layout}|>\n|>\n|>\n\n<br/
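The forecast and scenario controls in this section register `on_submission_change` handlers (for example `on_yield_forecast_submission_change`) that are never defined. A hedged sketch of such a handler; the `(state, submittable, details)` signature and the `submission_status` key follow Taipy's documented callback, but treat them as assumptions here:

```python
from taipy.gui import notify

def on_yield_forecast_submission_change(state, submittable, details):
    # details is assumed to carry the new submission status.
    status = details.get("submission_status")
    if status == "COMPLETED":
        # Controls bound to the scenario's result refresh once it has run.
        notify(state, "success", "Forecast finished running")
    elif status == "FAILED":
        notify(state, "error", "Forecast failed")
```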
\n### Analyze Sales Performance:\n<|{sales_performance}|scenario|on_submission_change=on_sales_performance_status_change|expandable=False|expanded=False|>\n\n<|{sales_performance}|scenario_dag|>\n\n
\n### View the analysis results:\n<|{sales_performance.results if sales_performance else None}|data_node|>\n"}
{"text": "#!/usr/bin/env python\n\"\"\"The setup script.\"\"\"\nimport json\nimport os\n\nfrom setuptools import find_namespace_packages, find_packages, setup\n\nwith open(\"README.md\") as readme_file:\n    readme = readme_file.read()\n\nwith open(f\"src{os.sep}taipy{os.sep}core{os.sep}version.json\") as version_file:\n    version = json.load(version_file)\n    version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}'\n    if vext := version.get(\"ext\"):\n        version_string = f\"{version_string}.{vext}\"\n\nrequirements = [\n    \"pyarrow>=10.0.1,<11.0\",\n    \"networkx>=2.6,<3.0\",\n    \"openpyxl>=3.1.2,<3.2\",\n    \"modin[dask]>=0.23.0,<1.0\",\n    \"pymongo[srv]>=4.2.0,<5.0\",\n    \"sqlalchemy>=2.0.16,<2.1\",\n    \"toml>=0.10,<0.11\",\n    \"taipy-config@git+https://git@github.com/Avaiga/taipy-config.git@develop\",\n]\n\ntest_requirements = [\"pytest>=3.8\"]\n\nextras_require = {\n    \"fastparquet\": [\"fastparquet==2022.11.0\"],\n    \"mssql\": [\"pyodbc>=4,<4.1\"],\n    \"mysql\": [\"pymysql>1,<1.1\"],\n    \"postgresql\": [\"psycopg2>2.9,<2.10\"],\n}\n\nsetup(\n    author=\"Avaiga\",\n    author_email=\"dev@taipy.io\",\n    python_requires=\">=3.8\",\n    classifiers=[\n        \"Intended Audience :: Developers\",\n        \"License :: OSI Approved :: Apache Software License\",\n        \"Natural Language :: English\",\n        \"Programming Language :: Python :: 3\",\n        \"Programming Language :: Python :: 3.8\",\n        \"Programming Language :: Python :: 3.9\",\n        \"Programming Language :: Python :: 3.10\",\n        \"Programming Language :: Python :: 3.11\",\n    ],\n    description=\"A Python library to build powerful and customized data-driven back-end applications.\",\n    install_requires=requirements,\n    long_description=readme,\n    long_description_content_type=\"text/markdown\",\n    license=\"Apache License 2.0\",\n    keywords=\"taipy-core\",\n    name=\"taipy-core\",\n    package_dir={\"\": \"src\"},\n    packages=find_namespace_packages(where=\"src\") + find_packages(include=[\"taipy\", \"taipy.core\", \"taipy.core.*\"]),\n    include_package_data=True,\n    test_suite=\"tests\",\n    tests_require=test_requirements,\n    url=\"https://github.com/avaiga/taipy-core\",\n    version=version_string,\n    zip_safe=False,\n    extras_require=extras_require,\n)\n"}
{"text": "\"\"\"Unit test package for taipy.\"\"\"\n"}
{"text": "import pytest\n\nfrom src.taipy.core import Core\nfrom src.taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _StandaloneJobDispatcher\nfrom src.taipy.core._orchestrator._orchestrator import _Orchestrator\nfrom src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory\nfrom src.taipy.core.config.job_config import JobConfig\nfrom src.taipy.core.exceptions.exceptions import CoreServiceIsAlreadyRunning\nfrom taipy.config import Config\nfrom taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked\n\n\nclass TestCore:\n    def test_run_core_trigger_config_check(self, caplog):\n        Config.configure_data_node(id=\"d0\", storage_type=\"toto\")\n        with pytest.raises(SystemExit):\n            core = Core()\n            core.run()\n        expected_error_message = (\n            \"`storage_type` field of DataNodeConfig `d0` must be either csv, sql_table,\"\n            \" sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory.\"\n            ' Current value of property `storage_type` is \"toto\".'\n        )\n        assert expected_error_message in caplog.text\n        core.stop()\n\n    def test_run_core_as_a_service_development_mode(self):\n        _OrchestratorFactory._dispatcher = None\n\n        core = Core()\n        assert core._orchestrator is None\n        assert core._dispatcher is None\n        assert _OrchestratorFactory._dispatcher is None\n\n        core.run()\n        assert core._orchestrator is not None\n        assert core._orchestrator == _Orchestrator\n        assert _OrchestratorFactory._orchestrator is not None\n        assert _OrchestratorFactory._orchestrator == _Orchestrator\n        assert core._dispatcher is not None\n        assert isinstance(core._dispatcher, _DevelopmentJobDispatcher)\n        assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher)\n        core.stop()\n\n    def test_run_core_as_a_service_standalone_mode(self):\n        _OrchestratorFactory._dispatcher = None\n\n        core = Core()\n        assert core._orchestrator is None\n        assert core._dispatcher is None\n        assert _OrchestratorFactory._dispatcher is None\n\n        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)\n        core.run()\n        assert core._orchestrator is not None\n        assert core._orchestrator == _Orchestrator\n        assert _OrchestratorFactory._orchestrator is not None\n        assert _OrchestratorFactory._orchestrator == _Orchestrator\n        assert core._dispatcher is not None\n        assert isinstance(core._dispatcher, _StandaloneJobDispatcher)\n        assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher)\n        assert core._dispatcher.is_running()\n        assert _OrchestratorFactory._dispatcher.is_running()\n        core.stop()\n\n    def test_core_service_can_only_be_run_once(self):\n        core_instance_1 = Core()\n        core_instance_2 = Core()\n\n        core_instance_1.run()\n        with pytest.raises(CoreServiceIsAlreadyRunning):\n            core_instance_1.run()\n        with pytest.raises(CoreServiceIsAlreadyRunning):\n            core_instance_2.run()\n\n        # Stop the Core service and run it again should work\n        core_instance_1.stop()\n        core_instance_1.run()\n\n        core_instance_1.stop()\n        core_instance_2.run()\n        core_instance_2.stop()\n\n    def test_block_config_update_when_core_service_is_running_development_mode(self):\n        _OrchestratorFactory._dispatcher = None\n\n        core = Core()\n        core.run()\n        with pytest.raises(ConfigurationUpdateBlocked):\n            Config.configure_data_node(id=\"i1\")\n        core.stop()\n\n    def test_block_config_update_when_core_service_is_running_standalone_mode(self):\n        _OrchestratorFactory._dispatcher = None\n\n        core = Core()\n        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)\n        core.run()\n        with pytest.raises(ConfigurationUpdateBlocked):\n            Config.configure_data_node(id=\"i1\")\n        core.stop()\n"}
{"text": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n# an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n"}
{"text": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n# an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n"}
{"text": "import json\nimport os\nfrom datetime import datetime, timedelta\n\nimport pytest\n\nfrom src.taipy.core._repository._decoder import _Decoder\nfrom src.taipy.core._repository._encoder import _Encoder\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef create_and_delete_json_file():\n    test_json_file = {\n        \"name\": \"testing\",\n        \"date\": datetime(1991, 1, 1),\n        \"default_data\": \"data for testing encoder\",\n        \"validity_period\": timedelta(days=1),\n    }\n    with open(\"data.json\", \"w\") as f:\n        json.dump(test_json_file, f, ensure_ascii=False, indent=4, cls=_Encoder)\n    yield\n    os.unlink(\"data.json\")\n\n\ndef test_json_encoder():\n    with open(\"data.json\") as json_file:\n        data = json.load(json_file)\n\n    assert data[\"name\"] == \"testing\"\n    assert data[\"default_data\"] == \"data for testing encoder\"\n    assert data[\"date\"] == {\n        \"__type__\": \"Datetime\",\n        \"__value__\": \"1991-01-01T00:00:00\",\n    }\n    assert data[\"date\"].get(\"__type__\") == \"Datetime\"\n    assert data[\"date\"].get(\"__value__\") == \"1991-01-01T00:00:00\"\n\n\ndef test_json_decoder():\n    with open(\"data.json\") as json_file:\n        data = json.load(json_file, cls=_Decoder)\n\n    assert data[\"name\"] == \"testing\"\n    assert data[\"default_data\"] == \"data for testing encoder\"\n    assert data[\"date\"] == datetime(1991, 1, 1)\n"}
{"text": "import src.taipy.core.taipy as tp\nfrom src.taipy.core.config import Config\n\n\ndef test_no_special_characters():\n    scenario_config = Config.configure_scenario(\"scenario_1\")\n    scenario = tp.create_scenario(scenario_config, name=\"martin\")\n    assert scenario.name == \"martin\"\n    scenarios = tp.get_scenarios()\n    assert len(scenarios) == 1\n    assert scenarios[0].name == \"martin\"\n\n\ndef test_many_special_characters():\n    scenario_config = Config.configure_scenario(\"scenario_1\")\n    special_characters = (\n        \"!#$%&'()*+,-./:;<=>?@[]^_`\\\\{\"\n        \"\u00bb\u00bc\u00bd\u00be\u00bf\u00c0\u00c1\u00c2\u00c3\u00c4\u00c5\u00c6\u00c7\u00c8\u00c9\u00ca\u00cb\u00cc\u00cd\u00ce\u00cf\u00d0\u00d1\u00d2\u00d3\u00d4\u00d5\u00d6\"\n        \"\u00d7\u00d8\u00d9\u00da\u00db\u00dc\u00dd\u00de\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e5\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f0\u00f1\u00f2\"\n        \"\u00f3\u00f4\u00f5\u00f6\u00f7\u00f8\u00f9\u00fa\u00fb\u00fc\u00fd\u00fe\u00ff\u0100\u0101\u0102\u0103\u0104\u0105\u0106\u0107\u0108\u0109\u010a\u010b\u010c\u010d\u010e\"\n        \"\u010f\u0110\u0111\u0112\u0113\u0114\u0115\u0116\u0117\u0118\u0119\u011a\u011b\u011c\u011d\u011e\u011f\u0120\u0121\u0122\u0123\u0124\u0125\u0126\u0127\u0128\u0129\u012a\"\n        \"\u012b\u012c\u012d\u012e\u012f\u0130\u0132\u0133\u0134\u0135\u0136\u0137\u0138\u0139\u013a\u013b\u013c\u013d\u013e\u013f\u0140\u0141\u0142\u0143\u0144\u0145\u0146\u0147\"\n        \"\u0148\u0149\u014a\u014b\u014c\u014d\u014e\u014f\u0150\u0151\u0152\u0153\u0154\u0155\u0156\u0157\u0158\u0159\u015a\u015b\u015c\u015d\u015e\u015f\u0160\u0161\u0162\u0163\"\n        \"\u0164\u0165\u0166\u0167\u0168\u0169\u016a\u016b\u016c\u016d\u016e\u016f\u0170\u0171\u0172\u0173\u0174\u0175\u0176\u0177\u0178\u0179\u017a\u017b\u017c\u017d\u017e\u017f\"\n    )\n    scenario = tp.create_scenario(scenario_config, name=special_characters)\n    assert scenario.name == special_characters\n    scenarios = tp.get_scenarios()\n    assert len(scenarios) == 1\n    assert scenarios[0].name == special_characters\n"}
{"text": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import json import os import pathlib import shutil import pytest from src.taipy.core.exceptions.exceptions import InvalidExportPath from taipy.config.config import Config from .mocks import MockConverter, MockFSRepository, MockModel, MockObj, MockSQLRepository class TestRepositoriesStorage: @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_save_and_fetch_model(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) fetched_model = r._load(m.id) assert m == fetched_model @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_exists(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) assert r._exists(m.id) assert not r._exists(\"not-existed-model\") @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_get_all(self, mock_repo, params, init_sql_repo): objs = [] r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) r._save(m) _objs = r._load_all() assert len(_objs) == 5 for obj in _objs: assert isinstance(obj, MockObj) assert sorted(objs, key=lambda o: o.id) == sorted(_objs, key=lambda o: o.id) @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_delete_all(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") r._save(m) _models = r._load_all() assert len(_models) == 5 r._delete_all() _models = r._load_all() assert len(_models) == 0 @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_delete_many(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") r._save(m) _models = r._load_all() assert len(_models) == 5 r._delete_many([\"uuid-0\", \"uuid-1\"]) _models = r._load_all() assert len(_models) == 3 @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_search(self, mock_repo, 
params, init_sql_repo): r = mock_repo(**params) r._delete_all() m = MockObj(\"uuid\", \"foo\") r._save(m) m1 = r._search(\"name\", \"bar\") m2 = r._search(\"name\", \"foo\") assert m1 == [] assert m2 == [m] @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) @pytest.mark.parametrize(\"export_path\", [\"tmp\"]) def test_export(self, mock_repo, params, export_path, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) r._export(\"uuid\", export_path) assert pathlib.Path(os.path.join(export_path, \"mock_model/uuid.json\")).exists() with open(os.path.join(export_path, \"mock_model/uuid.json\"), \"r\") as exported_file: exported_data = json.load(exported_file) assert exported_data[\"id\"] == \"uuid\" assert exported_data[\"name\"] == \"foo\" # Export to same location again should work r._export(\"uuid\", export_path) assert pathlib.Path(os.path.join(export_path, \"mock_model/uuid.json\")).exists() if mock_repo == MockFSRepository: with pytest.raises(InvalidExportPath): r._export(\"uuid\", Config.core.storage_folder) shutil.rmtree(export_path, ignore_errors=True) "} {"text": "import dataclasses import pathlib from dataclasses import dataclass from typing import Any, Dict, Optional from sqlalchemy import Column, String, Table from sqlalchemy.dialects import sqlite from sqlalchemy.orm import declarative_base, registry from sqlalchemy.schema import CreateTable from src.taipy.core._repository._abstract_converter import _AbstractConverter from src.taipy.core._repository._filesystem_repository import _FileSystemRepository from src.taipy.core._repository._sql_repository import _SQLRepository from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config class Base: __allow_unmapped__ = True Base = declarative_base(cls=Base) # type: ignore mapper_registry = registry() @dataclass class MockObj: def __init__(self, id: str, name: str, version: Optional[str] = None) -> None: self.id = id self.name = name if version: self._version = version else: self._version = _VersionManager._get_latest_version() @dataclass class MockModel(Base): # type: ignore __table__ = Table( \"mock_model\", mapper_registry.metadata, Column(\"id\", String(200), primary_key=True), Column(\"name\", String(200)), Column(\"version\", String(200)), ) id: str name: str version: str def to_dict(self): return dataclasses.asdict(self) @staticmethod def from_dict(data: Dict[str, Any]): return MockModel(id=data[\"id\"], name=data[\"name\"], version=data[\"version\"]) def _to_entity(self): return MockObj(id=self.id, name=self.name, version=self.version) @classmethod def _from_entity(cls, entity: MockObj): return MockModel(id=entity.id, name=entity.name, version=entity._version) def to_list(self): return [self.id, self.name, self.version] class MockConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, entity): return MockModel(id=entity.id, name=entity.name, version=entity._version) @classmethod def _model_to_entity(cls, model): return MockObj(id=model.id, name=model.name, version=model.version) class MockFSRepository(_FileSystemRepository): def __init__(self, **kwargs): super().__init__(**kwargs) @property def _storage_folder(self) -> pathlib.Path: return pathlib.Path(Config.core.storage_folder) # type: ignore class MockSQLRepository(_SQLRepository): def __init__(self, 
**kwargs): super().__init__(**kwargs) self.db.execute(str(CreateTable(MockModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect()))) "} {"text": "import pytest from taipy.config.config import Config def test_job_config(): assert Config.job_config.mode == \"development\" job_c = Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) assert job_c.mode == \"standalone\" assert job_c.max_nb_of_workers == 2 assert Config.job_config.mode == \"standalone\" assert Config.job_config.max_nb_of_workers == 2 Config.configure_job_executions(foo=\"bar\") assert Config.job_config.foo == \"bar\" def test_clean_config(): job_config = Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2, prop=\"foo\") assert Config.job_config is job_config job_config._clean() # Check if the instance before and after _clean() is the same assert Config.job_config is job_config assert job_config.mode == \"development\" assert job_config._config == {\"max_nb_of_workers\": 1} assert job_config.properties == {} "} {"text": "from taipy.config.config import Config def migrate_pickle_path(dn): dn.path = \"s1.pkl\" def migrate_skippable(task): task.skippable = True def test_migration_config(): assert Config.migration_functions.migration_fcts == {} data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes1, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == {\"1.0\": {\"data_nodes1\": migrate_pickle_path}} assert migration_cfg.properties == {} data_nodes2 = Config.configure_data_node(\"data_nodes2\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes2, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == { \"1.0\": {\"data_nodes1\": migrate_pickle_path, \"data_nodes2\": migrate_pickle_path} } def test_clean_config(): assert Config.migration_functions.migration_fcts == {} data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes1, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == {\"1.0\": {\"data_nodes1\": migrate_pickle_path}} assert migration_cfg.properties == {} migration_cfg._clean() assert migration_cfg.migration_fcts == {} assert migration_cfg._properties == {} "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
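# Exercises the default configuration: every section's defaults must be identical whether read from _default_config, the Config accessors, or the section's own default_config().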
from src.taipy.core.config import CoreSection from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.config.job_config import JobConfig from src.taipy.core.config.migration_config import MigrationConfig from src.taipy.core.config.scenario_config import ScenarioConfig from src.taipy.core.config.task_config import TaskConfig from taipy.config._config import _Config from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.global_app.global_app_config import GlobalAppConfig def _test_default_job_config(job_config: JobConfig): assert job_config is not None assert job_config.mode == JobConfig._DEFAULT_MODE def _test_default_core_section(core_section: CoreSection): assert core_section is not None assert core_section.mode == CoreSection._DEFAULT_MODE assert core_section.version_number == \"\" assert not core_section.force assert core_section.root_folder == \"./taipy/\" assert core_section.storage_folder == \".data/\" assert core_section.repository_type == \"filesystem\" assert core_section.repository_properties == {} assert len(core_section.properties) == 0 def _test_default_data_node_config(dn_config: DataNodeConfig): assert dn_config is not None assert dn_config.id is not None assert dn_config.storage_type == \"pickle\" assert dn_config.scope == Scope.SCENARIO assert dn_config.validity_period is None assert len(dn_config.properties) == 0 # type: ignore def _test_default_task_config(task_config: TaskConfig): assert task_config is not None assert task_config.id is not None assert task_config.input_configs == [] assert task_config.output_configs == [] assert task_config.function is None assert not task_config.skippable assert len(task_config.properties) == 0 # type: ignore def _test_default_scenario_config(scenario_config: ScenarioConfig): assert scenario_config is not None assert scenario_config.id is not None assert scenario_config.tasks == [] assert scenario_config.task_configs == [] assert scenario_config.additional_data_nodes == [] assert scenario_config.additional_data_node_configs == [] assert scenario_config.data_nodes == [] assert scenario_config.data_node_configs == [] assert scenario_config.sequences == {} assert len(scenario_config.properties) == 0 # type: ignore def _test_default_version_migration_config(version_migration_config: MigrationConfig): assert version_migration_config is not None assert version_migration_config.migration_fcts == {} assert len(version_migration_config.properties) == 0 # type: ignore def _test_default_global_app_config(global_config: GlobalAppConfig): assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_default_configuration(): default_config = Config._default_config assert default_config._global_config is not None _test_default_global_app_config(default_config._global_config) _test_default_global_app_config(Config.global_config) _test_default_global_app_config(GlobalAppConfig().default_config()) assert default_config._unique_sections is not None assert len(default_config._unique_sections) == 3 assert len(default_config._sections) == 3 _test_default_job_config(default_config._unique_sections[JobConfig.name]) _test_default_job_config(Config.job_config) _test_default_job_config(JobConfig().default_config()) _test_default_version_migration_config(default_config._unique_sections[MigrationConfig.name]) _test_default_version_migration_config(Config.migration_functions) 
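# The remaining checks repeat the same triple pattern: default registry entry, Config accessor, then the section class's default_config().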
_test_default_version_migration_config(MigrationConfig.default_config()) _test_default_core_section(default_config._unique_sections[CoreSection.name]) _test_default_core_section(Config.core) _test_default_core_section(CoreSection().default_config()) _test_default_data_node_config(default_config._sections[DataNodeConfig.name][_Config.DEFAULT_KEY]) _test_default_data_node_config(Config.data_nodes[_Config.DEFAULT_KEY]) _test_default_data_node_config(DataNodeConfig.default_config()) assert len(default_config._sections[DataNodeConfig.name]) == 1 assert len(Config.data_nodes) == 1 _test_default_task_config(default_config._sections[TaskConfig.name][_Config.DEFAULT_KEY]) _test_default_task_config(Config.tasks[_Config.DEFAULT_KEY]) _test_default_task_config(TaskConfig.default_config()) assert len(default_config._sections[TaskConfig.name]) == 1 assert len(Config.tasks) == 1 _test_default_scenario_config(default_config._sections[ScenarioConfig.name][_Config.DEFAULT_KEY]) _test_default_scenario_config(Config.scenarios[_Config.DEFAULT_KEY]) _test_default_scenario_config(ScenarioConfig.default_config()) assert len(default_config._sections[ScenarioConfig.name]) == 1 assert len(Config.scenarios) == 1 "} {"text": "from unittest.mock import patch import pytest from src.taipy.core._init_version import _read_version from src.taipy.core.config.core_section import CoreSection from src.taipy.core.exceptions import ConfigCoreVersionMismatched from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile _MOCK_CORE_VERSION = \"3.1.1\" @pytest.fixture(scope=\"function\", autouse=True) def mock_core_version(): with patch(\"src.taipy.core.config.core_section._read_version\") as mock_read_version: mock_read_version.return_value = _MOCK_CORE_VERSION CoreSection._CURRENT_CORE_VERSION = _MOCK_CORE_VERSION Config.unique_sections[CoreSection.name] = CoreSection.default_config() Config._default_config._unique_sections[CoreSection.name] = CoreSection.default_config() yield @pytest.fixture(scope=\"session\", autouse=True) def reset_core_version(): yield CoreSection._CURRENT_CORE_VERSION = _read_version() class TestCoreVersionInCoreSectionConfig: major, minor, patch = _MOCK_CORE_VERSION.split(\".\") current_version = f\"{major}.{minor}.{patch}\" current_dev_version = f\"{major}.{minor}.{patch}.dev0\" compatible_future_version = f\"{major}.{minor}.{int(patch) + 1}\" compatible_future_dev_version = f\"{major}.{minor}.{int(patch) + 1}.dev0\" core_version_is_compatible = [ # Current version and dev version should be compatible (f\"{major}.{minor}.{patch}\", True), (f\"{major}.{minor}.{patch}.dev0\", True), # Future versions with same major and minor should be compatible (f\"{major}.{minor}.{int(patch) + 1}\", True), (f\"{major}.{minor}.{int(patch) + 1}.dev0\", True), # Past versions with same major and minor should be compatible (f\"{major}.{minor}.{int(patch) - 1}\", True), (f\"{major}.{minor}.{int(patch) - 1}.dev0\", True), # Future versions with different minor number should be incompatible (f\"{major}.{int(minor) + 1}.{patch}\", False), (f\"{major}.{int(minor) + 1}.{patch}.dev0\", False), # Past versions with different minor number should be incompatible (f\"{major}.{int(minor) - 1}.{patch}\", False), (f\"{major}.{int(minor) - 1}.{patch}.dev0\", False), ] @pytest.mark.parametrize(\"core_version, is_compatible\", core_version_is_compatible) def test_load_configuration_file(self, core_version, is_compatible): file_config = NamedTemporaryFile(
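# TOML fixture: a complete [CORE] section; only core_version varies across the parametrized cases.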
f\"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{core_version}\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) if is_compatible: Config.load(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION else: with pytest.raises(ConfigCoreVersionMismatched): Config.load(file_config.filename) @pytest.mark.parametrize(\"core_version,is_compatible\", core_version_is_compatible) def test_override_configuration_file(self, core_version, is_compatible): file_config = NamedTemporaryFile( f\"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{core_version}\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) if is_compatible: Config.override(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION else: with pytest.raises(ConfigCoreVersionMismatched): Config.override(file_config.filename) def test_load_configuration_file_without_core_section(self): file_config = NamedTemporaryFile( \"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) Config.load(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION "} {"text": "from datetime import timedelta from taipy.config import Config from taipy.config.common.scope import Scope class TestConfig: def test_configure_csv_data_node(self): a, b, c, d, e, f = \"foo\", \"path\", True, \"numpy\", Scope.SCENARIO, timedelta(1) Config.configure_csv_data_node(a, b, c, d, e, f) assert len(Config.data_nodes) == 2 def test_configure_excel_data_node(self): a, b, c, d, e, f, g = \"foo\", \"path\", True, \"Sheet1\", \"numpy\", Scope.SCENARIO, timedelta(1) Config.configure_excel_data_node(a, b, c, d, e, f, g) assert len(Config.data_nodes) == 2 def test_configure_generic_data_node(self): a, b, c, d, e, f, g, h = \"foo\", print, print, tuple([]), tuple([]), Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_generic_data_node(a, b, c, d, e, f, g, property=h) assert len(Config.data_nodes) == 2 def test_configure_in_memory_data_node(self): a, b, c, d, e = \"foo\", 0, Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_in_memory_data_node(a, b, c, d, property=e) assert len(Config.data_nodes) == 2 def test_configure_pickle_data_node(self): a, b, c, d, e = \"foo\", 0, Scope.SCENARIO, timedelta(1), \"path\" Config.configure_pickle_data_node(a, b, c, d, path=e) assert len(Config.data_nodes) == 2 def test_configure_json_data_node(self): a, dp, ec, dc, sc, f, p = \"foo\", \"path\", \"ec\", \"dc\", Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_json_data_node(a, dp, ec, dc, sc, f, path=p) assert len(Config.data_nodes) == 2 def test_configure_sql_table_data_node(self): a, b, c, d, e, f, g, h, i, extra_args, exposed_type, scope, vp, k = ( \"foo\", \"user\", \"pwd\", \"db\", \"engine\", \"table_name\", \"port\", \"host\", \"driver\", 
{\"foo\": \"bar\"}, \"exposed_type\", Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_sql_table_data_node(a, b, c, d, e, f, g, h, i, extra_args, exposed_type, scope, vp, property=k) assert len(Config.data_nodes) == 2 def test_configure_sql_data_node(self): a, b, c, d, e, f, g, h, i, j, k, extra_args, exposed_type, scope, vp, k = ( \"foo\", \"user\", \"pwd\", \"db\", \"engine\", \"read_query\", \"write_query_builder\", \"append_query_builder\", \"port\", \"host\", \"driver\", {\"foo\": \"bar\"}, \"exposed_type\", Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_sql_data_node(a, b, c, d, e, f, g, h, i, j, k, extra_args, exposed_type, scope, vp, property=k) assert len(Config.data_nodes) == 2 def test_configure_mongo_data_node(self): a, b, c, d, e, f, g, h, extra_args, scope, vp, k = ( \"foo\", \"db_name\", \"collection_name\", None, \"user\", \"pwd\", \"host\", \"port\", {\"foo\": \"bar\"}, Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_mongo_collection_data_node(a, b, c, d, e, f, g, h, extra_args, scope, vp, property=k) assert len(Config.data_nodes) == 2 "} {"text": "from unittest.mock import patch from src.taipy.core import Core from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from taipy.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def test_core_section(): with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() assert not Config.core.force core.stop() with patch(\"sys.argv\", [\"prog\"]): Config.configure_core(mode=\"experiment\", version_number=\"test_num\", force=True) core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"test_num\" assert Config.core.force core.stop() toml_config = NamedTemporaryFile( content=\"\"\" [TAIPY] [CORE] mode = \"production\" version_number = \"test_num_2\" force = \"true:bool\" \"\"\" ) Config.load(toml_config.filename) with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() assert Config.core.mode == \"production\" assert Config.core.version_number == \"test_num_2\" assert Config.core.force core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"test_num_3\", \"--no-taipy-force\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"test_num_3\" assert not Config.core.force core.stop() def test_clean_config(): core_config = Config.configure_core(mode=\"experiment\", version_number=\"test_num\", force=True) assert Config.core is core_config core_config._clean() # Check if the instance before and after _clean() is the same assert Config.core is core_config assert core_config.mode == \"development\" assert core_config.version_number == \"\" assert core_config.force is False assert core_config.properties == {} "} {"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config import MigrationConfig from taipy.config.config import Config def mock_func(): pass def test_check_if_entity_property_key_used_is_predefined(caplog): with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() assert caplog.text == \"\" core.stop() caplog.clear() Config.unique_sections[MigrationConfig.name]._properties[\"_entity_owner\"] = None with 
patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() assert ( \"Properties of MigrationConfig `VERSION_MIGRATION` cannot have `_entity_owner` as its property.\" in caplog.text ) caplog.clear() Config.unique_sections[MigrationConfig.name]._properties[\"_entity_owner\"] = \"entity_owner\" with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"Properties of MigrationConfig `VERSION_MIGRATION` cannot have `_entity_owner` as its property.\" ' Current value of property `_entity_owner` is \"entity_owner\".' ) assert expected_error_message in caplog.text def test_check_valid_version(caplog): data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") Config.add_migration_function(\"2.0\", data_nodes1, mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() assert \"The target version for a migration function must be a production version.\" in caplog.text caplog.clear() Config.unblock_update() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() assert caplog.text == \"\" core.stop() def test_check_callable_function(caplog): data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") Config.add_migration_function(\"1.0\", data_nodes1, 1) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"The migration function of config `data_nodes1` from version 1.0 must be populated with\" \" Callable value. Current value of property `migration_fcts` is 1.\" ) assert expected_error_message in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.0\", data_nodes1, \"bar\") with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"The migration function of config `data_nodes1` from version 1.0 must be populated with\" ' Callable value. Current value of property `migration_fcts` is \"bar\".' ) assert expected_error_message in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.0\", data_nodes1, mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() def test_check_migration_from_productions_to_productions_exist(caplog): _VersionManager._set_production_version(\"1.0\", True) _VersionManager._set_production_version(\"1.1\", True) _VersionManager._set_production_version(\"1.2\", True) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() assert 'There is no migration function from production version \"1.0\" to version \"1.1\".' in caplog.text assert 'There is no migration function from production version \"1.1\" to version \"1.2\".' in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.2\", \"data_nodes1\", mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() assert 'There is no migration function from production version \"1.0\" to version \"1.1\".' 
in caplog.text "} {"text": "import pytest from taipy.config.checker.issue_collector import IssueCollector from taipy.config.config import Config class TestConfigIdChecker: def test_check_standalone_mode(self, caplog): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") Config.configure_scenario(id=\"bar\", task_configs=[], additional_data_node_configs=[]) Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"bar\", task_configs=[]) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"`bar` is used as the config_id of multiple configurations ['DATA_NODE', 'SCENARIO']\" ' Current value of property `config_id` is \"bar\".' ) assert expected_error_message in caplog.text Config.configure_task(id=\"bar\", function=print) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"`bar` is used as the config_id of multiple configurations ['DATA_NODE', 'TASK', 'SCENARIO']\" ' Current value of property `config_id` is \"bar\".' ) assert expected_error_message in caplog.text Config.configure_task(id=\"foo\", function=print) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 2 expected_error_message = ( \"`foo` is used as the config_id of multiple configurations ['DATA_NODE', 'TASK']\" ' Current value of property `config_id` is \"foo\".' ) assert expected_error_message in caplog.text "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pytest from src.taipy.core.config.job_config import JobConfig from taipy.config.checker.issue_collector import IssueCollector from taipy.config.config import Config class TestJobConfigChecker: def test_check_standalone_mode(self, caplog): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE, max_nb_of_workers=2) Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"DataNode `foo`: In-memory storage type can ONLY be used in development mode. Current\" ' value of property `storage_type` is \"in_memory\".' 
) assert expected_error_message in caplog.text "} {"text": "from src.taipy.core.config.checkers._core_section_checker import _CoreSectionChecker from src.taipy.core.config.core_section import CoreSection from taipy.config import Config from taipy.config.checker.issue_collector import IssueCollector class TestCoreSectionChecker: _CoreSectionChecker._ACCEPTED_REPOSITORY_TYPES.update([\"mock_repo_type\"]) def test_check_valid_repository(self): Config.configure_core(repository_type=\"mock_repo_type\") Config._collector = IssueCollector() Config.check() assert len(Config._collector.warnings) == 0 Config.configure_core(repository_type=\"filesystem\") Config._collector = IssueCollector() Config.check() assert len(Config._collector.warnings) == 0 Config.configure_core(repository_type=\"sql\") Config._collector = IssueCollector() Config.check() assert len(Config._collector.warnings) == 0 def test_check_repository_type_value_wrong_str(self): Config.configure_core(repository_type=\"any\") Config._collector = IssueCollector() Config.check() assert len(Config._collector.warnings) == 1 assert Config._collector.warnings[0].field == CoreSection._REPOSITORY_TYPE_KEY assert Config._collector.warnings[0].value == \"any\" def test_check_repository_type_value_wrong_type(self): Config.configure_core(repository_type=1) Config._collector = IssueCollector() Config.check() assert len(Config._collector.warnings) == 1 assert Config._collector.warnings[0].field == CoreSection._REPOSITORY_TYPE_KEY assert Config._collector.warnings[0].value == 1 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from queue import SimpleQueue from src.taipy.core import taipy as tp from src.taipy.core.notification.core_event_consumer import CoreEventConsumerBase from src.taipy.core.notification.event import Event, EventEntityType, EventOperation from src.taipy.core.notification.notifier import Notifier from taipy.config import Config, Frequency from tests.core.utils import assert_true_after_time class AllCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.event_collected = 0 self.event_entity_type_collected: dict = {} self.event_operation_collected: dict = {} super().__init__(registration_id, queue) def process_event(self, event: Event): self.event_collected += 1 self.event_entity_type_collected[event.entity_type] = ( self.event_entity_type_collected.get(event.entity_type, 0) + 1 ) self.event_operation_collected[event.operation] = self.event_operation_collected.get(event.operation, 0) + 1 class ScenarioCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.scenario_event_collected = 0 self.event_operation_collected: dict = {} super().__init__(registration_id, queue) def process_event(self, event: Event): self.scenario_event_collected += 1 self.event_operation_collected[event.operation] = self.event_operation_collected.get(event.operation, 0) + 1 class TaskCreationCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.task_event_collected = 0 self.creation_event_operation_collected = 0 super().__init__(registration_id, queue) def process_event(self, event: Event): self.task_event_collected += 1 self.creation_event_operation_collected += 1 def test_core_event_consumer(): register_id_0, register_queue_0 = Notifier.register() all_evt_csumer_0 = AllCoreEventConsumerProcessor(register_id_0, register_queue_0) register_id_1, register_queue_1 = Notifier.register(entity_type=EventEntityType.SCENARIO) sc_evt_csumer_1 = ScenarioCoreEventConsumerProcessor(register_id_1, register_queue_1) register_id_2, register_queue_2 = Notifier.register( entity_type=EventEntityType.TASK, operation=EventOperation.CREATION ) task_creation_evt_csumer_2 = TaskCreationCoreEventConsumerProcessor(register_id_2, register_queue_2) all_evt_csumer_0.start() sc_evt_csumer_1.start() task_creation_evt_csumer_2.start() dn_config = Config.configure_data_node(\"dn_config\") task_config = Config.configure_task(\"task_config\", print, [dn_config]) scenario_config = Config.configure_scenario( \"scenario_config\", [task_config], frequency=Frequency.DAILY, sequences={\"seq\": [task_config]} ) # Create a scenario trigger 5 creation events scenario = tp.create_scenario(scenario_config) assert_true_after_time(lambda: all_evt_csumer_0.event_collected == 5, time=10) assert_true_after_time(lambda: len(all_evt_csumer_0.event_entity_type_collected) == 5, time=10) assert_true_after_time(lambda: all_evt_csumer_0.event_operation_collected[EventOperation.CREATION] == 5, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.scenario_event_collected == 1, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.event_operation_collected[EventOperation.CREATION] == 1, time=10) assert_true_after_time(lambda: len(sc_evt_csumer_1.event_operation_collected) == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.task_event_collected == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.creation_event_operation_collected == 1, 
time=10) # Deleting a scenario triggers 5 deletion events tp.delete(scenario.id) assert_true_after_time(lambda: all_evt_csumer_0.event_collected == 10, time=10) assert_true_after_time(lambda: len(all_evt_csumer_0.event_entity_type_collected) == 5, time=10) assert_true_after_time(lambda: all_evt_csumer_0.event_operation_collected[EventOperation.DELETION] == 5, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.scenario_event_collected == 2, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.event_operation_collected[EventOperation.DELETION] == 1, time=10) assert_true_after_time(lambda: len(sc_evt_csumer_1.event_operation_collected) == 2, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.task_event_collected == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.creation_event_operation_collected == 1, time=10) all_evt_csumer_0.stop() sc_evt_csumer_1.stop() task_creation_evt_csumer_2.stop() "} {"text": " from queue import SimpleQueue from src.taipy.core.notification import EventEntityType, EventOperation from src.taipy.core.notification._registration import _Registration from src.taipy.core.notification._topic import _Topic def test_create_registration(): registration_0 = _Registration() assert isinstance(registration_0.registration_id, str) assert registration_0.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_0.queue, SimpleQueue) assert registration_0.queue.qsize() == 0 assert isinstance(registration_0.topic, _Topic) assert registration_0.topic.entity_type is None assert registration_0.topic.entity_id is None assert registration_0.topic.operation is None assert registration_0.topic.attribute_name is None registration_1 = _Registration( entity_type=EventEntityType.SCENARIO, entity_id=\"SCENARIO_scenario_id\", operation=EventOperation.CREATION ) assert isinstance(registration_1.registration_id, str) assert registration_1.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_1.queue, SimpleQueue) assert registration_1.queue.qsize() == 0 assert isinstance(registration_1.topic, _Topic) assert registration_1.topic.entity_type == EventEntityType.SCENARIO assert registration_1.topic.entity_id == \"SCENARIO_scenario_id\" assert registration_1.topic.operation == EventOperation.CREATION assert registration_1.topic.attribute_name is None registration_2 = _Registration( entity_type=EventEntityType.SEQUENCE, entity_id=\"SEQUENCE_scenario_id\", operation=EventOperation.UPDATE, attribute_name=\"tasks\", ) assert isinstance(registration_2.registration_id, str) assert registration_2.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_2.queue, SimpleQueue) assert registration_2.queue.qsize() == 0 assert isinstance(registration_2.topic, _Topic) assert registration_2.topic.entity_type == EventEntityType.SEQUENCE assert registration_2.topic.entity_id == \"SEQUENCE_scenario_id\" assert registration_2.topic.operation == EventOperation.UPDATE assert registration_2.topic.attribute_name == \"tasks\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import pytest from src.taipy.core.cycle._cycle_fs_repository import _CycleFSRepository from src.taipy.core.cycle._cycle_sql_repository import _CycleSQLRepository from src.taipy.core.cycle.cycle import Cycle, CycleId from src.taipy.core.exceptions import ModelNotFound class TestCycleRepositories: @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_save_and_load(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) obj = repository._load(cycle.id) assert isinstance(obj, Cycle) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_exists(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) assert repository._exists(cycle.id) assert not repository._exists(\"not-existed-cycle\") @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_load_all(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_load_all_with_filters(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") cycle._name = f\"cycle-{i}\" repository._save(cycle) objs = repository._load_all(filters=[{\"id\": \"cycle-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_CycleSQLRepository]) def test_delete(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) repository._delete(cycle.id) with pytest.raises(ModelNotFound): repository._load(cycle.id) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_delete_all(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_delete_many(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_search(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") cycle.name = f\"cycle-{i}\" repository._save(cycle) assert len(repository._load_all()) == 10 objs = repository._search(\"name\", \"cycle-2\") assert len(objs) == 1 assert isinstance(objs[0], Cycle) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_export(self, tmpdir, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) repository._export(cycle.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _CycleFSRepository else os.path.join(tmpdir.strpath, \"cycle\") assert os.path.exists(os.path.join(dir_path, f\"{cycle.id}.json\")) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import dataclasses import pathlib from dataclasses import dataclass from typing import Any, Dict, Iterable, List, Optional, Union from src.taipy.core._manager._manager import _Manager from src.taipy.core._repository._abstract_converter import _AbstractConverter from src.taipy.core._repository._abstract_repository import _AbstractRepository from src.taipy.core._repository._filesystem_repository import _FileSystemRepository from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config @dataclass class MockModel: id: str name: str version: str def to_dict(self): return dataclasses.asdict(self) @staticmethod def from_dict(data: Dict[str, Any]): return MockModel(id=data[\"id\"], name=data[\"name\"], version=data[\"version\"]) @dataclass class MockEntity: def __init__(self, id: str, name: str, version: Optional[str] = None) -> None: self.id = id self.name = name if version: self._version = version else: self._version = _VersionManager._get_latest_version() class MockConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, entity: MockEntity) -> MockModel: return MockModel(id=entity.id, name=entity.name, version=entity._version) @classmethod def _model_to_entity(cls, model: MockModel) -> MockEntity: return MockEntity(id=model.id, name=model.name, version=model.version) class MockRepository(_AbstractRepository): # type: ignore def __init__(self, **kwargs): self.repo = _FileSystemRepository(**kwargs, converter=MockConverter) def _to_model(self, obj: MockEntity): return MockModel(obj.id, obj.name, obj._version) def _from_model(self, model: MockModel): return MockEntity(model.id, model.name, model.version) def _load(self, entity_id: str) -> MockEntity: return self.repo._load(entity_id) def _load_all(self, filters: Optional[List[Dict]] = None) -> List[MockEntity]: return self.repo._load_all(filters) def _save(self, entity: MockEntity): return self.repo._save(entity) def _exists(self, entity_id: str) -> bool: return self.repo._exists(entity_id) def _delete(self, entity_id: str): return self.repo._delete(entity_id) def _delete_all(self): return self.repo._delete_all() def _delete_many(self, ids: Iterable[str]): return self.repo._delete_many(ids) def _delete_by(self, attribute: str, value: str): return self.repo._delete_by(attribute, value) def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None) -> List[MockEntity]: return self.repo._search(attribute, value, filters) def _export(self, entity_id: str, folder_path: Union[str, pathlib.Path]): return self.repo._export(entity_id, folder_path) @property def _storage_folder(self) -> pathlib.Path: return pathlib.Path(Config.core.storage_folder) # type: ignore class MockManager(_Manager[MockEntity]): _ENTITY_NAME = MockEntity.__name__ _repository = MockRepository(model_type=MockModel, dir_name=\"foo\") class TestManager: def test_save_and_fetch_model(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) fetched_model = MockManager._get(m.id) assert m == fetched_model def test_exists(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) assert
MockManager._exists(m.id) def test_get(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) assert MockManager._get(m.id) == m def test_get_all(self): MockManager._delete_all() objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) _objs = MockManager._get_all() assert len(_objs) == 5 def test_delete(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) MockManager._delete(m.id) assert MockManager._get(m.id) is None def test_delete_all(self): objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) MockManager._delete_all() assert MockManager._get_all() == [] def test_delete_many(self): objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) MockManager._delete_many([\"uuid-0\", \"uuid-1\"]) assert len(MockManager._get_all()) == 3 def test_is_editable(self): m = MockEntity(\"uuid\", \"Foo\") MockManager._set(m) assert MockManager._is_editable(m) def test_is_readable(self): m = MockEntity(\"uuid\", \"Foo\") MockManager._set(m) assert MockManager._is_readable(m) "} {"text": " class NotifyMock: \"\"\" A shared class for testing notification on jobStatus of sequence level and scenario level \"entity\" can be understood as either \"scenario\" or \"sequence\". \"\"\" def __init__(self, entity): self.scenario = entity self.nb_called = 0 self.__name__ = \"NotifyMock\" def __call__(self, entity, job): assert entity == self.scenario if self.nb_called == 0: assert job.is_pending() if self.nb_called == 1: assert job.is_running() if self.nb_called == 2: assert job.is_finished() self.nb_called += 1 def assert_called_3_times(self): assert self.nb_called == 3 def assert_not_called(self): assert self.nb_called == 0 def reset(self): self.nb_called = 0 "} {"text": " def assert_true_after_time(assertion, msg=None, time=120): from datetime import datetime from time import sleep loops = 0 start = datetime.now() while (datetime.now() - start).seconds < time: sleep(1) # Limit CPU usage try: if assertion(): return except BaseException as e: print(\"Raise : \", e) loops += 1 continue if msg: print(msg) assert assertion() "} {"text": "import os import tempfile class NamedTemporaryFile: def __init__(self, content=None): with tempfile.NamedTemporaryFile(\"w\", delete=False) as fd: if content: fd.write(content) self.filename = fd.name def read(self): with open(self.filename, \"r\") as fp: return fp.read() def __del__(self): os.unlink(self.filename) "} {"text": "from unittest import mock import pytest from src.taipy.core import taipy from src.taipy.core._entity._labeled import _Labeled from taipy.config import Config, Frequency, Scope class MockOwner: label = \"owner_label\" def get_label(self): return self.label def test_get_label(): labeled_entity = _Labeled() with pytest.raises(NotImplementedError): labeled_entity.get_label() with pytest.raises(NotImplementedError): labeled_entity.get_simple_label() with pytest.raises(AttributeError): labeled_entity._get_label() with pytest.raises(AttributeError): labeled_entity._get_simple_label() labeled_entity.id = \"id\" assert labeled_entity._get_label() == \"id\" assert labeled_entity._get_simple_label() == \"id\" labeled_entity.config_id = \"the config id\" assert labeled_entity._get_label() == \"the config id\" assert labeled_entity._get_simple_label() == \"the config id\" labeled_entity._properties = {\"name\": \"a name\"} assert labeled_entity._get_label() == \"a name\" assert 
labeled_entity._get_simple_label() == \"a name\" labeled_entity.owner_id = \"owner_id\" with mock.patch(\"src.taipy.core.get\") as get_mck: get_mck.return_value = MockOwner() assert labeled_entity._get_label() == \"owner_label > a name\" assert labeled_entity._get_simple_label() == \"a name\" labeled_entity._properties[\"label\"] = \"a wonderful label\" assert labeled_entity._get_label() == \"a wonderful label\" assert labeled_entity._get_simple_label() == \"a wonderful label\" def mult(n1, n2): return n1 * n2 def test_get_label_complex_case(): dn1_cfg = Config.configure_data_node(\"dn1\", scope=Scope.GLOBAL) dn2_cfg = Config.configure_data_node(\"dn2\", scope=Scope.CYCLE) dn3_cfg = Config.configure_data_node(\"dn3\", scope=Scope.CYCLE) dn4_cfg = Config.configure_data_node(\"dn4\", scope=Scope.SCENARIO) dn5_cfg = Config.configure_data_node(\"dn5\", scope=Scope.SCENARIO) tA_cfg = Config.configure_task(\"t_A_C\", mult, [dn1_cfg, dn2_cfg], dn3_cfg) tB_cfg = Config.configure_task(\"t_B_S\", mult, [dn3_cfg, dn4_cfg], dn5_cfg) scenario_cfg = Config.configure_scenario(\"scenario_cfg\", [tA_cfg, tB_cfg], [], Frequency.DAILY) scenario_cfg.add_sequences( { \"sequence_C\": [tA_cfg], \"sequence_S\": [tA_cfg, tB_cfg], } ) scenario = taipy.create_scenario(scenario_cfg, name=\"My Name\") cycle = scenario.cycle cycle.name = \"Today\" sequence_C = scenario.sequence_C sequence_S = scenario.sequence_S tA = scenario.t_A_C tB = scenario.t_B_S dn1 = scenario.dn1 dn2 = scenario.dn2 dn3 = scenario.dn3 dn4 = scenario.dn4 dn5 = scenario.dn5 assert cycle.get_label() == scenario.cycle.name assert cycle.get_simple_label() == scenario.cycle.name assert scenario.get_label() == \"Today > My Name\" assert scenario.get_simple_label() == \"My Name\" assert sequence_C.get_label() == \"Today > My Name > sequence_C\" assert sequence_C.get_simple_label() == \"sequence_C\" assert sequence_S.get_label() == \"Today > My Name > sequence_S\" assert sequence_S.get_simple_label() == \"sequence_S\" assert tA.get_label() == \"Today > t_A_C\" assert tA.get_simple_label() == \"t_A_C\" assert tB.get_label() == \"Today > My Name > t_B_S\" assert tB.get_simple_label() == \"t_B_S\" assert dn1.get_label() == \"dn1\" assert dn1.get_simple_label() == \"dn1\" assert dn2.get_label() == \"Today > dn2\" assert dn2.get_simple_label() == \"dn2\" assert dn3.get_label() == \"Today > dn3\" assert dn3.get_simple_label() == \"dn3\" assert dn4.get_label() == \"Today > My Name > dn4\" assert dn4.get_simple_label() == \"dn4\" assert dn5.get_label() == \"Today > My Name > dn5\" assert dn5.get_simple_label() == \"dn5\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from src.taipy.core._entity._entity_ids import _EntityIds class TestEntityIds: def test_add_two_entity_ids(self): entity_ids_1 = _EntityIds() entity_ids_2 = _EntityIds() entity_ids_1_address = id(entity_ids_1) entity_ids_1.data_node_ids.update([\"data_node_id_1\", \"data_node_id_2\"]) entity_ids_1.task_ids.update([\"task_id_1\", \"task_id_2\"]) entity_ids_1.job_ids.update([\"job_id_1\", \"job_id_2\"]) entity_ids_1.sequence_ids.update([\"sequence_id_1\", \"sequence_id_2\"]) entity_ids_1.scenario_ids.update([\"scenario_id_1\", \"scenario_id_2\"]) entity_ids_1.cycle_ids.update([\"cycle_id_1\", \"cycle_id_2\"]) entity_ids_2.data_node_ids.update([\"data_node_id_2\", \"data_node_id_3\"]) entity_ids_2.task_ids.update([\"task_id_2\", \"task_id_3\"]) entity_ids_2.job_ids.update([\"job_id_2\", \"job_id_3\"]) entity_ids_2.sequence_ids.update([\"sequence_id_2\", \"sequence_id_3\"]) entity_ids_2.scenario_ids.update([\"scenario_id_2\", \"scenario_id_3\"]) entity_ids_2.cycle_ids.update([\"cycle_id_2\", \"cycle_id_3\"]) entity_ids_1 += entity_ids_2 # += operator should not change the address of entity_ids_1 assert id(entity_ids_1) == entity_ids_1_address assert entity_ids_1.data_node_ids == {\"data_node_id_1\", \"data_node_id_2\", \"data_node_id_3\"} assert entity_ids_1.task_ids == {\"task_id_1\", \"task_id_2\", \"task_id_3\"} assert entity_ids_1.job_ids == {\"job_id_1\", \"job_id_2\", \"job_id_3\"} assert entity_ids_1.sequence_ids == {\"sequence_id_1\", \"sequence_id_2\", \"sequence_id_3\"} assert entity_ids_1.scenario_ids == {\"scenario_id_1\", \"scenario_id_2\", \"scenario_id_3\"} assert entity_ids_1.cycle_ids == {\"cycle_id_1\", \"cycle_id_2\", \"cycle_id_3\"} "} {"text": "import pytest from src.taipy.core.common._utils import _retry_read_entity from taipy.config import Config def test_retry_decorator(mocker): func = mocker.Mock(side_effect=Exception()) @_retry_read_entity((Exception,)) def decorated_func(): func() with pytest.raises(Exception): decorated_func() # Called once in the normal flow and no retry # The Config.core.read_entity_retry is set to 0 at conftest.py assert Config.core.read_entity_retry == 0 assert func.call_count == 1 func.reset_mock() Config.core.read_entity_retry = 3 with pytest.raises(Exception): decorated_func() # Called once in the normal flow and 3 more times on the retry flow assert func.call_count == 4 def test_retry_decorator_exception_not_in_list(mocker): func = mocker.Mock(side_effect=KeyError()) Config.core.read_entity_retry = 3 @_retry_read_entity((Exception,)) def decorated_func(): func() with pytest.raises(KeyError): decorated_func() # Called only on the first time and not trigger retry because KeyError is not on the exceptions list assert func.called == 1 "} {"text": "from src.taipy.core.common.warn_if_inputs_not_ready import _warn_if_inputs_not_ready from src.taipy.core.data._data_manager_factory import _DataManagerFactory from taipy.config import Config def test_warn_inputs_all_not_ready(caplog): one = Config.configure_data_node(\"one\") two = Config.configure_data_node(\"two\") three = Config.configure_data_node(\"three\") data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}).values() _warn_if_inputs_not_ready(data_nodes) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. 
Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in data_nodes ] assert all([expected_output in stdout for expected_output in expected_outputs]) def test_warn_inputs_all_ready(caplog): one = Config.configure_data_node(\"one\", default_data=1) two = Config.configure_data_node(\"two\", default_data=2) three = Config.configure_data_node(\"three\", default_data=3) data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}).values() _warn_if_inputs_not_ready(data_nodes) stdout = caplog.text not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in data_nodes ] assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_warn_inputs_one_ready(caplog): one = Config.configure_data_node(\"one\", default_data=1) two = Config.configure_data_node(\"two\") three = Config.configure_data_node(\"three\") data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}) _warn_if_inputs_not_ready(data_nodes.values()) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [data_nodes[two], data_nodes[three]] ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [data_nodes[one]] ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_submit_task_with_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") excel_dn_cfg = Config.configure_excel_data_node(\"wrong_excel_file_path\", default_path=\"wrong_path.xlsx\") json_dn_cfg = Config.configure_json_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_path=\"wrong_path.pickle\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") input_dn_cfgs = [csv_dn_cfg, excel_dn_cfg, json_dn_cfg, pickle_dn_cfg, parquet_dn_cfg] dn_manager = _DataManagerFactory._build_manager() dns = [dn_manager._bulk_get_or_create([input_dn_cfg])[input_dn_cfg] for input_dn_cfg in input_dn_cfgs] _warn_if_inputs_not_ready(dns) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in dns ] assert all([expected_output in stdout for expected_output in expected_outputs]) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import pytest from src.taipy.core.exceptions import ModelNotFound from src.taipy.core.scenario._scenario_fs_repository import _ScenarioFSRepository from src.taipy.core.scenario._scenario_sql_repository import _ScenarioSQLRepository from src.taipy.core.scenario.scenario import Scenario, ScenarioId class TestScenarioFSRepository: @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_save_and_load(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) obj = repository._load(scenario.id) assert isinstance(obj, Scenario) @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_exists(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) assert repository._exists(scenario.id) assert not repository._exists(\"not-existed-scenario\") @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_load_all(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_load_all_with_filters(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) objs = repository._load_all(filters=[{\"id\": \"scenario-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) repository._delete(scenario.id) with pytest.raises(ModelNotFound): repository._load(scenario.id) @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_all(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_many(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_by(self, scenario, repo, init_sql_repo): repository = repo() # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") scenario._version = f\"{(i+1) // 5}.0\" repository._save(scenario) objs = repository._load_all() 
assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_search(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) assert len(repository._load_all()) == 10 objs = repository._search(\"id\", \"scenario-2\") assert len(objs) == 1 assert isinstance(objs[0], Scenario) objs = repository._search(\"id\", \"scenario-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], Scenario) assert repository._search(\"id\", \"scenario-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_export(self, tmpdir, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) repository._export(scenario.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _ScenarioFSRepository else os.path.join(tmpdir.strpath, \"scenario\") assert os.path.exists(os.path.join(dir_path, f\"{scenario.id}.json\")) "} {"text": "from src.taipy.core._version._version import _Version from taipy.config.config import Config def test_create_version(): v = _Version(\"foo\", config=Config.configure_data_node(\"dn\")) assert v.id == \"foo\" assert v.config is not None "} {"text": "import os import pytest from src.taipy.core._version._version import _Version from src.taipy.core._version._version_fs_repository import _VersionFSRepository from src.taipy.core._version._version_sql_repository import _VersionSQLRepository from src.taipy.core.exceptions import ModelNotFound class TestVersionFSRepository: @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_save_and_load(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) obj = repository._load(_version.id) assert isinstance(obj, _Version) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_exists(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) assert repository._exists(_version.id) assert not repository._exists(\"not-existed-version\") @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_load_all(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_load_all_with_filters(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" _version.name = f\"_version_{i}\" repository._save(_version) objs = repository._load_all(filters=[{\"id\": \"_version_2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) repository._delete(_version.id) with pytest.raises(ModelNotFound): repository._load(_version.id) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete_all(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) assert len(repository._load_all()) == 10 
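# _delete_all should leave the version repository empty, however many versions were saved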
repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete_many(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_search(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" _version.name = f\"_version_{i}\" repository._save(_version) assert len(repository._load_all()) == 10 objs = repository._search(\"id\", \"_version_2\") assert len(objs) == 1 assert isinstance(objs[0], _Version) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_export(self, tmpdir, _version, repo, init_sql_repo): repository = repo() repository._save(_version) repository._export(_version.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _VersionFSRepository else os.path.join(tmpdir.strpath, \"version\") assert os.path.exists(os.path.join(dir_path, f\"{_version.id}.json\")) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pytest from src.taipy.core._version._version import _Version from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config def test_save_and_get_version_entity(tmpdir): _VersionManager._repository.base_path = tmpdir assert len(_VersionManager._get_all()) == 0 version = _Version(id=\"foo\", config=Config._applied_config) _VersionManager._get_or_create(id=\"foo\", force=False) version_1 = _VersionManager._get(version.id) assert version_1.id == version.id assert Config._serializer._str(version_1.config) == Config._serializer._str(version.config) assert len(_VersionManager._get_all()) == 1 assert _VersionManager._get(version.id) == version "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from unittest import mock import pytest from src.taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _JobDispatcher, _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.exceptions.exceptions import OrchestratorNotBuilt from taipy.config import Config def test_build_orchestrator(): _OrchestratorFactory._orchestrator = None _OrchestratorFactory._dispatcher = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is None orchestrator = _OrchestratorFactory._build_orchestrator() assert orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator == _Orchestrator dispatcher = _OrchestratorFactory._build_dispatcher() assert isinstance(dispatcher, _JobDispatcher) assert isinstance(_OrchestratorFactory._dispatcher, _JobDispatcher) _OrchestratorFactory._orchestrator = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is not None with mock.patch( \"src.taipy.core._orchestrator._orchestrator_factory._OrchestratorFactory._build_dispatcher\" ) as build_dispatcher, mock.patch( \"src.taipy.core._orchestrator._orchestrator._Orchestrator.initialize\" ) as initialize: orchestrator = _OrchestratorFactory._build_orchestrator() assert orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator == _Orchestrator build_dispatcher.assert_not_called() initialize.assert_called_once() def test_build_development_dispatcher(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._orchestrator = None _OrchestratorFactory._dispatcher = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is None with pytest.raises(OrchestratorNotBuilt): _OrchestratorFactory._build_dispatcher() _OrchestratorFactory._build_orchestrator() assert _OrchestratorFactory._orchestrator is not None assert _OrchestratorFactory._dispatcher is None _OrchestratorFactory._build_dispatcher() assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) def test_build_standalone_dispatcher(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher) assert not isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 2 _OrchestratorFactory._dispatcher._nb_available_workers = 1 _OrchestratorFactory._build_dispatcher(force_restart=False) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 1 _OrchestratorFactory._build_dispatcher(force_restart=True) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 2 "} {"text": "import multiprocessing from concurrent.futures import ProcessPoolExecutor from functools import partial from unittest import mock from unittest.mock import MagicMock from pytest import raises from src.taipy.core import DataNodeId, JobId, TaskId from src.taipy.core._orchestrator._dispatcher._development_job_dispatcher import _DevelopmentJobDispatcher from 
src.taipy.core._orchestrator._dispatcher._standalone_job_dispatcher import _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job.job import Job from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.task.task import Task from taipy.config.config import Config from tests.core.utils import assert_true_after_time def execute(lock): with lock: ... return None def _error(): raise RuntimeError(\"Something bad has happened\") def test_build_development_job_dispatcher(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() dispatcher = _OrchestratorFactory._dispatcher assert isinstance(dispatcher, _DevelopmentJobDispatcher) assert dispatcher._nb_available_workers == 1 with raises(NotImplementedError): assert dispatcher.start() assert dispatcher.is_running() with raises(NotImplementedError): dispatcher.stop() def test_build_standalone_job_dispatcher(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() dispatcher = _OrchestratorFactory._dispatcher assert not isinstance(dispatcher, _DevelopmentJobDispatcher) assert isinstance(dispatcher, _StandaloneJobDispatcher) assert isinstance(dispatcher._executor, ProcessPoolExecutor) assert dispatcher._nb_available_workers == 2 assert_true_after_time(dispatcher.is_running) dispatcher.stop() dispatcher.join() assert_true_after_time(lambda: not dispatcher.is_running()) def test_can_execute_2_workers(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock = m.Lock() task_id = TaskId(\"task_id1\") output = list(_DataManager._bulk_get_or_create([Config.configure_data_node(\"input1\", default_data=21)]).values()) _OrchestratorFactory._build_dispatcher() task = Task( config_id=\"name\", properties={}, input=[], function=partial(execute, lock), output=output, id=task_id, ) job_id = JobId(\"id1\") job = Job(job_id, task, \"submit_id\", task.id) dispatcher = _StandaloneJobDispatcher(_OrchestratorFactory._orchestrator) with lock: assert dispatcher._can_execute() dispatcher._dispatch(job) assert dispatcher._can_execute() dispatcher._dispatch(job) assert not dispatcher._can_execute() assert_true_after_time(lambda: dispatcher._can_execute()) def test_can_execute_synchronous(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") task = Task(config_id=\"name\", properties={}, input=[], function=print, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id) job_id = JobId(\"id1\") job = Job(job_id, task, submission.id, task.id) dispatcher = _OrchestratorFactory._dispatcher assert dispatcher._can_execute() dispatcher._dispatch(job) assert dispatcher._can_execute() def test_exception_in_user_function(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") job_id = JobId(\"id1\") task = Task(config_id=\"name\", properties={}, input=[], function=_error, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id) job = Job(job_id, task, submission.id, task.id) dispatcher = 
_OrchestratorFactory._dispatcher dispatcher._dispatch(job) assert job.is_failed() assert 'RuntimeError(\"Something bad has happened\")' in str(job.stacktrace[0]) def test_exception_in_writing_data(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") job_id = JobId(\"id1\") output = MagicMock() output.id = DataNodeId(\"output_id\") output.config_id = \"my_raising_datanode\" output._is_in_cache = False output.write.side_effect = ValueError() task = Task(config_id=\"name\", properties={}, input=[], function=print, output=[output], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id) job = Job(job_id, task, submission.id, task.id) dispatcher = _OrchestratorFactory._dispatcher with mock.patch(\"src.taipy.core.data._data_manager._DataManager._get\") as get: get.return_value = output dispatcher._dispatch(job) assert job.is_failed() assert \"node\" in job.stacktrace[0] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from src.taipy.core.data import InMemoryDataNode from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.task._task_model import _TaskModel from taipy.config.common.scope import Scope def test_none_properties_attribute_compatible(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"parent_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"output_id\"], \"version\": \"latest\", \"skippable\": False, } ) assert len(model.properties) == 0 def test_skippable_compatibility_with_non_existing_output(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"output_id\"], \"version\": \"latest\", \"skippable\": False, } ) assert not model.skippable def test_skippable_compatibility_with_no_output(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [], \"version\": \"latest\", \"skippable\": False, } ) assert not model.skippable def test_skippable_compatibility_with_one_output(): manager = _DataManagerFactory._build_manager() manager._set(InMemoryDataNode(\"cfg_id\", Scope.SCENARIO, id=\"dn_id\")) model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"dn_id\"], \"version\": \"latest\", \"skippable\": True, } ) assert model.skippable def test_skippable_compatibility_with_many_outputs(): manager = _DataManagerFactory._build_manager() manager._set(InMemoryDataNode(\"cfg_id\", Scope.SCENARIO, id=\"dn_id\")) manager._set(InMemoryDataNode(\"cfg_id_2\", Scope.SCENARIO, id=\"dn_2_id\")) model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"dn_id\", \"dn_2_id\"], \"version\": \"latest\", \"skippable\": True, } ) assert model.skippable "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from src.taipy.core.sequence._sequence_converter import _SequenceConverter from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.task.task import Task def test_entity_to_model(sequence): sequence_model_1 = _SequenceConverter._entity_to_model(sequence) expected_sequence_model_1 = { \"id\": \"sequence_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id_1\", \"parent_id_2\"], \"properties\": {}, \"tasks\": [], \"subscribers\": [], \"version\": \"random_version_number\", } sequence_model_1[\"parent_ids\"] = sorted(sequence_model_1[\"parent_ids\"]) assert sequence_model_1 == expected_sequence_model_1 task_1 = Task(\"task_1\", {}, print) task_2 = Task(\"task_2\", {}, print) sequence_2 = Sequence( {\"name\": \"sequence_2\"}, [task_1, task_2], \"SEQUENCE_sq_1_SCENARIO_sc\", \"SCENARIO_sc\", [\"SCENARIO_sc\"], [], \"random_version\", ) sequence_model_2 = _SequenceConverter._entity_to_model(sequence_2) expected_sequence_model_2 = { \"id\": \"SEQUENCE_sq_1_SCENARIO_sc\", \"owner_id\": \"SCENARIO_sc\", \"parent_ids\": [\"SCENARIO_sc\"], \"properties\": {\"name\": \"sequence_2\"}, \"tasks\": [task_1.id, task_2.id], \"subscribers\": [], \"version\": \"random_version\", } assert sequence_model_2 == expected_sequence_model_2 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from src.taipy.core.data.data_node import DataNode from src.taipy.core.data.in_memory import InMemoryDataNode from taipy.config.common.scope import Scope class FakeDataNode(InMemoryDataNode): read_has_been_called = 0 write_has_been_called = 0 def __init__(self, config_id, **kwargs): scope = kwargs.pop(\"scope\", Scope.SCENARIO) super().__init__(config_id=config_id, scope=scope, **kwargs) def _read(self, query=None): self.read_has_been_called += 1 def _write(self, data): self.write_has_been_called += 1 @classmethod def storage_type(cls) -> str: return \"fake_inmemory\" write = DataNode.write # Make sure that the writing behavior comes from DataNode class FakeDataframeDataNode(DataNode): COLUMN_NAME_1 = \"a\" COLUMN_NAME_2 = \"b\" def __init__(self, config_id, default_data_frame, **kwargs): super().__init__(config_id, **kwargs) self.data = default_data_frame def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_df_dn\" class FakeNumpyarrayDataNode(DataNode): def __init__(self, config_id, default_array, **kwargs): super().__init__(config_id, **kwargs) self.data = default_array def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_np_dn\" class FakeListDataNode(DataNode): class Row: def __init__(self, value): self.value = value def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = [self.Row(i) for i in range(10)] def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_list_dn\" class CustomClass: def __init__(self, a, b): self.a = a self.b = b class FakeCustomDataNode(DataNode): def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = [CustomClass(i, i * 2) for i in range(10)] def _read(self): return self.data class FakeMultiSheetExcelDataFrameDataNode(DataNode): def __init__(self, config_id, default_data_frame, **kwargs): super().__init__(config_id, **kwargs) self.data = { \"Sheet1\": default_data_frame, \"Sheet2\": default_data_frame, } def _read(self): return self.data class FakeMultiSheetExcelCustomDataNode(DataNode): def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = { \"Sheet1\": [CustomClass(i, i * 2) for i in range(10)], \"Sheet2\": [CustomClass(i, i * 2) for i in range(10)], } def _read(self): return self.data "} {"text": "import os import pytest from src.taipy.core.data._data_fs_repository import _DataFSRepository from src.taipy.core.data._data_sql_repository import _DataSQLRepository from src.taipy.core.data.data_node import DataNode, DataNodeId from src.taipy.core.exceptions import ModelNotFound class TestDataNodeRepository: @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_save_and_load(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) obj = repository._load(data_node.id) assert isinstance(obj, DataNode) @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_exists(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) assert repository._exists(data_node.id) assert not repository._exists(\"not-existed-data-node\") @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_load_all(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) data_nodes = repository._load_all() assert len(data_nodes) == 10 
@pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_load_all_with_filters(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node.owner_id = f\"task-{i}\" repository._save(data_node) objs = repository._load_all(filters=[{\"owner_id\": \"task-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) repository._delete(data_node.id) with pytest.raises(ModelNotFound): repository._load(data_node.id) @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_all(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_many(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_by(self, data_node, repo, init_sql_repo): repository = repo() # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node._version = f\"{(i+1) // 5}.0\" repository._save(data_node) objs = repository._load_all() assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_search(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node.owner_id = f\"task-{i}\" repository._save(data_node) assert len(repository._load_all()) == 10 objs = repository._search(\"owner_id\", \"task-2\") assert len(objs) == 1 assert isinstance(objs[0], DataNode) objs = repository._search(\"owner_id\", \"task-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], DataNode) assert repository._search(\"owner_id\", \"task-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_export(self, tmpdir, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) repository._export(data_node.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _DataFSRepository else os.path.join(tmpdir.strpath, \"data_node\") assert os.path.exists(os.path.join(dir_path, f\"{data_node.id}.json\")) "} {"text": "import pytest from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import NoData from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId class TestInMemoryDataNodeEntity: def test_create(self): dn = InMemoryDataNode( \"foobar_bazy\", Scope.SCENARIO, DataNodeId(\"id_uio\"), \"owner_id\", properties={\"default_data\": \"In memory Data Node\", 
\"name\": \"my name\"}, ) assert isinstance(dn, InMemoryDataNode) assert dn.storage_type() == \"in_memory\" assert dn.config_id == \"foobar_bazy\" assert dn.scope == Scope.SCENARIO assert dn.id == \"id_uio\" assert dn.name == \"my name\" assert dn.owner_id == \"owner_id\" assert dn.last_edit_date is not None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.read() == \"In memory Data Node\" dn_2 = InMemoryDataNode(\"foo\", Scope.SCENARIO) assert dn_2.last_edit_date is None assert not dn_2.is_ready_for_reading with pytest.raises(InvalidConfigurationId): InMemoryDataNode(\"foo bar\", Scope.SCENARIO, DataNodeId(\"dn_id\")) def test_get_user_properties(self): dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": 1, \"foo\": \"bar\"}) assert dn._get_user_properties() == {\"foo\": \"bar\"} def test_read_and_write(self): no_data_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(NoData): assert no_data_dn.read() is None no_data_dn.read_or_raise() in_mem_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": \"bar\"}) assert isinstance(in_mem_dn.read(), str) assert in_mem_dn.read() == \"bar\" in_mem_dn.properties[\"default_data\"] = \"baz\" # this modifies the default data value but not the data itself assert in_mem_dn.read() == \"bar\" in_mem_dn.write(\"qux\") assert in_mem_dn.read() == \"qux\" in_mem_dn.write(1998) assert isinstance(in_mem_dn.read(), int) assert in_mem_dn.read() == 1998 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from datetime import datetime from time import sleep from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.submission.submission_status import SubmissionStatus from src.taipy.core.task.task import Task def test_create_submission(scenario): submission_1 = _SubmissionManagerFactory._build_manager()._create(scenario.id) assert submission_1.id is not None assert submission_1.entity_id == scenario.id assert submission_1.jobs == [] assert isinstance(submission_1.creation_date, datetime) assert submission_1._submission_status == SubmissionStatus.SUBMITTED def test_get_submission(): submission_manager = _SubmissionManagerFactory._build_manager() assert submission_manager._get(\"random_submission_id\") is None submission_1 = submission_manager._create(\"entity_id\") submission_2 = submission_manager._get(submission_1.id) assert submission_1.id == submission_2.id assert submission_1.entity_id == submission_2.entity_id == \"entity_id\" assert submission_1.jobs == submission_2.jobs assert submission_1.creation_date == submission_2.creation_date assert submission_1.submission_status == submission_2.submission_status def test_get_all_submission(): submission_manager = _SubmissionManagerFactory._build_manager() version_manager = _VersionManagerFactory._build_manager() submission_manager._set(Submission(\"entity_id\", \"submission_id\", version=version_manager._get_latest_version())) for version_name in [\"abc\", \"xyz\"]: for i in range(10): submission_manager._set( Submission(\"entity_id\", f\"submission_{version_name}_{i}\", version=f\"{version_name}\") ) assert len(submission_manager._get_all()) == 1 version_manager._set_experiment_version(\"xyz\") version_manager._set_experiment_version(\"abc\") assert len(submission_manager._get_all()) == 10 assert len(submission_manager._get_all(\"abc\")) == 10 assert len(submission_manager._get_all(\"xyz\")) == 10 def test_get_latest_submission(): task_1 = Task(\"task_config_1\", {}, print, id=\"task_id_1\") task_2 = Task(\"task_config_2\", {}, print, id=\"task_id_2\") submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(task_1.id) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_2 = submission_manager._create(task_2.id) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_3 = submission_manager._create(task_1.id) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_4 = submission_manager._create(task_2.id) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_4 def test_delete_submission(): submission_manager = _SubmissionManagerFactory._build_manager() submission = Submission(\"entity_id\", \"submission_id\") submission_manager._set(submission) for i in range(10): submission_manager._set(Submission(\"entity_id\", f\"submission_{i}\")) 
assert len(submission_manager._get_all()) == 11 assert isinstance(submission_manager._get(submission.id), Submission) submission_manager._delete(submission.id) assert len(submission_manager._get_all()) == 10 assert submission_manager._get(submission.id) is None submission_manager._delete_all() assert len(submission_manager._get_all()) == 0 "} {"text": "from datetime import datetime from time import sleep from src.taipy.core import Task from src.taipy.core._repository.db._sql_connection import _SQLConnection from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.submission.submission_status import SubmissionStatus def init_managers(): _VersionManagerFactory._build_manager()._delete_all() _SubmissionManagerFactory._build_manager()._delete_all() def test_create_submission(scenario, init_sql_repo): init_managers() submission_1 = _SubmissionManagerFactory._build_manager()._create(scenario.id) assert submission_1.id is not None assert submission_1.entity_id == scenario.id assert submission_1.jobs == [] assert isinstance(submission_1.creation_date, datetime) assert submission_1._submission_status == SubmissionStatus.SUBMITTED def test_get_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(\"entity_id\") submission_2 = submission_manager._get(submission_1.id) assert submission_1.id == submission_2.id assert submission_1.entity_id == submission_2.entity_id == \"entity_id\" assert submission_1.jobs == submission_2.jobs assert submission_1.creation_date == submission_2.creation_date assert submission_1.submission_status == submission_2.submission_status def test_get_all_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() version_manager = _VersionManagerFactory._build_manager() submission_manager._set(Submission(\"entity_id\", \"submission_id\", version=version_manager._get_latest_version())) for version_name in [\"abc\", \"xyz\"]: for i in range(10): submission_manager._set( Submission(\"entity_id\", f\"submission_{version_name}_{i}\", version=f\"{version_name}\") ) assert len(submission_manager._get_all()) == 1 version_manager._set_experiment_version(\"xyz\") version_manager._set_experiment_version(\"abc\") assert len(submission_manager._get_all()) == 10 assert len(submission_manager._get_all(\"abc\")) == 10 assert len(submission_manager._get_all(\"xyz\")) == 10 def test_get_latest_submission(init_sql_repo): init_managers() task_1 = Task(\"task_config_1\", {}, print, id=\"task_id_1\") task_2 = Task(\"task_config_2\", {}, print, id=\"task_id_2\") submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(task_1.id) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) is None sleep(0.01) # Comparison is based on creation time; the clock precision on Windows is too coarse without a small delay submission_2 = submission_manager._create(task_2.id) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on creation time; the clock precision on Windows is too coarse without a small delay submission_3 = submission_manager._create(task_1.id) assert submission_manager._get_latest(task_1) == submission_3 assert 
submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on creation time; the clock precision on Windows is too coarse without a small delay submission_4 = submission_manager._create(task_2.id) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_4 def test_delete_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() submission = Submission(\"entity_id\", \"submission_id\") submission_manager._set(submission) for i in range(10): submission_manager._set(Submission(\"entity_id\", f\"submission_{i}\")) assert len(submission_manager._get_all()) == 11 assert isinstance(submission_manager._get(submission.id), Submission) submission_manager._delete(submission.id) assert len(submission_manager._get_all()) == 10 assert submission_manager._get(submission.id) is None submission_manager._delete_all() assert len(submission_manager._get_all()) == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from importlib.util import find_spec if find_spec(\"taipy\"): if find_spec(\"taipy.config\"): from taipy.config._init import * # type: ignore if find_spec(\"taipy.gui\"): from taipy.gui._init import * # type: ignore if find_spec(\"taipy.core\"): from taipy.core._init import * # type: ignore if find_spec(\"taipy.rest\"): from taipy.rest._init import * # type: ignore if find_spec(\"taipy.gui_core\"): from taipy.gui_core._init import * # type: ignore if find_spec(\"taipy.enterprise\"): from taipy.enterprise._init import * # type: ignore if find_spec(\"taipy._run\"): from taipy._run import _run as run # type: ignore "} {"text": "from ._core import Core from ._entity.submittable import Submittable from .cycle.cycle import Cycle from .cycle.cycle_id import CycleId from .data.data_node import DataNode from .data.data_node_id import DataNodeId from .job.job import Job from .job.job_id import JobId from .job.status import Status from .scenario.scenario import Scenario from .scenario.scenario_id import ScenarioId from .sequence.sequence import Sequence from .sequence.sequence_id import SequenceId from .taipy import ( cancel_job, clean_all_entities_by_version, compare_scenarios, create_global_data_node, create_scenario, delete, delete_job, delete_jobs, exists, export_scenario, get, get_cycles, get_cycles_scenarios, get_data_nodes, get_entities_by_config_id, get_jobs, get_latest_job, get_parents, get_primary, get_primary_scenarios, get_scenarios, get_sequences, get_tasks, is_deletable, is_editable, is_promotable, is_readable, is_submittable, set, set_primary, submit, subscribe_scenario, subscribe_sequence, tag, unsubscribe_scenario, unsubscribe_sequence, untag, ) from .task.task import Task from .task.task_id import TaskId "} {"text": "\"\"\"# Taipy Core The Taipy Core package is a Python library designed to build powerful, customized, data-driven back-end applications. It provides the tools to help Python developers transform their algorithms into a complete back-end application. 
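As a quick preview of the workflow described below (a minimal sketch: the `double` function and the configuration ids are illustrative, not part of the package):

    import taipy as tp
    from taipy.config import Config

    def double(nb):
        return nb * 2

    # Configure two data nodes and the task that connects them
    input_cfg = Config.configure_data_node(\"my_input\", default_data=21)
    output_cfg = Config.configure_data_node(\"my_output\")
    task_cfg = Config.configure_task(\"double_task\", double, input_cfg, output_cfg)
    scenario_cfg = Config.configure_scenario(\"my_scenario\", [task_cfg])

    tp.Core().run()  # start the Core service so that submitted jobs are dispatched
    scenario = tp.create_scenario(scenario_cfg)
    tp.submit(scenario)
    print(scenario.my_output.read())  # 42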
More details on the [Taipy Core](../../core/index.md) functionalities are available in the user manual. To build a Taipy Core application, the first step consists of setting up the Taipy configuration to design your application's characteristics and behaviors. Import `Config^` from the `taipy.config^` module, then use the various methods of the `Config^` singleton class to configure your core application. In particular, configure the [data nodes](../../core/config/data-node-config.md), [tasks](../../core/config/task-config.md), and [scenarios](../../core/config/scenario-config.md). Please refer to the [Core configuration user manual](../../core/config/index.md) for more information and detailed examples. Once your application is configured, import the main package with `import taipy as tp` so you can use any function described in the following section on [Functions](#functions). In particular, the most used functions are `tp.create_scenario()`, `tp.get_scenarios()`, `tp.get_data_nodes()`, `tp.submit()`, used to create, get, and submit entities. !!! Note Taipy Core provides a runnable service, `Core^`, that runs as a service in a dedicated thread. The purpose is to have a dedicated thread responsible for dispatching the submitted jobs to an available executor for their execution. In particular, this `Core^` service is automatically run when Core is used with Taipy REST or Taipy GUI. See the [running services](../../run-deploy/run/running_services.md) page of the user manual for more details. \"\"\" from ._init import * from ._init_version import _read_version from .common.mongo_default_document import MongoDefaultDocument from .data.data_node_id import Edit from .exceptions import exceptions __version__ = _read_version() "} {"text": "import json import os from pathlib import Path def _read_version(): with open(f\"{Path(os.path.abspath(__file__)).parent}{os.sep}version.json\") as version_file: version = json.load(version_file) version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}' if vext := version.get(\"ext\"): version_string = f\"{version_string}.{vext}\" return version_string "} {"text": "from multiprocessing import Lock from typing import Optional from taipy.config import Config from taipy.logger._taipy_logger import _TaipyLogger from ._backup._backup import _init_backup_file_with_storage_folder from ._core_cli import _CoreCLI from ._orchestrator._dispatcher._job_dispatcher import _JobDispatcher from ._orchestrator._orchestrator import _Orchestrator from ._orchestrator._orchestrator_factory import _OrchestratorFactory from ._version._version_manager_factory import _VersionManagerFactory from .config import CoreSection from .exceptions.exceptions import CoreServiceIsAlreadyRunning class Core: \"\"\" Core service \"\"\" _is_running = False __lock_is_running = Lock() __logger = _TaipyLogger._get_logger() _orchestrator: Optional[_Orchestrator] = None _dispatcher: Optional[_JobDispatcher] = None def __init__(self): \"\"\" Initialize a Core service. \"\"\" pass def run(self, force_restart=False): \"\"\" Start a Core service. This function checks the configuration, manages the application's version, starts a dispatcher, and locks the Config. 
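A minimal usage sketch:

    from taipy.core import Core

    core = Core()
    core.run()
    # ... create and submit scenarios while the service is running ...
    core.stop()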
\"\"\" if self.__class__._is_running: raise CoreServiceIsAlreadyRunning with self.__class__.__lock_is_running: self.__class__._is_running = True self.__update_core_section() self.__manage_version() self.__check_and_block_config() if self._orchestrator is None: self._orchestrator = _OrchestratorFactory._build_orchestrator() self.__start_dispatcher(force_restart) def stop(self): \"\"\" Stop the Core service. This function stops the dispatcher and unblock the Config for update. \"\"\" Config.unblock_update() if self._dispatcher: self._dispatcher = _OrchestratorFactory._remove_dispatcher() self.__logger.info(\"Core service has been stopped.\") with self.__class__.__lock_is_running: self.__class__._is_running = False @staticmethod def __update_core_section(): _CoreCLI.create_parser() Config._applied_config._unique_sections[CoreSection.name]._update(_CoreCLI.parse_arguments()) @staticmethod def __manage_version(): _VersionManagerFactory._build_manager()._manage_version() Config._applied_config._unique_sections[CoreSection.name]._update( {\"version_number\": _VersionManagerFactory._build_manager()._get_latest_version()} ) @staticmethod def __check_and_block_config(): Config.check() Config.block_update() _init_backup_file_with_storage_folder() def __start_dispatcher(self, force_restart): if dispatcher := _OrchestratorFactory._build_dispatcher(force_restart=force_restart): self._dispatcher = dispatcher if Config.job_config.is_development: _Orchestrator._check_and_execute_jobs_if_development_mode() "} {"text": "from typing import Dict from taipy._cli._base_cli import _CLI from .config import CoreSection class _CoreCLI: \"\"\"Command-line interface for Taipy Core application.\"\"\" __MODE_ARGS: Dict[str, Dict] = { \"--development\": { \"action\": \"store_true\", \"dest\": \"taipy_development\", \"help\": \"\"\" When execute Taipy application in `development` mode, all entities from the previous development version will be deleted before running new Taipy application. \"\"\", }, \"--experiment\": { \"dest\": \"taipy_experiment\", \"nargs\": \"?\", \"const\": \"\", \"metavar\": \"VERSION\", \"help\": \"\"\" When execute Taipy application in `experiment` mode, the current Taipy application is saved to a new version. If version name already exists, check for compatibility with current Python Config and run the application. Without being specified, the version number will be a random string. \"\"\", }, \"--production\": { \"dest\": \"taipy_production\", \"nargs\": \"?\", \"const\": \"\", \"metavar\": \"VERSION\", \"help\": \"\"\" When execute in `production` mode, the current version is used in production. All production versions should have the same configuration and share all entities. Without being specified, the latest version is used. \"\"\", }, } __FORCE_ARGS: Dict[str, Dict] = { \"--force\": { \"dest\": \"taipy_force\", \"action\": \"store_true\", \"help\": \"\"\" Force override the configuration of the version if existed and run the application. Default to False. 
\"\"\", }, \"--no-force\": { \"dest\": \"no_taipy_force\", \"action\": \"store_true\", \"help\": \"Stop the application if any Config conflict exists.\", }, } @classmethod def create_parser(cls): core_parser = _CLI._add_groupparser(\"Taipy Core\", \"Optional arguments for Taipy Core service\") mode_group = core_parser.add_mutually_exclusive_group() for mode_arg, mode_arg_dict in cls.__MODE_ARGS.items(): mode_group.add_argument(mode_arg, cls.__add_taipy_prefix(mode_arg), **mode_arg_dict) force_group = core_parser.add_mutually_exclusive_group() for force_arg, force_arg_dict in cls.__FORCE_ARGS.items(): force_group.add_argument(cls.__add_taipy_prefix(force_arg), **force_arg_dict) @classmethod def create_run_parser(cls): run_parser = _CLI._add_subparser(\"run\", help=\"Run a Taipy application.\") mode_group = run_parser.add_mutually_exclusive_group() for mode_arg, mode_arg_dict in cls.__MODE_ARGS.items(): mode_group.add_argument(mode_arg, **mode_arg_dict) force_group = run_parser.add_mutually_exclusive_group() for force_arg, force_arg_dict in cls.__FORCE_ARGS.items(): force_group.add_argument(force_arg, **force_arg_dict) @classmethod def parse_arguments(cls): args = _CLI._parse() as_dict = {} if args.taipy_development: as_dict[CoreSection._MODE_KEY] = CoreSection._DEVELOPMENT_MODE elif args.taipy_experiment is not None: as_dict[CoreSection._MODE_KEY] = CoreSection._EXPERIMENT_MODE as_dict[CoreSection._VERSION_NUMBER_KEY] = args.taipy_experiment elif args.taipy_production is not None: as_dict[CoreSection._MODE_KEY] = CoreSection._PRODUCTION_MODE as_dict[CoreSection._VERSION_NUMBER_KEY] = args.taipy_production if args.taipy_force: as_dict[CoreSection._FORCE_KEY] = True elif args.no_taipy_force: as_dict[CoreSection._FORCE_KEY] = False return as_dict @classmethod def __add_taipy_prefix(cls, key: str): if key.startswith(\"--no-\"): return key[:5] + \"taipy-\" + key[5:] return key[:2] + \"taipy-\" + key[2:] "} {"text": "import json import re from datetime import datetime, timedelta class _Decoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def _str_to_timedelta(self, timedelta_str: str) -> timedelta: \"\"\" Parse a time string e.g. (2h13m) into a timedelta object. :param timedelta_str: A string identifying a duration. (eg. 2h13m) :return datetime.timedelta: A datetime.timedelta object \"\"\" regex = re.compile( r\"^((?P[\\.\\d]+?)d)? *\" r\"((?P[\\.\\d]+?)h)? *\" r\"((?P[\\.\\d]+?)m)? *\" r\"((?P[\\.\\d]+?)s)?$\" ) parts = regex.match(timedelta_str) if not parts: raise TypeError(\"Can not deserialize string into timedelta\") time_params = {name: float(param) for name, param in parts.groupdict().items() if param} # mypy has an issue with dynamic keyword parameters, hence the type ignore on the line bellow. return timedelta(**time_params) # type: ignore def object_hook(self, source): if source.get(\"__type__\") == \"Datetime\": return datetime.fromisoformat(source.get(\"__value__\")) if source.get(\"__type__\") == \"Timedelta\": return self._str_to_timedelta(source.get(\"__value__\")) else: return source def loads(d): return json.loads(d, cls=_Decoder) "} {"text": "import pathlib from abc import abstractmethod from typing import Any, Dict, Generic, Iterable, List, Optional, TypeVar, Union ModelType = TypeVar(\"ModelType\") Entity = TypeVar(\"Entity\") class _AbstractRepository(Generic[ModelType, Entity]): @abstractmethod def _save(self, entity: Entity): \"\"\" Save an entity in the repository. 
Parameters: entity: The entity to save. \"\"\" raise NotImplementedError @abstractmethod def _exists(self, entity_id: str) -> bool: \"\"\" Check if an entity with id entity_id exists in the repository. Parameters: entity_id: The entity id, i.e., its primary key. Returns: True if the entity id exists. \"\"\" raise NotImplementedError @abstractmethod def _load(self, entity_id: str) -> Entity: \"\"\" Retrieve the entity data from the repository. Parameters: entity_id: The entity id, i.e., its primary key. Returns: An entity. \"\"\" raise NotImplementedError @abstractmethod def _load_all(self, filters: Optional[List[Dict]] = None) -> List[Entity]: \"\"\" Retrieve all the entities' data from the repository, taking any passed filter into account. Returns: A list of entities. \"\"\" raise NotImplementedError @abstractmethod def _delete(self, entity_id: str): \"\"\" Delete an entity in the repository. Parameters: entity_id: The id of the entity to be deleted. \"\"\" raise NotImplementedError @abstractmethod def _delete_all(self): \"\"\" Delete all entities from the repository. \"\"\" raise NotImplementedError @abstractmethod def _delete_many(self, ids: Iterable[str]): \"\"\" Delete all entities from the list of ids from the repository. Parameters: ids: List of ids to be deleted. \"\"\" raise NotImplementedError @abstractmethod def _delete_by(self, attribute: str, value: str): \"\"\" Delete all entities whose attribute matches the given value. Parameters: attribute: The entity property that is the key to the search. value: The value of the attribute being searched for. \"\"\" raise NotImplementedError @abstractmethod def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None) -> List[Entity]: \"\"\" Search for all entities whose attribute matches the given value. Parameters: attribute: The entity property that is the key to the search. value: The value of the attribute being searched for. Returns: A list of entities that match the search criteria. \"\"\" raise NotImplementedError @abstractmethod def _export(self, entity_id: str, folder_path: Union[str, pathlib.Path]): \"\"\" Export an entity from the repository. Parameters: entity_id (str): The id of the entity to be exported. folder_path (Union[str, pathlib.Path]): The folder path to export the entity to. \"\"\" raise NotImplementedError "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
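To make the repository contract above concrete, here is a hedged, minimal in-memory implementation (illustrative only; `_InMemoryRepository` is hypothetical, while Taipy's real implementations are the filesystem and SQL repositories shown elsewhere in this document):

```python
from typing import Any, Dict, Iterable, List, Optional


class _InMemoryRepository:
    """A naive dict-backed repository honoring the _AbstractRepository contract."""

    def __init__(self):
        self._store: Dict[str, Any] = {}

    def _save(self, entity):
        self._store[entity.id] = entity

    def _exists(self, entity_id: str) -> bool:
        return entity_id in self._store

    def _load(self, entity_id: str):
        return self._store[entity_id]

    def _load_all(self, filters: Optional[List[Dict]] = None):
        entities = list(self._store.values())
        if filters:
            # Each filter dict is AND-ed internally; the list of dicts is OR-ed,
            # mirroring how _build_filters_with_version produces one dict per version.
            entities = [
                e
                for e in entities
                if any(all(getattr(e, k, None) == v for k, v in f.items()) for f in filters)
            ]
        return entities

    def _delete(self, entity_id: str):
        self._store.pop(entity_id, None)

    def _delete_all(self):
        self._store.clear()

    def _delete_many(self, ids: Iterable[str]):
        for entity_id in ids:
            self._delete(entity_id)

    def _delete_by(self, attribute: str, value: str):
        for entity_id in [i for i, e in self._store.items() if getattr(e, attribute, None) == value]:
            self._delete(entity_id)

    def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None):
        return [e for e in self._load_all(filters) if getattr(e, attribute, None) == value]
```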
"} {"text": "import json from datetime import datetime, timedelta from enum import Enum from typing import Any class _Encoder(json.JSONEncoder): def _timedelta_to_str(self, obj: timedelta) -> str: total_seconds = obj.total_seconds() return ( f\"{int(total_seconds // 86400)}d\" f\"{int(total_seconds % 86400 // 3600)}h\" f\"{int(total_seconds % 3600 // 60)}m\" f\"{int(total_seconds % 60)}s\" ) def default(self, o: Any): if isinstance(o, Enum): result = o.value elif isinstance(o, datetime): result = {\"__type__\": \"Datetime\", \"__value__\": o.isoformat()} elif isinstance(o, timedelta): result = {\"__type__\": \"Timedelta\", \"__value__\": self._timedelta_to_str(o)} else: result = json.JSONEncoder.default(self, o) return result def dumps(d): return json.dumps(d, cls=_Encoder) "} {"text": "from abc import ABC, abstractmethod class _AbstractConverter(ABC): @classmethod @abstractmethod def _entity_to_model(cls, entity): raise NotImplementedError @classmethod @abstractmethod def _model_to_entity(cls, model): raise NotImplementedError "} {"text": "import dataclasses import enum import json from typing import Any, Dict from sqlalchemy import Table from ._decoder import _Decoder from ._encoder import _Encoder class _BaseModel: __table__: Table def __iter__(self): for attr, value in self.__dict__.items(): yield attr, value def to_dict(self) -> Dict[str, Any]: model_dict = {**dataclasses.asdict(self)} for k, v in model_dict.items(): if isinstance(v, enum.Enum): model_dict[k] = repr(v) return model_dict @staticmethod def _serialize_attribute(value): return json.dumps(value, ensure_ascii=False, cls=_Encoder) @staticmethod def _deserialize_attribute(value): if isinstance(value, str): return json.loads(value.replace(\"'\", '\"'), cls=_Decoder) return value @staticmethod def from_dict(data: Dict[str, Any]): pass def to_list(self): pass "} {"text": "import sqlite3 from functools import lru_cache from sqlite3 import Connection from sqlalchemy.dialects import sqlite from sqlalchemy.schema import CreateTable from taipy.config.config import Config from ...exceptions import MissingRequiredProperty def dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d class _SQLConnection: _connection = None @classmethod def init_db(cls): if cls._connection: return cls._connection cls._connection = _build_connection() cls._connection.row_factory = dict_factory from ..._version._version_model import _VersionModel from ...cycle._cycle_model import _CycleModel from ...data._data_model import _DataNodeModel from ...job._job_model import _JobModel from ...scenario._scenario_model import _ScenarioModel from ...submission._submission_model import _SubmissionModel from ...task._task_model import _TaskModel cls._connection.execute( str(CreateTable(_CycleModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_DataNodeModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_JobModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_ScenarioModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_TaskModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_VersionModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( 
str(CreateTable(_SubmissionModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) return cls._connection def _build_connection() -> Connection: # Set SQLite threading mode to serialized: threads may share the module, connections, and cursors sqlite3.threadsafety = 3 properties = Config.core.repository_properties try: db_location = properties[\"db_location\"] except KeyError: raise MissingRequiredProperty(\"Missing property db_location.\") return __build_connection(db_location) @lru_cache def __build_connection(db_location: str): return sqlite3.connect(db_location, check_same_thread=False) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from sqlalchemy.orm import declarative_base, registry _SQLBaseModel = declarative_base() mapper_registry = registry() "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": " import os from taipy.config import Config __BACKUP_FILE_PATH_ENVIRONMENT_VARIABLE_NAME = \"TAIPY_BACKUP_FILE_PATH\" def _init_backup_file_with_storage_folder(): if preserve_file_path := os.getenv(__BACKUP_FILE_PATH_ENVIRONMENT_VARIABLE_NAME): with open(preserve_file_path, \"a\") as f: f.write(f\"{Config.core.storage_folder}\\n\") def _append_to_backup_file(new_file_path: str): if preserve_file_path := os.getenv(__BACKUP_FILE_PATH_ENVIRONMENT_VARIABLE_NAME): storage_folder = os.path.abspath(Config.core.storage_folder) + os.sep if not os.path.abspath(new_file_path).startswith(storage_folder): with open(preserve_file_path, \"a\") as f: f.write(f\"{new_file_path}\\n\") def _remove_from_backup_file(to_remove_file_path: str): if preserve_file_path := os.getenv(__BACKUP_FILE_PATH_ENVIRONMENT_VARIABLE_NAME, None): storage_folder = os.path.abspath(Config.core.storage_folder) + os.sep if not os.path.abspath(to_remove_file_path).startswith(storage_folder): try: with open(preserve_file_path, \"r+\") as f: old_backup = f.read() to_remove_file_path = to_remove_file_path + \"\\n\" # Different data nodes may point to the same file, so only the first # occurrence of the file path is replaced.
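# If the path is the very first entry there is no leading newline to keep; otherwise replace \"\\n<path>\\n\" with a single \"\\n\" so the surrounding entries stay separated.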
if old_backup.startswith(to_remove_file_path): new_backup = old_backup.replace(to_remove_file_path, \"\", 1) else: new_backup = old_backup.replace(\"\\n\" + to_remove_file_path, \"\\n\", 1) if new_backup is not old_backup: f.seek(0) f.write(new_backup) f.truncate() except Exception: pass def _replace_in_backup_file(old_file_path: str, new_file_path: str): _remove_from_backup_file(old_file_path) _append_to_backup_file(new_file_path) "} {"text": "from taipy.config import _inject_section from taipy.config.checker._checker import _Checker from taipy.config.common.frequency import Frequency # type: ignore from taipy.config.common.scope import Scope # type: ignore from taipy.config.config import Config # type: ignore from taipy.config.global_app.global_app_config import GlobalAppConfig # type: ignore from .checkers._config_id_checker import _ConfigIdChecker from .checkers._core_section_checker import _CoreSectionChecker from .checkers._data_node_config_checker import _DataNodeConfigChecker from .checkers._job_config_checker import _JobConfigChecker from .checkers._scenario_config_checker import _ScenarioConfigChecker from .checkers._task_config_checker import _TaskConfigChecker from .core_section import CoreSection from .data_node_config import DataNodeConfig from .job_config import JobConfig from .migration_config import MigrationConfig from .scenario_config import ScenarioConfig from .task_config import TaskConfig _inject_section( JobConfig, \"job_config\", JobConfig.default_config(), [(\"configure_job_executions\", JobConfig._configure)], add_to_unconflicted_sections=True, ) _inject_section( DataNodeConfig, \"data_nodes\", DataNodeConfig.default_config(), [ (\"configure_data_node\", DataNodeConfig._configure), (\"configure_data_node_from\", DataNodeConfig._configure_from), (\"set_default_data_node_configuration\", DataNodeConfig._set_default_configuration), (\"configure_csv_data_node\", DataNodeConfig._configure_csv), (\"configure_json_data_node\", DataNodeConfig._configure_json), (\"configure_parquet_data_node\", DataNodeConfig._configure_parquet), (\"configure_sql_table_data_node\", DataNodeConfig._configure_sql_table), (\"configure_sql_data_node\", DataNodeConfig._configure_sql), (\"configure_mongo_collection_data_node\", DataNodeConfig._configure_mongo_collection), (\"configure_in_memory_data_node\", DataNodeConfig._configure_in_memory), (\"configure_pickle_data_node\", DataNodeConfig._configure_pickle), (\"configure_excel_data_node\", DataNodeConfig._configure_excel), (\"configure_generic_data_node\", DataNodeConfig._configure_generic), ], ) _inject_section( TaskConfig, \"tasks\", TaskConfig.default_config(), [ (\"configure_task\", TaskConfig._configure), (\"set_default_task_configuration\", TaskConfig._set_default_configuration), ], ) _inject_section( ScenarioConfig, \"scenarios\", ScenarioConfig.default_config(), [ (\"configure_scenario\", ScenarioConfig._configure), (\"set_default_scenario_configuration\", ScenarioConfig._set_default_configuration), ], ) _inject_section( MigrationConfig, \"migration_functions\", MigrationConfig.default_config(), [(\"add_migration_function\", MigrationConfig._add_migration_function)], add_to_unconflicted_sections=True, ) _inject_section( CoreSection, \"core\", CoreSection.default_config(), [(\"configure_core\", CoreSection._configure)], add_to_unconflicted_sections=True, ) _Checker.add_checker(_ConfigIdChecker) _Checker.add_checker(_CoreSectionChecker) _Checker.add_checker(_DataNodeConfigChecker) _Checker.add_checker(_JobConfigChecker) # We don't need 
to add _MigrationConfigChecker because it is run only when the Core service is run. _Checker.add_checker(_TaskConfigChecker) _Checker.add_checker(_ScenarioConfigChecker) "} {"text": "import collections.abc from copy import deepcopy from typing import Any, Callable, Dict, Optional, Union from taipy.config._config import _Config from taipy.config.common._template_handler import _TemplateHandler as _tpl from taipy.config.config import Config from taipy.config.section import Section from taipy.config.unique_section import UniqueSection class MigrationConfig(UniqueSection): \"\"\" Configuration fields needed to register migration functions from an old version to newer one. Attributes: migration_fcts (Dict[str, Dict[str, Callable]]): A dictionary that maps the version that entities are migrated from to the migration functions. **properties (dict[str, Any]): A dictionary of additional properties. \"\"\" name = \"VERSION_MIGRATION\" _MIGRATION_FCTS_KEY = \"migration_fcts\" def __init__( self, migration_fcts: Dict[str, Dict[str, Callable]], **properties, ): self.migration_fcts = migration_fcts super().__init__(**properties) def __copy__(self): return MigrationConfig( deepcopy(self.migration_fcts), **deepcopy(self._properties), ) def _clean(self): self.migration_fcts.clear() self._properties.clear() def __getattr__(self, item: str) -> Optional[Any]: return _tpl._replace_templates(self._properties.get(item)) # type: ignore @classmethod def default_config(cls): return MigrationConfig({}) def _to_dict(self): return { self._MIGRATION_FCTS_KEY: self.migration_fcts, **self._properties, } @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config]): return MigrationConfig(**as_dict) def _update(self, as_dict, default_section=None): def deep_update(d, u): for k, v in u.items(): if isinstance(v, collections.abc.Mapping): d[k] = deep_update(d.get(k, {}), v) else: d[k] = v return d migration_fcts = as_dict.pop(self._MIGRATION_FCTS_KEY) deep_update(self.migration_fcts, migration_fcts) self._properties.update(as_dict) @staticmethod def _add_migration_function( target_version: str, config: Union[Section, str], migration_fct: Callable, **properties, ): \"\"\"Add a migration function for a Configuration to migrate entities to the target version. Parameters: target_version (str): The production version that entities are migrated to. config (Union[Section, str]): The configuration or the `id` of the config that needs to migrate. migration_fct (Callable): Migration function that takes an entity as input and returns a new entity that is compatible with the target production version. **properties (Dict[str, Any]): A keyworded variable length list of additional arguments. Returns: `MigrationConfig^`: The Migration configuration. \"\"\" config_id = config if isinstance(config, str) else config.id migration_fcts = {target_version: {config_id: migration_fct}} section = MigrationConfig( migration_fcts, **properties, ) Config._register(section) return Config.unique_sections[MigrationConfig.name] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. from copy import copy from typing import Any, Dict, Optional, Union from taipy.config import Config from taipy.config._config import _Config from taipy.config.common._template_handler import _TemplateHandler as _tpl from taipy.config.unique_section import UniqueSection from ..exceptions.exceptions import ModeNotAvailable class JobConfig(UniqueSection): \"\"\" Configuration fields related to the jobs' executions. Parameters: mode (str): The Taipy operating mode. By default, the \"development\" mode is set for testing and debugging the executions of jobs. A \"standalone\" mode is also available. **properties (dict[str, any]): A dictionary of additional properties. \"\"\" name = \"JOB\" _MODE_KEY = \"mode\" _STANDALONE_MODE = \"standalone\" _DEVELOPMENT_MODE = \"development\" _DEFAULT_MODE = _DEVELOPMENT_MODE _MODES = [_STANDALONE_MODE, _DEVELOPMENT_MODE] def __init__(self, mode: Optional[str] = None, **properties): self.mode = mode or self._DEFAULT_MODE self._config = self._create_config(self.mode, **properties) super().__init__(**properties) def __copy__(self): return JobConfig(self.mode, **copy(self._properties)) def __getattr__(self, key: str) -> Optional[Any]: return self._config.get(key, None) @classmethod def default_config(cls): return JobConfig(cls._DEFAULT_MODE) def _clean(self): self.mode = self._DEFAULT_MODE self._config = self._create_config(self.mode) def _to_dict(self): as_dict = {} if self.mode is not None: as_dict[self._MODE_KEY] = self.mode as_dict.update(self._config) return as_dict @classmethod def _from_dict(cls, config_as_dict: Dict[str, Any], id=None, config: Optional[_Config] = None): mode = config_as_dict.pop(cls._MODE_KEY, None) job_config = JobConfig(mode, **config_as_dict) return job_config def _update(self, as_dict: Dict[str, Any], default_section=None): mode = _tpl._replace_templates(as_dict.pop(self._MODE_KEY, self.mode)) if self.mode != mode: self.mode = mode self._config = self._create_config(self.mode, **as_dict) if self._config is not None: self._update_config(as_dict) @staticmethod def _configure( mode: Optional[str] = None, max_nb_of_workers: Optional[Union[int, str]] = None, **properties ) -> \"JobConfig\": \"\"\"Configure job execution. Parameters: mode (Optional[str]): The job execution mode. Possible values are: *\"standalone\"* or *\"development\"* (the default value). max_nb_of_workers (Optional[int, str]): Parameter used only in *\"standalone\"* mode. This indicates the maximum number of jobs able to run in parallel.
The default value is 1.
A string can be provided to dynamically set the value using an environment variable. The string must follow the pattern: `ENV[<env_var>]` where `<env_var>` is the name of an environment variable. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new job execution configuration. \"\"\" section = JobConfig(mode, max_nb_of_workers=max_nb_of_workers, **properties) Config._register(section) return Config.unique_sections[JobConfig.name] def _update_config(self, config_as_dict: Dict[str, Any]): for k, v in config_as_dict.items(): type_to_convert = type(self.get_default_config(self.mode).get(k, None)) or str value = _tpl._replace_templates(v, type_to_convert) if value is not None: self._config[k] = value @property def is_standalone(self) -> bool: \"\"\"True if the config is set to standalone mode\"\"\" return self.mode == self._STANDALONE_MODE @property def is_development(self) -> bool: \"\"\"True if the config is set to development mode\"\"\" return self.mode == self._DEVELOPMENT_MODE @classmethod def get_default_config(cls, mode: str) -> Dict[str, Any]: # Compare against the mode argument: is_standalone/is_development are instance properties and would always be truthy when accessed on the class itself. if mode == cls._STANDALONE_MODE: return {\"max_nb_of_workers\": 1} if mode == cls._DEVELOPMENT_MODE: return {} raise ModeNotAvailable(mode) @classmethod def _create_config(cls, mode, **properties): return {**cls.get_default_config(mode), **properties} "} {"text": "from typing import Set from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import IssueCollector from ..core_section import CoreSection class _CoreSectionChecker(_ConfigChecker): _ACCEPTED_REPOSITORY_TYPES: Set[str] = {\"filesystem\", \"sql\"} def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: if core_section := self._config._unique_sections.get(CoreSection.name): self._check_repository_type(core_section) return self._collector def _check_repository_type(self, core_section: CoreSection): value = core_section.repository_type if value not in self._ACCEPTED_REPOSITORY_TYPES: self._warning( core_section._REPOSITORY_TYPE_KEY, value, f'Value \"{value}\" for field {core_section._REPOSITORY_TYPE_KEY} of the CoreSection is not supported. ' f'Default value \"filesystem\" is applied.', ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
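For reference, here is a short sketch of configuring job execution through the `configure_job_executions` entry point registered earlier (the values are illustrative):

```python
from taipy import Config

# Run jobs in subprocesses, at most two at a time.
Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)

# Or read the worker count from an environment variable at runtime.
Config.configure_job_executions(mode="standalone", max_nb_of_workers="ENV[MAX_WORKERS]")
```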
"} {"text": "from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import IssueCollector from ..._version._version_manager_factory import _VersionManagerFactory from ..migration_config import MigrationConfig class _MigrationConfigChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: if migration_config := self._config._unique_sections.get(MigrationConfig.name): self._check_if_entity_property_key_used_is_predefined(migration_config) migration_fcts = migration_config.migration_fcts for target_version, migration_functions in migration_config.migration_fcts.items(): for config_id, migration_function in migration_functions.items(): self._check_callable(target_version, config_id, migration_function) self._check_valid_production_version(migration_fcts) self._check_migration_from_productions_to_productions_exist(migration_fcts) return self._collector def _check_callable(self, target_version, config_id, migration_function): if not callable(migration_function): self._error( MigrationConfig._MIGRATION_FCTS_KEY, migration_function, f\"The migration function of config `{config_id}` from version {target_version}\" f\" must be populated with Callable value.\", ) def _check_valid_production_version(self, migration_fcts): for target_version in migration_fcts.keys(): if target_version not in _VersionManagerFactory._build_manager()._get_production_versions(): self._error( MigrationConfig._MIGRATION_FCTS_KEY, target_version, \"The target version for a migration function must be a production version.\", ) def _check_migration_from_productions_to_productions_exist(self, migration_fcts): production_versions = _VersionManagerFactory._build_manager()._get_production_versions() for source_version, target_version in zip(production_versions[:-1], production_versions[1:]): if not migration_fcts.get(target_version): self._info( \"target_version\", None, f'There is no migration function from production version \"{source_version}\"' f' to version \"{target_version}\".', ) "} {"text": "from typing import Dict from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import IssueCollector from ..data_node_config import DataNodeConfig from ..job_config import JobConfig class _JobConfigChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: if job_config := self._config._unique_sections.get(JobConfig.name): data_node_configs = self._config._sections[DataNodeConfig.name] self._check_multiprocess_mode(job_config, data_node_configs) return self._collector def _check_multiprocess_mode(self, job_config: JobConfig, data_node_configs: Dict[str, DataNodeConfig]): if job_config.is_standalone: for cfg_id, data_node_config in data_node_configs.items(): if data_node_config.storage_type == DataNodeConfig._STORAGE_TYPE_VALUE_IN_MEMORY: self._error( DataNodeConfig._STORAGE_TYPE_KEY, data_node_config.storage_type, f\"DataNode `{cfg_id}`: In-memory storage type can ONLY be used in \" f\"{JobConfig._DEVELOPMENT_MODE} mode.\", ) "} {"text": "from typing import Dict, List from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import 
IssueCollector class _ConfigIdChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: existing_config_ids: Dict[str, List[str]] = dict() for entity_type, section_dictionary in self._config._sections.items(): for config_id in section_dictionary.keys(): if config_id in existing_config_ids.keys(): existing_config_ids[config_id].append(entity_type) else: existing_config_ids[config_id] = [entity_type] for config_id, entity_types in existing_config_ids.items(): if config_id != \"default\" and len(entity_types) > 1: self._error( \"config_id\", config_id, f\"`{config_id}` is used as the config_id of multiple configurations {str(entity_types)}\", ) "} {"text": "from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import IssueCollector from ..data_node_config import DataNodeConfig from ..task_config import TaskConfig class _TaskConfigChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: task_configs = self._config._sections[TaskConfig.name] for task_config_id, task_config in task_configs.items(): if task_config_id != _Config.DEFAULT_KEY: self._check_existing_config_id(task_config) self._check_if_entity_property_key_used_is_predefined(task_config) self._check_existing_function(task_config_id, task_config) self._check_inputs(task_config_id, task_config) self._check_outputs(task_config_id, task_config) return self._collector def _check_inputs(self, task_config_id: str, task_config: TaskConfig): self._check_children( TaskConfig, task_config_id, task_config._INPUT_KEY, task_config.input_configs, DataNodeConfig ) def _check_outputs(self, task_config_id: str, task_config: TaskConfig): self._check_children( TaskConfig, task_config_id, task_config._OUTPUT_KEY, task_config.output_configs, DataNodeConfig ) def _check_existing_function(self, task_config_id: str, task_config: TaskConfig): if not task_config.function: self._error( task_config._FUNCTION, task_config.function, f\"{task_config._FUNCTION} field of TaskConfig `{task_config_id}` is empty.\", ) else: if not callable(task_config.function): self._error( task_config._FUNCTION, task_config.function, f\"{task_config._FUNCTION} field of TaskConfig `{task_config_id}` must be\" f\" populated with Callable value.\", ) "} {"text": "from dataclasses import dataclass, field from datetime import datetime from functools import singledispatch from typing import Any, Optional from ..common._repr_enum import _ReprEnum from ..exceptions.exceptions import InvalidEventAttributeName, InvalidEventOperation class EventOperation(_ReprEnum): \"\"\"Enum representing a type of operation performed on a Core entity. `EventOperation` is used as an attribute of the `Event^` object to describe the operation performed on an entity.
The possible operations are `CREATION`, `UPDATE`, `DELETION`, or `SUBMISSION`. \"\"\" CREATION = 1 UPDATE = 2 DELETION = 3 SUBMISSION = 4 class EventEntityType(_ReprEnum): \"\"\"Enum representing an entity type. `EventEntityType` is used as an attribute of the `Event^` object to describe an entity that was changed.
The possible entity types are `CYCLE`, `SCENARIO`, `SEQUENCE`, `TASK`, `DATA_NODE`, `JOB`, or `SUBMISSION`. \"\"\" CYCLE = 1 SCENARIO = 2 SEQUENCE = 3 TASK = 4 DATA_NODE = 5 JOB = 6 SUBMISSION = 7 _NO_ATTRIBUTE_NAME_OPERATIONS = set([EventOperation.CREATION, EventOperation.DELETION, EventOperation.SUBMISSION]) _UNSUBMITTABLE_ENTITY_TYPES = (EventEntityType.CYCLE, EventEntityType.DATA_NODE, EventEntityType.JOB) _ENTITY_TO_EVENT_ENTITY_TYPE = { \"scenario\": EventEntityType.SCENARIO, \"sequence\": EventEntityType.SEQUENCE, \"task\": EventEntityType.TASK, \"data\": EventEntityType.DATA_NODE, \"job\": EventEntityType.JOB, \"cycle\": EventEntityType.CYCLE, \"submission\": EventEntityType.SUBMISSION, } @dataclass(frozen=True) class Event: \"\"\"Event object used to notify any change in the Core service. An event holds the necessary attributes to identify the change. Attributes: entity_type (EventEntityType^): Type of the entity that was changed (`DataNode^`, `Scenario^`, `Cycle^`, etc.). entity_id (Optional[str]): Unique identifier of the entity that was changed. operation (EventOperation^): Enum describing the operation (among `CREATION`, `UPDATE`, `DELETION`, and `SUBMISSION`) that was performed on the entity. attribute_name (Optional[str]): Name of the entity's attribute that changed. Only relevant for `UPDATE` operations. attribute_value (Optional[Any]): New value of the entity's attribute that changed. Only relevant for `UPDATE` operations. metadata (dict): A dict of additional metadata about the source of this event. creation_date (datetime): Date and time of the event creation. \"\"\" entity_type: EventEntityType operation: EventOperation entity_id: Optional[str] = None attribute_name: Optional[str] = None attribute_value: Optional[Any] = None metadata: dict = field(default_factory=dict) creation_date: datetime = field(init=False) def __post_init__(self): # Creation date super().__setattr__(\"creation_date\", datetime.now()) # Check operation: if self.entity_type in _UNSUBMITTABLE_ENTITY_TYPES and self.operation == EventOperation.SUBMISSION: raise InvalidEventOperation # Check attribute name: if self.operation in _NO_ATTRIBUTE_NAME_OPERATIONS and self.attribute_name is not None: raise InvalidEventAttributeName @singledispatch def _make_event( entity: Any, operation: EventOperation, /, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ) -> Event: \"\"\"Helper function to make an event for this entity with the given `EventOperation^` type. In case of `EventOperation.UPDATE^` events, an attribute name and value must be given. Parameters: entity (Any): The entity object to generate an event for. operation (EventOperation^): The operation of the event. The possible values are:
- CREATION
- UPDATE
- DELETION
- SUBMISSION
attribute_name (Optional[str]): The name of the updated attribute for a `EventOperation.UPDATE`. This argument is always given in case of an UPDATE. attribute_value (Optional[Any]): The value of the updated attribute for a `EventOperation.UPDATE`. This argument is always given in case of an UPDATE. **kwargs (dict[str, any]): Any extra information that would be passed to the event metadata. Note: you should pass only simple types (str, int, float) as values.\"\"\" raise Exception(f\"Unexpected entity type: {type(entity)}\") "} {"text": "\"\"\" Package for notifications about changes on `Core^` service entities. The Core service generates `Event^` objects to track changes on entities. These events are then relayed to a `Notifier^`, which handles the dispatch to consumers interested in specific event topics. To subscribe, a consumer needs to invoke the `Notifier.register()^` method. This call will yield a `RegistrationId^` and a dedicated event queue for receiving notifications. To handle notifications, an event consumer (e.g., the `CoreEventConsumerBase^` object) must be instantiated with an associated event queue. \"\"\" from ._registration import _Registration from ._topic import _Topic from .core_event_consumer import CoreEventConsumerBase from .event import _ENTITY_TO_EVENT_ENTITY_TYPE, Event, EventEntityType, EventOperation, _make_event from .notifier import Notifier, _publish_event from .registration_id import RegistrationId "} {"text": "from typing import NewType RegistrationId = NewType(\"RegistrationId\", str) RegistrationId.__doc__ = \"\"\"Registration identifier. It can be used to instantiate a `CoreEventConsumerBase^`.\"\"\" "} {"text": "from queue import SimpleQueue from typing import Optional from uuid import uuid4 from ._topic import _Topic from .event import EventEntityType, EventOperation from .registration_id import RegistrationId class _Registration: _ID_PREFIX = \"REGISTRATION\" __SEPARATOR = \"_\" def __init__( self, entity_type: Optional[EventEntityType] = None, entity_id: Optional[str] = None, operation: Optional[EventOperation] = None, attribute_name: Optional[str] = None, ): self.registration_id: str = self._new_id() self.topic: _Topic = _Topic(entity_type, entity_id, operation, attribute_name) self.queue: SimpleQueue = SimpleQueue() @staticmethod def _new_id() -> RegistrationId: \"\"\"Generate a unique registration identifier.\"\"\" return RegistrationId(_Registration.__SEPARATOR.join([_Registration._ID_PREFIX, str(uuid4())])) def __hash__(self) -> int: return hash(self.registration_id) "} {"text": "from typing import Optional from ..exceptions.exceptions import InvalidEventOperation from .event import _UNSUBMITTABLE_ENTITY_TYPES, EventEntityType, EventOperation class _Topic: def __init__( self, entity_type: Optional[EventEntityType] = None, entity_id: Optional[str] = None, operation: Optional[EventOperation] = None, attribute_name: Optional[str] = None, ): self.entity_type = entity_type self.entity_id = entity_id self.operation = self.__preprocess_operation(operation, self.entity_type) self.attribute_name = self.__preprocess_attribute_name(attribute_name, self.operation) @classmethod def __preprocess_attribute_name( cls, attribute_name: Optional[str] = None, operation: Optional[EventOperation] = None ) -> Optional[str]: # if operation in _NO_ATTRIBUTE_NAME_OPERATIONS and attribute_name is not None: # raise InvalidEventAttributeName return attribute_name @classmethod def __preprocess_operation( cls, operation: Optional[EventOperation] = None, entity_type:
Optional[EventEntityType] = None ) -> Optional[EventOperation]: if ( entity_type and operation and entity_type in _UNSUBMITTABLE_ENTITY_TYPES and operation == EventOperation.SUBMISSION ): raise InvalidEventOperation return operation def __hash__(self): return hash((self.entity_type, self.entity_id, self.operation, self.attribute_name)) def __eq__(self, __value) -> bool: if ( self.entity_type == __value.entity_type and self.entity_id == __value.entity_id and self.operation == __value.operation and self.attribute_name == __value.attribute_name ): return True return False "} {"text": "import abc import threading from queue import Empty, SimpleQueue from .event import Event class CoreEventConsumerBase(threading.Thread): \"\"\"Abstract base class for implementing a Core event consumer. This class provides a framework for consuming events from a queue in a separate thread. It should be subclassed, and the `process_event` method should be implemented to define the custom logic for handling incoming events. Example usage: ```python class MyEventConsumer(CoreEventConsumerBase): def process_event(self, event: Event): # Custom event processing logic here print(f\"Received event created at : {event.creation_date}\") pass consumer = MyEventConsumer(\"consumer_1\", event_queue) consumer.start() # ... consumer.stop() ``` Subclasses should implement the `process_event` method to define their specific event handling behavior. Attributes: queue (SimpleQueue): The queue from which events will be consumed. \"\"\" def __init__(self, registration_id: str, queue: SimpleQueue): \"\"\"Initialize a CoreEventConsumerBase instance. Parameters: registration_id (str): A unique identifier of the registration. You can get a registration id invoking `Notifier.register()^` method. queue (SimpleQueue): The queue from which events will be consumed. You can get a queue invoking `Notifier.register()^` method. \"\"\" threading.Thread.__init__(self, name=f\"Thread-Taipy-Core-Consumer-{registration_id}\") self.daemon = True self.queue = queue self.__STOP_FLAG = False self._TIMEOUT = 0.1 def start(self): \"\"\"Start the event consumer thread.\"\"\" self.__STOP_FLAG = False threading.Thread.start(self) def stop(self): \"\"\"Stop the event consumer thread.\"\"\" self.__STOP_FLAG = True def run(self): while not self.__STOP_FLAG: try: event: Event = self.queue.get(block=True, timeout=self._TIMEOUT) self.process_event(event) except Empty: pass @abc.abstractmethod def process_event(self, event: Event): \"\"\"This method should be overridden in subclasses to define how events are processed.\"\"\" raise NotImplementedError "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._filesystem_repository import _FileSystemRepository from ._cycle_converter import _CycleConverter from ._cycle_model import _CycleModel class _CycleFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_CycleModel, converter=_CycleConverter, dir_name=\"cycles\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._sql_repository import _SQLRepository from ._cycle_converter import _CycleConverter from ._cycle_model import _CycleModel class _CycleSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_CycleModel, converter=_CycleConverter) "} {"text": "from typing import NewType CycleId = NewType(\"CycleId\", str) CycleId.__doc__ = \"\"\"Type that holds a `Cycle^` identifier.\"\"\" "} {"text": "from dataclasses import dataclass from typing import Any, Dict from sqlalchemy import JSON, Column, Enum, String, Table from taipy.config.common.frequency import Frequency from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry from .cycle_id import CycleId @mapper_registry.mapped @dataclass class _CycleModel(_BaseModel): __table__ = Table( \"cycle\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"name\", String), Column(\"frequency\", Enum(Frequency)), Column(\"properties\", JSON), Column(\"creation_date\", String), Column(\"start_date\", String), Column(\"end_date\", String), ) id: CycleId name: str frequency: Frequency properties: Dict[str, Any] creation_date: str start_date: str end_date: str @staticmethod def from_dict(data: Dict[str, Any]): return _CycleModel( id=data[\"id\"], name=data[\"name\"], frequency=Frequency._from_repr(data[\"frequency\"]), properties=_BaseModel._deserialize_attribute(data[\"properties\"]), creation_date=data[\"creation_date\"], start_date=data[\"start_date\"], end_date=data[\"end_date\"], ) def to_list(self): return [ self.id, self.name, repr(self.frequency), _BaseModel._serialize_attribute(self.properties), self.creation_date, self.start_date, self.end_date, ] "} {"text": "from datetime import datetime from .._repository._abstract_converter import _AbstractConverter from ..cycle._cycle_model import _CycleModel from ..cycle.cycle import Cycle class _CycleConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, cycle: Cycle) -> _CycleModel: return _CycleModel( id=cycle.id, name=cycle._name, frequency=cycle._frequency, creation_date=cycle._creation_date.isoformat(), start_date=cycle._start_date.isoformat(), end_date=cycle._end_date.isoformat(), properties=cycle._properties.data, ) 
@classmethod def _model_to_entity(cls, model: _CycleModel) -> Cycle: return Cycle( id=model.id, name=model.name, frequency=model.frequency, properties=model.properties, creation_date=datetime.fromisoformat(model.creation_date), start_date=datetime.fromisoformat(model.start_date), end_date=datetime.fromisoformat(model.end_date), ) "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ..cycle._cycle_manager import _CycleManager from ._cycle_fs_repository import _CycleFSRepository from ._cycle_sql_repository import _CycleSQLRepository class _CycleManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _CycleFSRepository, \"sql\": _CycleSQLRepository} @classmethod def _build_manager(cls) -> Type[_CycleManager]: # type: ignore if cls._using_enterprise(): cycle_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".cycle._cycle_manager\", \"_CycleManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".cycle._cycle_manager_factory\", \"_CycleManagerFactory\" )._build_repository # type: ignore else: cycle_manager = _CycleManager build_repository = cls._build_repository cycle_manager._repository = build_repository() # type: ignore return cycle_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "from abc import abstractmethod from importlib import util from typing import Type from taipy.config import Config from ._manager import _Manager class _ManagerFactory: _TAIPY_ENTERPRISE_MODULE = \"taipy.enterprise\" _TAIPY_ENTERPRISE_CORE_MODULE = _TAIPY_ENTERPRISE_MODULE + \".core\" @classmethod @abstractmethod def _build_manager(cls) -> Type[_Manager]: # type: ignore raise NotImplementedError @classmethod def _build_repository(cls): raise NotImplementedError @classmethod def _using_enterprise(cls) -> bool: return util.find_spec(cls._TAIPY_ENTERPRISE_MODULE) is not None @staticmethod def _get_repository_with_repo_map(repository_map: dict): return repository_map.get(Config.core.repository_type, repository_map.get(\"default\")) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pathlib from importlib import metadata from typing import Dict, Generic, Iterable, List, Optional, TypeVar, Union from taipy.logger._taipy_logger import _TaipyLogger from .._entity._entity_ids import _EntityIds from .._repository._abstract_repository import _AbstractRepository from ..exceptions.exceptions import ModelNotFound from ..notification import Event, EventOperation, Notifier EntityType = TypeVar(\"EntityType\") class _Manager(Generic[EntityType]): _repository: _AbstractRepository _logger = _TaipyLogger._get_logger() _ENTITY_NAME: str = \"Entity\" @classmethod def _delete_all(cls): \"\"\" Deletes all entities. 
\"\"\" cls._repository._delete_all() if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): Notifier.publish( Event( cls._EVENT_ENTITY_TYPE, EventOperation.DELETION, metadata={\"delete_all\": True}, ) ) @classmethod def _delete_many(cls, ids: Iterable): \"\"\" Deletes entities by a list of ids. \"\"\" cls._repository._delete_many(ids) if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): for entity_id in ids: Notifier.publish( Event( cls._EVENT_ENTITY_TYPE, # type: ignore EventOperation.DELETION, entity_id=entity_id, metadata={\"delete_all\": True}, ) ) @classmethod def _delete_by_version(cls, version_number: str): \"\"\" Deletes entities by version number. \"\"\" cls._repository._delete_by(attribute=\"version\", value=version_number) if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): Notifier.publish( Event( cls._EVENT_ENTITY_TYPE, # type: ignore EventOperation.DELETION, metadata={\"delete_by_version\": version_number}, ) ) @classmethod def _delete(cls, id): \"\"\" Deletes an entity by id. \"\"\" cls._repository._delete(id) if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): Notifier.publish( Event( cls._EVENT_ENTITY_TYPE, EventOperation.DELETION, entity_id=id, ) ) @classmethod def _set(cls, entity: EntityType): \"\"\" Save or update an entity. \"\"\" cls._repository._save(entity) @classmethod def _get_all(cls, version_number: Optional[str] = \"all\") -> List[EntityType]: \"\"\" Returns all entities. \"\"\" filters: List[Dict] = [] return cls._repository._load_all(filters) @classmethod def _get_all_by(cls, filters: Optional[List[Dict]] = None) -> List[EntityType]: \"\"\" Returns all entities based on a criteria. \"\"\" if not filters: filters = [] return cls._repository._load_all(filters) @classmethod def _get(cls, entity: Union[str, EntityType], default=None) -> EntityType: \"\"\" Returns an entity by id or reference. \"\"\" entity_id = entity if isinstance(entity, str) else entity.id # type: ignore try: return cls._repository._load(entity_id) except ModelNotFound: cls._logger.error(f\"{cls._ENTITY_NAME} not found: {entity_id}\") return default @classmethod def _exists(cls, entity_id: str) -> bool: \"\"\" Returns True if the entity id exists. \"\"\" return cls._repository._exists(entity_id) @classmethod def _delete_entities_of_multiple_types(cls, _entity_ids: _EntityIds): \"\"\" Deletes entities of multiple types. \"\"\" from ..cycle._cycle_manager_factory import _CycleManagerFactory from ..data._data_manager_factory import _DataManagerFactory from ..job._job_manager_factory import _JobManagerFactory from ..scenario._scenario_manager_factory import _ScenarioManagerFactory from ..sequence._sequence_manager_factory import _SequenceManagerFactory from ..submission._submission_manager_factory import _SubmissionManagerFactory from ..task._task_manager_factory import _TaskManagerFactory _CycleManagerFactory._build_manager()._delete_many(_entity_ids.cycle_ids) _SequenceManagerFactory._build_manager()._delete_many(_entity_ids.sequence_ids) _ScenarioManagerFactory._build_manager()._delete_many(_entity_ids.scenario_ids) _TaskManagerFactory._build_manager()._delete_many(_entity_ids.task_ids) _JobManagerFactory._build_manager()._delete_many(_entity_ids.job_ids) _DataManagerFactory._build_manager()._delete_many(_entity_ids.data_node_ids) _SubmissionManagerFactory._build_manager()._delete_many(_entity_ids.submission_ids) @classmethod def _export(cls, id: str, folder_path: Union[str, pathlib.Path]): \"\"\" Export an entity. 
\"\"\" return cls._repository._export(id, folder_path) @classmethod def _is_editable(cls, entity: Union[EntityType, _EntityIds]) -> bool: return True @classmethod def _is_readable(cls, entity: Union[EntityType, _EntityIds]) -> bool: return True "} {"text": "from datetime import datetime from typing import Any from taipy.config import Config from taipy.config._config import _Config from .._entity._entity import _Entity class _Version(_Entity): def __init__(self, id: str, config: Any) -> None: self.id: str = id self.config: _Config = config self.creation_date: datetime = datetime.now() def __eq__(self, other): return self.id == other.id and self.__is_config_eq(other) def __is_config_eq(self, other): return Config._serializer._str(self.config) == Config._serializer._str(other.config) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from typing import Dict, List from .._version._version_manager_factory import _VersionManagerFactory class _VersionMixin: _version_manager = _VersionManagerFactory._build_manager() @classmethod def __fetch_version_number(cls, version_number): version_number = _VersionManagerFactory._build_manager()._replace_version_number(version_number) if not isinstance(version_number, List): version_number = [version_number] if version_number else [] return version_number @classmethod def _build_filters_with_version(cls, version_number) -> List[Dict]: filters = [] if versions := cls.__fetch_version_number(version_number): filters = [{\"version\": version} for version in versions] return filters @classmethod def _get_latest_version(cls): return cls._version_manager._get_latest_version() "} {"text": "from .._manager._manager_factory import _ManagerFactory from ..common import _utils from ._version_fs_repository import _VersionFSRepository from ._version_manager import _VersionManager from ._version_sql_repository import _VersionSQLRepository class _VersionManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _VersionFSRepository, \"sql\": _VersionSQLRepository} @classmethod def _build_manager(cls) -> _VersionManager: # type: ignore if cls._using_enterprise(): version_manager = _utils._load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \"._version._version_manager\", \"_VersionManager\" ) # type: ignore build_repository = _utils._load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \"._version._version_manager_factory\", \"_VersionManagerFactory\" )._build_repository # type: ignore else: version_manager = _VersionManager build_repository = cls._build_repository version_manager._repository = build_repository() # type: ignore return version_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "from abc import ABC, abstractmethod class _VersionRepositoryInterface(ABC): _LATEST_VERSION_KEY = \"latest_version\" _DEVELOPMENT_VERSION_KEY = \"development_version\" _PRODUCTION_VERSION_KEY = \"production_version\" @abstractmethod def _set_latest_version(self, version_number): raise 
NotImplementedError @abstractmethod def _get_latest_version(self): raise NotImplementedError @abstractmethod def _set_development_version(self, version_number): raise NotImplementedError @abstractmethod def _get_development_version(self): raise NotImplementedError @abstractmethod def _set_production_version(self, version_number): raise NotImplementedError @abstractmethod def _get_production_versions(self): raise NotImplementedError @abstractmethod def _delete_production_version(self, version_number): raise NotImplementedError "} {"text": "import json from typing import List from taipy.logger._taipy_logger import _TaipyLogger from .._repository._filesystem_repository import _FileSystemRepository from ..exceptions.exceptions import VersionIsNotProductionVersion from ._version_converter import _VersionConverter from ._version_model import _VersionModel from ._version_repository_interface import _VersionRepositoryInterface class _VersionFSRepository(_FileSystemRepository, _VersionRepositoryInterface): def __init__(self): super().__init__(model_type=_VersionModel, converter=_VersionConverter, dir_name=\"version\") @property def _version_file_path(self): return super()._storage_folder / \"version.json\" def _delete_all(self): super()._delete_all() if self._version_file_path.exists(): self._version_file_path.unlink() def _set_latest_version(self, version_number): if self._version_file_path.exists(): with open(self._version_file_path, \"r\") as f: file_content = json.load(f) file_content[self._LATEST_VERSION_KEY] = version_number else: self.dir_path.mkdir(parents=True, exist_ok=True) file_content = { self._LATEST_VERSION_KEY: version_number, self._DEVELOPMENT_VERSION_KEY: \"\", self._PRODUCTION_VERSION_KEY: [], } self._version_file_path.write_text( json.dumps( file_content, ensure_ascii=False, indent=0, ) ) def _get_latest_version(self) -> str: with open(self._version_file_path, \"r\") as f: file_content = json.load(f) return file_content[self._LATEST_VERSION_KEY] def _set_development_version(self, version_number): if self._version_file_path.exists(): with open(self._version_file_path, \"r\") as f: file_content = json.load(f) file_content[self._DEVELOPMENT_VERSION_KEY] = version_number file_content[self._LATEST_VERSION_KEY] = version_number else: self.dir_path.mkdir(parents=True, exist_ok=True) file_content = { self._LATEST_VERSION_KEY: version_number, self._DEVELOPMENT_VERSION_KEY: version_number, self._PRODUCTION_VERSION_KEY: [], } self._version_file_path.write_text( json.dumps( file_content, ensure_ascii=False, indent=0, ) ) def _get_development_version(self) -> str: with open(self._version_file_path, \"r\") as f: file_content = json.load(f) return file_content[self._DEVELOPMENT_VERSION_KEY] def _set_production_version(self, version_number): if self._version_file_path.exists(): with open(self._version_file_path, \"r\") as f: file_content = json.load(f) file_content[self._LATEST_VERSION_KEY] = version_number if version_number not in file_content[self._PRODUCTION_VERSION_KEY]: file_content[self._PRODUCTION_VERSION_KEY].append(version_number) else: _TaipyLogger._get_logger().info(f\"Version {version_number} is already a production version.\") else: self.dir_path.mkdir(parents=True, exist_ok=True) file_content = { self._LATEST_VERSION_KEY: version_number, self._DEVELOPMENT_VERSION_KEY: \"\", self._PRODUCTION_VERSION_KEY: [version_number], } self._version_file_path.write_text( json.dumps( file_content, ensure_ascii=False, indent=0, ) ) def _get_production_versions(self) -> List[str]: with 
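{"text": "# Hedged sketch of the JSON layout _VersionFSRepository maintains in version.json,
# plus a read-modify-write of the latest-version key as _set_latest_version does.
# The keys mirror the code above; the folder path here is an assumption.
import json
from pathlib import Path

version_file = Path(".taipy") / "version" / "version.json"
version_file.parent.mkdir(parents=True, exist_ok=True)

file_content = {
    "latest_version": "2.0",
    "development_version": "",
    "production_version": ["1.0", "2.0"],
}
version_file.write_text(json.dumps(file_content, ensure_ascii=False, indent=0))

# Promote another version to latest.
content = json.loads(version_file.read_text())
content["latest_version"] = "3.0"
version_file.write_text(json.dumps(content, ensure_ascii=False, indent=0))
print(json.loads(version_file.read_text())["latest_version"])  # 3.0
"}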
open(self._version_file_path, \"r\") as f: file_content = json.load(f) return file_content[self._PRODUCTION_VERSION_KEY] def _delete_production_version(self, version_number): try: with open(self._version_file_path, \"r\") as f: file_content = json.load(f) if version_number not in file_content[self._PRODUCTION_VERSION_KEY]: raise VersionIsNotProductionVersion(f\"Version '{version_number}' is not a production version.\") file_content[self._PRODUCTION_VERSION_KEY].remove(version_number) self._version_file_path.write_text( json.dumps( file_content, ensure_ascii=False, indent=0, ) ) except FileNotFoundError: raise VersionIsNotProductionVersion(f\"Version '{version_number}' is not a production version.\") "} {"text": "from sqlalchemy.dialects import sqlite from .._repository._sql_repository import _SQLRepository from ..exceptions.exceptions import ModelNotFound, VersionIsNotProductionVersion from ._version_converter import _VersionConverter from ._version_model import _VersionModel from ._version_repository_interface import _VersionRepositoryInterface class _VersionSQLRepository(_SQLRepository, _VersionRepositoryInterface): def __init__(self): super().__init__(model_type=_VersionModel, converter=_VersionConverter) def _set_latest_version(self, version_number): if old_latest := self.db.execute(str(self.table.select().filter_by(is_latest=True))).fetchone(): old_latest = self.model_type.from_dict(old_latest) old_latest.is_latest = False self._update_entry(old_latest) version = self.__get_by_id(version_number) version.is_latest = True self._update_entry(version) def _get_latest_version(self): if latest := self.db.execute( str(self.table.select().filter_by(is_latest=True).compile(dialect=sqlite.dialect())) ).fetchone(): return latest[\"id\"] raise ModelNotFound(self.model_type, \"\") def _set_development_version(self, version_number): if old_development := self.db.execute(str(self.table.select().filter_by(is_development=True))).fetchone(): old_development = self.model_type.from_dict(old_development) old_development.is_development = False self._update_entry(old_development) version = self.__get_by_id(version_number) version.is_development = True self._update_entry(version) self._set_latest_version(version_number) def _get_development_version(self): if development := self.db.execute(str(self.table.select().filter_by(is_development=True))).fetchone(): return development[\"id\"] raise ModelNotFound(self.model_type, \"\") def _set_production_version(self, version_number): version = self.__get_by_id(version_number) version.is_production = True self._update_entry(version) self._set_latest_version(version_number) def _get_production_versions(self): if productions := self.db.execute( str(self.table.select().filter_by(is_production=True).compile(dialect=sqlite.dialect())), ).fetchall(): return [p[\"id\"] for p in productions] return [] def _delete_production_version(self, version_number): version = self.__get_by_id(version_number) if not version or not version.is_production: raise VersionIsNotProductionVersion(f\"Version '{version_number}' is not a production version.\") version.is_production = False self._update_entry(version) def __get_by_id(self, version_id): query = str(self.table.select().filter_by(id=version_id).compile(dialect=sqlite.dialect())) entry = self.db.execute(query, [version_id]).fetchone() return self.model_type.from_dict(entry) if entry else None "} {"text": "from datetime import datetime from taipy.config import Config from .._repository._abstract_converter import _AbstractConverter from 
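{"text": "# Hedged illustration of how a SQLAlchemy select with filter_by is compiled to a
# SQLite dialect string before execution, as _VersionSQLRepository does above.
# Requires SQLAlchemy 1.4+; the toy table definition is an assumption.
from sqlalchemy import Boolean, Column, MetaData, String, Table
from sqlalchemy.dialects import sqlite

metadata = MetaData()
version_table = Table(
    "version",
    metadata,
    Column("id", String, primary_key=True),
    Column("is_latest", Boolean),
)

query = version_table.select().filter_by(is_latest=True).compile(dialect=sqlite.dialect())
print(str(query))  # SELECT ... FROM version WHERE version.is_latest = ?
"}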
.._version._version import _Version from .._version._version_model import _VersionModel class _VersionConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, version: _Version) -> _VersionModel: return _VersionModel( id=version.id, config=Config._to_json(version.config), creation_date=version.creation_date.isoformat() ) @classmethod def _model_to_entity(cls, model: _VersionModel) -> _Version: version = _Version(id=model.id, config=Config._from_json(model.config)) version.creation_date = datetime.fromisoformat(model.creation_date) return version "} {"text": "from dataclasses import dataclass from typing import Any, Dict from sqlalchemy import Boolean, Column, String, Table from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry @mapper_registry.mapped @dataclass class _VersionModel(_BaseModel): __table__ = Table( \"version\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"config\", String), # config is stored as a JSON string Column(\"creation_date\", String), Column(\"is_production\", Boolean), Column(\"is_development\", Boolean), Column(\"is_latest\", Boolean), ) id: str config: Dict[str, Any] creation_date: str @staticmethod def from_dict(data: Dict[str, Any]): model = _VersionModel( id=data[\"id\"], config=data[\"config\"], creation_date=data[\"creation_date\"], ) model.is_production = data.get(\"is_production\") # type: ignore model.is_development = data.get(\"is_development\") # type: ignore model.is_latest = data.get(\"is_latest\") # type: ignore return model def to_list(self): return [ self.id, self.config, self.creation_date, self.is_production, self.is_development, self.is_latest, ] "} {"text": "from typing import Callable, List from taipy.config.config import Config from .._entity._reload import _Reloader from ..config import MigrationConfig from ._version_manager_factory import _VersionManagerFactory def _migrate_entity(entity): if ( latest_version := _VersionManagerFactory._build_manager()._get_latest_version() ) in _VersionManagerFactory._build_manager()._get_production_versions(): if migration_fcts := __get_migration_fcts_to_latest(entity._version, entity.config_id): with _Reloader(): for fct in migration_fcts: entity = fct(entity) entity._version = latest_version return entity def __get_migration_fcts_to_latest(source_version: str, config_id: str) -> List[Callable]: migration_fcts_to_latest: List[Callable] = [] production_versions = _VersionManagerFactory._build_manager()._get_production_versions() try: start_index = production_versions.index(source_version) + 1 except ValueError: return migration_fcts_to_latest versions_to_migrate = production_versions[start_index:] for version in versions_to_migrate: migration_fct = Config.unique_sections[MigrationConfig.name].migration_fcts.get(version, {}).get(config_id) if migration_fct: migration_fcts_to_latest.append(migration_fct) return migration_fcts_to_latest "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
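{"text": "# Hedged, standalone sketch of the migration chain built by
# __get_migration_fcts_to_latest above: walk the ordered production versions that
# follow the entity's source version and apply each registered migration function.
# All names and the registry layout are illustrative, not taipy's actual registry.
from typing import Callable, Dict, List

production_versions: List[str] = ["1.0", "2.0", "3.0"]

def rename_old_field(entity: dict) -> dict:
    entity = dict(entity)
    entity["renamed"] = entity.pop("old")
    return entity

def add_unit(entity: dict) -> dict:
    return {**entity, "unit": "kg"}

migration_fcts: Dict[str, Dict[str, Callable]] = {
    "2.0": {"my_config": rename_old_field},
    "3.0": {"my_config": add_unit},
}

def migrate(entity: dict, source_version: str, config_id: str) -> dict:
    start = production_versions.index(source_version) + 1
    for version in production_versions[start:]:
        if fct := migration_fcts.get(version, {}).get(config_id):
            entity = fct(entity)
    return entity

print(migrate({"old": 1}, "1.0", "my_config"))  # {'renamed': 1, 'unit': 'kg'}
"}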
"} {"text": "import os import sys def _vt_codes_enabled_in_windows_registry(): \"\"\" Check the Windows Registry to see if VT code handling has been enabled by default, see https://superuser.com/a/1300251/447564. \"\"\" try: # winreg is only available on Windows. import winreg except ImportError: return False else: try: reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, \"Console\") reg_key_value, _ = winreg.QueryValueEx(reg_key, \"VirtualTerminalLevel\") except FileNotFoundError: return False else: return reg_key_value == 1 def _is_color_supported(): \"\"\" Return True if the running system's terminal supports color, and False otherwise. \"\"\" is_a_tty = hasattr(sys.stdout, \"isatty\") and sys.stdout.isatty() return is_a_tty and ( sys.platform != \"win32\" or \"ANSICON\" in os.environ or \"WT_SESSION\" in os.environ # Windows Terminal supports VT codes. or os.environ.get(\"TERM_PROGRAM\") == \"vscode\" # VSCode's built-in terminal supports colors. or _vt_codes_enabled_in_windows_registry() ) class _Bcolors: PURPLE = \"\\033[95m\" if _is_color_supported() else \"\" BLUE = \"\\033[94m\" if _is_color_supported() else \"\" CYAN = \"\\033[96m\" if _is_color_supported() else \"\" GREEN = \"\\033[92m\" if _is_color_supported() else \"\" BOLD = \"\\033[1m\" if _is_color_supported() else \"\" UNDERLINE = \"\\033[4m\" if _is_color_supported() else \"\" END = \"\\033[0m\" if _is_color_supported() else \"\" "} {"text": "from .exceptions import * "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from __future__ import annotations import abc from typing import Any, Callable, List, Optional, Set, Union import networkx as nx from ..common._listattributes import _ListAttributes from ..common._utils import _Subscriber from ..data.data_node import DataNode from ..job.job import Job from ..task.task import Task from ._dag import _DAG class Submittable: \"\"\"Instance of an entity that can be submitted for execution. A submittable holds functions that can be used to build the execution directed acyclic graph. Attributes: subscribers (List[Callable]): The list of callbacks to be called on `Job^`'s status change. \"\"\" def __init__(self, subscribers: Optional[List[_Subscriber]] = None): self._subscribers = _ListAttributes(self, subscribers or list()) @abc.abstractmethod def submit( self, callbacks: Optional[List[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ): raise NotImplementedError def get_inputs(self) -> Set[DataNode]: \"\"\"Return the set of input data nodes of the submittable entity. Returns: The set of input data nodes. \"\"\" dag = self._build_dag() return self.__get_inputs(dag) def __get_inputs(self, dag: nx.DiGraph) -> Set[DataNode]: return {node for node, degree in dict(dag.in_degree).items() if degree == 0 and isinstance(node, DataNode)} def get_outputs(self) -> Set[DataNode]: \"\"\"Return the set of output data nodes of the submittable entity. Returns: The set of output data nodes. 
\"\"\" dag = self._build_dag() return self.__get_outputs(dag) def __get_outputs(self, dag: nx.DiGraph) -> set[DataNode]: return {node for node, degree in dict(dag.out_degree).items() if degree == 0 and isinstance(node, DataNode)} def get_intermediate(self) -> Set[DataNode]: \"\"\"Return the set of intermediate data nodes of the submittable entity. Returns: The set of intermediate data nodes. \"\"\" dag = self._build_dag() all_data_nodes_in_dag = {node for node in dag.nodes if isinstance(node, DataNode)} return all_data_nodes_in_dag - self.__get_inputs(dag) - self.__get_outputs(dag) def is_ready_to_run(self) -> bool: \"\"\"Indicate if the entity is ready to be run. Returns: True if the given entity is ready to be run. False otherwise. \"\"\" return all(dn.is_ready_for_reading for dn in self.get_inputs()) def data_nodes_being_edited(self) -> Set[DataNode]: \"\"\"Return the set of data nodes of the submittable entity that are being edited. Returns: The set of data nodes that are being edited. \"\"\" dag = self._build_dag() return {node for node in dag.nodes if isinstance(node, DataNode) and node.edit_in_progress} @abc.abstractmethod def subscribe(self, callback: Callable[[Submittable, Job], None], params: Optional[List[Any]] = None): raise NotImplementedError @abc.abstractmethod def unsubscribe(self, callback: Callable[[Submittable, Job], None], params: Optional[List[Any]] = None): raise NotImplementedError @abc.abstractmethod def _get_set_of_tasks(self) -> Set[Task]: raise NotImplementedError def _get_dag(self) -> _DAG: return _DAG(self._build_dag()) def _build_dag(self) -> nx.DiGraph: graph = nx.DiGraph() tasks = self._get_set_of_tasks() for task in tasks: if has_input := task.input: for predecessor in task.input.values(): graph.add_edges_from([(predecessor, task)]) if has_output := task.output: for successor in task.output.values(): graph.add_edges_from([(task, successor)]) if not has_input and not has_output: graph.add_node(task) return graph def _get_sorted_tasks(self) -> List[List[Task]]: dag = self._build_dag() remove = [node for node, degree in dict(dag.in_degree).items() if degree == 0 and isinstance(node, DataNode)] dag.remove_nodes_from(remove) return list(nodes for nodes in nx.topological_generations(dag) if (Task in (type(node) for node in nodes))) def _add_subscriber(self, callback: Callable, params: Optional[List[Any]] = None): params = [] if params is None else params self._subscribers.append(_Subscriber(callback=callback, params=params)) def _remove_subscriber(self, callback: Callable, params: Optional[List[Any]] = None): if params is not None: self._subscribers.remove(_Subscriber(callback, params)) else: elem = [x for x in self._subscribers if x.callback == callback] if not elem: raise ValueError self._subscribers.remove(elem[0]) "} {"text": "from typing import List from .._entity._reload import _get_manager from ..notification import Notifier class _Entity: _MANAGER_NAME: str _is_in_context = False _in_context_attributes_changed_collector: List def __enter__(self): self._is_in_context = True self._in_context_attributes_changed_collector = list() return self def __exit__(self, exc_type, exc_value, exc_traceback): # If multiple entities is in context, the last to enter will be the first to exit self._is_in_context = False if hasattr(self, \"_properties\"): for to_delete_key in self._properties._pending_deletions: self._properties.data.pop(to_delete_key, None) self._properties.data.update(self._properties._pending_changes) _get_manager(self._MANAGER_NAME)._set(self) for event 
in self._in_context_attributes_changed_collector: Notifier.publish(event) _get_manager(self._MANAGER_NAME)._set(self) "} {"text": "from collections import UserDict from ..notification import _ENTITY_TO_EVENT_ENTITY_TYPE, EventOperation, Notifier, _make_event class _Properties(UserDict): __PROPERTIES_ATTRIBUTE_NAME = \"properties\" def __init__(self, entity_owner, **kwargs): super().__init__(**kwargs) self._entity_owner = entity_owner self._pending_changes = {} self._pending_deletions = set() def __setitem__(self, key, value): super(_Properties, self).__setitem__(key, value) from ... import core as tp if hasattr(self, \"_entity_owner\"): event = _make_event( self._entity_owner, EventOperation.UPDATE, attribute_name=self.__PROPERTIES_ATTRIBUTE_NAME, attribute_value=value, ) if not self._entity_owner._is_in_context: tp.set(self._entity_owner) Notifier.publish(event) else: if key in self._pending_deletions: self._pending_deletions.remove(key) self._pending_changes[key] = value self._entity_owner._in_context_attributes_changed_collector.append(event) def __getitem__(self, key): from taipy.config.common._template_handler import _TemplateHandler as _tpl return _tpl._replace_templates(super(_Properties, self).__getitem__(key)) def __delitem__(self, key): super(_Properties, self).__delitem__(key) from ... import core as tp if hasattr(self, \"_entity_owner\"): event = _make_event( self._entity_owner, EventOperation.UPDATE, attribute_name=self.__PROPERTIES_ATTRIBUTE_NAME, attribute_value=None, ) if not self._entity_owner._is_in_context: tp.set(self._entity_owner) Notifier.publish(event) else: self._pending_changes.pop(key, None) self._pending_deletions.add(key) self._entity_owner._in_context_attributes_changed_collector.append(event) "} {"text": "from __future__ import annotations class _EntityIds: def __init__(self): self.data_node_ids = set() self.task_ids = set() self.scenario_ids = set() self.sequence_ids = set() self.job_ids = set() self.cycle_ids = set() self.submission_ids = set() def __add__(self, other: _EntityIds): self.data_node_ids.update(other.data_node_ids) self.task_ids.update(other.task_ids) self.scenario_ids.update(other.scenario_ids) self.sequence_ids.update(other.sequence_ids) self.job_ids.update(other.job_ids) self.cycle_ids.update(other.cycle_ids) self.submission_ids.update(other.submission_ids) return self def __iadd__(self, other: _EntityIds): self.__add__(other) return self "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
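{"text": "# Hedged sketch of the _EntityIds accumulator above: id sets are merged in place
# via __iadd__, so collecting ids across many entities is just `total += part`.
# Simplified to two id sets for brevity.
class EntityIds:
    def __init__(self):
        self.task_ids = set()
        self.data_node_ids = set()

    def __iadd__(self, other: "EntityIds"):
        self.task_ids.update(other.task_ids)
        self.data_node_ids.update(other.data_node_ids)
        return self

a, b = EntityIds(), EntityIds()
a.task_ids = {"TASK_1"}
b.task_ids, b.data_node_ids = {"TASK_2"}, {"DN_1"}
a += b
print(sorted(a.task_ids), sorted(a.data_node_ids))  # ['TASK_1', 'TASK_2'] ['DN_1']
"}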
import math from functools import reduce from typing import Any, Dict, List, Tuple import networkx as nx class _Node: def __init__(self, entity: Any, x, y): self.type = entity.__class__.__name__ self.entity = entity self.x = x self.y = y class _Edge: def __init__(self, src: _Node, dest: _Node): self.src = src self.dest = dest class _DAG: def __init__(self, dag: nx.DiGraph): self._sorted_nodes = list(nodes for nodes in nx.topological_generations(dag)) self._length, self._width = self.__compute_size() self._grid_length, self._grid_width = self.__compute_grid_size() self._nodes = self.__compute_nodes() self._edges = self.__compute_edges(dag) @property def width(self) -> int: return self._width @property def length(self) -> int: return self._length @property def nodes(self) -> Dict[str, _Node]: return self._nodes @property def edges(self) -> List[_Edge]: return self._edges def __compute_size(self) -> Tuple[int, int]: return len(self._sorted_nodes), max([len(i) for i in self._sorted_nodes]) def __compute_grid_size(self) -> Tuple[int, int]: if self._width == 1: grd_wdt = 1 else: grd_wdt = self.__lcm(*[len(i) + 1 if len(i) != self._width else len(i) - 1 for i in self._sorted_nodes]) + 1 return len(self._sorted_nodes), grd_wdt def __compute_nodes(self) -> Dict[str, _Node]: nodes = {} x = 0 for same_lvl_nodes in self._sorted_nodes: lcl_wdt = len(same_lvl_nodes) is_max = lcl_wdt != self.width if self.width != 1: y_incr = (self._grid_width - 1) / (lcl_wdt + 1) if is_max else (self._grid_width - 1) / (lcl_wdt - 1) else: y_incr = 1 y = 0 if is_max else -y_incr for node in same_lvl_nodes: y += y_incr nodes[node.id] = _Node(node, x, y) x += 1 return nodes def __compute_edges(self, dag) -> List[_Edge]: edges = [] for edge in dag.edges(): edges.append(_Edge(self.nodes[edge[0].id], self.nodes[edge[1].id])) return edges @staticmethod def __lcm(*integers) -> int: # Function math.lcm is only implemented for Python 3.9+ # For compatibility with Python 3.8 it has been re implemented. if 0 in integers: return 0 return reduce(lambda x, y: (x * y) // math.gcd(x, y), integers) "} {"text": "import sys from typing import List from taipy._cli._base_cli import _CLI from taipy.logger._taipy_logger import _TaipyLogger from ._migrate import ( _migrate_fs_entities, _migrate_mongo_entities, _migrate_sql_entities, _remove_backup_file_entities, _remove_backup_mongo_entities, _remove_backup_sql_entities, _restore_migrate_file_entities, _restore_migrate_mongo_entities, _restore_migrate_sql_entities, ) class _MigrateCLI: __logger = _TaipyLogger._get_logger() @classmethod def create_parser(cls): migrate_parser = _CLI._add_subparser( \"migrate\", help=\"Migrate entities created from old taipy versions to be compatible with the current taipy version. \" \" The entity migration should be performed only after updating taipy code to the current version.\", ) migrate_parser.add_argument( \"--repository-type\", required=True, nargs=\"+\", help=\"The type of repository to migrate. If filesystem or sql, a path to the database folder/.sqlite file \" \"should be informed. 
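{"text": "# Hedged sketch of the layout math in _DAG above: nodes are grouped into
# topological generations (columns), and the grid width is derived from the LCM of
# per-column slot counts so every column can be spaced evenly. Toy graph only;
# generation ordering within a column may vary.
import math
from functools import reduce
import networkx as nx

dag = nx.DiGraph([("a", "c"), ("b", "c"), ("c", "d")])
columns = list(nx.topological_generations(dag))
print(columns)  # [['a', 'b'], ['c'], ['d']]

def lcm(*integers) -> int:
    # math.lcm exists only on Python 3.9+; reduce keeps 3.8 compatibility,
    # which is why the code above re-implements it.
    if 0 in integers:
        return 0
    return reduce(lambda x, y: (x * y) // math.gcd(x, y), integers)

width = max(len(col) for col in columns)
print(width, lcm(*[len(c) + 1 if len(c) != width else len(c) - 1 for c in columns]))
"}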
In case of mongo, host, port, user and password must be provided; if left empty, \" \"default values are assumed.\", ) migrate_parser.add_argument( \"--skip-backup\", action=\"store_true\", help=\"Skip the backup of entities before migration.\", ) migrate_parser.add_argument( \"--restore\", action=\"store_true\", help=\"Restore the migration of entities from the backup folder.\", ) migrate_parser.add_argument( \"--remove-backup\", action=\"store_true\", help=\"Remove the backup of entities. Only use this option if the migration was successful.\", ) @classmethod def parse_arguments(cls): args = _CLI._parse() if getattr(args, \"which\", None) != \"migrate\": return repository_type = args.repository_type[0] repository_args = args.repository_type[1:] if len(args.repository_type) > 1 else [None] if args.restore: cls.__handle_restore_backup(repository_type, repository_args) if args.remove_backup: cls.__handle_remove_backup(repository_type, repository_args) do_backup = not args.skip_backup cls.__migrate_entities(repository_type, repository_args, do_backup) sys.exit(0) @classmethod def __handle_remove_backup(cls, repository_type: str, repository_args: List): if repository_type == \"filesystem\": path = repository_args[0] or \".data\" if not _remove_backup_file_entities(path): sys.exit(1) elif repository_type == \"sql\": if not _remove_backup_sql_entities(repository_args[0]): sys.exit(1) elif repository_type == \"mongo\": if not _remove_backup_mongo_entities(): sys.exit(1) else: cls.__logger.error(f\"Unknown repository type {repository_type}\") sys.exit(1) sys.exit(0) @classmethod def __handle_restore_backup(cls, repository_type: str, repository_args: List): if repository_type == \"filesystem\": path = repository_args[0] or \".data\" if not _restore_migrate_file_entities(path): sys.exit(1) elif repository_type == \"sql\": if not _restore_migrate_sql_entities(repository_args[0]): sys.exit(1) elif repository_type == \"mongo\": mongo_args = repository_args[1:5] if repository_args[0] else [] if not _restore_migrate_mongo_entities(*mongo_args): sys.exit(1) else: cls.__logger.error(f\"Unknown repository type {repository_type}\") sys.exit(1) sys.exit(0) @classmethod def __migrate_entities(cls, repository_type: str, repository_args: List, do_backup: bool): if repository_type == \"filesystem\": path = repository_args[0] or \".data\" if not _migrate_fs_entities(path, do_backup): sys.exit(1) elif repository_type == \"sql\": if not _migrate_sql_entities(repository_args[0], do_backup): sys.exit(1) elif repository_type == \"mongo\": mongo_args = repository_args[1:5] if repository_args[0] else [] _migrate_mongo_entities(*mongo_args, backup=do_backup) # type: ignore else: cls.__logger.error(f\"Unknown repository type {repository_type}\") sys.exit(1) "} {"text": "import functools from ..notification import EventOperation, Notifier, _make_event class _Reloader: \"\"\"The _Reloader singleton class\"\"\" _instance = None _no_reload_context = False def __new__(class_, *args, **kwargs): if not isinstance(class_._instance, class_): class_._instance = object.__new__(class_, *args, **kwargs) return class_._instance def _reload(self, manager: str, obj): if self._no_reload_context: return obj entity = _get_manager(manager)._get(obj, obj) if obj._is_in_context and hasattr(entity, \"_properties\"): if obj._properties._pending_changes: entity._properties._pending_changes = obj._properties._pending_changes if obj._properties._pending_deletions: entity._properties._pending_deletions = obj._properties._pending_deletions 
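{"text": "# Hedged, minimal argparse sketch of the `migrate` sub-command wired up by
# _MigrateCLI above; the flag names mirror the code, while the top-level parser
# setup here is an assumption (taipy routes this through its own _CLI helper).
import argparse

parser = argparse.ArgumentParser(prog="taipy")
subparsers = parser.add_subparsers(dest="which")
migrate = subparsers.add_parser("migrate", help="Migrate entities to the current taipy version.")
migrate.add_argument("--repository-type", required=True, nargs="+")
migrate.add_argument("--skip-backup", action="store_true")
migrate.add_argument("--restore", action="store_true")
migrate.add_argument("--remove-backup", action="store_true")

args = parser.parse_args(["migrate", "--repository-type", "filesystem", ".data", "--skip-backup"])
repository_type, repository_args = args.repository_type[0], args.repository_type[1:] or [None]
print(args.which, repository_type, repository_args, not args.skip_backup)
# migrate filesystem ['.data'] False  (False -> backup is skipped)
"}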
entity._properties._entity_owner = obj return entity def __enter__(self): self._no_reload_context = True return self def __exit__(self, exc_type, exc_value, exc_traceback): self._no_reload_context = False def _self_reload(manager): def __reload(fct): @functools.wraps(fct) def _do_reload(self, *args, **kwargs): self = _Reloader()._reload(manager, self) return fct(self, *args, **kwargs) return _do_reload return __reload def _self_setter(manager): def __set_entity(fct): @functools.wraps(fct) def _do_set_entity(self, *args, **kwargs): fct(self, *args, **kwargs) entity_manager = _get_manager(manager) if len(args) == 1: value = args[0] else: value = args event = _make_event( self, EventOperation.UPDATE, attribute_name=fct.__name__, attribute_value=value, ) if not self._is_in_context: entity = _Reloader()._reload(manager, self) fct(entity, *args, **kwargs) entity_manager._set(entity) Notifier.publish(event) else: self._in_context_attributes_changed_collector.append(event) return _do_set_entity return __set_entity @functools.lru_cache def _get_manager(manager: str): from ..cycle._cycle_manager_factory import _CycleManagerFactory from ..data._data_manager_factory import _DataManagerFactory from ..job._job_manager_factory import _JobManagerFactory from ..scenario._scenario_manager_factory import _ScenarioManagerFactory from ..sequence._sequence_manager_factory import _SequenceManagerFactory from ..submission._submission_manager_factory import _SubmissionManagerFactory from ..task._task_manager_factory import _TaskManagerFactory return { \"scenario\": _ScenarioManagerFactory._build_manager(), \"sequence\": _SequenceManagerFactory._build_manager(), \"data\": _DataManagerFactory._build_manager(), \"cycle\": _CycleManagerFactory._build_manager(), \"job\": _JobManagerFactory._build_manager(), \"task\": _TaskManagerFactory._build_manager(), \"submission\": _SubmissionManagerFactory._build_manager(), }[manager] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. import abc from typing import Optional class _Labeled: __LABEL_SEPARATOR = \" > \" @abc.abstractmethod def get_label(self) -> str: raise NotImplementedError def _get_label(self) -> str: \"\"\"Returns the entity label made of the simple label prefixed by the owner label. Returns: The label of the entity as a string. \"\"\" return self._get_explicit_label() or self._generate_label() @abc.abstractmethod def get_simple_label(self) -> str: raise NotImplementedError def _get_simple_label(self) -> str: \"\"\"Returns the simple label. Returns: The simple label of the entity as a string. \"\"\" return self._get_explicit_label() or self._generate_label(True) def _generate_label(self, simple=False) -> str: ls = [] if not simple: if owner_id := self._get_owner_id(): if getattr(self, \"id\") != owner_id: from ... 
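{"text": "# Hedged stand-in for the _self_reload decorator above: before the wrapped method
# runs, `self` is swapped for the freshest copy from a store. A plain in-memory
# dict replaces taipy's manager lookup; all names here are illustrative.
import functools

STORE = {}

def self_reload(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        self = STORE.get(self.id, self)  # reload the latest persisted copy
        return method(self, *args, **kwargs)
    return wrapper

class Entity:
    def __init__(self, id, value):
        self.id, self.value = id, value

    @self_reload
    def read(self):
        return self.value

stale = Entity("e1", "old")
STORE["e1"] = Entity("e1", "new")
print(stale.read())  # new
"}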
import core as tp owner = tp.get(owner_id) ls.append(owner.get_label()) ls.append(self._generate_entity_label()) return self.__LABEL_SEPARATOR.join(ls) def _get_explicit_label(self) -> Optional[str]: if hasattr(self, \"_properties\"): return getattr(self, \"_properties\").get(\"label\") return None def _get_owner_id(self) -> Optional[str]: if hasattr(self, \"owner_id\"): return getattr(self, \"owner_id\") return None def _get_name(self) -> Optional[str]: if hasattr(self, \"name\"): return getattr(self, \"name\") if hasattr(self, \"_properties\"): return getattr(self, \"_properties\").get(\"name\") return None def _get_config_id(self) -> Optional[str]: if hasattr(self, \"config_id\"): return getattr(self, \"config_id\") return None def _generate_entity_label(self) -> str: if name := self._get_name(): return name if config_id := self._get_config_id(): return config_id return getattr(self, \"id\") "} {"text": "import os import shutil from functools import lru_cache from typing import Dict import bson import pymongo from taipy.logger._taipy_logger import _TaipyLogger from ._utils import _migrate __logger = _TaipyLogger._get_logger() OLD_COLLECTIONS = [ \"cycle\", \"scenario\", \"pipeline\", \"task\", \"data_node\", \"job\", \"version\", ] NEW_COLLECTIONS = [ \"cycle\", \"scenario\", \"task\", \"data_node\", \"job\", \"version\", ] DATABASE_NAME = \"taipy\" MONGO_BACKUP_FOLDER = \".mongo_backup\" @lru_cache def _connect_mongodb(db_host: str, db_port: int, db_username: str, db_password: str) -> pymongo.MongoClient: auth_str = \"\" if db_username and db_password: auth_str = f\"{db_username}:{db_password}@\" connection_string = f\"mongodb://{auth_str}{db_host}:{db_port}\" return pymongo.MongoClient(connection_string) def __load_all_entities_from_mongo( hostname: str, port: int, user: str, password: str, ): client = _connect_mongodb(hostname, port, user, password) entities = {} for collection in OLD_COLLECTIONS: db = client[DATABASE_NAME] cursor = db[collection].find({}) for document in cursor: entities[document[\"id\"]] = {\"data\": document} return entities def __write_entities_to_mongo( _entities: Dict, hostname: str, port: int, user: str, password: str, ): client = _connect_mongodb(hostname, port, user, password) for collection in NEW_COLLECTIONS: db = client[DATABASE_NAME] db[collection].insert_many( [entity[\"data\"] for entity in _entities.values() if collection in entity[\"data\"][\"id\"]] ) def _backup_mongo_entities( hostname: str = \"localhost\", port: int = 27017, user: str = \"\", password: str = \"\", ) -> bool: client = _connect_mongodb(hostname, port, user, password) db = client[DATABASE_NAME] if not os.path.exists(MONGO_BACKUP_FOLDER): os.makedirs(MONGO_BACKUP_FOLDER, exist_ok=True) for collection in OLD_COLLECTIONS: with open(os.path.join(MONGO_BACKUP_FOLDER, f\"{collection}.bson\"), \"wb+\") as f: for doc in db[collection].find(): f.write(bson.BSON.encode(doc)) __logger.info(f\"Backed up entities to folder '{MONGO_BACKUP_FOLDER}' before migration.\") return True def _restore_migrate_mongo_entities( hostname: str = \"localhost\", port: int = 27017, user: str = \"\", password: str = \"\", ) -> bool: client = _connect_mongodb(hostname, port, user, password) db = client[DATABASE_NAME] if not os.path.isdir(MONGO_BACKUP_FOLDER): __logger.info(f\"The backup folder '{MONGO_BACKUP_FOLDER}' does not exist.\") return False for collection in os.listdir(MONGO_BACKUP_FOLDER): if collection.endswith(\".bson\"): with open(os.path.join(MONGO_BACKUP_FOLDER, collection), \"rb+\") as f: if bson_data 
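{"text": "# Hedged stand-in for the labeling chain in _Labeled above: an explicit \"label\"
# property wins; otherwise the owner's label is prefixed to a name/config_id/id
# fallback. Plain dicts replace taipy entities here.
LABEL_SEPARATOR = " > "

def entity_label(entity: dict, entities: dict) -> str:
    if label := entity.get("properties", {}).get("label"):
        return label
    parts = []
    if owner_id := entity.get("owner_id"):
        parts.append(entity_label(entities[owner_id], entities))
    parts.append(entity.get("name") or entity.get("config_id") or entity["id"])
    return LABEL_SEPARATOR.join(parts)

entities = {
    "CYCLE_1": {"id": "CYCLE_1", "name": "2023 weekly"},
    "SCENARIO_1": {"id": "SCENARIO_1", "config_id": "sales", "owner_id": "CYCLE_1"},
}
print(entity_label(entities["SCENARIO_1"], entities))  # 2023 weekly > sales
"}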
:= bson.decode_all(f.read()): # type: ignore db[collection.split(\".\")[0]].insert_many(bson_data) shutil.rmtree(MONGO_BACKUP_FOLDER) __logger.info(f\"Restored entities from the backup folder '{MONGO_BACKUP_FOLDER}'.\") return True def _remove_backup_mongo_entities() -> bool: if not os.path.isdir(MONGO_BACKUP_FOLDER): __logger.info(f\"The backup folder '{MONGO_BACKUP_FOLDER}' does not exist.\") return False shutil.rmtree(MONGO_BACKUP_FOLDER) __logger.info(f\"Removed backup entities from the backup folder '{MONGO_BACKUP_FOLDER}'.\") return True def _migrate_mongo_entities( hostname: str = \"localhost\", port: int = 27017, user: str = \"\", password: str = \"\", backup: bool = True, ) -> bool: \"\"\"Migrate entities from mongodb to the current version. Args: hostname (str, optional): The hostname of the mongodb. Defaults to \"localhost\". port (int, optional): The port of the mongodb. Defaults to 27017. user (str, optional): The username of the mongodb. Defaults to \"\". password (str, optional): The password of the mongodb. Defaults to \"\". backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise. \"\"\" if backup: _backup_mongo_entities(hostname=hostname, port=port, user=user, password=password) __logger.info(f\"Starting entity migration from MongoDB {hostname}:{port}\") entities = __load_all_entities_from_mongo(hostname, port, user, password) entities, _ = _migrate(entities) __write_entities_to_mongo(entities, hostname, port, user, password) __logger.info(\"Migration finished\") return True "} {"text": "from ._migrate_fs import _migrate_fs_entities, _remove_backup_file_entities, _restore_migrate_file_entities from ._migrate_mongo import _migrate_mongo_entities, _remove_backup_mongo_entities, _restore_migrate_mongo_entities from ._migrate_sql import _migrate_sql_entities, _remove_backup_sql_entities, _restore_migrate_sql_entities "} {"text": "import json import os import shutil from typing import Dict from taipy.logger._taipy_logger import _TaipyLogger from ._utils import _migrate __logger = _TaipyLogger._get_logger() def _load_all_entities_from_fs(root: str) -> Dict: # run through all files in the data folder and load them entities = {} for root, dirs, files in os.walk(root): for file in files: if file.endswith(\".json\"): with open(os.path.join(root, file)) as f: _id = file.split(\".\")[0] if \"version\" in root: _id = f\"VERSION_{_id}\" entities[_id] = { \"data\": json.load(f), \"path\": os.path.join(root, file), } return entities def __write_entities_to_fs(_entities: Dict, root: str): if not os.path.exists(root): os.makedirs(root, exist_ok=True) for _id, entity in _entities.items(): # Do not write pipeline entities if \"PIPELINE\" in _id: continue with open(entity[\"path\"], \"w\") as f: json.dump(entity[\"data\"], f, indent=0) # Remove pipelines folder pipelines_path = os.path.join(root, \"pipelines\") if os.path.exists(pipelines_path): shutil.rmtree(pipelines_path) def _restore_migrate_file_entities(path: str) -> bool: backup_path = f\"{path}_backup\" if not os.path.exists(backup_path): __logger.error(f\"The backup folder '{backup_path}' does not exist.\") return False if os.path.exists(path): shutil.rmtree(path) else: __logger.warning(f\"The original entities folder '{path}' does not exist.\") os.rename(backup_path, path) __logger.info(f\"Restored entities from the backup folder '{backup_path}' to '{path}'.\") return True def _remove_backup_file_entities(path: str) -> bool: 
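{"text": "# Hedged sketch of _load_all_entities_from_fs above: walk a folder, load every
# .json file, and key entities by file stem (version files get a prefix). Note the
# stem is split at the first dot, mirroring the code above. Sample layout assumed.
import json
import os

def load_all_entities(root: str) -> dict:
    entities = {}
    for dirpath, _, files in os.walk(root):
        for file in files:
            if file.endswith(".json"):
                path = os.path.join(dirpath, file)
                with open(path) as f:
                    _id = file.split(".")[0]
                    if "version" in dirpath:
                        _id = f"VERSION_{_id}"
                    entities[_id] = {"data": json.load(f), "path": path}
    return entities

os.makedirs(".data/version", exist_ok=True)
with open(".data/version/1.0.json", "w") as f:
    json.dump({"id": "1.0"}, f)
print(load_all_entities(".data").keys())  # dict_keys(['VERSION_1'])
"}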
backup_path = f\"{path}_backup\" if not os.path.exists(backup_path): __logger.error(f\"The backup folder '{backup_path}' does not exist.\") return False shutil.rmtree(backup_path) __logger.info(f\"Removed backup entities from the backup folder '{backup_path}'.\") return True def _migrate_fs_entities(path: str, backup: bool = True) -> bool: \"\"\"Migrate entities from filesystem to the current version. Args: path (str): The path to the folder containing the entities. backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise. \"\"\" if not os.path.isdir(path): __logger.error(f\"Folder '{path}' does not exist.\") return False if backup: backup_path = f\"{path}_backup\" try: shutil.copytree(path, backup_path) except FileExistsError: __logger.warning(f\"The backup folder '{backup_path}' already exists. Migration canceled.\") return False else: __logger.info(f\"Backed up entities from '{path}' to '{backup_path}' folder before migration.\") __logger.info(f\"Starting entity migration from '{path}' folder.\") entities = _load_all_entities_from_fs(path) entities, _ = _migrate(entities) __write_entities_to_fs(entities, path) __logger.info(\"Migration finished\") return True "} {"text": "from functools import lru_cache import pymongo @lru_cache def _connect_mongodb( db_host: str, db_port: int, db_username: str, db_password: str, db_extra_args: frozenset, db_driver: str ) -> pymongo.MongoClient: \"\"\"Create a connection to a Mongo database. The `\"mongodb_extra_args\"` passed by the user is originally a dictionary, but since `@lru_cache` wrapper only accepts hashable parameters, the `\"mongodb_extra_args\"` should be converted into a frozenset beforehand. Parameters: db_host (str): the database host. db_port (int): the database port. db_username (str): the database username. db_password (str): the database password. db_extra_args (frozenset): A frozenset converted from a dictionary of additional arguments to be passed into database connection string. Returns: pymongo.MongoClient \"\"\" auth_str = \"\" if db_username and db_password: auth_str = f\"{db_username}:{db_password}@\" extra_args_str = \"&\".join(f\"{k}={str(v)}\" for k, v in db_extra_args) if extra_args_str: extra_args_str = \"/?\" + extra_args_str driver = \"mongodb\" if db_driver: driver = f\"{driver}+{db_driver}\" connection_string = f\"{driver}://{auth_str}{db_host}\" connection_string = connection_string if db_driver else f\"{connection_string}:{db_port}\" connection_string += extra_args_str return pymongo.MongoClient(connection_string) "} {"text": "from taipy.config.common._validate_id import _validate_id class MongoDefaultDocument: \"\"\"The default class for \\\"custom_document\\\" property to configure a `MongoCollectionDataNode^`. Attributes: **kwargs: Attributes of the MongoDefaultDocument object. Example: - `document = MongoDefaultDocument(name=\"example\", age=30})` will return a MongoDefaultDocument object so that `document.name` returns `\"example\"`, and `document.age` returns `30`. - `document = MongoDefaultDocument(date=\"12/24/2018\", temperature=20})` will return a MongoDefaultDocument object so that `document.date` returns `\"12/24/2018\"`, and `document.temperature` returns `20`. 
\"\"\" def __init__(self, **kwargs): for attribute_name, value in kwargs.items(): setattr(self, _validate_id(attribute_name), value) "} {"text": "from .mongo_default_document import MongoDefaultDocument "} {"text": "from collections import UserList class _ListAttributes(UserList): def __init__(self, parent, *args, **kwargs): super().__init__(*args, **kwargs) self._parent = parent def __add_iterable(self, iterable): for i in iterable: super(_ListAttributes, self).append(i) def __set_self(self): from ... import core as tp if hasattr(self, \"_parent\"): tp.set(self._parent) def __add__(self, value): if hasattr(value, \"__iter__\"): self.__add_iterable(value) else: self.append(value) return self def extend(self, value) -> None: super(_ListAttributes, self).extend(value) self.__set_self() def append(self, value) -> None: super(_ListAttributes, self).append(value) self.__set_self() def remove(self, value): super(_ListAttributes, self).remove(value) self.__set_self() def clear(self) -> None: super(_ListAttributes, self).clear() self.__set_self() "} {"text": "import functools import warnings from typing import Optional warnings.simplefilter(\"once\", ResourceWarning) def _warn_deprecated(deprecated: str, suggest: Optional[str] = None, stacklevel: int = 3) -> None: category = DeprecationWarning message = f\"{deprecated} is deprecated.\" if suggest: message += f\" Use {suggest} instead.\" warnings.warn(message=message, category=category, stacklevel=stacklevel) def _warn_no_core_service(stacklevel: int = 3): def inner(f): @functools.wraps(f) def _check_if_core_service_is_running(*args, **kwargs): from .._orchestrator._orchestrator_factory import _OrchestratorFactory if not _OrchestratorFactory._dispatcher: message = \"The Core service is NOT running\" warnings.warn(message=message, category=ResourceWarning, stacklevel=stacklevel) return f(*args, **kwargs) return _check_if_core_service_is_running return inner "} {"text": "import functools from enum import Enum class _ReprEnum(Enum): @classmethod @functools.lru_cache def _from_repr(cls, repr_: str): return next(filter(lambda e: repr(e) == repr_, cls)) # type: ignore "} {"text": "from typing import Iterable from taipy.logger._taipy_logger import _TaipyLogger from ..data import DataNode def _warn_if_inputs_not_ready(inputs: Iterable[DataNode]): from ..data import CSVDataNode, ExcelDataNode, JSONDataNode, ParquetDataNode, PickleDataNode from ..data._data_manager_factory import _DataManagerFactory logger = _TaipyLogger._get_logger() data_manager = _DataManagerFactory._build_manager() for dn in inputs: dn = data_manager._get(dn.id) if dn.is_ready_for_reading is False and not dn._last_edit_date: if dn.storage_type() in [ CSVDataNode.storage_type(), ExcelDataNode.storage_type(), JSONDataNode.storage_type(), PickleDataNode.storage_type(), ParquetDataNode.storage_type(), ]: logger.warning( f\"{dn.id} cannot be read because it has never been written. 
\" f\"Hint: The data node may refer to a wrong path : {dn.path} \" ) else: logger.warning(f\"{dn.id} cannot be read because it has never been written.\") "} {"text": "from typing import TypeVar, Union from .._repository._abstract_converter import _AbstractConverter from .._repository._base_taipy_model import _BaseModel ModelType = TypeVar(\"ModelType\", bound=_BaseModel) Entity = TypeVar(\"Entity\") Converter = TypeVar(\"Converter\", bound=_AbstractConverter) Json = Union[dict, list, str, int, float, bool] "} {"text": "import functools import time from collections import namedtuple from importlib import import_module from operator import attrgetter from typing import Callable, Optional, Tuple from taipy.config import Config @functools.lru_cache def _load_fct(module_name: str, fct_name: str) -> Callable: module = import_module(module_name) return attrgetter(fct_name)(module) def _retry_read_entity(exceptions: Tuple, sleep_time: float = 0.2): \"\"\" Retries the wrapped function/method if the exceptions listed in ``exceptions`` are thrown. The number of retries is defined by Config.core.read_entity_retry. Parameters: exceptions (tuple): Tuple of exceptions that trigger a retry attempt. sleep_time (float): Time to sleep between retries. \"\"\" def decorator(func): def newfn(*args, **kwargs): for _ in range(Config.core.read_entity_retry): try: return func(*args, **kwargs) except exceptions: time.sleep(sleep_time) return func(*args, **kwargs) return newfn return decorator @functools.lru_cache def _get_fct_name(f) -> Optional[str]: # Mock function does not have __qualname__ attribute -> return __name__ # Partial or anonymous function does not have __name__ or __qualname__ attribute -> return None name = getattr(f, \"__qualname__\", getattr(f, \"__name__\", None)) return name def _fct_to_dict(obj): params = [] callback = obj if isinstance(obj, _Subscriber): callback = obj.callback params = obj.params fct_name = _get_fct_name(callback) if not fct_name: return None return { \"fct_name\": fct_name, \"fct_params\": params, \"fct_module\": callback.__module__, } def _fcts_to_dict(objs): return [d for obj in objs if (d := _fct_to_dict(obj)) is not None] _Subscriber = namedtuple(\"_Subscriber\", \"callback params\") "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._scenario_fs_repository import _ScenarioFSRepository from ._scenario_manager import _ScenarioManager from ._scenario_sql_repository import _ScenarioSQLRepository class _ScenarioManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _ScenarioFSRepository, \"sql\": _ScenarioSQLRepository} @classmethod def _build_manager(cls) -> Type[_ScenarioManager]: # type: ignore if cls._using_enterprise(): scenario_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".scenario._scenario_manager\", \"_ScenarioManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".scenario._scenario_manager_factory\", \"_ScenarioManagerFactory\" )._build_repository # type: ignore else: scenario_manager = _ScenarioManager build_repository = cls._build_repository scenario_manager._repository = build_repository() # type: ignore return scenario_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from dataclasses import dataclass from typing import Any, Dict, List, Optional from sqlalchemy import JSON, Boolean, Column, String, Table from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry from ..cycle.cycle_id import CycleId from ..data.data_node_id import DataNodeId from ..task.task_id import TaskId from .scenario_id import ScenarioId @mapper_registry.mapped @dataclass class _ScenarioModel(_BaseModel): __table__ = Table( \"scenario\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"config_id\", String), Column(\"tasks\", JSON), Column(\"additional_data_nodes\", JSON), Column(\"properties\", JSON), Column(\"creation_date\", String), Column(\"primary_scenario\", Boolean), Column(\"subscribers\", JSON), Column(\"tags\", JSON), Column(\"version\", String), Column(\"sequences\", JSON), Column(\"cycle\", String), ) id: ScenarioId config_id: str tasks: List[TaskId] additional_data_nodes: List[DataNodeId] properties: Dict[str, Any] creation_date: str primary_scenario: bool subscribers: List[Dict] tags: List[str] version: str sequences: Optional[Dict[str, Dict]] = None cycle: Optional[CycleId] = None @staticmethod def from_dict(data: Dict[str, Any]): return _ScenarioModel( id=data[\"id\"], config_id=data[\"config_id\"], tasks=_BaseModel._deserialize_attribute(data[\"tasks\"]), additional_data_nodes=_BaseModel._deserialize_attribute(data[\"additional_data_nodes\"]), properties=_BaseModel._deserialize_attribute(data[\"properties\"]), creation_date=data[\"creation_date\"], primary_scenario=data[\"primary_scenario\"], subscribers=_BaseModel._deserialize_attribute(data[\"subscribers\"]), tags=_BaseModel._deserialize_attribute(data[\"tags\"]), version=data[\"version\"], sequences=_BaseModel._deserialize_attribute(data[\"sequences\"]), cycle=CycleId(data[\"cycle\"]) if \"cycle\" in data else None, ) def to_list(self): return [ self.id, self.config_id, _BaseModel._serialize_attribute(self.tasks), _BaseModel._serialize_attribute(self.additional_data_nodes), _BaseModel._serialize_attribute(self.properties), self.creation_date, self.primary_scenario, _BaseModel._serialize_attribute(self.subscribers), _BaseModel._serialize_attribute(self.tags), self.version, _BaseModel._serialize_attribute(self.sequences), self.cycle, ] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
from .._repository._filesystem_repository import _FileSystemRepository from ._scenario_converter import _ScenarioConverter from ._scenario_model import _ScenarioModel class _ScenarioFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_ScenarioModel, converter=_ScenarioConverter, dir_name=\"scenarios\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._sql_repository import _SQLRepository from ._scenario_converter import _ScenarioConverter from ._scenario_model import _ScenarioModel class _ScenarioSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_ScenarioModel, converter=_ScenarioConverter) "} {"text": "from datetime import datetime from typing import Dict, List, Optional, Set, Union from .._repository._abstract_converter import _AbstractConverter from .._version._utils import _migrate_entity from ..common import _utils from ..cycle._cycle_manager_factory import _CycleManagerFactory from ..cycle.cycle import Cycle, CycleId from ..data.data_node import DataNode, DataNodeId from ..scenario._scenario_model import _ScenarioModel from ..scenario.scenario import Scenario from ..task.task import Task, TaskId class _ScenarioConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, scenario: Scenario) -> _ScenarioModel: sequences: Dict[str, Dict[str, Union[List[TaskId], Dict, List]]] = {} for p_name, sequence_data in scenario._sequences.items(): sequences[p_name] = { Scenario._SEQUENCE_TASKS_KEY: [ t.id if isinstance(t, Task) else t for t in sequence_data.get(\"tasks\", []) ], Scenario._SEQUENCE_PROPERTIES_KEY: sequence_data.get(\"properties\", {}), Scenario._SEQUENCE_SUBSCRIBERS_KEY: _utils._fcts_to_dict(sequence_data.get(\"subscribers\", [])), } return _ScenarioModel( id=scenario.id, config_id=scenario.config_id, tasks=[task.id if isinstance(task, Task) else TaskId(str(task)) for task in list(scenario._tasks)], additional_data_nodes=[ dn.id if isinstance(dn, DataNode) else DataNodeId(str(dn)) for dn in list(scenario._additional_data_nodes) ], properties=scenario._properties.data, creation_date=scenario._creation_date.isoformat(), primary_scenario=scenario._primary_scenario, subscribers=_utils._fcts_to_dict(scenario._subscribers), tags=list(scenario._tags), version=scenario._version, cycle=scenario._cycle.id if scenario._cycle else None, sequences=sequences if sequences else None, ) @classmethod def _model_to_entity(cls, model: _ScenarioModel) -> Scenario: tasks: Union[Set[TaskId], Set[Task], Set] = set() if model.tasks: tasks = set(model.tasks) if model.sequences: for sequence_name, sequence_data in model.sequences.items(): if subscribers := sequence_data.get(Scenario._SEQUENCE_SUBSCRIBERS_KEY): model.sequences[sequence_name][Scenario._SEQUENCE_SUBSCRIBERS_KEY] = [ _utils._Subscriber(_utils._load_fct(it[\"fct_module\"], it[\"fct_name\"]), it[\"fct_params\"]) for it in subscribers ] scenario = Scenario( scenario_id=model.id, config_id=model.config_id, tasks=tasks, 
additional_data_nodes=set(model.additional_data_nodes), properties=model.properties, creation_date=datetime.fromisoformat(model.creation_date), is_primary=model.primary_scenario, tags=set(model.tags), cycle=cls.__to_cycle(model.cycle), subscribers=[ _utils._Subscriber(_utils._load_fct(it[\"fct_module\"], it[\"fct_name\"]), it[\"fct_params\"]) for it in model.subscribers ], version=model.version, sequences=model.sequences, ) return _migrate_entity(scenario) @staticmethod def __to_cycle(cycle_id: Optional[CycleId] = None) -> Optional[Cycle]: return _CycleManagerFactory._build_manager()._get(cycle_id) if cycle_id else None "} {"text": "from typing import NewType ScenarioId = NewType(\"ScenarioId\", str) ScenarioId.__doc__ = \"\"\"Type that holds a `Scenario^` identifier.\"\"\" "} {"text": "from abc import abstractmethod from typing import Callable, Iterable, List, Optional, Union from ..job.job import Job from ..task.task import Task class _AbstractOrchestrator: \"\"\"Creates, enqueues, and orchestrates jobs as instances of `Job^` class.\"\"\" @classmethod @abstractmethod def initialize(cls): raise NotImplementedError @classmethod @abstractmethod def submit( cls, sequence, callbacks: Optional[Iterable[Callable]], force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ) -> List[Job]: raise NotImplementedError @classmethod @abstractmethod def submit_task( cls, task: Task, callbacks: Optional[Iterable[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ) -> Job: raise NotImplementedError @classmethod @abstractmethod def cancel_job(cls, job): raise NotImplementedError "} {"text": "from importlib import util from typing import Optional, Type from taipy.config.config import Config from ..common._utils import _load_fct from ..exceptions.exceptions import ModeNotAvailable, OrchestratorNotBuilt from ._abstract_orchestrator import _AbstractOrchestrator from ._dispatcher import _DevelopmentJobDispatcher, _JobDispatcher, _StandaloneJobDispatcher from ._orchestrator import _Orchestrator class _OrchestratorFactory: _TAIPY_ENTERPRISE_MODULE = \"taipy.enterprise\" _TAIPY_ENTERPRISE_CORE_ORCHESTRATOR_MODULE = _TAIPY_ENTERPRISE_MODULE + \".core._orchestrator._orchestrator\" _TAIPY_ENTERPRISE_CORE_DISPATCHER_MODULE = _TAIPY_ENTERPRISE_MODULE + \".core._orchestrator._dispatcher\" __TAIPY_ENTERPRISE_BUILD_DISPATCHER_METHOD = \"_build_dispatcher\" _orchestrator: Optional[_Orchestrator] = None _dispatcher: Optional[_JobDispatcher] = None @classmethod def _build_orchestrator(cls) -> Type[_AbstractOrchestrator]: if util.find_spec(cls._TAIPY_ENTERPRISE_MODULE) is not None: cls._orchestrator = _load_fct( cls._TAIPY_ENTERPRISE_CORE_ORCHESTRATOR_MODULE, \"Orchestrator\", ) # type: ignore else: cls._orchestrator = _Orchestrator # type: ignore cls._orchestrator.initialize() # type: ignore return cls._orchestrator # type: ignore @classmethod def _build_dispatcher(cls, force_restart=False) -> Optional[_JobDispatcher]: if not cls._orchestrator: raise OrchestratorNotBuilt if Config.job_config.is_standalone: cls.__build_standalone_job_dispatcher(force_restart=force_restart) elif Config.job_config.is_development: cls.__build_development_job_dispatcher() elif util.find_spec(cls._TAIPY_ENTERPRISE_MODULE): cls.__build_enterprise_job_dispatcher(force_restart=force_restart) else: raise ModeNotAvailable(f\"Job mode {Config.job_config.mode} is not available.\") return cls._dispatcher @classmethod def _remove_dispatcher(cls) -> Optional[_JobDispatcher]: 
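{"text": "# Hedged sketch of the subscriber round-trip used by the converters above:
# callbacks are stored as {fct_module, fct_name} dicts and re-imported on load.
# Simplified versions of taipy's _fct_to_dict/_load_fct helpers.
from importlib import import_module
from operator import attrgetter

def fct_to_dict(callback):
    return {"fct_name": callback.__qualname__, "fct_module": callback.__module__}

def load_fct(module_name, fct_name):
    return attrgetter(fct_name)(import_module(module_name))

d = fct_to_dict(len)  # builtins.len, standing in for a user callback
print(load_fct(d["fct_module"], d["fct_name"]) is len)  # True
"}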
if cls._dispatcher is not None and not isinstance(cls._dispatcher, _DevelopmentJobDispatcher): cls._dispatcher.stop() cls._dispatcher = None return cls._dispatcher @classmethod def __build_standalone_job_dispatcher(cls, force_restart=False): if isinstance(cls._dispatcher, _StandaloneJobDispatcher): if force_restart: cls._dispatcher.stop() else: return if util.find_spec(cls._TAIPY_ENTERPRISE_MODULE) is not None: cls._dispatcher = _load_fct( cls._TAIPY_ENTERPRISE_CORE_DISPATCHER_MODULE, cls.__TAIPY_ENTERPRISE_BUILD_DISPATCHER_METHOD )(cls._orchestrator) else: cls._dispatcher = _StandaloneJobDispatcher(cls._orchestrator) # type: ignore cls._dispatcher.start() # type: ignore @classmethod def __build_development_job_dispatcher(cls): if isinstance(cls._dispatcher, _StandaloneJobDispatcher): cls._dispatcher.stop() cls._dispatcher = _DevelopmentJobDispatcher(cls._orchestrator) # type: ignore @classmethod def __build_enterprise_job_dispatcher(cls, force_restart=False): cls._dispatcher = _load_fct( cls._TAIPY_ENTERPRISE_CORE_DISPATCHER_MODULE, cls.__TAIPY_ENTERPRISE_BUILD_DISPATCHER_METHOD )(cls._orchestrator, force_restart) if cls._dispatcher: cls._dispatcher.start() else: raise ModeNotAvailable(f\"Job mode {Config.job_config.mode} is not available.\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from ._development_job_dispatcher import _DevelopmentJobDispatcher from ._job_dispatcher import _JobDispatcher from ._standalone_job_dispatcher import _StandaloneJobDispatcher "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from typing import Optional from ...job.job import Job from .._abstract_orchestrator import _AbstractOrchestrator from ._job_dispatcher import _JobDispatcher class _DevelopmentJobDispatcher(_JobDispatcher): \"\"\"Manages job dispatching (instances of `Job^` class) in a synchronous way.\"\"\" def __init__(self, orchestrator: Optional[_AbstractOrchestrator]): super().__init__(orchestrator) def start(self): raise NotImplementedError def is_running(self) -> bool: return True def stop(self): raise NotImplementedError def run(self): raise NotImplementedError def _dispatch(self, job: Job): \"\"\"Dispatches the given `Job^` on an available worker for execution. Parameters: job (Job^): The job to submit on an executor with an available worker. 
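Note:
    The task function runs synchronously in the caller's thread, and the job status is
    updated as soon as the call returns; no worker pool is involved. A minimal sketch of
    selecting the asynchronous standalone dispatcher instead (the same configuration call
    used by this codebase's tests; the string value mirrors JobConfig._STANDALONE_MODE):

    Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2)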
\"\"\" rs = self._wrapped_function(job.id, job.task) self._update_job_status(job, rs) "} {"text": "from concurrent.futures import ProcessPoolExecutor from functools import partial from typing import Optional from taipy.config._serializer._toml_serializer import _TomlSerializer from taipy.config.config import Config from ...job.job import Job from .._abstract_orchestrator import _AbstractOrchestrator from ._job_dispatcher import _JobDispatcher class _StandaloneJobDispatcher(_JobDispatcher): \"\"\"Manages job dispatching (instances of `Job^` class) in an asynchronous way using a ProcessPoolExecutor.\"\"\" def __init__(self, orchestrator: Optional[_AbstractOrchestrator]): super().__init__(orchestrator) self._executor = ProcessPoolExecutor(Config.job_config.max_nb_of_workers or 1) # type: ignore self._nb_available_workers = self._executor._max_workers # type: ignore def _dispatch(self, job: Job): \"\"\"Dispatches the given `Job^` on an available worker for execution. Parameters: job (Job^): The job to submit on an executor with an available worker. \"\"\" self._nb_available_workers -= 1 config_as_string = _TomlSerializer()._serialize(Config._applied_config) future = self._executor.submit(self._wrapped_function_with_config_load, config_as_string, job.id, job.task) self._set_dispatched_processes(job.id, future) # type: ignore future.add_done_callback(self._release_worker) future.add_done_callback(partial(self._update_job_status_from_future, job)) def _release_worker(self, _): self._nb_available_workers += 1 def _update_job_status_from_future(self, job: Job, ft): self._pop_dispatched_process(job.id) # type: ignore self._update_job_status(job, ft.result()) "} {"text": "import threading from abc import abstractmethod from typing import Dict, Optional from taipy.config.config import Config from taipy.logger._taipy_logger import _TaipyLogger from ...data._data_manager_factory import _DataManagerFactory from ...job._job_manager_factory import _JobManagerFactory from ...job.job import Job from ...task.task import Task from .._abstract_orchestrator import _AbstractOrchestrator from ._task_function_wrapper import _TaskFunctionWrapper class _JobDispatcher(threading.Thread, _TaskFunctionWrapper): \"\"\"Manages job dispatching (instances of `Job^` class) on executors.\"\"\" _STOP_FLAG = False _dispatched_processes: Dict = {} __logger = _TaipyLogger._get_logger() _nb_available_workers: int = 1 def __init__(self, orchestrator: Optional[_AbstractOrchestrator]): threading.Thread.__init__(self, name=\"Thread-Taipy-JobDispatcher\") self.daemon = True self.orchestrator = orchestrator self.lock = self.orchestrator.lock # type: ignore Config.block_update() def start(self): \"\"\"Start the dispatcher\"\"\" threading.Thread.start(self) def is_running(self) -> bool: \"\"\"Return True if the dispatcher is running\"\"\" return self.is_alive() def stop(self): \"\"\"Stop the dispatcher\"\"\" self._STOP_FLAG = True def run(self): _TaipyLogger._get_logger().info(\"Start job dispatcher...\") while not self._STOP_FLAG: try: if self._can_execute(): with self.lock: job = self.orchestrator.jobs_to_run.get(block=True, timeout=0.1) self._execute_job(job) except Exception: # In case the last job of the queue has been removed. 
pass def _can_execute(self) -> bool: \"\"\"Returns True if the dispatcher have resources to execute a new job.\"\"\" return self._nb_available_workers > 0 def _execute_job(self, job: Job): if job.force or self._needs_to_run(job.task): if job.force: self.__logger.info(f\"job {job.id} is forced to be executed.\") job.running() self._dispatch(job) else: job._unlock_edit_on_outputs() job.skipped() self.__logger.info(f\"job {job.id} is skipped.\") def _execute_jobs_synchronously(self): while not self.orchestrator.jobs_to_run.empty(): with self.lock: try: job = self.orchestrator.jobs_to_run.get() except Exception: # In case the last job of the queue has been removed. self.__logger.warning(f\"{job.id} is no longer in the list of jobs to run.\") self._execute_job(job) @staticmethod def _needs_to_run(task: Task) -> bool: \"\"\" Returns True if the task has no output or if at least one input was modified since the latest run. Parameters: task (Task^): The task to run. Returns: True if the task needs to run. False otherwise. \"\"\" if not task.skippable: return True data_manager = _DataManagerFactory._build_manager() if len(task.output) == 0: return True are_outputs_in_cache = all(data_manager._get(dn.id).is_valid for dn in task.output.values()) if not are_outputs_in_cache: return True if len(task.input) == 0: return False input_last_edit = max(data_manager._get(dn.id).last_edit_date for dn in task.input.values()) output_last_edit = min(data_manager._get(dn.id).last_edit_date for dn in task.output.values()) return input_last_edit > output_last_edit @abstractmethod def _dispatch(self, job: Job): \"\"\" Dispatches the given `Job^` on an available worker for execution. Parameters: job (Job^): The job to submit on an executor with an available worker. \"\"\" raise NotImplementedError @staticmethod def _update_job_status(job: Job, exceptions): job.update_status(exceptions) _JobManagerFactory._build_manager()._set(job) @classmethod def _set_dispatched_processes(cls, job_id, process): cls._dispatched_processes[job_id] = process @classmethod def _pop_dispatched_process(cls, job_id, default=None): return cls._dispatched_processes.pop(job_id, default) # type: ignore "} {"text": "from typing import Any, List from taipy.config._serializer._toml_serializer import _TomlSerializer from taipy.config.config import Config from ...data._data_manager_factory import _DataManagerFactory from ...data.data_node import DataNode from ...exceptions import DataNodeWritingError from ...job.job_id import JobId from ...task.task import Task class _TaskFunctionWrapper: @classmethod def _wrapped_function_with_config_load(cls, config_as_string, job_id: JobId, task: Task): Config._applied_config._update(_TomlSerializer()._deserialize(config_as_string)) Config.block_update() return cls._wrapped_function(job_id, task) @classmethod def _wrapped_function(cls, job_id: JobId, task: Task): try: inputs: List[DataNode] = list(task.input.values()) outputs: List[DataNode] = list(task.output.values()) fct = task.function results = fct(*cls.__read_inputs(inputs)) return cls.__write_data(outputs, results, job_id) except Exception as e: return [e] @classmethod def __read_inputs(cls, inputs: List[DataNode]) -> List[Any]: data_manager = _DataManagerFactory._build_manager() return [data_manager._get(dn.id).read_or_raise() for dn in inputs] @classmethod def __write_data(cls, outputs: List[DataNode], results, job_id: JobId): data_manager = _DataManagerFactory._build_manager() try: if outputs: _results = cls.__extract_results(outputs, results) exceptions 
= [] for res, dn in zip(_results, outputs): try: data_node = data_manager._get(dn.id) data_node.write(res, job_id=job_id) data_manager._set(data_node) except Exception as e: exceptions.append(DataNodeWritingError(f\"Error writing in datanode id {dn.id}: {e}\")) return exceptions except Exception as e: return [e] @classmethod def __extract_results(cls, outputs: List[DataNode], results: Any) -> List[Any]: _results: List[Any] = [results] if len(outputs) == 1 else results if len(_results) != len(outputs): raise DataNodeWritingError(\"Error: wrong number of result or task output\") return _results "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._task_fs_repository import _TaskFSRepository from ._task_manager import _TaskManager from ._task_sql_repository import _TaskSQLRepository class _TaskManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _TaskFSRepository, \"sql\": _TaskSQLRepository} @classmethod def _build_manager(cls) -> Type[_TaskManager]: # type: ignore if cls._using_enterprise(): task_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".task._task_manager\", \"_TaskManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".task._task_manager_factory\", \"_TaskManagerFactory\" )._build_repository # type: ignore else: task_manager = _TaskManager build_repository = cls._build_repository task_manager._repository = build_repository() # type: ignore return task_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": " from .._repository._abstract_converter import _AbstractConverter from .._version._utils import _migrate_entity from ..common._utils import _load_fct from ..data._data_manager_factory import _DataManagerFactory from ..exceptions import NonExistingDataNode from ..task._task_model import _TaskModel from ..task.task import Task from .task import TaskId class _TaskConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, task: Task) -> _TaskModel: return _TaskModel( id=task.id, owner_id=task.owner_id, parent_ids=list(task._parent_ids), config_id=task.config_id, input_ids=cls.__to_ids(task.input.values()), function_name=task._function.__name__, function_module=task._function.__module__, output_ids=cls.__to_ids(task.output.values()), version=task._version, skippable=task._skippable, properties=task._properties.data.copy(), ) @classmethod def _model_to_entity(cls, model: _TaskModel) -> Task: task = Task( id=TaskId(model.id), owner_id=model.owner_id, parent_ids=set(model.parent_ids), config_id=model.config_id, function=_load_fct(model.function_module, model.function_name), input=cls.__to_data_nodes(model.input_ids), output=cls.__to_data_nodes(model.output_ids), version=model.version, skippable=model.skippable, properties=model.properties, ) return _migrate_entity(task) @staticmethod def 
__to_ids(data_nodes): return [i.id for i in data_nodes] @staticmethod def __to_data_nodes(data_nodes_ids): data_nodes = [] data_manager = _DataManagerFactory._build_manager() for _id in data_nodes_ids: if data_node := data_manager._get(_id): data_nodes.append(data_node) else: raise NonExistingDataNode(_id) return data_nodes "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._sql_repository import _SQLRepository from ._task_converter import _TaskConverter from ._task_model import _TaskModel class _TaskSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_TaskModel, converter=_TaskConverter) "} {"text": "from dataclasses import dataclass from typing import Any, Dict, List, Optional from sqlalchemy import JSON, Boolean, Column, String, Table from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry @mapper_registry.mapped @dataclass class _TaskModel(_BaseModel): __table__ = Table( \"task\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"owner_id\", String), Column(\"parent_ids\", JSON), Column(\"config_id\", String), Column(\"input_ids\", JSON), Column(\"function_name\", String), Column(\"function_module\", String), Column(\"output_ids\", JSON), Column(\"version\", String), Column(\"skippable\", Boolean), Column(\"properties\", JSON), ) id: str owner_id: Optional[str] parent_ids: List[str] config_id: str input_ids: List[str] function_name: str function_module: str output_ids: List[str] version: str skippable: bool properties: Dict[str, Any] @staticmethod def from_dict(data: Dict[str, Any]): return _TaskModel( id=data[\"id\"], owner_id=data.get(\"owner_id\"), parent_ids=_BaseModel._deserialize_attribute(data.get(\"parent_ids\", [])), config_id=data[\"config_id\"], input_ids=_BaseModel._deserialize_attribute(data[\"input_ids\"]), function_name=data[\"function_name\"], function_module=data[\"function_module\"], output_ids=_BaseModel._deserialize_attribute(data[\"output_ids\"]), version=data[\"version\"], skippable=data[\"skippable\"], properties=_BaseModel._deserialize_attribute(data[\"properties\"] if \"properties\" in data.keys() else {}), ) def to_list(self): return [ self.id, self.owner_id, _BaseModel._serialize_attribute(self.parent_ids), self.config_id, _BaseModel._serialize_attribute(self.input_ids), self.function_name, self.function_module, _BaseModel._serialize_attribute(self.output_ids), self.version, self.skippable, _BaseModel._serialize_attribute(self.properties), ] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. from .._repository._filesystem_repository import _FileSystemRepository from ._task_converter import _TaskConverter from ._task_model import _TaskModel class _TaskFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_TaskModel, converter=_TaskConverter, dir_name=\"tasks\") "} {"text": "from typing import NewType TaskId = NewType(\"TaskId\", str) TaskId.__doc__ = \"\"\"Type that holds a `Task^` identifier.\"\"\" "} {"text": "from dataclasses import dataclass from typing import Any, Dict, List from sqlalchemy import JSON, Boolean, Column, Enum, String, Table from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry from .job_id import JobId from .status import Status @mapper_registry.mapped @dataclass class _JobModel(_BaseModel): __table__ = Table( \"job\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"task_id\", String), Column(\"status\", Enum(Status)), Column(\"force\", Boolean), Column(\"submit_id\", String), Column(\"submit_entity_id\", String), Column(\"creation_date\", String), Column(\"subscribers\", JSON), Column(\"stacktrace\", JSON), Column(\"version\", String), ) id: JobId task_id: str status: Status force: bool submit_id: str submit_entity_id: str creation_date: str subscribers: List[Dict] stacktrace: List[str] version: str @staticmethod def from_dict(data: Dict[str, Any]): return _JobModel( id=data[\"id\"], task_id=data[\"task_id\"], status=Status._from_repr(data[\"status\"]), force=data[\"force\"], submit_id=data[\"submit_id\"], submit_entity_id=data[\"submit_entity_id\"], creation_date=data[\"creation_date\"], subscribers=_BaseModel._deserialize_attribute(data[\"subscribers\"]), stacktrace=_BaseModel._deserialize_attribute(data[\"stacktrace\"]), version=data[\"version\"], ) def to_list(self): return [ self.id, self.task_id, repr(self.status), self.force, self.submit_id, self.submit_entity_id, self.creation_date, _BaseModel._serialize_attribute(self.subscribers), _BaseModel._serialize_attribute(self.stacktrace), self.version, ] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
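# Filesystem counterpart of _JobSQLRepository: the manager factories choose between the two through their {\"default\": ..., \"sql\": ...} repository maps, keyed by the configured repository type.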
from .._repository._filesystem_repository import _FileSystemRepository from ._job_converter import _JobConverter from ._job_model import _JobModel class _JobFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_JobModel, converter=_JobConverter, dir_name=\"jobs\") "} {"text": "import uuid from typing import Callable, Iterable, List, Optional, Union from .._manager._manager import _Manager from .._repository._abstract_repository import _AbstractRepository from .._version._version_manager_factory import _VersionManagerFactory from .._version._version_mixin import _VersionMixin from ..exceptions.exceptions import JobNotDeletedException from ..notification import EventEntityType, EventOperation, Notifier, _make_event from ..task.task import Task from .job import Job from .job_id import JobId class _JobManager(_Manager[Job], _VersionMixin): _ENTITY_NAME = Job.__name__ _ID_PREFIX = \"JOB_\" _repository: _AbstractRepository _EVENT_ENTITY_TYPE = EventEntityType.JOB @classmethod def _get_all(cls, version_number: Optional[str] = None) -> List[Job]: \"\"\" Returns all entities. \"\"\" filters = cls._build_filters_with_version(version_number) return cls._repository._load_all(filters) @classmethod def _create( cls, task: Task, callbacks: Iterable[Callable], submit_id: str, submit_entity_id: str, force=False ) -> Job: version = _VersionManagerFactory._build_manager()._get_latest_version() job = Job( id=JobId(f\"{Job._ID_PREFIX}_{task.config_id}_{uuid.uuid4()}\"), task=task, submit_id=submit_id, submit_entity_id=submit_entity_id, force=force, version=version, ) cls._set(job) Notifier.publish(_make_event(job, EventOperation.CREATION)) job._on_status_change(*callbacks) return job @classmethod def _delete(cls, job: Job, force=False): if job.is_finished() or force: super()._delete(job.id) from .._orchestrator._dispatcher._job_dispatcher import _JobDispatcher _JobDispatcher._pop_dispatched_process(job.id) else: err = JobNotDeletedException(job.id) cls._logger.warning(err) raise err @classmethod def _cancel(cls, job: Union[str, Job]): job = cls._get(job) if isinstance(job, str) else job from .._orchestrator._orchestrator_factory import _OrchestratorFactory _OrchestratorFactory._build_orchestrator().cancel_job(job) @classmethod def _get_latest(cls, task: Task) -> Optional[Job]: jobs_of_task = list(filter(lambda job: task in job, cls._get_all())) if len(jobs_of_task) == 0: return None if len(jobs_of_task) == 1: return jobs_of_task[0] else: return max(jobs_of_task) @classmethod def _is_deletable(cls, job: Union[Job, JobId]) -> bool: if isinstance(job, str): job = cls._get(job) if job.is_finished(): return True return False "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from typing import NewType JobId = NewType(\"JobId\", str) JobId.__doc__ = \"\"\"Type that holds a `Job^` identifier.\"\"\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._sql_repository import _SQLRepository from ._job_converter import _JobConverter from ._job_model import _JobModel class _JobSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_JobModel, converter=_JobConverter) "} {"text": "from datetime import datetime from typing import List from .._repository._abstract_converter import _AbstractConverter from ..common._utils import _fcts_to_dict, _load_fct from ..exceptions import InvalidSubscriber from ..job._job_model import _JobModel from ..job.job import Job from ..task._task_manager_factory import _TaskManagerFactory class _JobConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, job: Job) -> _JobModel: return _JobModel( job.id, job._task.id, job._status, job._force, job.submit_id, job.submit_entity_id, job._creation_date.isoformat(), cls.__serialize_subscribers(job._subscribers), job._stacktrace, version=job._version, ) @classmethod def _model_to_entity(cls, model: _JobModel) -> Job: task_manager = _TaskManagerFactory._build_manager() task_repository = task_manager._repository job = Job( id=model.id, task=task_repository._load(model.task_id), submit_id=model.submit_id, submit_entity_id=model.submit_entity_id, version=model.version, ) job._status = model.status # type: ignore job._force = model.force # type: ignore job._creation_date = datetime.fromisoformat(model.creation_date) # type: ignore for it in model.subscribers: try: fct_module, fct_name = it.get(\"fct_module\"), it.get(\"fct_name\") job._subscribers.append(_load_fct(fct_module, fct_name)) # type: ignore except AttributeError: raise InvalidSubscriber(f\"The subscriber function {it.get('fct_name')} cannot be loaded.\") job._stacktrace = model.stacktrace return job @staticmethod def __serialize_subscribers(subscribers: List) -> List: return _fcts_to_dict(subscribers) "} {"text": "from ..common._repr_enum import _ReprEnum class Status(_ReprEnum): \"\"\"Execution status of a `Job^`. It is implemented as an enumeration. The possible values are: - `SUBMITTED`: A `SUBMITTED` job has been submitted for execution but not processed yet by the orchestrator. - `PENDING`: A `PENDING` job has been enqueued by the orchestrator. It is waiting for an executor to be available for its execution. - `BLOCKED`: A `BLOCKED` job has been blocked because its input data nodes are not ready yet. It is waiting for the completion of another `Job^` - `RUNNING`: A `RUNNING` job is currently executed by a dedicated executor. - `CANCELED`: A `CANCELED` job has been submitted but its execution has been canceled. - `FAILED`: A `FAILED` job raised an exception during its execution. - `COMPLETED`: A `COMPLETED` job has successfully been executed. - `SKIPPED`: A `SKIPPED` job has not been executed because its outputs were already computed. - `ABANDONED`: An `ABANDONED` job has not been executed because it depends on a job that could not complete ( cancelled, failed, or abandoned). 
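Example:
    A minimal sketch of inspecting job statuses after a submission, assuming `scenario`
    is an existing scenario and that `taipy.submit()` returns the list of created jobs,
    as this version's orchestrator does:

    import taipy as tp
    jobs = tp.submit(scenario)
    finished = all(job.status in (Status.COMPLETED, Status.SKIPPED) for job in jobs)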
\"\"\" SUBMITTED = 1 BLOCKED = 2 PENDING = 3 RUNNING = 4 CANCELED = 5 FAILED = 6 COMPLETED = 7 SKIPPED = 8 ABANDONED = 9 "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._job_fs_repository import _JobFSRepository from ._job_manager import _JobManager from ._job_sql_repository import _JobSQLRepository class _JobManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _JobFSRepository, \"sql\": _JobSQLRepository} @classmethod def _build_manager(cls) -> Type[_JobManager]: # type: ignore if cls._using_enterprise(): job_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".job._job_manager\", \"_JobManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".job._job_manager_factory\", \"_JobManagerFactory\" )._build_repository # type: ignore else: job_manager = _JobManager build_repository = cls._build_repository job_manager._repository = build_repository() # type: ignore return job_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "from typing import NewType SequenceId = NewType(\"SequenceId\", str) SequenceId.__doc__ = \"\"\"Type that holds a `Sequence^` identifier.\"\"\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from typing import Dict from .._repository._abstract_converter import _AbstractConverter from ..common import _utils from ..task.task import Task from .sequence import Sequence class _SequenceConverter(_AbstractConverter): _SEQUENCE_MODEL_ID_KEY = \"id\" _SEQUENCE_MODEL_OWNER_ID_KEY = \"owner_id\" _SEQUENCE_MODEL_PARENT_IDS_KEY = \"parent_ids\" _SEQUENCE_MODEL_PROPERTIES_KEY = \"properties\" _SEQUENCE_MODEL_TASKS_KEY = \"tasks\" _SEQUENCE_MODEL_SUBSCRIBERS_KEY = \"subscribers\" _SEQUENCE_MODEL_VERSION_KEY = \"version\" @classmethod def _entity_to_model(cls, sequence: Sequence) -> Dict: return { \"id\": sequence.id, \"owner_id\": sequence.owner_id, \"parent_ids\": list(sequence._parent_ids), \"properties\": sequence._properties.data, \"tasks\": cls.__to_task_ids(sequence._tasks), \"subscribers\": _utils._fcts_to_dict(sequence._subscribers), \"version\": sequence._version, } @staticmethod def __to_task_ids(tasks): return [t.id if isinstance(t, Task) else t for t in tasks] "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._sequence_manager import _SequenceManager class _SequenceManagerFactory(_ManagerFactory): @classmethod def _build_manager(cls) -> Type[_SequenceManager]: # type: ignore if cls._using_enterprise(): sequence_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".sequence._sequence_manager\", \"_SequenceManager\" ) # type: ignore else: sequence_manager = _SequenceManager return sequence_manager # type: ignore "} {"text": "from ..exceptions.exceptions import InvalidExposedType class _AbstractTabularDataNode(object): \"\"\"Abstract base class for tabular data node implementations (CSVDataNode, ParquetDataNode, ExcelDataNode, SQLTableDataNode and SQLDataNode) that are tabular representable.\"\"\" @staticmethod def _check_exposed_type(exposed_type, valid_string_exposed_types): if isinstance(exposed_type, str) and exposed_type not in valid_string_exposed_types: raise InvalidExposedType( f\"Invalid string exposed type {exposed_type}. Supported values are \" f\"{', '.join(valid_string_exposed_types)}\" ) "} {"text": "from enum import Enum class Operator(Enum): \"\"\"Enumeration of operators for Data Node filtering. The possible values are: - `EQUAL` - `NOT_EQUAL` - `LESS_THAN` - `LESS_OR_EQUAL` - `GREATER_THAN` - `GREATER_OR_EQUAL` \"\"\" EQUAL = 1 NOT_EQUAL = 2 LESS_THAN = 3 LESS_OR_EQUAL = 4 GREATER_THAN = 5 GREATER_OR_EQUAL = 6 class JoinOperator(Enum): \"\"\" Enumeration of join operators for Data Node filtering. The possible values are `AND` and `OR`. 
\"\"\" AND = 1 OR = 2 "} {"text": "from .csv import CSVDataNode from .data_node import DataNode from .excel import ExcelDataNode from .generic import GenericDataNode from .in_memory import InMemoryDataNode from .json import JSONDataNode from .mongo import MongoCollectionDataNode from .operator import JoinOperator, Operator from .parquet import ParquetDataNode from .pickle import PickleDataNode from .sql import SQLDataNode from .sql_table import SQLTableDataNode "} {"text": "import pathlib class _AbstractFileDataNode(object): \"\"\"Abstract base class for data node implementations (CSVDataNode, ParquetDataNode, ExcelDataNode, PickleDataNode and JSONDataNode) that are file based.\"\"\" __EXTENSION_MAP = {\"csv\": \"csv\", \"excel\": \"xlsx\", \"parquet\": \"parquet\", \"pickle\": \"p\", \"json\": \"json\"} def _build_path(self, storage_type): from taipy.config.config import Config folder = f\"{storage_type}s\" dir_path = pathlib.Path(Config.core.storage_folder) / folder if not dir_path.exists(): dir_path.mkdir(parents=True, exist_ok=True) return dir_path / f\"{self.id}.{self.__EXTENSION_MAP.get(storage_type)}\" "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._data_fs_repository import _DataFSRepository from ._data_manager import _DataManager from ._data_sql_repository import _DataSQLRepository class _DataManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _DataFSRepository, \"sql\": _DataSQLRepository} @classmethod def _build_manager(cls) -> Type[_DataManager]: # type: ignore if cls._using_enterprise(): data_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".data._data_manager\", \"_DataManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".data._data_manager_factory\", \"_DataManagerFactory\" )._build_repository # type: ignore else: data_manager = _DataManager build_repository = cls._build_repository data_manager._repository = build_repository() # type: ignore return data_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
from .._repository._sql_repository import _SQLRepository from ._data_converter import _DataNodeConverter from ._data_model import _DataNodeModel class _DataSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_DataNodeModel, converter=_DataNodeConverter) "} {"text": "from dataclasses import dataclass from typing import Any, Dict, List, Optional from sqlalchemy import JSON, Boolean, Column, Enum, Float, String, Table, UniqueConstraint from taipy.config.common.scope import Scope from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry from .data_node_id import Edit @mapper_registry.mapped @dataclass class _DataNodeModel(_BaseModel): __table__ = Table( \"data_node\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"config_id\", String), Column(\"scope\", Enum(Scope)), Column(\"storage_type\", String), Column(\"owner_id\", String), Column(\"parent_ids\", JSON), Column(\"last_edit_date\", String), Column(\"edits\", JSON), Column(\"version\", String), Column(\"validity_days\", Float), Column(\"validity_seconds\", Float), Column(\"edit_in_progress\", Boolean), Column(\"editor_id\", String), Column(\"editor_expiration_date\", String), Column(\"data_node_properties\", JSON), ) __table_args__ = (UniqueConstraint(\"config_id\", \"owner_id\", name=\"_config_owner_uc\"),) id: str config_id: str scope: Scope storage_type: str owner_id: Optional[str] parent_ids: List[str] last_edit_date: Optional[str] edits: List[Edit] version: str validity_days: Optional[float] validity_seconds: Optional[float] edit_in_progress: bool editor_id: Optional[str] editor_expiration_date: Optional[str] data_node_properties: Dict[str, Any] @staticmethod def from_dict(data: Dict[str, Any]): return _DataNodeModel( id=data[\"id\"], config_id=data[\"config_id\"], scope=Scope._from_repr(data[\"scope\"]), storage_type=data[\"storage_type\"], owner_id=data.get(\"owner_id\"), parent_ids=data.get(\"parent_ids\", []), last_edit_date=data.get(\"last_edit_date\"), edits=_BaseModel._deserialize_attribute(data[\"edits\"]), version=data[\"version\"], validity_days=data[\"validity_days\"], validity_seconds=data[\"validity_seconds\"], edit_in_progress=bool(data.get(\"edit_in_progress\", False)), editor_id=data.get(\"editor_id\", None), editor_expiration_date=data.get(\"editor_expiration_date\"), data_node_properties=_BaseModel._deserialize_attribute(data[\"data_node_properties\"]), ) def to_list(self): return [ self.id, self.config_id, repr(self.scope), self.storage_type, self.owner_id, _BaseModel._serialize_attribute(self.parent_ids), self.last_edit_date, _BaseModel._serialize_attribute(self.edits), self.version, self.validity_days, self.validity_seconds, self.edit_in_progress, self.editor_id, self.editor_expiration_date, _BaseModel._serialize_attribute(self.data_node_properties), ] "} {"text": "from datetime import datetime, timedelta from typing import Any, Dict, List, Optional, Set from taipy.config.common.scope import Scope from .._version._version_manager_factory import _VersionManagerFactory from .data_node import DataNode from .data_node_id import DataNodeId, Edit in_memory_storage: Dict[str, Any] = {} class InMemoryDataNode(DataNode): \"\"\"Data Node stored in memory. Warning: This Data Node implementation is not compatible with a parallel execution of taipy tasks, but only with a task executor in development mode. The purpose of `InMemoryDataNode` is to be used for development or debugging. 
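A minimal configuration sketch, using the generic `Config.configure_data_node()` call
exercised in this codebase's tests; the \"default_data\" entry is written to the node on
instantiation, as described below:

    from taipy import Config
    Config.configure_data_node(id=\"my_value\", storage_type=\"in_memory\", default_data=42)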
Attributes: config_id (str): Identifier of the data node configuration. It must be a valid Python identifier. scope (Scope^): The scope of this data node. id (str): The unique identifier of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or `None`. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. properties (dict[str, Any]): A dictionary of additional properties. When creating an _In Memory_ data node, if the _properties_ dictionary contains a _\"default_data\"_ entry, the data node is automatically written with the corresponding _\"default_data\"_ value. \"\"\" __STORAGE_TYPE = \"in_memory\" __DEFAULT_DATA_VALUE = \"default_data\" _REQUIRED_PROPERTIES: List[str] = [] def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: List[Edit] = None, version: str = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties=None, ): if properties is None: properties = {} default_value = properties.pop(self.__DEFAULT_DATA_VALUE, None) super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, **properties ) if default_value is not None and self.id not in in_memory_storage: self._write(default_value) self._last_edit_date = datetime.now() self._edits.append( Edit( { \"timestamp\": self._last_edit_date, \"writer_identifier\": \"TAIPY\", \"comments\": \"Default data written.\", } ) ) self._TAIPY_PROPERTIES.update({self.__DEFAULT_DATA_VALUE}) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE def _read(self): return in_memory_storage.get(self.id) def _write(self, data): in_memory_storage[self.id] = data "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. from .._repository._filesystem_repository import _FileSystemRepository from ._data_converter import _DataNodeConverter from ._data_model import _DataNodeModel class _DataFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_DataNodeModel, converter=_DataNodeConverter, dir_name=\"data_nodes\") "} {"text": "from typing import Any, Dict, NewType DataNodeId = NewType(\"DataNodeId\", str) DataNodeId.__doc__ = \"\"\"Type that holds a `DataNode^` identifier.\"\"\" Edit = NewType(\"Edit\", Dict[str, Any]) Edit.__doc__ = \"\"\"Type that holds the information of a `DataNode^` edit.\"\"\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._filesystem_repository import _FileSystemRepository from ._submission_converter import _SubmissionConverter from ._submission_model import _SubmissionModel class _SubmissionFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_SubmissionModel, converter=_SubmissionConverter, dir_name=\"submission\") "} {"text": "from typing import NewType SubmissionId = NewType(\"SubmissionId\", str) SubmissionId.__doc__ = \"\"\"Type that holds a `Submission^` identifier.\"\"\" "} {"text": "from ..common._repr_enum import _ReprEnum class SubmissionStatus(_ReprEnum): \"\"\"Execution status of a `Submission^`. It is implemented as an enumeration. The possible values are: - `SUBMITTED`: A `SUBMITTED` submission has been submitted for execution but not processed yet by the orchestrator. - `UNDEFINED`: An `UNDEFINED` submission's jobs have been submitted for execution but have gone through some undefined status changes. - `PENDING`: A `PENDING` submission has been enqueued by the orchestrator. It is waiting for an executor to be available for its execution. - `BLOCKED`: A `BLOCKED` submission is blocked because at least one of its jobs is blocked, waiting for its input data nodes to be ready. - `RUNNING`: A `RUNNING` submission has jobs currently being executed. - `CANCELED`: A `CANCELED` submission has been submitted but its execution has been canceled. - `FAILED`: A `FAILED` submission has at least one job that failed during its execution. - `COMPLETED`: A `COMPLETED` submission has successfully been executed. 
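Example:
    A sketch of checking a submission's final state, assuming (as the entity-to-model
    converter elsewhere in this codebase suggests) that `Submission^` exposes a
    `submission_status` property:

    if submission.submission_status == SubmissionStatus.COMPLETED:
        ...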
\"\"\" SUBMITTED = 0 UNDEFINED = 1 BLOCKED = 2 PENDING = 3 RUNNING = 4 CANCELED = 5 FAILED = 6 COMPLETED = 7 "} {"text": "from dataclasses import dataclass from typing import Any, Dict, List, Union from sqlalchemy import JSON, Column, Enum, String, Table from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry from ..job.job_id import JobId from .submission_status import SubmissionStatus @mapper_registry.mapped @dataclass class _SubmissionModel(_BaseModel): __table__ = Table( \"submission\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"entity_id\", String), Column(\"job_ids\", JSON), Column(\"creation_date\", String), Column(\"submission_status\", Enum(SubmissionStatus)), Column(\"version\", String), ) id: str entity_id: str job_ids: Union[List[JobId], List] creation_date: str submission_status: SubmissionStatus version: str @staticmethod def from_dict(data: Dict[str, Any]): return _SubmissionModel( id=data[\"id\"], entity_id=data[\"entity_id\"], job_ids=_BaseModel._deserialize_attribute(data[\"job_ids\"]), creation_date=data[\"creation_date\"], submission_status=SubmissionStatus._from_repr(data[\"submission_status\"]), version=data[\"version\"], ) def to_list(self): return [ self.id, self.entity_id, _BaseModel._serialize_attribute(self.job_ids), self.creation_date, repr(self.submission_status), self.version, ] "} {"text": "from datetime import datetime from .._repository._abstract_converter import _AbstractConverter from ..job.job import Job, JobId from ..submission._submission_model import _SubmissionModel from ..submission.submission import Submission from .submission import SubmissionId class _SubmissionConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, submission: Submission) -> _SubmissionModel: return _SubmissionModel( id=submission.id, entity_id=submission._entity_id, job_ids=[job.id if isinstance(job, Job) else JobId(str(job)) for job in list(submission._jobs)], creation_date=submission._creation_date.isoformat(), submission_status=submission._submission_status, version=submission._version, ) @classmethod def _model_to_entity(cls, model: _SubmissionModel) -> Submission: submission = Submission( entity_id=model.entity_id, id=SubmissionId(model.id), jobs=model.job_ids, creation_date=datetime.fromisoformat(model.creation_date), submission_status=model.submission_status, version=model.version, ) return submission "} {"text": "from typing import List, Optional, Union from .._manager._manager import _Manager from .._repository._abstract_repository import _AbstractRepository from .._version._version_mixin import _VersionMixin from ..notification import EventEntityType, EventOperation, Notifier, _make_event from ..scenario.scenario import Scenario from ..sequence.sequence import Sequence from ..submission.submission import Submission from ..task.task import Task class _SubmissionManager(_Manager[Submission], _VersionMixin): _ENTITY_NAME = Submission.__name__ _repository: _AbstractRepository _EVENT_ENTITY_TYPE = EventEntityType.SUBMISSION @classmethod def _get_all(cls, version_number: Optional[str] = None) -> List[Submission]: \"\"\" Returns all entities. 
\"\"\" filters = cls._build_filters_with_version(version_number) return cls._repository._load_all(filters) @classmethod def _create( cls, entity_id: str, ) -> Submission: submission = Submission(entity_id=entity_id) cls._set(submission) Notifier.publish(_make_event(submission, EventOperation.CREATION)) return submission @classmethod def _get_latest(cls, entity: Union[Scenario, Sequence, Task]) -> Optional[Submission]: entity_id = entity.id if not isinstance(entity, str) else entity submissions_of_task = list(filter(lambda submission: submission.entity_id == entity_id, cls._get_all())) if len(submissions_of_task) == 0: return None if len(submissions_of_task) == 1: return submissions_of_task[0] else: return max(submissions_of_task) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._submission_fs_repository import _SubmissionFSRepository from ._submission_manager import _SubmissionManager from ._submission_sql_repository import _SubmissionSQLRepository class _SubmissionManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _SubmissionFSRepository, \"sql\": _SubmissionSQLRepository} @classmethod def _build_manager(cls) -> Type[_SubmissionManager]: # type: ignore if cls._using_enterprise(): submission_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".submission._submission_manager\", \"_SubmissionManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".submission._submission_manager_factory\", \"_SubmissionManagerFactory\", )._build_repository # type: ignore else: submission_manager = _SubmissionManager build_repository = cls._build_repository submission_manager._repository = build_repository() # type: ignore return submission_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._sql_repository import _SQLRepository from ._submission_converter import _SubmissionConverter from ._submission_model import _SubmissionModel class _SubmissionSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_SubmissionModel, converter=_SubmissionConverter) "} {"text": "\"\"\" A single-page Taipy application. Please refer to https://docs.taipy.io/en/latest/manuals/gui/ for more details. 
\"\"\" import webbrowser from taipy.gui import Markdown, notify import taipy as tp value = 0 logo = \"images/taipy_logo.jpg\" page = Markdown( \"\"\"
<|navbar|lov={[(\"page1\", \"Homepage\"), (\"https://docs.taipy.io/en/latest/manuals/about/\", \"Taipy Docs\"), (\"https://docs.taipy.io/en/latest/getting_started/\", \"Getting Started\")]}|>
<|
<|{logo}|image|height=200px|width=200px|on_action=image_action|>
|> # Taipy Application <|{value}|slider|on_change=on_slider|> <|Push|button|on_action=on_push|> \"\"\" ) def image_action(state): webbrowser.open(\"https://taipy.io\") def on_push(state): ... def on_slider(state): if state.value == 100: notify(state, \"success\", \"Taipy is running!\") def on_change(state, var_name: str, var_value): ... gui = tp.Gui(page=page) if __name__ == '__main__': # Executed by the _Python_ interpreter, for debugging only. tp.run(gui, title=\"Taipy Application (development)\") else: # Executed by _Gunicorn_, in a production environment. app = tp.run(gui, title=\"Taipy Application\", run_server=False)"} {"text": "from taipy import Gui # A dark mode is available in Taipy # However, we will use the light mode for the Getting Started Gui(page=\"# Getting started with *Taipy*\").run(dark_mode=False)"} {"text": "\"\"\"The setup script.\"\"\" import json import os import sysconfig from importlib.util import find_spec from pathlib import Path from setuptools import find_namespace_packages, find_packages, setup from setuptools.command.build_py import build_py with open(\"README.md\", \"rb\") as readme_file: readme = readme_file.read().decode(\"UTF-8\") with open(f\"src{os.sep}taipy{os.sep}version.json\") as version_file: version = json.load(version_file) version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}' if vext := version.get(\"ext\"): version_string = f\"{version_string}.{vext}\" requirements = [ \"backports.zoneinfo>=0.2.1,<0.3;python_version<'3.9'\", \"cookiecutter>=2.1.1,<2.2\", \"taipy-gui@git+https://git@github.com/Avaiga/taipy-gui.git@develop\", \"taipy-rest@git+https://git@github.com/Avaiga/taipy-rest.git@develop\", \"taipy-templates@git+https://git@github.com/Avaiga/taipy-templates.git@develop\", ] test_requirements = [\"pytest>=3.8\"] extras_require = { \"ngrok\": [\"pyngrok>=5.1,<6.0\"], \"image\": [ \"python-magic>=0.4.24,<0.5;platform_system!='Windows'\", \"python-magic-bin>=0.4.14,<0.5;platform_system=='Windows'\", ], \"rdp\": [\"rdp>=0.8\"], \"arrow\": [\"pyarrow>=10.0.1,<11.0\"], \"mssql\": [\"pyodbc>=4\"], } def _build_webapp(): already_exists = Path(\"./src/taipy/gui_core/lib/taipy-gui-core.js\").exists() if not already_exists: # The default site-packages path comes from the current Python interpreter. site_packages_path = sysconfig.get_path(\"purelib\") # taipy-gui should be available through the setup_requires option. # At this step, taipy-gui is installed in a backend site-packages separated from the one used by pip. if find_spec(\"taipy\") and find_spec(\"taipy.gui\"): import taipy site_packages_path = Path(taipy.__file__).absolute().parent.parent # Specify the correct path to taipy-gui in the frontend/taipy/.env file. env_file_path = Path(__file__).absolute().parent / \"frontend\" / \"taipy\" / \".env\" if not os.path.exists(env_file_path): with open(env_file_path, \"w\") as env_file: env_file.write(f\"TAIPY_GUI_DIR={site_packages_path}\\n\") os.system(\"cd frontend/taipy && npm ci && npm run build\") class NPMInstall(build_py): def run(self): _build_webapp() build_py.run(self) setup( author=\"Avaiga\", author_email=\"dev@taipy.io\", python_requires=\">=3.8\", classifiers=[ \"Intended Audience :: Developers\", \"License :: OSI Approved :: Apache Software License\", \"Natural Language :: English\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Programming Language :: Python :: 3.10\", \"Programming Language :: Python :: 
3.11\", ], description=\"A 360\u00b0 open-source platform from Python pilots to production-ready web apps.\", install_requires=requirements, entry_points={ \"console_scripts\": [ \"taipy = taipy._entrypoint:_entrypoint\", ] }, license=\"Apache License 2.0\", long_description=readme, long_description_content_type=\"text/markdown\", keywords=\"taipy\", name=\"taipy\", package_dir={\"\": \"src\"}, packages=find_namespace_packages(where=\"src\") + find_packages(include=[\"taipy\"]), include_package_data=True, test_suite=\"tests\", url=\"https://github.com/avaiga/taipy\", version=version_string, zip_safe=False, extras_require=extras_require, cmdclass={\"build_py\": NPMInstall}, ) "} {"text": "import re import sys repo_name = sys.argv[1] branch_name = sys.argv[2] # Regex pattern ]*?)(?]*?)> pattern = re.compile(\"]*?)(?]*?)>\") replacement = r'' with open(\"README.md\") as readme_file: readme_str = readme_file.read() modified_readme = re.sub(pattern, replacement.format(repo_name=repo_name, branch_name=branch_name), readme_str) with open(\"README.md\", \"w\") as readme_file: readme_file.write(modified_readme) "} {"text": "# ############################################################ # Generate Python interface definition files # ############################################################ from src.taipy.gui.config import Config import json import os import typing as t # ############################################################ # Generate gui pyi file (gui/gui.pyi) # ############################################################ gui_py_file = \"./src/taipy/gui/gui.py\" gui_pyi_file = gui_py_file + \"i\" os.system(f\"pipenv run stubgen {gui_py_file} --no-import --parse-only --export-less -o ./\") gui_config = \"\".join( f\", {k}: {v.__name__} = ...\" if \" t.List[t.Dict[str, t.Any]]: properties = element[\"properties\"] if \"inherits\" not in element: return properties for inherit in element[\"inherits\"]: inherit_element = next((e for e in viselements[\"undocumented\"] if e[0] == inherit), None) if inherit_element is None: inherit_element = next((e for e in viselements[\"blocks\"] if e[0] == inherit), None) if inherit_element is None: inherit_element = next((e for e in viselements[\"controls\"] if e[0] == inherit), None) if inherit_element is None: raise RuntimeError(f\"Can't find element with name {inherit}\") properties += get_properties(inherit_element[1], viselements) return properties def build_doc(element: t.Dict[str, t.Any]): if \"doc\" not in element: return \"\" doc = str(element[\"doc\"]).replace(\"\\n\", f'\\n{16*\" \"}') return f\"{element['name']} ({element['type']}): {doc} {'(default: '+element['default_value'] + ')' if 'default_value' in element else ''}\" # noqa: E501 for control_element in viselements[\"controls\"]: name = control_element[0] property_list = [] property_names = [] for property in get_properties(control_element[1], viselements): if property[\"name\"] not in property_names and \"[\" not in property[\"name\"]: property_list.append(property) property_names.append(property[\"name\"]) properties = \", \".join([f\"{p} = ...\" for p in property_names]) doc_arguments = f\"\\n{12*' '}\".join([build_doc(p) for p in property_list]) # append properties to __init__.pyi with open(builder_pyi_file, \"a\") as file: file.write( control_template.replace(\"{{name}}\", name) .replace(\"{{properties}}\", properties) .replace(\"{{doc_arguments}}\", doc_arguments) ) for block_element in viselements[\"blocks\"]: name = block_element[0] property_list = [] property_names = [] for property 
in get_properties(block_element[1], viselements): if property[\"name\"] not in property_names and \"[\" not in property[\"name\"]: property_list.append(property) property_names.append(property[\"name\"]) properties = \", \".join([f\"{p} = ...\" for p in property_names]) doc_arguments = f\"{8*' '}\".join([build_doc(p) for p in property_list]) # append properties to __init__.pyi with open(builder_pyi_file, \"a\") as file: file.write( block_template.replace(\"{{name}}\", name) .replace(\"{{properties}}\", properties) .replace(\"{{doc_arguments}}\", doc_arguments) ) os.system(f\"pipenv run isort {gui_pyi_file}\") os.system(f\"pipenv run black {gui_pyi_file}\") os.system(f\"pipenv run isort {builder_pyi_file}\") os.system(f\"pipenv run black {builder_pyi_file}\") "} {"text": "import pytest def pytest_addoption(parser): parser.addoption(\"--e2e-base-url\", action=\"store\", default=\"/\", help=\"base url for e2e testing\") parser.addoption(\"--e2e-port\", action=\"store\", default=\"5000\", help=\"port for e2e testing\") @pytest.fixture(scope=\"session\") def e2e_base_url(request): return request.config.getoption(\"--e2e-base-url\") @pytest.fixture(scope=\"session\") def e2e_port(request): return request.config.getoption(\"--e2e-port\") "} {"text": "from unittest import mock from src.taipy._run import _run from taipy.core import Core from taipy.gui import Gui from taipy.rest import Rest @mock.patch(\"taipy.gui.Gui.run\") def test_run_pass_with_gui(gui_run): _run(Gui()) gui_run.assert_called_once() @mock.patch(\"taipy.core.Core.run\") def test_run_pass_with_core(core_run): _run(Core()) core_run.assert_called_once() @mock.patch(\"taipy.rest.Rest.run\") @mock.patch(\"taipy.core.Core.run\") def test_run_pass_with_rest(rest_run, core_run): _run(Rest()) rest_run.assert_called_once() core_run.assert_called_once() @mock.patch(\"taipy.rest.Rest.run\") @mock.patch(\"taipy.core.Core.run\") def test_run_pass_with_core_and_rest(core_run, rest_run): _run(Core(), Rest()) core_run.assert_called_once() rest_run.assert_called_once() @mock.patch(\"taipy.gui.Gui.run\") @mock.patch(\"taipy.rest.Rest.run\") @mock.patch(\"taipy.core.Core.run\") def test_run_pass_with_gui_and_rest(core_run, rest_run, gui_run): _run(Gui(), Rest()) gui_run.assert_called_once() core_run.assert_called_once() rest_run.assert_not_called() @mock.patch(\"taipy.gui.Gui.run\") @mock.patch(\"taipy.core.Core.run\") def test_run_pass_with_gui_and_core(core_run, gui_run): _run(Gui(), Core()) gui_run.assert_called_once() core_run.assert_called_once() "} {"text": "\"\"\"Unit test package for taipy.\"\"\" "} {"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.exceptions.exceptions import CoreServiceIsAlreadyRunning from taipy.config import Config from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked class TestCore: def test_run_core_trigger_config_check(self, caplog): Config.configure_data_node(id=\"d0\", storage_type=\"toto\") with patch(\"sys.argv\", [\"prog\"]): with pytest.raises(SystemExit): core = Core() core.run() expected_error_message = ( \"`storage_type` field of DataNodeConfig `d0` must be either csv, sql_table,\" \" sql, mongo_collection, pickle, excel, 
generic, json, parquet, or in_memory.\" ' Current value of property `storage_type` is \"toto\".' ) assert expected_error_message in caplog.text core.stop() def test_run_core_as_a_service_development_mode(self): _OrchestratorFactory._dispatcher = None with patch(\"sys.argv\", [\"prog\"]): core = Core() assert core._orchestrator is None assert core._dispatcher is None assert _OrchestratorFactory._dispatcher is None core.run() assert core._orchestrator is not None assert core._orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator is not None assert _OrchestratorFactory._orchestrator == _Orchestrator assert core._dispatcher is not None assert isinstance(core._dispatcher, _DevelopmentJobDispatcher) assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) core.stop() def test_run_core_as_a_service_standalone_mode(self): _OrchestratorFactory._dispatcher = None with patch(\"sys.argv\", [\"prog\"]): core = Core() assert core._orchestrator is None assert core._dispatcher is None assert _OrchestratorFactory._dispatcher is None Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) core.run() assert core._orchestrator is not None assert core._orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator is not None assert _OrchestratorFactory._orchestrator == _Orchestrator assert core._dispatcher is not None assert isinstance(core._dispatcher, _StandaloneJobDispatcher) assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher) assert core._dispatcher.is_running() assert _OrchestratorFactory._dispatcher.is_running() core.stop() def test_core_service_can_only_be_run_once(self): with patch(\"sys.argv\", [\"prog\"]): core_instance_1 = Core() core_instance_2 = Core() core_instance_1.run() with pytest.raises(CoreServiceIsAlreadyRunning): core_instance_1.run() with pytest.raises(CoreServiceIsAlreadyRunning): core_instance_2.run() # Stop the Core service and run it again should work core_instance_1.stop() core_instance_1.run() core_instance_1.stop() core_instance_2.run() core_instance_2.stop() def test_block_config_update_when_core_service_is_running_development_mode(self): _OrchestratorFactory._dispatcher = None with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_data_node(id=\"i1\") core.stop() def test_block_config_update_when_core_service_is_running_standalone_mode(self): _OrchestratorFactory._dispatcher = None with patch(\"sys.argv\", [\"prog\"]): core = Core() Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) core.run() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_data_node(id=\"i1\") core.stop() "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import json import os from datetime import datetime, timedelta import pytest from src.taipy.core._repository._decoder import _Decoder from src.taipy.core._repository._encoder import _Encoder @pytest.fixture(scope=\"function\", autouse=True) def create_and_delete_json_file(): test_json_file = { \"name\": \"testing\", \"date\": datetime(1991, 1, 1), \"default_data\": \"data for testing encoder\", \"validity_period\": timedelta(days=1), } with open(\"data.json\", \"w\") as f: json.dump(test_json_file, f, ensure_ascii=False, indent=4, cls=_Encoder) yield os.unlink(\"data.json\") def test_json_encoder(): with open(\"data.json\") as json_file: data = json.load(json_file) assert data[\"name\"] == \"testing\" assert data[\"default_data\"] == \"data for testing encoder\" assert data[\"date\"] == { \"__type__\": \"Datetime\", \"__value__\": \"1991-01-01T00:00:00\", } assert data[\"date\"].get(\"__type__\") == \"Datetime\" assert data[\"date\"].get(\"__value__\") == \"1991-01-01T00:00:00\" def test_json_decoder(): with open(\"data.json\") as json_file: data = json.load(json_file, cls=_Decoder) assert data[\"name\"] == \"testing\" assert data[\"default_data\"] == \"data for testing encoder\" assert data[\"date\"] == datetime(1991, 1, 1) "} {"text": "import src.taipy.core.taipy as tp from src.taipy.core.config import Config def test_no_special_characters(): scenario_config = Config.configure_scenario(\"scenario_1\") scenario = tp.create_scenario(scenario_config, name=\"martin\") assert scenario.name == \"martin\" scenarios = tp.get_scenarios() assert len(scenarios) == 1 assert scenarios[0].name == \"martin\" def test_many_special_characters(): scenario_config = Config.configure_scenario(\"scenario_1\") special_characters = ( \"!#$%&'()*+,-./:;<=>?@[]^_`\\\\{\" \"\u00bb\u00bc\u00bd\u00be\u00bf\u00c0\u00c1\u00c2\u00c3\u00c4\u00c5\u00c6\u00c7\u00c8\u00c9\u00ca\u00cb\u00cc\u00cd\u00ce\u00cf\u00d0\u00d1\u00d2\u00d3\u00d4\u00d5\u00d6\" \"\u00d7\u00d8\u00d9\u00da\u00db\u00dc\u00dd\u00de\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e5\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f0\u00f1\u00f2\" \"\u00f3\u00f4\u00f5\u00f6\u00f7\u00f8\u00f9\u00fa\u00fb\u00fc\u00fd\u00fe\u00ff\u0100\u0101\u0102\u0103\u0104\u0105\u0106\u0107\u0108\u0109\u010a\u010b\u010c\u010d\u010e\" \"\u010f\u0110\u0111\u0112\u0113\u0114\u0115\u0116\u0117\u0118\u0119\u011a\u011b\u011c\u011d\u011e\u011f\u0120\u0121\u0122\u0123\u0124\u0125\u0126\u0127\u0128\u0129\u012a\" \"\u012b\u012c\u012d\u012e\u012f\u0130\u0132\u0133\u0134\u0135\u0136\u0137\u0138\u0139\u013a\u013b\u013c\u013d\u013e\u013f\u0140\u0141\u0142\u0143\u0144\u0145\u0146\u0147\" \"\u0148\u0149\u014a\u014b\u014c\u014d\u014e\u014f\u0150\u0151\u0152\u0153\u0154\u0155\u0156\u0157\u0158\u0159\u015a\u015b\u015c\u015d\u015e\u015f\u0160\u0161\u0162\u0163\" \"\u0164\u0165\u0166\u0167\u0168\u0169\u016a\u016b\u016c\u016d\u016e\u016f\u0170\u0171\u0172\u0173\u0174\u0175\u0176\u0177\u0178\u0179\u017a\u017b\u017c\u017d\u017e\u017f\" ) scenario = tp.create_scenario(scenario_config, name=special_characters) assert scenario.name == special_characters scenarios = tp.get_scenarios() 
assert len(scenarios) == 1 assert scenarios[0].name == special_characters "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import json import os import pathlib import shutil import pytest from src.taipy.core.exceptions.exceptions import InvalidExportPath from taipy.config.config import Config from .mocks import MockConverter, MockFSRepository, MockModel, MockObj, MockSQLRepository class TestRepositoriesStorage: @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_save_and_fetch_model(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) fetched_model = r._load(m.id) assert m == fetched_model @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_exists(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) assert r._exists(m.id) assert not r._exists(\"not-existed-model\") @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_get_all(self, mock_repo, params, init_sql_repo): objs = [] r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) r._save(m) _objs = r._load_all() assert len(_objs) == 5 for obj in _objs: assert isinstance(obj, MockObj) assert sorted(objs, key=lambda o: o.id) == sorted(_objs, key=lambda o: o.id) @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_delete_all(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") r._save(m) _models = r._load_all() assert len(_models) == 5 r._delete_all() _models = r._load_all() assert len(_models) == 0 @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_delete_many(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") r._save(m) _models = r._load_all() assert len(_models) == 5 r._delete_many([\"uuid-0\", \"uuid-1\"]) _models = r._load_all() assert len(_models) == 3 @pytest.mark.parametrize( \"mock_repo,params\", [ 
(MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_search(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) r._delete_all() m = MockObj(\"uuid\", \"foo\") r._save(m) m1 = r._search(\"name\", \"bar\") m2 = r._search(\"name\", \"foo\") assert m1 == [] assert m2 == [m] @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) @pytest.mark.parametrize(\"export_path\", [\"tmp\"]) def test_export(self, mock_repo, params, export_path, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) r._export(\"uuid\", export_path) assert pathlib.Path(os.path.join(export_path, \"mock_model/uuid.json\")).exists() with open(os.path.join(export_path, \"mock_model/uuid.json\"), \"r\") as exported_file: exported_data = json.load(exported_file) assert exported_data[\"id\"] == \"uuid\" assert exported_data[\"name\"] == \"foo\" # Export to same location again should work r._export(\"uuid\", export_path) assert pathlib.Path(os.path.join(export_path, \"mock_model/uuid.json\")).exists() if mock_repo == MockFSRepository: with pytest.raises(InvalidExportPath): r._export(\"uuid\", Config.core.storage_folder) shutil.rmtree(export_path, ignore_errors=True) "} {"text": "import dataclasses import pathlib from dataclasses import dataclass from typing import Any, Dict, Optional from sqlalchemy import Column, String, Table from sqlalchemy.dialects import sqlite from sqlalchemy.orm import declarative_base, registry from sqlalchemy.schema import CreateTable from src.taipy.core._repository._abstract_converter import _AbstractConverter from src.taipy.core._repository._filesystem_repository import _FileSystemRepository from src.taipy.core._repository._sql_repository import _SQLRepository from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config class Base: __allow_unmapped__ = True Base = declarative_base(cls=Base) # type: ignore mapper_registry = registry() @dataclass class MockObj: def __init__(self, id: str, name: str, version: Optional[str] = None) -> None: self.id = id self.name = name if version: self._version = version else: self._version = _VersionManager._get_latest_version() @dataclass class MockModel(Base): # type: ignore __table__ = Table( \"mock_model\", mapper_registry.metadata, Column(\"id\", String(200), primary_key=True), Column(\"name\", String(200)), Column(\"version\", String(200)), ) id: str name: str version: str def to_dict(self): return dataclasses.asdict(self) @staticmethod def from_dict(data: Dict[str, Any]): return MockModel(id=data[\"id\"], name=data[\"name\"], version=data[\"version\"]) def _to_entity(self): return MockObj(id=self.id, name=self.name, version=self.version) @classmethod def _from_entity(cls, entity: MockObj): return MockModel(id=entity.id, name=entity.name, version=entity._version) def to_list(self): return [self.id, self.name, self.version] class MockConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, entity): return MockModel(id=entity.id, name=entity.name, version=entity._version) @classmethod def _model_to_entity(cls, model): return MockObj(id=model.id, name=model.name, version=model.version) class MockFSRepository(_FileSystemRepository): def 
__init__(self, **kwargs): super().__init__(**kwargs) @property def _storage_folder(self) -> pathlib.Path: return pathlib.Path(Config.core.storage_folder) # type: ignore class MockSQLRepository(_SQLRepository): def __init__(self, **kwargs): super().__init__(**kwargs) self.db.execute(str(CreateTable(MockModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect()))) "} {"text": "import pytest from taipy.config.config import Config def test_job_config(): assert Config.job_config.mode == \"development\" job_c = Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) assert job_c.mode == \"standalone\" assert job_c.max_nb_of_workers == 2 assert Config.job_config.mode == \"standalone\" assert Config.job_config.max_nb_of_workers == 2 Config.configure_job_executions(foo=\"bar\") assert Config.job_config.foo == \"bar\" def test_clean_config(): job_config = Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2, prop=\"foo\") assert Config.job_config is job_config job_config._clean() # Check if the instance before and after _clean() is the same assert Config.job_config is job_config assert job_config.mode == \"development\" assert job_config._config == {\"max_nb_of_workers\": 1} assert job_config.properties == {} "} {"text": "from taipy.config.config import Config def migrate_pickle_path(dn): dn.path = \"s1.pkl\" def migrate_skippable(task): task.skippable = True def test_migration_config(): assert Config.migration_functions.migration_fcts == {} data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes1, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == {\"1.0\": {\"data_nodes1\": migrate_pickle_path}} assert migration_cfg.properties == {} data_nodes2 = Config.configure_data_node(\"data_nodes2\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes2, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == { \"1.0\": {\"data_nodes1\": migrate_pickle_path, \"data_nodes2\": migrate_pickle_path} } def test_clean_config(): assert Config.migration_functions.migration_fcts == {} data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes1, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == {\"1.0\": {\"data_nodes1\": migrate_pickle_path}} assert migration_cfg.properties == {} migration_cfg._clean() assert migration_cfg.migration_fcts == {} assert migration_cfg._properties == {} "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from src.taipy.core.config import CoreSection from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.config.job_config import JobConfig from src.taipy.core.config.migration_config import MigrationConfig from src.taipy.core.config.scenario_config import ScenarioConfig from src.taipy.core.config.task_config import TaskConfig from taipy.config._config import _Config from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.global_app.global_app_config import GlobalAppConfig def _test_default_job_config(job_config: JobConfig): assert job_config is not None assert job_config.mode == JobConfig._DEFAULT_MODE def _test_default_core_section(core_section: CoreSection): assert core_section is not None assert core_section.mode == CoreSection._DEFAULT_MODE assert core_section.version_number == \"\" assert not core_section.force assert core_section.root_folder == \"./taipy/\" assert core_section.storage_folder == \".data/\" assert core_section.repository_type == \"filesystem\" assert core_section.repository_properties == {} assert len(core_section.properties) == 0 def _test_default_data_node_config(dn_config: DataNodeConfig): assert dn_config is not None assert dn_config.id is not None assert dn_config.storage_type == \"pickle\" assert dn_config.scope == Scope.SCENARIO assert dn_config.validity_period is None assert len(dn_config.properties) == 0 # type: ignore def _test_default_task_config(task_config: TaskConfig): assert task_config is not None assert task_config.id is not None assert task_config.input_configs == [] assert task_config.output_configs == [] assert task_config.function is None assert not task_config.skippable assert len(task_config.properties) == 0 # type: ignore def _test_default_scenario_config(scenario_config: ScenarioConfig): assert scenario_config is not None assert scenario_config.id is not None assert scenario_config.tasks == [] assert scenario_config.task_configs == [] assert scenario_config.additional_data_nodes == [] assert scenario_config.additional_data_node_configs == [] assert scenario_config.data_nodes == [] assert scenario_config.data_node_configs == [] assert scenario_config.sequences == {} assert len(scenario_config.properties) == 0 # type: ignore def _test_default_version_migration_config(version_migration_config: MigrationConfig): assert version_migration_config is not None assert version_migration_config.migration_fcts == {} assert len(version_migration_config.properties) == 0 # type: ignore def _test_default_global_app_config(global_config: GlobalAppConfig): assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_default_configuration(): default_config = Config._default_config assert default_config._global_config is not None _test_default_global_app_config(default_config._global_config) _test_default_global_app_config(Config.global_config) _test_default_global_app_config(GlobalAppConfig().default_config()) assert default_config._unique_sections is not None assert len(default_config._unique_sections) == 3 assert 
len(default_config._sections) == 3 _test_default_job_config(default_config._unique_sections[JobConfig.name]) _test_default_job_config(Config.job_config) _test_default_job_config(JobConfig().default_config()) _test_default_version_migration_config(default_config._unique_sections[MigrationConfig.name]) _test_default_version_migration_config(Config.migration_functions) _test_default_version_migration_config(MigrationConfig.default_config()) _test_default_core_section(default_config._unique_sections[CoreSection.name]) _test_default_core_section(Config.core) _test_default_core_section(CoreSection().default_config()) _test_default_data_node_config(default_config._sections[DataNodeConfig.name][_Config.DEFAULT_KEY]) _test_default_data_node_config(Config.data_nodes[_Config.DEFAULT_KEY]) _test_default_data_node_config(DataNodeConfig.default_config()) assert len(default_config._sections[DataNodeConfig.name]) == 1 assert len(Config.data_nodes) == 1 _test_default_task_config(default_config._sections[TaskConfig.name][_Config.DEFAULT_KEY]) _test_default_task_config(Config.tasks[_Config.DEFAULT_KEY]) _test_default_task_config(TaskConfig.default_config()) assert len(default_config._sections[TaskConfig.name]) == 1 assert len(Config.tasks) == 1 _test_default_scenario_config(default_config._sections[ScenarioConfig.name][_Config.DEFAULT_KEY]) Config.scenarios[_Config.DEFAULT_KEY] _test_default_scenario_config(Config.scenarios[_Config.DEFAULT_KEY]) _test_default_scenario_config(ScenarioConfig.default_config()) assert len(default_config._sections[ScenarioConfig.name]) == 1 assert len(Config.scenarios) == 1 "} {"text": "from unittest.mock import patch import pytest from src.taipy.core._init_version import _read_version from src.taipy.core.config.core_section import CoreSection from src.taipy.core.exceptions import ConfigCoreVersionMismatched from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile _MOCK_CORE_VERSION = \"3.1.1\" @pytest.fixture(scope=\"function\", autouse=True) def mock_core_version(): with patch(\"src.taipy.core.config.core_section._read_version\") as mock_read_version: mock_read_version.return_value = _MOCK_CORE_VERSION CoreSection._CURRENT_CORE_VERSION = _MOCK_CORE_VERSION Config.unique_sections[CoreSection.name] = CoreSection.default_config() Config._default_config._unique_sections[CoreSection.name] = CoreSection.default_config() yield @pytest.fixture(scope=\"session\", autouse=True) def reset_core_version(): yield CoreSection._CURRENT_CORE_VERSION = _read_version() class TestCoreVersionInCoreSectionConfig: major, minor, patch = _MOCK_CORE_VERSION.split(\".\") current_version = f\"{major}.{minor}.{patch}\" current_dev_version = f\"{major}.{minor}.{patch}.dev0\" compatible_future_version = f\"{major}.{minor}.{int(patch) + 1}\" compatible_future_dev_version = f\"{major}.{minor}.{int(patch) + 1}.dev0\" core_version_is_compatible = [ # Current version and dev version should be compatible (f\"{major}.{minor}.{patch}\", True), (f\"{major}.{minor}.{patch}.dev0\", True), # Future versions with same major and minor should be compatible (f\"{major}.{minor}.{int(patch) + 1}\", True), (f\"{major}.{minor}.{int(patch) + 1}.dev0\", True), # Past versions with same major and minor should be compatible (f\"{major}.{minor}.{int(patch) - 1}\", True), (f\"{major}.{minor}.{int(patch) - 1}.dev0\", True), # Future versions with different minor number should be incompatible (f\"{major}.{int(minor) + 1}.{patch}\", False), (f\"{major}.{int(minor) + 1}.{patch}.dev0\", 
False), # Past versions with different minor number should be incompatible (f\"{major}.{int(minor) - 1}.{patch}\", False), (f\"{major}.{int(minor) - 1}.{patch}.dev0\", False), ] @pytest.mark.parametrize(\"core_version, is_compatible\", core_version_is_compatible) def test_load_configuration_file(self, core_version, is_compatible): file_config = NamedTemporaryFile( f\"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{core_version}\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) if is_compatible: Config.load(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION else: with pytest.raises(ConfigCoreVersionMismatched): Config.load(file_config.filename) @pytest.mark.parametrize(\"core_version,is_compatible\", core_version_is_compatible) def test_override_configuration_file(self, core_version, is_compatible): file_config = NamedTemporaryFile( f\"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{core_version}\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) if is_compatible: Config.override(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION else: with pytest.raises(ConfigCoreVersionMismatched): Config.override(file_config.filename) def test_load_configuration_file_without_core_section(self): file_config = NamedTemporaryFile( \"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) Config.load(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION "} {"text": "from datetime import timedelta from taipy.config import Config from taipy.config.common.scope import Scope class TestConfig: def test_configure_csv_data_node(self): a, b, c, d, e, f = \"foo\", \"path\", True, \"numpy\", Scope.SCENARIO, timedelta(1) Config.configure_csv_data_node(a, b, c, d, e, f) assert len(Config.data_nodes) == 2 def test_configure_excel_data_node(self): a, b, c, d, e, f, g = \"foo\", \"path\", True, \"Sheet1\", \"numpy\", Scope.SCENARIO, timedelta(1) Config.configure_excel_data_node(a, b, c, d, e, f, g) assert len(Config.data_nodes) == 2 def test_configure_generic_data_node(self): a, b, c, d, e, f, g, h = \"foo\", print, print, tuple([]), tuple([]), Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_generic_data_node(a, b, c, d, e, f, g, property=h) assert len(Config.data_nodes) == 2 def test_configure_in_memory_data_node(self): a, b, c, d, e = \"foo\", 0, Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_in_memory_data_node(a, b, c, d, property=e) assert len(Config.data_nodes) == 2 def test_configure_pickle_data_node(self): a, b, c, d, e = \"foo\", 0, Scope.SCENARIO, timedelta(1), \"path\" Config.configure_pickle_data_node(a, b, c, d, path=e) assert len(Config.data_nodes) == 2 def test_configure_json_data_node(self): a, dp, ec, dc, sc, f, p = \"foo\", \"path\", 
\"ec\", \"dc\", Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_json_data_node(a, dp, ec, dc, sc, f, path=p) assert len(Config.data_nodes) == 2 def test_configure_sql_table_data_node(self): a, b, c, d, e, f, g, h, i, extra_args, exposed_type, scope, vp, k = ( \"foo\", \"user\", \"pwd\", \"db\", \"engine\", \"table_name\", \"port\", \"host\", \"driver\", {\"foo\": \"bar\"}, \"exposed_type\", Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_sql_table_data_node(a, b, c, d, e, f, g, h, i, extra_args, exposed_type, scope, vp, property=k) assert len(Config.data_nodes) == 2 def test_configure_sql_data_node(self): a, b, c, d, e, f, g, h, i, j, k, extra_args, exposed_type, scope, vp, k = ( \"foo\", \"user\", \"pwd\", \"db\", \"engine\", \"read_query\", \"write_query_builder\", \"append_query_builder\", \"port\", \"host\", \"driver\", {\"foo\": \"bar\"}, \"exposed_type\", Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_sql_data_node(a, b, c, d, e, f, g, h, i, j, k, extra_args, exposed_type, scope, vp, property=k) assert len(Config.data_nodes) == 2 def test_configure_mongo_data_node(self): a, b, c, d, e, f, g, h, extra_args, scope, vp, k = ( \"foo\", \"db_name\", \"collection_name\", None, \"user\", \"pwd\", \"host\", \"port\", {\"foo\": \"bar\"}, Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_mongo_collection_data_node(a, b, c, d, e, f, g, h, extra_args, scope, vp, property=k) assert len(Config.data_nodes) == 2 "} {"text": "from unittest.mock import patch from src.taipy.core import Core from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from taipy.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def test_core_section(): with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() assert not Config.core.force core.stop() with patch(\"sys.argv\", [\"prog\"]): Config.configure_core(mode=\"experiment\", version_number=\"test_num\", force=True) core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"test_num\" assert Config.core.force core.stop() toml_config = NamedTemporaryFile( content=\"\"\" [TAIPY] [CORE] mode = \"production\" version_number = \"test_num_2\" force = \"true:bool\" \"\"\" ) Config.load(toml_config.filename) with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() assert Config.core.mode == \"production\" assert Config.core.version_number == \"test_num_2\" assert Config.core.force core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"test_num_3\", \"--no-taipy-force\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"test_num_3\" assert not Config.core.force core.stop() def test_clean_config(): core_config = Config.configure_core(mode=\"experiment\", version_number=\"test_num\", force=True) assert Config.core is core_config core_config._clean() # Check if the instance before and after _clean() is the same assert Config.core is core_config assert core_config.mode == \"development\" assert core_config.version_number == \"\" assert core_config.force is False assert core_config.properties == {} "} {"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config import MigrationConfig from 
{"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config import MigrationConfig from taipy.config.config import Config def mock_func(): pass def test_check_if_entity_property_key_used_is_predefined(caplog): with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() assert caplog.text == \"\" core.stop() caplog.clear() Config.unique_sections[MigrationConfig.name]._properties[\"_entity_owner\"] = None with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() assert ( \"Properties of MigrationConfig `VERSION_MIGRATION` cannot have `_entity_owner` as its property.\" in caplog.text ) caplog.clear() Config.unique_sections[MigrationConfig.name]._properties[\"_entity_owner\"] = \"entity_owner\" with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"Properties of MigrationConfig `VERSION_MIGRATION` cannot have `_entity_owner` as its property.\" ' Current value of property `_entity_owner` is \"entity_owner\".' ) assert expected_error_message in caplog.text def test_check_valid_version(caplog): data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") Config.add_migration_function(\"2.0\", data_nodes1, mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() assert \"The target version for a migration function must be a production version.\" in caplog.text caplog.clear() Config.unblock_update() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() assert caplog.text == \"\" core.stop() def test_check_callable_function(caplog): data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") Config.add_migration_function(\"1.0\", data_nodes1, 1) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"The migration function of config `data_nodes1` from version 1.0 must be populated with\" \" Callable value. Current value of property `migration_fcts` is 1.\" ) assert expected_error_message in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.0\", data_nodes1, \"bar\") with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"The migration function of config `data_nodes1` from version 1.0 must be populated with\" ' Callable value. Current value of property `migration_fcts` is \"bar\".' ) assert expected_error_message in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.0\", data_nodes1, mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() def test_check_migration_from_productions_to_productions_exist(caplog): _VersionManager._set_production_version(\"1.0\", True) _VersionManager._set_production_version(\"1.1\", True) _VersionManager._set_production_version(\"1.2\", True) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() assert 'There is no migration function from production version \"1.0\" to version \"1.1\".' in caplog.text assert 'There is no migration function from production version \"1.1\" to version \"1.2\".'
in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.2\", \"data_nodes1\", mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() assert 'There is no migration function from production version \"1.0\" to version \"1.1\".' in caplog.text "} {"text": "import pytest from taipy.config.checker.issue_collector import IssueCollector from taipy.config.config import Config class TestConfigIdChecker: def test_check_standalone_mode(self, caplog): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") Config.configure_scenario(id=\"bar\", task_configs=[], additional_data_node_configs=[]) Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"bar\", task_configs=[]) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"`bar` is used as the config_id of multiple configurations ['DATA_NODE', 'SCENARIO']\" ' Current value of property `config_id` is \"bar\".' ) assert expected_error_message in caplog.text Config.configure_task(id=\"bar\", function=print) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"`bar` is used as the config_id of multiple configurations ['DATA_NODE', 'TASK', 'SCENARIO']\" ' Current value of property `config_id` is \"bar\".' ) assert expected_error_message in caplog.text Config.configure_task(id=\"foo\", function=print) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 2 expected_error_message = ( \"`foo` is used as the config_id of multiple configurations ['DATA_NODE', 'TASK']\" ' Current value of property `config_id` is \"foo\".' ) assert expected_error_message in caplog.text "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import pytest from src.taipy.core.config.job_config import JobConfig from taipy.config.checker.issue_collector import IssueCollector from taipy.config.config import Config class TestJobConfigChecker: def test_check_standalone_mode(self, caplog): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE, max_nb_of_workers=2) Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"DataNode `foo`: In-memory storage type can ONLY be used in development mode. Current\" ' value of property `storage_type` is \"in_memory\".' ) assert expected_error_message in caplog.text "} {"text": "from src.taipy.core.config.checkers._core_section_checker import _CoreSectionChecker from src.taipy.core.config.core_section import CoreSection from taipy.config import Config from taipy.config.checker.issue_collector import IssueCollector class TestCoreSectionChecker: _CoreSectionChecker._ACCEPTED_REPOSITORY_TYPES.update([\"mock_repo_type\"]) def test_check_valid_repository(self): Config.configure_core(repository_type=\"mock_repo_type\") Config._collector = IssueCollector() Config.check() assert len(Config._collector.warnings) == 0 Config.configure_core(repository_type=\"filesystem\") Config._collector = IssueCollector() Config.check() assert len(Config._collector.warnings) == 0 Config.configure_core(repository_type=\"sql\") Config._collector = IssueCollector() Config.check() assert len(Config._collector.warnings) == 0 def test_check_repository_type_value_wrong_str(self): Config.configure_core(repository_type=\"any\") Config._collector = IssueCollector() Config.check() assert len(Config._collector.warnings) == 1 assert Config._collector.warnings[0].field == CoreSection._REPOSITORY_TYPE_KEY assert Config._collector.warnings[0].value == \"any\" def test_check_repository_type_value_wrong_type(self): Config.configure_core(repository_type=1) Config._collector = IssueCollector() Config.check() assert len(Config._collector.warnings) == 1 assert Config._collector.warnings[0].field == CoreSection._REPOSITORY_TYPE_KEY assert Config._collector.warnings[0].value == 1 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from queue import SimpleQueue from src.taipy.core import taipy as tp from src.taipy.core.notification.core_event_consumer import CoreEventConsumerBase from src.taipy.core.notification.event import Event, EventEntityType, EventOperation from src.taipy.core.notification.notifier import Notifier from taipy.config import Config, Frequency from tests.core.utils import assert_true_after_time class AllCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.event_collected = 0 self.event_entity_type_collected: dict = {} self.event_operation_collected: dict = {} super().__init__(registration_id, queue) def process_event(self, event: Event): self.event_collected += 1 self.event_entity_type_collected[event.entity_type] = ( self.event_entity_type_collected.get(event.entity_type, 0) + 1 ) self.event_operation_collected[event.operation] = self.event_operation_collected.get(event.operation, 0) + 1 class ScenarioCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.scenario_event_collected = 0 self.event_operation_collected: dict = {} super().__init__(registration_id, queue) def process_event(self, event: Event): self.scenario_event_collected += 1 self.event_operation_collected[event.operation] = self.event_operation_collected.get(event.operation, 0) + 1 class TaskCreationCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.task_event_collected = 0 self.creation_event_operation_collected = 0 super().__init__(registration_id, queue) def process_event(self, event: Event): self.task_event_collected += 1 self.creation_event_operation_collected += 1 def test_core_event_consumer(): register_id_0, register_queue_0 = Notifier.register() all_evt_csumer_0 = AllCoreEventConsumerProcessor(register_id_0, register_queue_0) register_id_1, register_queue_1 = Notifier.register(entity_type=EventEntityType.SCENARIO) sc_evt_csumer_1 = ScenarioCoreEventConsumerProcessor(register_id_1, register_queue_1) register_id_2, register_queue_2 = Notifier.register( entity_type=EventEntityType.TASK, operation=EventOperation.CREATION ) task_creation_evt_csumer_2 = TaskCreationCoreEventConsumerProcessor(register_id_2, register_queue_2) all_evt_csumer_0.start() sc_evt_csumer_1.start() task_creation_evt_csumer_2.start() dn_config = Config.configure_data_node(\"dn_config\") task_config = Config.configure_task(\"task_config\", print, [dn_config]) scenario_config = Config.configure_scenario( \"scenario_config\", [task_config], frequency=Frequency.DAILY, sequences={\"seq\": [task_config]} ) # Create a scenario trigger 5 creation events scenario = tp.create_scenario(scenario_config) assert_true_after_time(lambda: all_evt_csumer_0.event_collected == 5, time=10) assert_true_after_time(lambda: len(all_evt_csumer_0.event_entity_type_collected) == 5, time=10) assert_true_after_time(lambda: all_evt_csumer_0.event_operation_collected[EventOperation.CREATION] == 5, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.scenario_event_collected == 1, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.event_operation_collected[EventOperation.CREATION] == 1, time=10) assert_true_after_time(lambda: len(sc_evt_csumer_1.event_operation_collected) == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.task_event_collected == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.creation_event_operation_collected == 1, 
{"text": " from queue import SimpleQueue from src.taipy.core.notification import EventEntityType, EventOperation from src.taipy.core.notification._registration import _Registration from src.taipy.core.notification._topic import _Topic def test_create_registration(): registration_0 = _Registration() assert isinstance(registration_0.registration_id, str) assert registration_0.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_0.queue, SimpleQueue) assert registration_0.queue.qsize() == 0 assert isinstance(registration_0.topic, _Topic) assert registration_0.topic.entity_type is None assert registration_0.topic.entity_id is None assert registration_0.topic.operation is None assert registration_0.topic.attribute_name is None registration_1 = _Registration( entity_type=EventEntityType.SCENARIO, entity_id=\"SCENARIO_scenario_id\", operation=EventOperation.CREATION ) assert isinstance(registration_1.registration_id, str) assert registration_1.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_1.queue, SimpleQueue) assert registration_1.queue.qsize() == 0 assert isinstance(registration_1.topic, _Topic) assert registration_1.topic.entity_type == EventEntityType.SCENARIO assert registration_1.topic.entity_id == \"SCENARIO_scenario_id\" assert registration_1.topic.operation == EventOperation.CREATION assert registration_1.topic.attribute_name is None registration_2 = _Registration( entity_type=EventEntityType.SEQUENCE, entity_id=\"SEQUENCE_scenario_id\", operation=EventOperation.UPDATE, attribute_name=\"tasks\", ) assert isinstance(registration_2.registration_id, str) assert registration_2.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_2.queue, SimpleQueue) assert registration_2.queue.qsize() == 0 assert isinstance(registration_2.topic, _Topic) assert registration_2.topic.entity_type == EventEntityType.SEQUENCE assert registration_2.topic.entity_id == \"SEQUENCE_scenario_id\" assert registration_2.topic.operation == EventOperation.UPDATE assert registration_2.topic.attribute_name == \"tasks\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import pytest from src.taipy.core.cycle._cycle_fs_repository import _CycleFSRepository from src.taipy.core.cycle._cycle_sql_repository import _CycleSQLRepository from src.taipy.core.cycle.cycle import Cycle, CycleId from src.taipy.core.exceptions import ModelNotFound class TestCycleRepositories: @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_save_and_load(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) obj = repository._load(cycle.id) assert isinstance(obj, Cycle) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_exists(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) assert repository._exists(cycle.id) assert not repository._exists(\"not-existed-cycle\") @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_load_all(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_load_all_with_filters(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") cycle._name = f\"cycle-{i}\" repository._save(cycle) objs = repository._load_all(filters=[{\"id\": \"cycle-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_CycleSQLRepository]) def test_delete(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) repository._delete(cycle.id) with pytest.raises(ModelNotFound): repository._load(cycle.id) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_delete_all(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_delete_many(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_search(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") cycle.name = f\"cycle-{i}\" repository._save(cycle) assert len(repository._load_all()) == 10 objs = repository._search(\"name\", \"cycle-2\") assert len(objs) == 1 assert isinstance(objs[0], Cycle) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_export(self, tmpdir, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) repository._export(cycle.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _CycleFSRepository else os.path.join(tmpdir.strpath, \"cycle\") assert os.path.exists(os.path.join(dir_path, f\"{cycle.id}.json\")) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import dataclasses import pathlib from dataclasses import dataclass from typing import Any, Dict, Iterable, List, Optional, Union from src.taipy.core._manager._manager import _Manager from src.taipy.core._repository._abstract_converter import _AbstractConverter from src.taipy.core._repository._abstract_repository import _AbstractRepository from src.taipy.core._repository._filesystem_repository import _FileSystemRepository from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config @dataclass class MockModel: id: str name: str version: str def to_dict(self): return dataclasses.asdict(self) @staticmethod def from_dict(data: Dict[str, Any]): return MockModel(id=data[\"id\"], name=data[\"name\"], version=data[\"version\"]) @dataclass class MockEntity: def __init__(self, id: str, name: str, version: Optional[str] = None) -> None: self.id = id self.name = name if version: self._version = version else: self._version = _VersionManager._get_latest_version() class MockConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, entity: MockEntity) -> MockModel: return MockModel(id=entity.id, name=entity.name, version=entity._version) @classmethod def _model_to_entity(cls, model: MockModel) -> MockEntity: return MockEntity(id=model.id, name=model.name, version=model.version) class MockRepository(_AbstractRepository): # type: ignore def __init__(self, **kwargs): self.repo = _FileSystemRepository(**kwargs, converter=MockConverter) def _to_model(self, obj: MockEntity): return MockModel(obj.id, obj.name, obj._version) def _from_model(self, model: MockModel): return MockEntity(model.id, model.name, model.version) def _load(self, entity_id: str) -> MockEntity: return self.repo._load(entity_id) def _load_all(self, filters: Optional[List[Dict]] = None) -> List[MockEntity]: return self.repo._load_all(filters) def _save(self, entity: MockEntity): return self.repo._save(entity) def _exists(self, entity_id: str) -> bool: return self.repo._exists(entity_id) def _delete(self, entity_id: str): return self.repo._delete(entity_id) def _delete_all(self): return self.repo._delete_all() def _delete_many(self, ids: Iterable[str]): return self.repo._delete_many(ids) def _delete_by(self, attribute: str, value: str): return self.repo._delete_by(attribute, value) def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None) -> List[MockEntity]: return self.repo._search(attribute, value, filters) def _export(self, entity_id: str, folder_path: Union[str, pathlib.Path]): return self.repo._export(entity_id, folder_path) @property def _storage_folder(self) -> pathlib.Path: return pathlib.Path(Config.core.storage_folder) # type: ignore class MockManager(_Manager[MockEntity]): _ENTITY_NAME = MockEntity.__name__ _repository = MockRepository(model_type=MockModel, dir_name=\"foo\") class TestManager: def test_save_and_fetch_model(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) fetched_model = MockManager._get(m.id) assert m == fetched_model def test_exists(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) assert MockManager._exists(m.id) def test_get(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) assert MockManager._get(m.id) == m def test_get_all(self): MockManager._delete_all() objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) _objs = MockManager._get_all() assert len(_objs) == 5 def test_delete(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) MockManager._delete(m.id) assert MockManager._get(m.id) is None def test_delete_all(self): objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) MockManager._delete_all() assert MockManager._get_all() == [] def test_delete_many(self): objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) MockManager._delete_many([\"uuid-0\", \"uuid-1\"]) assert len(MockManager._get_all()) == 3 def test_is_editable(self): m = MockEntity(\"uuid\", \"Foo\") MockManager._set(m) assert MockManager._is_editable(m) def test_is_readable(self): m = MockEntity(\"uuid\", \"Foo\") MockManager._set(m) assert MockManager._is_readable(m) "}
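{"text": "# Hedged sketch, illustration only: feeding a TOML configuration to Config.load through the NamedTemporaryFile helper defined elsewhere in this corpus, mirroring what the core-section tests do. The section values are made-up examples.\nfrom taipy.config.config import Config\nfrom tests.core.utils.named_temporary_file import NamedTemporaryFile\n\ntoml_config = NamedTemporaryFile(\n    content=\"\"\"\n[TAIPY]\n\n[CORE]\nmode = \"experiment\"\nversion_number = \"0.1\"\n\"\"\"\n)\n# Config.load replaces the current configuration with the file's content.\nConfig.load(toml_config.filename)"}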
MockManager._exists(m.id) def test_get(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) assert MockManager._get(m.id) == m def test_get_all(self): MockManager._delete_all() objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) _objs = MockManager._get_all() assert len(_objs) == 5 def test_delete(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) MockManager._delete(m.id) assert MockManager._get(m.id) is None def test_delete_all(self): objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) MockManager._delete_all() assert MockManager._get_all() == [] def test_delete_many(self): objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) MockManager._delete_many([\"uuid-0\", \"uuid-1\"]) assert len(MockManager._get_all()) == 3 def test_is_editable(self): m = MockEntity(\"uuid\", \"Foo\") MockManager._set(m) assert MockManager._is_editable(m) def test_is_readable(self): m = MockEntity(\"uuid\", \"Foo\") MockManager._set(m) assert MockManager._is_readable(m) "} {"text": " class NotifyMock: \"\"\" A shared class for testing notifications on job status at the sequence level and the scenario level. \"entity\" can be understood as either \"scenario\" or \"sequence\". \"\"\" def __init__(self, entity): self.scenario = entity self.nb_called = 0 self.__name__ = \"NotifyMock\" def __call__(self, entity, job): assert entity == self.scenario if self.nb_called == 0: assert job.is_pending() if self.nb_called == 1: assert job.is_running() if self.nb_called == 2: assert job.is_finished() self.nb_called += 1 def assert_called_3_times(self): assert self.nb_called == 3 def assert_not_called(self): assert self.nb_called == 0 def reset(self): self.nb_called = 0 "} {"text": " def assert_true_after_time(assertion, msg=None, time=120): from datetime import datetime from time import sleep start = datetime.now() while (datetime.now() - start).seconds < time: sleep(1) # Limit CPU usage try: if assertion(): return except BaseException as e: print(\"Raised:\", e) continue if msg: print(msg) assert assertion() "} {"text": "import os import tempfile class NamedTemporaryFile: def __init__(self, content=None): with tempfile.NamedTemporaryFile(\"w\", delete=False) as fd: if content: fd.write(content) self.filename = fd.name def read(self): with open(self.filename, \"r\") as fp: return fp.read() def __del__(self): os.unlink(self.filename) "} {"text": "from unittest import mock import pytest from src.taipy.core import taipy from src.taipy.core._entity._labeled import _Labeled from taipy.config import Config, Frequency, Scope class MockOwner: label = \"owner_label\" def get_label(self): return self.label def test_get_label(): labeled_entity = _Labeled() with pytest.raises(NotImplementedError): labeled_entity.get_label() with pytest.raises(NotImplementedError): labeled_entity.get_simple_label() with pytest.raises(AttributeError): labeled_entity._get_label() with pytest.raises(AttributeError): labeled_entity._get_simple_label() labeled_entity.id = \"id\" assert labeled_entity._get_label() == \"id\" assert labeled_entity._get_simple_label() == \"id\" labeled_entity.config_id = \"the config id\" assert labeled_entity._get_label() == \"the config id\" assert labeled_entity._get_simple_label() == \"the config id\" labeled_entity._properties = {\"name\": \"a name\"} assert labeled_entity._get_label() == \"a name\" assert
labeled_entity._get_simple_label() == \"a name\" labeled_entity.owner_id = \"owner_id\" with mock.patch(\"src.taipy.core.get\") as get_mck: get_mck.return_value = MockOwner() assert labeled_entity._get_label() == \"owner_label > a name\" assert labeled_entity._get_simple_label() == \"a name\" labeled_entity._properties[\"label\"] = \"a wonderful label\" assert labeled_entity._get_label() == \"a wonderful label\" assert labeled_entity._get_simple_label() == \"a wonderful label\" def mult(n1, n2): return n1 * n2 def test_get_label_complex_case(): dn1_cfg = Config.configure_data_node(\"dn1\", scope=Scope.GLOBAL) dn2_cfg = Config.configure_data_node(\"dn2\", scope=Scope.CYCLE) dn3_cfg = Config.configure_data_node(\"dn3\", scope=Scope.CYCLE) dn4_cfg = Config.configure_data_node(\"dn4\", scope=Scope.SCENARIO) dn5_cfg = Config.configure_data_node(\"dn5\", scope=Scope.SCENARIO) tA_cfg = Config.configure_task(\"t_A_C\", mult, [dn1_cfg, dn2_cfg], dn3_cfg) tB_cfg = Config.configure_task(\"t_B_S\", mult, [dn3_cfg, dn4_cfg], dn5_cfg) scenario_cfg = Config.configure_scenario(\"scenario_cfg\", [tA_cfg, tB_cfg], [], Frequency.DAILY) scenario_cfg.add_sequences( { \"sequence_C\": [tA_cfg], \"sequence_S\": [tA_cfg, tB_cfg], } ) scenario = taipy.create_scenario(scenario_cfg, name=\"My Name\") cycle = scenario.cycle cycle.name = \"Today\" sequence_C = scenario.sequence_C sequence_S = scenario.sequence_S tA = scenario.t_A_C tB = scenario.t_B_S dn1 = scenario.dn1 dn2 = scenario.dn2 dn3 = scenario.dn3 dn4 = scenario.dn4 dn5 = scenario.dn5 assert cycle.get_label() == scenario.cycle.name assert cycle.get_simple_label() == scenario.cycle.name assert scenario.get_label() == \"Today > My Name\" assert scenario.get_simple_label() == \"My Name\" assert sequence_C.get_label() == \"Today > My Name > sequence_C\" assert sequence_C.get_simple_label() == \"sequence_C\" assert sequence_S.get_label() == \"Today > My Name > sequence_S\" assert sequence_S.get_simple_label() == \"sequence_S\" assert tA.get_label() == \"Today > t_A_C\" assert tA.get_simple_label() == \"t_A_C\" assert tB.get_label() == \"Today > My Name > t_B_S\" assert tB.get_simple_label() == \"t_B_S\" assert dn1.get_label() == \"dn1\" assert dn1.get_simple_label() == \"dn1\" assert dn2.get_label() == \"Today > dn2\" assert dn2.get_simple_label() == \"dn2\" assert dn3.get_label() == \"Today > dn3\" assert dn3.get_simple_label() == \"dn3\" assert dn4.get_label() == \"Today > My Name > dn4\" assert dn4.get_simple_label() == \"dn4\" assert dn5.get_label() == \"Today > My Name > dn5\" assert dn5.get_simple_label() == \"dn5\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from src.taipy.core._entity._entity_ids import _EntityIds class TestEntityIds: def test_add_two_entity_ids(self): entity_ids_1 = _EntityIds() entity_ids_2 = _EntityIds() entity_ids_1_address = id(entity_ids_1) entity_ids_1.data_node_ids.update([\"data_node_id_1\", \"data_node_id_2\"]) entity_ids_1.task_ids.update([\"task_id_1\", \"task_id_2\"]) entity_ids_1.job_ids.update([\"job_id_1\", \"job_id_2\"]) entity_ids_1.sequence_ids.update([\"sequence_id_1\", \"sequence_id_2\"]) entity_ids_1.scenario_ids.update([\"scenario_id_1\", \"scenario_id_2\"]) entity_ids_1.cycle_ids.update([\"cycle_id_1\", \"cycle_id_2\"]) entity_ids_2.data_node_ids.update([\"data_node_id_2\", \"data_node_id_3\"]) entity_ids_2.task_ids.update([\"task_id_2\", \"task_id_3\"]) entity_ids_2.job_ids.update([\"job_id_2\", \"job_id_3\"]) entity_ids_2.sequence_ids.update([\"sequence_id_2\", \"sequence_id_3\"]) entity_ids_2.scenario_ids.update([\"scenario_id_2\", \"scenario_id_3\"]) entity_ids_2.cycle_ids.update([\"cycle_id_2\", \"cycle_id_3\"]) entity_ids_1 += entity_ids_2 # += operator should not change the address of entity_ids_1 assert id(entity_ids_1) == entity_ids_1_address assert entity_ids_1.data_node_ids == {\"data_node_id_1\", \"data_node_id_2\", \"data_node_id_3\"} assert entity_ids_1.task_ids == {\"task_id_1\", \"task_id_2\", \"task_id_3\"} assert entity_ids_1.job_ids == {\"job_id_1\", \"job_id_2\", \"job_id_3\"} assert entity_ids_1.sequence_ids == {\"sequence_id_1\", \"sequence_id_2\", \"sequence_id_3\"} assert entity_ids_1.scenario_ids == {\"scenario_id_1\", \"scenario_id_2\", \"scenario_id_3\"} assert entity_ids_1.cycle_ids == {\"cycle_id_1\", \"cycle_id_2\", \"cycle_id_3\"} "} {"text": "import pytest from src.taipy.core.common._utils import _retry_read_entity from taipy.config import Config def test_retry_decorator(mocker): func = mocker.Mock(side_effect=Exception()) @_retry_read_entity((Exception,)) def decorated_func(): func() with pytest.raises(Exception): decorated_func() # Called once in the normal flow and no retry # The Config.core.read_entity_retry is set to 0 at conftest.py assert Config.core.read_entity_retry == 0 assert func.call_count == 1 func.reset_mock() Config.core.read_entity_retry = 3 with pytest.raises(Exception): decorated_func() # Called once in the normal flow and 3 more times on the retry flow assert func.call_count == 4 def test_retry_decorator_exception_not_in_list(mocker): func = mocker.Mock(side_effect=KeyError()) Config.core.read_entity_retry = 3 @_retry_read_entity((Exception,)) def decorated_func(): func() with pytest.raises(KeyError): decorated_func() # Called only on the first time and not trigger retry because KeyError is not on the exceptions list assert func.called == 1 "} {"text": "from src.taipy.core.common.warn_if_inputs_not_ready import _warn_if_inputs_not_ready from src.taipy.core.data._data_manager_factory import _DataManagerFactory from taipy.config import Config def test_warn_inputs_all_not_ready(caplog): one = Config.configure_data_node(\"one\") two = Config.configure_data_node(\"two\") three = Config.configure_data_node(\"three\") data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}).values() _warn_if_inputs_not_ready(data_nodes) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. 
Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in data_nodes ] assert all([expected_output in stdout for expected_output in expected_outputs]) def test_warn_inputs_all_ready(caplog): one = Config.configure_data_node(\"one\", default_data=1) two = Config.configure_data_node(\"two\", default_data=2) three = Config.configure_data_node(\"three\", default_data=3) data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}).values() _warn_if_inputs_not_ready(data_nodes) stdout = caplog.text not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in data_nodes ] assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_warn_inputs_one_ready(caplog): one = Config.configure_data_node(\"one\", default_data=1) two = Config.configure_data_node(\"two\") three = Config.configure_data_node(\"three\") data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}) _warn_if_inputs_not_ready(data_nodes.values()) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [data_nodes[two], data_nodes[three]] ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [data_nodes[one]] ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_submit_task_with_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") excel_dn_cfg = Config.configure_excel_data_node(\"wrong_excel_file_path\", default_path=\"wrong_path.xlsx\") json_dn_cfg = Config.configure_json_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_path=\"wrong_path.pickle\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") input_dn_cfgs = [csv_dn_cfg, excel_dn_cfg, json_dn_cfg, pickle_dn_cfg, parquet_dn_cfg] dn_manager = _DataManagerFactory._build_manager() dns = [dn_manager._bulk_get_or_create([input_dn_cfg])[input_dn_cfg] for input_dn_cfg in input_dn_cfgs] _warn_if_inputs_not_ready(dns) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in dns ] assert all([expected_output in stdout for expected_output in expected_outputs]) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import pytest from src.taipy.core.exceptions import ModelNotFound from src.taipy.core.scenario._scenario_fs_repository import _ScenarioFSRepository from src.taipy.core.scenario._scenario_sql_repository import _ScenarioSQLRepository from src.taipy.core.scenario.scenario import Scenario, ScenarioId class TestScenarioFSRepository: @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_save_and_load(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) obj = repository._load(scenario.id) assert isinstance(obj, Scenario) @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_exists(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) assert repository._exists(scenario.id) assert not repository._exists(\"not-existed-scenario\") @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_load_all(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_load_all_with_filters(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) objs = repository._load_all(filters=[{\"id\": \"scenario-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) repository._delete(scenario.id) with pytest.raises(ModelNotFound): repository._load(scenario.id) @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_all(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_many(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_by(self, scenario, repo, init_sql_repo): repository = repo() # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") scenario._version = f\"{(i+1) // 5}.0\" repository._save(scenario) objs = repository._load_all() 
assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_search(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) assert len(repository._load_all()) == 10 objs = repository._search(\"id\", \"scenario-2\") assert len(objs) == 1 assert isinstance(objs[0], Scenario) objs = repository._search(\"id\", \"scenario-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], Scenario) assert repository._search(\"id\", \"scenario-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_export(self, tmpdir, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) repository._export(scenario.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _ScenarioFSRepository else os.path.join(tmpdir.strpath, \"scenario\") assert os.path.exists(os.path.join(dir_path, f\"{scenario.id}.json\")) "} {"text": "from src.taipy.core._version._version import _Version from taipy.config.config import Config def test_create_version(): v = _Version(\"foo\", config=Config.configure_data_node(\"dn\")) assert v.id == \"foo\" assert v.config is not None "} {"text": "import os import pytest from src.taipy.core._version._version import _Version from src.taipy.core._version._version_fs_repository import _VersionFSRepository from src.taipy.core._version._version_sql_repository import _VersionSQLRepository from src.taipy.core.exceptions import ModelNotFound class TestVersionFSRepository: @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_save_and_load(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) obj = repository._load(_version.id) assert isinstance(obj, _Version) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_exists(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) assert repository._exists(_version.id) assert not repository._exists(\"not-existed-version\") @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_load_all(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_load_all_with_filters(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" _version.name = f\"_version_{i}\" repository._save(_version) objs = repository._load_all(filters=[{\"id\": \"_version_2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) repository._delete(_version.id) with pytest.raises(ModelNotFound): repository._load(_version.id) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete_all(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) assert len(repository._load_all()) == 10 
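# All ten versions are persisted at this point; _delete_all() below must leave the repository empty for both the FS and the SQL backends.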
repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete_many(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_search(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" _version.name = f\"_version_{i}\" repository._save(_version) assert len(repository._load_all()) == 10 objs = repository._search(\"id\", \"_version_2\") assert len(objs) == 1 assert isinstance(objs[0], _Version) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_export(self, tmpdir, _version, repo, init_sql_repo): repository = repo() repository._save(_version) repository._export(_version.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _VersionFSRepository else os.path.join(tmpdir.strpath, \"version\") assert os.path.exists(os.path.join(dir_path, f\"{_version.id}.json\")) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pytest from src.taipy.core._version._version import _Version from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config def test_save_and_get_version_entity(tmpdir): _VersionManager._repository.base_path = tmpdir assert len(_VersionManager._get_all()) == 0 version = _Version(id=\"foo\", config=Config._applied_config) _VersionManager._get_or_create(id=\"foo\", force=False) version_1 = _VersionManager._get(version.id) assert version_1.id == version.id assert Config._serializer._str(version_1.config) == Config._serializer._str(version.config) assert len(_VersionManager._get_all()) == 1 assert _VersionManager._get(version.id) == version "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from unittest import mock import pytest from src.taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _JobDispatcher, _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.exceptions.exceptions import OrchestratorNotBuilt from taipy.config import Config def test_build_orchestrator(): _OrchestratorFactory._orchestrator = None _OrchestratorFactory._dispatcher = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is None orchestrator = _OrchestratorFactory._build_orchestrator() assert orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator == _Orchestrator dispatcher = _OrchestratorFactory._build_dispatcher() assert isinstance(dispatcher, _JobDispatcher) assert isinstance(_OrchestratorFactory._dispatcher, _JobDispatcher) _OrchestratorFactory._orchestrator = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is not None with mock.patch( \"src.taipy.core._orchestrator._orchestrator_factory._OrchestratorFactory._build_dispatcher\" ) as build_dispatcher, mock.patch( \"src.taipy.core._orchestrator._orchestrator._Orchestrator.initialize\" ) as initialize: orchestrator = _OrchestratorFactory._build_orchestrator() assert orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator == _Orchestrator build_dispatcher.assert_not_called() initialize.assert_called_once() def test_build_development_dispatcher(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._orchestrator = None _OrchestratorFactory._dispatcher = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is None with pytest.raises(OrchestratorNotBuilt): _OrchestratorFactory._build_dispatcher() _OrchestratorFactory._build_orchestrator() assert _OrchestratorFactory._orchestrator is not None assert _OrchestratorFactory._dispatcher is None _OrchestratorFactory._build_dispatcher() assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) def test_build_standalone_dispatcher(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher) assert not isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 2 _OrchestratorFactory._dispatcher._nb_available_workers = 1 _OrchestratorFactory._build_dispatcher(force_restart=False) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 1 _OrchestratorFactory._build_dispatcher(force_restart=True) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 2 "} {"text": "import multiprocessing from concurrent.futures import ProcessPoolExecutor from functools import partial from unittest import mock from unittest.mock import MagicMock from pytest import raises from src.taipy.core import DataNodeId, JobId, TaskId from src.taipy.core._orchestrator._dispatcher._development_job_dispatcher import _DevelopmentJobDispatcher from 
src.taipy.core._orchestrator._dispatcher._standalone_job_dispatcher import _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job.job import Job from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.task.task import Task from taipy.config.config import Config from tests.core.utils import assert_true_after_time def execute(lock): with lock: ... return None def _error(): raise RuntimeError(\"Something bad has happened\") def test_build_development_job_dispatcher(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() dispatcher = _OrchestratorFactory._dispatcher assert isinstance(dispatcher, _DevelopmentJobDispatcher) assert dispatcher._nb_available_workers == 1 with raises(NotImplementedError): assert dispatcher.start() assert dispatcher.is_running() with raises(NotImplementedError): dispatcher.stop() def test_build_standalone_job_dispatcher(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() dispatcher = _OrchestratorFactory._dispatcher assert not isinstance(dispatcher, _DevelopmentJobDispatcher) assert isinstance(dispatcher, _StandaloneJobDispatcher) assert isinstance(dispatcher._executor, ProcessPoolExecutor) assert dispatcher._nb_available_workers == 2 assert_true_after_time(dispatcher.is_running) dispatcher.stop() dispatcher.join() assert_true_after_time(lambda: not dispatcher.is_running()) def test_can_execute_2_workers(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock = m.Lock() task_id = TaskId(\"task_id1\") output = list(_DataManager._bulk_get_or_create([Config.configure_data_node(\"input1\", default_data=21)]).values()) _OrchestratorFactory._build_dispatcher() task = Task( config_id=\"name\", properties={}, input=[], function=partial(execute, lock), output=output, id=task_id, ) job_id = JobId(\"id1\") job = Job(job_id, task, \"submit_id\", task.id) dispatcher = _StandaloneJobDispatcher(_OrchestratorFactory._orchestrator) with lock: assert dispatcher._can_execute() dispatcher._dispatch(job) assert dispatcher._can_execute() dispatcher._dispatch(job) assert not dispatcher._can_execute() assert_true_after_time(lambda: dispatcher._can_execute()) def test_can_execute_synchronous(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") task = Task(config_id=\"name\", properties={}, input=[], function=print, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX) job_id = JobId(\"id1\") job = Job(job_id, task, submission.id, task.id) dispatcher = _OrchestratorFactory._dispatcher assert dispatcher._can_execute() dispatcher._dispatch(job) assert dispatcher._can_execute() def test_exception_in_user_function(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") job_id = JobId(\"id1\") task = Task(config_id=\"name\", properties={}, input=[], function=_error, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX) job = Job(job_id, task, submission.id, task.id) 
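# Dispatching this job runs _error() synchronously (development mode); the RuntimeError it raises should mark the job as failed and be captured in job.stacktrace, as asserted below.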
dispatcher = _OrchestratorFactory._dispatcher dispatcher._dispatch(job) assert job.is_failed() assert 'RuntimeError(\"Something bad has happened\")' in str(job.stacktrace[0]) def test_exception_in_writing_data(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") job_id = JobId(\"id1\") output = MagicMock() output.id = DataNodeId(\"output_id\") output.config_id = \"my_raising_datanode\" output._is_in_cache = False output.write.side_effect = ValueError() task = Task(config_id=\"name\", properties={}, input=[], function=print, output=[output], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX) job = Job(job_id, task, submission.id, task.id) dispatcher = _OrchestratorFactory._dispatcher with mock.patch(\"src.taipy.core.data._data_manager._DataManager._get\") as get: get.return_value = output dispatcher._dispatch(job) assert job.is_failed() assert \"node\" in job.stacktrace[0] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
"} {"text": "from src.taipy.core.data import InMemoryDataNode from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.task._task_model import _TaskModel from taipy.config.common.scope import Scope def test_none_properties_attribute_compatible(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"parent_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"output_id\"], \"version\": \"latest\", \"skippable\": False, } ) assert len(model.properties) == 0 def test_skippable_compatibility_with_non_existing_output(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"output_id\"], \"version\": \"latest\", \"skippable\": False, } ) assert not model.skippable def test_skippable_compatibility_with_no_output(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [], \"version\": \"latest\", \"skippable\": False, } ) assert not model.skippable def test_skippable_compatibility_with_one_output(): manager = _DataManagerFactory._build_manager() manager._set(InMemoryDataNode(\"cfg_id\", Scope.SCENARIO, id=\"dn_id\")) model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"dn_id\"], \"version\": \"latest\", \"skippable\": True, } ) assert model.skippable def test_skippable_compatibility_with_many_outputs(): manager = _DataManagerFactory._build_manager() manager._set(InMemoryDataNode(\"cfg_id\", Scope.SCENARIO, id=\"dn_id\")) manager._set(InMemoryDataNode(\"cfg_id_2\", Scope.SCENARIO, id=\"dn_2_id\")) model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"dn_id\", \"dn_2_id\"], \"version\": \"latest\", \"skippable\": True, } ) assert model.skippable "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from src.taipy.core.sequence._sequence_converter import _SequenceConverter from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.task.task import Task def test_entity_to_model(sequence): sequence_model_1 = _SequenceConverter._entity_to_model(sequence) expected_sequence_model_1 = { \"id\": \"sequence_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id_1\", \"parent_id_2\"], \"properties\": {}, \"tasks\": [], \"subscribers\": [], \"version\": \"random_version_number\", } sequence_model_1[\"parent_ids\"] = sorted(sequence_model_1[\"parent_ids\"]) assert sequence_model_1 == expected_sequence_model_1 task_1 = Task(\"task_1\", {}, print) task_2 = Task(\"task_2\", {}, print) sequence_2 = Sequence( {\"name\": \"sequence_2\"}, [task_1, task_2], \"SEQUENCE_sq_1_SCENARIO_sc\", \"SCENARIO_sc\", [\"SCENARIO_sc\"], [], \"random_version\", ) sequence_model_2 = _SequenceConverter._entity_to_model(sequence_2) expected_sequence_model_2 = { \"id\": \"SEQUENCE_sq_1_SCENARIO_sc\", \"owner_id\": \"SCENARIO_sc\", \"parent_ids\": [\"SCENARIO_sc\"], \"properties\": {\"name\": \"sequence_2\"}, \"tasks\": [task_1.id, task_2.id], \"subscribers\": [], \"version\": \"random_version\", } assert sequence_model_2 == expected_sequence_model_2 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from src.taipy.core.data.data_node import DataNode from src.taipy.core.data.in_memory import InMemoryDataNode from taipy.config.common.scope import Scope class FakeDataNode(InMemoryDataNode): read_has_been_called = 0 write_has_been_called = 0 def __init__(self, config_id, **kwargs): scope = kwargs.pop(\"scope\", Scope.SCENARIO) super().__init__(config_id=config_id, scope=scope, **kwargs) def _read(self, query=None): self.read_has_been_called += 1 def _write(self, data): self.write_has_been_called += 1 @classmethod def storage_type(cls) -> str: return \"fake_inmemory\" write = DataNode.write # Make sure that the writing behavior comes from DataNode class FakeDataframeDataNode(DataNode): COLUMN_NAME_1 = \"a\" COLUMN_NAME_2 = \"b\" def __init__(self, config_id, default_data_frame, **kwargs): super().__init__(config_id, **kwargs) self.data = default_data_frame def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_df_dn\" class FakeNumpyarrayDataNode(DataNode): def __init__(self, config_id, default_array, **kwargs): super().__init__(config_id, **kwargs) self.data = default_array def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_np_dn\" class FakeListDataNode(DataNode): class Row: def __init__(self, value): self.value = value def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = [self.Row(i) for i in range(10)] def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_list_dn\" class CustomClass: def __init__(self, a, b): self.a = a self.b = b class FakeCustomDataNode(DataNode): def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = [CustomClass(i, i * 2) for i in range(10)] def _read(self): return self.data class FakeMultiSheetExcelDataFrameDataNode(DataNode): def __init__(self, config_id, default_data_frame, **kwargs): super().__init__(config_id, **kwargs) self.data = { \"Sheet1\": default_data_frame, \"Sheet2\": default_data_frame, } def _read(self): return self.data class FakeMultiSheetExcelCustomDataNode(DataNode): def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = { \"Sheet1\": [CustomClass(i, i * 2) for i in range(10)], \"Sheet2\": [CustomClass(i, i * 2) for i in range(10)], } def _read(self): return self.data "} {"text": "import os import pytest from src.taipy.core.data._data_fs_repository import _DataFSRepository from src.taipy.core.data._data_sql_repository import _DataSQLRepository from src.taipy.core.data.data_node import DataNode, DataNodeId from src.taipy.core.exceptions import ModelNotFound class TestDataNodeRepository: @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_save_and_load(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) obj = repository._load(data_node.id) assert isinstance(obj, DataNode) @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_exists(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) assert repository._exists(data_node.id) assert not repository._exists(\"not-existed-data-node\") @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_load_all(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) data_nodes = repository._load_all() assert len(data_nodes) == 10 
@pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_load_all_with_filters(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node.owner_id = f\"task-{i}\" repository._save(data_node) objs = repository._load_all(filters=[{\"owner_id\": \"task-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) repository._delete(data_node.id) with pytest.raises(ModelNotFound): repository._load(data_node.id) @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_all(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_many(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_by(self, data_node, repo, init_sql_repo): repository = repo() # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node._version = f\"{i // 5 + 1}.0\" repository._save(data_node) objs = repository._load_all() assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_search(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node.owner_id = f\"task-{i}\" repository._save(data_node) assert len(repository._load_all()) == 10 objs = repository._search(\"owner_id\", \"task-2\") assert len(objs) == 1 assert isinstance(objs[0], DataNode) objs = repository._search(\"owner_id\", \"task-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], DataNode) assert repository._search(\"owner_id\", \"task-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_export(self, tmpdir, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) repository._export(data_node.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _DataFSRepository else os.path.join(tmpdir.strpath, \"data_node\") assert os.path.exists(os.path.join(dir_path, f\"{data_node.id}.json\")) "} {"text": "import pytest from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import NoData from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId class TestInMemoryDataNodeEntity: def test_create(self): dn = InMemoryDataNode( \"foobar_bazy\", Scope.SCENARIO, DataNodeId(\"id_uio\"), \"owner_id\", properties={\"default_data\": \"In memory Data Node\",
\"name\": \"my name\"}, ) assert isinstance(dn, InMemoryDataNode) assert dn.storage_type() == \"in_memory\" assert dn.config_id == \"foobar_bazy\" assert dn.scope == Scope.SCENARIO assert dn.id == \"id_uio\" assert dn.name == \"my name\" assert dn.owner_id == \"owner_id\" assert dn.last_edit_date is not None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.read() == \"In memory Data Node\" dn_2 = InMemoryDataNode(\"foo\", Scope.SCENARIO) assert dn_2.last_edit_date is None assert not dn_2.is_ready_for_reading with pytest.raises(InvalidConfigurationId): InMemoryDataNode(\"foo bar\", Scope.SCENARIO, DataNodeId(\"dn_id\")) def test_get_user_properties(self): dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": 1, \"foo\": \"bar\"}) assert dn._get_user_properties() == {\"foo\": \"bar\"} def test_read_and_write(self): no_data_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(NoData): assert no_data_dn.read() is None no_data_dn.read_or_raise() in_mem_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": \"bar\"}) assert isinstance(in_mem_dn.read(), str) assert in_mem_dn.read() == \"bar\" in_mem_dn.properties[\"default_data\"] = \"baz\" # this modifies the default data value but not the data itself assert in_mem_dn.read() == \"bar\" in_mem_dn.write(\"qux\") assert in_mem_dn.read() == \"qux\" in_mem_dn.write(1998) assert isinstance(in_mem_dn.read(), int) assert in_mem_dn.read() == 1998 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from datetime import datetime from time import sleep from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.submission.submission_status import SubmissionStatus from src.taipy.core.task.task import Task def test_create_submission(scenario): submission_1 = _SubmissionManagerFactory._build_manager()._create(scenario.id, scenario._ID_PREFIX) assert submission_1.id is not None assert submission_1.entity_id == scenario.id assert submission_1.jobs == [] assert isinstance(submission_1.creation_date, datetime) assert submission_1._submission_status == SubmissionStatus.SUBMITTED def test_get_submission(): submission_manager = _SubmissionManagerFactory._build_manager() assert submission_manager._get(\"random_submission_id\") is None submission_1 = submission_manager._create(\"entity_id\", \"ENTITY_TYPE\") submission_2 = submission_manager._get(submission_1.id) assert submission_1.id == submission_2.id assert submission_1.entity_id == submission_2.entity_id == \"entity_id\" assert submission_1.jobs == submission_2.jobs assert submission_1.creation_date == submission_2.creation_date assert submission_1.submission_status == submission_2.submission_status def test_get_all_submission(): submission_manager = _SubmissionManagerFactory._build_manager() version_manager = _VersionManagerFactory._build_manager() submission_manager._set(Submission(\"entity_id\", \"submission_id\", version=version_manager._get_latest_version())) for version_name in [\"abc\", \"xyz\"]: for i in range(10): submission_manager._set( Submission(\"entity_id\", f\"submission_{version_name}_{i}\", version=f\"{version_name}\") ) assert len(submission_manager._get_all()) == 1 version_manager._set_experiment_version(\"xyz\") version_manager._set_experiment_version(\"abc\") assert len(submission_manager._get_all()) == 10 assert len(submission_manager._get_all(\"abc\")) == 10 assert len(submission_manager._get_all(\"xyz\")) == 10 def test_get_latest_submission(): task_1 = Task(\"task_config_1\", {}, print, id=\"task_id_1\") task_2 = Task(\"task_config_2\", {}, print, id=\"task_id_2\") submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(task_1.id, task_1._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_2 = submission_manager._create(task_2.id, task_2._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_3 = submission_manager._create(task_1.id, task_1._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_4 = submission_manager._create(task_2.id, task_2._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_4 def test_delete_submission(): submission_manager = _SubmissionManagerFactory._build_manager() submission = Submission(\"entity_id\", \"submission_id\") 
submission_manager._set(submission) for i in range(10): submission_manager._set(Submission(\"entity_id\", f\"submission_{i}\")) assert len(submission_manager._get_all()) == 11 assert isinstance(submission_manager._get(submission.id), Submission) submission_manager._delete(submission.id) assert len(submission_manager._get_all()) == 10 assert submission_manager._get(submission.id) is None submission_manager._delete_all() assert len(submission_manager._get_all()) == 0 "} {"text": "from datetime import datetime from time import sleep from src.taipy.core import Task from src.taipy.core._repository.db._sql_connection import _SQLConnection from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.submission.submission_status import SubmissionStatus def init_managers(): _VersionManagerFactory._build_manager()._delete_all() _SubmissionManagerFactory._build_manager()._delete_all() def test_create_submission(scenario, init_sql_repo): init_managers() submission_1 = _SubmissionManagerFactory._build_manager()._create(scenario.id, scenario._ID_PREFIX) assert submission_1.id is not None assert submission_1.entity_id == scenario.id assert submission_1.jobs == [] assert isinstance(submission_1.creation_date, datetime) assert submission_1._submission_status == SubmissionStatus.SUBMITTED def test_get_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(\"entity_id\", \"ENTITY_TYPE\") submission_2 = submission_manager._get(submission_1.id) assert submission_1.id == submission_2.id assert submission_1.entity_id == submission_2.entity_id == \"entity_id\" assert submission_1.jobs == submission_2.jobs assert submission_1.creation_date == submission_2.creation_date assert submission_1.submission_status == submission_2.submission_status def test_get_all_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() version_manager = _VersionManagerFactory._build_manager() submission_manager._set(Submission(\"entity_id\", \"submission_id\", version=version_manager._get_latest_version())) for version_name in [\"abc\", \"xyz\"]: for i in range(10): submission_manager._set( Submission(\"entity_id\", f\"submission_{version_name}_{i}\", version=f\"{version_name}\") ) assert len(submission_manager._get_all()) == 1 version_manager._set_experiment_version(\"xyz\") version_manager._set_experiment_version(\"abc\") assert len(submission_manager._get_all()) == 10 assert len(submission_manager._get_all(\"abc\")) == 10 assert len(submission_manager._get_all(\"xyz\")) == 10 def test_get_latest_submission(init_sql_repo): init_managers() task_1 = Task(\"task_config_1\", {}, print, id=\"task_id_1\") task_2 = Task(\"task_config_2\", {}, print, id=\"task_id_2\") submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(task_1.id, task_1._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_2 = submission_manager._create(task_2.id, task_2._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # 
Comparison is based on time, precision on Windows is not enough important submission_3 = submission_manager._create(task_1.id, task_1._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_4 = submission_manager._create(task_2.id, task_2._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_4 def test_delete_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() submission = Submission(\"entity_id\", \"submission_id\") submission_manager._set(submission) for i in range(10): submission_manager._set(Submission(\"entity_id\", f\"submission_{i}\")) assert len(submission_manager._get_all()) == 11 assert isinstance(submission_manager._get(submission.id), Submission) submission_manager._delete(submission.id) assert len(submission_manager._get_all()) == 10 assert submission_manager._get(submission.id) is None submission_manager._delete_all() assert len(submission_manager._get_all()) == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import pathlib from unittest import TestCase, mock from src.taipy.logger._taipy_logger import _TaipyLogger class TestTaipyLogger(TestCase): def test_taipy_logger(self): _TaipyLogger._get_logger().info(\"baz\") _TaipyLogger._get_logger().debug(\"qux\") def test_taipy_logger_configured_by_file(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"logger.conf\") with mock.patch.dict(os.environ, {\"TAIPY_LOGGER_CONFIG_PATH\": path}): _TaipyLogger._get_logger().info(\"baz\") _TaipyLogger._get_logger().debug(\"qux\") "} {"text": "import os import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked from tests.config.utils.named_temporary_file import NamedTemporaryFile config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] custom_property_not_overwritten = true custom_property_overwritten = 10 \"\"\" ) config_from_environment = NamedTemporaryFile( \"\"\" [TAIPY] custom_property_overwritten = 11 \"\"\" ) def test_load_from_environment_overwrite_load_from_filename(): os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename Config.load(config_from_filename.filename) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 11 os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH) def test_block_load_from_environment_overwrite_load_from_filename(): Config.load(config_from_filename.filename) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 10 Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): 
os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename Config.load(config_from_filename.filename) os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 10 # Config.load failed to override the blocked configuration "} {"text": "import pytest from src.taipy.config._config import _Config from src.taipy.config._config_comparator._config_comparator import _ConfigComparator from src.taipy.config._serializer._toml_serializer import _TomlSerializer from src.taipy.config.checker.issue_collector import IssueCollector from src.taipy.config.config import Config from src.taipy.config.section import Section from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest @pytest.fixture(scope=\"function\", autouse=True) def reset(): reset_configuration_singleton() register_test_sections() def reset_configuration_singleton(): Config.unblock_update() Config._default_config = _Config()._default_config() Config._python_config = _Config() Config._file_config = _Config() Config._env_file_config = _Config() Config._applied_config = _Config() Config._collector = IssueCollector() Config._serializer = _TomlSerializer() Config._comparator = _ConfigComparator() def register_test_sections(): Config._register_default(UniqueSectionForTest(\"default_attribute\")) Config.configure_unique_section_for_tests = UniqueSectionForTest._configure Config.unique_section_name = Config.unique_sections[UniqueSectionForTest.name] Config._register_default(SectionForTest(Section._DEFAULT_KEY, \"default_attribute\", prop=\"default_prop\", prop_int=0)) Config.configure_section_for_tests = SectionForTest._configure Config.section_name = Config.sections[SectionForTest.name] "} {"text": "import os from unittest import mock import pytest from src.taipy.config.exceptions.exceptions import InvalidConfigurationId from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest class WrongUniqueSection(UniqueSectionForTest): name = \"1wrong_id\" class WrongSection(SectionForTest): name = \"correct_name\" def test_section_uses_valid_id(): with pytest.raises(InvalidConfigurationId): WrongUniqueSection(attribute=\"foo\") with pytest.raises(InvalidConfigurationId): WrongSection(\"wrong id\", attribute=\"foo\") with pytest.raises(InvalidConfigurationId): WrongSection(\"1wrong_id\", attribute=\"foo\") with pytest.raises(InvalidConfigurationId): WrongSection(\"wrong_@id\", attribute=\"foo\") def test_templated_properties_are_replaced(): with mock.patch.dict(os.environ, {\"foo\": \"bar\", \"baz\": \"1\"}): u_sect = UniqueSectionForTest(attribute=\"attribute\", tpl_property=\"ENV[foo]\") assert u_sect.tpl_property == \"bar\" sect = SectionForTest(id=\"my_id\", attribute=\"attribute\", tpl_property=\"ENV[baz]:int\") assert sect.tpl_property == 1 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from src.taipy.config.config import Config from src.taipy.config.global_app.global_app_config import GlobalAppConfig from src.taipy.config.section import Section from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest def _test_default_global_app_config(global_config: GlobalAppConfig): assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_default_configuration(): default_config = Config._default_config assert default_config._unique_sections is not None assert len(default_config._unique_sections) == 1 assert default_config._unique_sections[UniqueSectionForTest.name] is not None assert default_config._unique_sections[UniqueSectionForTest.name].attribute == \"default_attribute\" assert default_config._sections is not None assert len(default_config._sections) == 1 _test_default_global_app_config(default_config._global_config) _test_default_global_app_config(Config.global_config) _test_default_global_app_config(GlobalAppConfig().default_config()) def test_register_default_configuration(): Config._register_default(SectionForTest(Section._DEFAULT_KEY, \"default_attribute\", prop1=\"prop1\")) # Replace the first default section Config._register_default(SectionForTest(Section._DEFAULT_KEY, \"default_attribute\", prop2=\"prop2\")) default_section = Config.sections[SectionForTest.name][Section._DEFAULT_KEY] assert len(default_section.properties) == 1 assert default_section.prop2 == \"prop2\" assert default_section.prop1 is None "} {"text": "import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import LoadingError from tests.config.utils.named_temporary_file import NamedTemporaryFile def test_node_can_not_appear_twice(): config = NamedTemporaryFile( \"\"\" [unique_section_name] attribute = \"my_attribute\" [unique_section_name] attribute = \"other_attribute\" \"\"\" ) with pytest.raises(LoadingError, match=\"Can not load configuration\"): Config.load(config.filename) def test_skip_configuration_outside_nodes(): config = NamedTemporaryFile( \"\"\" foo = \"bar\" \"\"\" ) Config.load(config.filename) assert Config.global_config.foo is None "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from src.taipy.config._config import _Config from src.taipy.config.checker._checker import _Checker class TestDefaultConfigChecker: def test_check_default_config(self): config = _Config._default_config() collector = _Checker._check(config) assert len(collector._errors) == 0 assert len(collector._infos) == 0 assert len(collector._warnings) == 0 "} {"text": "from src.taipy.config.checker.issue import Issue from src.taipy.config.checker.issue_collector import IssueCollector class TestIssueCollector: def test_add_error(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_error(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 1 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") collector._add_error(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 2 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") def test_add_warning(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_warning(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 1 assert len(collector.infos) == 0 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", \"value\", \"message\", \"checker\") collector._add_warning(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 2 assert len(collector.infos) == 0 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, \"field\", \"value\", \"message\", \"checker\") def test_add_info(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_info(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 1 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") collector._add_info(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 2 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") def test_all(self): collector = IssueCollector() collector._add_info(\"foo\", \"bar\", \"baz\", \"qux\") assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") collector._add_warning(\"foo2\", \"bar2\", \"baz2\", 
\"qux2\") assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[1] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") collector._add_warning(\"foo3\", \"bar3\", \"baz3\", \"qux3\") assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, \"foo3\", \"bar3\", \"baz3\", \"qux3\") assert collector.all[2] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") collector._add_info(\"field\", \"value\", \"message\", \"checker\") collector._add_error(\"field\", \"value\", \"message\", \"checker\") assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, \"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[2] == Issue(IssueCollector._WARNING_LEVEL, \"foo3\", \"bar3\", \"baz3\", \"qux3\") assert collector.all[3] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") assert collector.all[4] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import os from unittest import mock from unittest.mock import MagicMock from src.taipy.config import Config from src.taipy.config.checker._checker import _Checker from src.taipy.config.checker.issue_collector import IssueCollector from tests.config.utils.checker_for_tests import CheckerForTest def test_register_checker(): checker = CheckerForTest checker._check = MagicMock() _Checker.add_checker(checker) Config.check() checker._check.assert_called_once() "} {"text": "import logging from unittest import mock from src.taipy.config._config import _Config from src.taipy.config.checker._checkers._config_checker import _ConfigChecker from src.taipy.config.checker.issue import Issue from src.taipy.config.checker.issue_collector import IssueCollector class MyCustomChecker(_ConfigChecker): def _check(self) -> IssueCollector: # type: ignore pass def test__error(): with mock.patch.object(logging.Logger, \"error\"): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._error(\"field\", 17, \"my message\") assert len(collector.all) == 1 assert len(collector.errors) == 1 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") MyCustomChecker(_Config(), collector)._error(\"foo\", \"bar\", \"baz\") assert len(collector.all) == 2 assert len(collector.errors) == 2 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") assert collector.errors[1] == Issue(IssueCollector._ERROR_LEVEL, \"foo\", \"bar\", \"baz\", \"MyCustomChecker\") def test__warning(): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._warning(\"field\", 17, \"my message\") assert len(collector.all) == 1 assert len(collector.warnings) == 1 assert len(collector.errors) == 0 assert len(collector.infos) == 0 assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") MyCustomChecker(_Config(), collector)._warning(\"foo\", \"bar\", \"baz\") assert len(collector.all) == 2 assert len(collector.warnings) == 2 assert len(collector.errors) == 0 assert len(collector.infos) == 0 assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") assert collector.warnings[1] == Issue(IssueCollector._WARNING_LEVEL, \"foo\", \"bar\", \"baz\", \"MyCustomChecker\") def test__info(): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._info(\"field\", 17, \"my message\") assert len(collector.all) == 1 assert len(collector.infos) == 1 assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") MyCustomChecker(_Config(), collector)._info(\"foo\", \"bar\", \"baz\") assert len(collector.all) == 2 assert len(collector.infos) == 2 assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") assert collector.infos[1] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"MyCustomChecker\") "} {"text": "from src.taipy.config import IssueCollector from src.taipy.config.checker._checkers._config_checker 
import _ConfigChecker class CheckerForTest(_ConfigChecker): def _check(self) -> IssueCollector: return self._collector "} {"text": "from copy import copy from typing import Any, Dict, List, Optional from src.taipy.config import Config, Section from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker from .section_for_tests import SectionForTest class SectionOfSectionsListForTest(Section): name = \"list_section_name\" _MY_ATTRIBUTE_KEY = \"attribute\" _SECTIONS_LIST_KEY = \"sections_list\" def __init__(self, id: str, attribute: Any = None, sections_list: List = None, **properties): self._attribute = attribute self._sections_list = sections_list if sections_list else [] super().__init__(id, **properties) def __copy__(self): return SectionOfSectionsListForTest( self.id, self._attribute, copy(self._sections_list), **copy(self._properties) ) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val @property def sections_list(self): return list(self._sections_list) @sections_list.setter # type: ignore @_ConfigBlocker._check() def sections_list(self, val): self._sections_list = val def _clean(self): self._attribute = None self._sections_list = [] self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute if self._sections_list: as_dict[self._SECTIONS_LIST_KEY] = self._sections_list as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, id) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) section_configs = config._sections.get(SectionForTest.name, None) or [] # type: ignore sections_list = [] if inputs_as_str := as_dict.pop(cls._SECTIONS_LIST_KEY, None): for section_id in inputs_as_str: if section_id in section_configs: sections_list.append(section_configs[section_id]) else: sections_list.append(section_id) return SectionOfSectionsListForTest(id=id, attribute=attribute, sections_list=sections_list, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._sections_list = as_dict.pop(self._SECTIONS_LIST_KEY, self._sections_list) if self._sections_list is None and default_section: self._sections_list = default_section._sections_list self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(id: str, attribute: str, sections_list: List = None, **properties): section = SectionOfSectionsListForTest(id, attribute, sections_list, **properties) Config._register(section) return Config.sections[SectionOfSectionsListForTest.name][id] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import tempfile class NamedTemporaryFile: def __init__(self, content=None): with tempfile.NamedTemporaryFile(\"w\", delete=False) as fd: if content: fd.write(content) self.filename = fd.name def read(self): with open(self.filename, \"r\") as fp: return fp.read() def __del__(self): os.unlink(self.filename) "} {"text": "from copy import copy from typing import Any, Dict, Optional from src.taipy.config import Config, Section from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker class SectionForTest(Section): name = \"section_name\" _MY_ATTRIBUTE_KEY = \"attribute\" def __init__(self, id: str, attribute: Any = None, **properties): self._attribute = attribute super().__init__(id, **properties) def __copy__(self): return SectionForTest(self.id, self._attribute, **copy(self._properties)) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val def _clean(self): self._attribute = None self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, id) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) return SectionForTest(id=id, attribute=attribute, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(id: str, attribute: str, **properties): section = SectionForTest(id, attribute, **properties) Config._register(section) return Config.sections[SectionForTest.name][id] "} {"text": "from copy import copy from typing import Any, Dict, Optional from src.taipy.config import Config from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker from src.taipy.config.unique_section import UniqueSection class UniqueSectionForTest(UniqueSection): name = \"unique_section_name\" _MY_ATTRIBUTE_KEY = \"attribute\" def __init__(self, attribute: str = None, **properties): self._attribute = attribute super().__init__(**properties) def __copy__(self): return UniqueSectionForTest(self._attribute, **copy(self._properties)) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val def _clean(self): self._attribute = None self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id=None, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, None) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) return UniqueSectionForTest(attribute=attribute, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = 
as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(attribute: str, **properties): section = UniqueSectionForTest(attribute, **properties) Config._register(section) return Config.unique_sections[UniqueSectionForTest.name] "} {"text": "import pytest from src.taipy.config.common._validate_id import _validate_id from src.taipy.config.exceptions.exceptions import InvalidConfigurationId class TestId: def test_validate_id(self): s = _validate_id(\"foo\") assert s == \"foo\" with pytest.raises(InvalidConfigurationId): _validate_id(\"1foo\") with pytest.raises(InvalidConfigurationId): _validate_id(\"foo bar\") with pytest.raises(InvalidConfigurationId): _validate_id(\"foo/foo$\") with pytest.raises(InvalidConfigurationId): _validate_id(\"\") with pytest.raises(InvalidConfigurationId): _validate_id(\" \") with pytest.raises(InvalidConfigurationId): _validate_id(\"class\") with pytest.raises(InvalidConfigurationId): _validate_id(\"def\") with pytest.raises(InvalidConfigurationId): _validate_id(\"with\") with pytest.raises(InvalidConfigurationId): _validate_id(\"CYCLE\") with pytest.raises(InvalidConfigurationId): _validate_id(\"SCENARIO\") with pytest.raises(InvalidConfigurationId): _validate_id(\"SEQUENCE\") with pytest.raises(InvalidConfigurationId): _validate_id(\"TASK\") with pytest.raises(InvalidConfigurationId): _validate_id(\"DATANODE\") "} {"text": "import pytest from src.taipy.config.common.scope import Scope def test_scope(): # Test __ge__ method assert Scope.GLOBAL >= Scope.GLOBAL assert Scope.GLOBAL >= Scope.CYCLE assert Scope.CYCLE >= Scope.CYCLE assert Scope.GLOBAL >= Scope.SCENARIO assert Scope.CYCLE >= Scope.SCENARIO assert Scope.SCENARIO >= Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO >= \"testing string\" # Test __gt__ method assert Scope.GLOBAL > Scope.CYCLE assert Scope.GLOBAL > Scope.SCENARIO assert Scope.CYCLE > Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO > \"testing string\" # Test __le__ method assert Scope.GLOBAL <= Scope.GLOBAL assert Scope.CYCLE <= Scope.GLOBAL assert Scope.CYCLE <= Scope.CYCLE assert Scope.SCENARIO <= Scope.GLOBAL assert Scope.SCENARIO <= Scope.CYCLE assert Scope.SCENARIO <= Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO <= \"testing string\" # Test __lt__ method assert Scope.SCENARIO < Scope.GLOBAL assert Scope.SCENARIO < Scope.GLOBAL assert Scope.SCENARIO < Scope.CYCLE with pytest.raises(TypeError): assert Scope.SCENARIO < \"testing string\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import argparse import re import sys import pytest from src.taipy._cli._base_cli import _CLI if sys.version_info >= (3, 10): argparse_options_str = \"options:\" else: argparse_options_str = \"optional arguments:\" def preprocess_stdout(stdout): stdout = stdout.replace(\"\\n\", \" \").replace(\"\\t\", \" \") return re.sub(\" +\", \" \", stdout) def remove_subparser(name: str): \"\"\"Remove a subparser from argparse.\"\"\" _CLI._sub_taipyparsers.pop(name, None) if _CLI._subparser_action: _CLI._subparser_action._name_parser_map.pop(name, None) for action in _CLI._subparser_action._choices_actions: if action.dest == name: _CLI._subparser_action._choices_actions.remove(action) @pytest.fixture(autouse=True, scope=\"function\") def clean_argparser(): _CLI._parser = argparse.ArgumentParser(conflict_handler=\"resolve\") _CLI._arg_groups = {} subcommands = list(_CLI._sub_taipyparsers.keys()) for subcommand in subcommands: remove_subparser(subcommand) yield def test_subparser(capfd): subcommand_1 = _CLI._add_subparser(\"subcommand_1\", help=\"subcommand_1 help\") subcommand_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") subcommand_1.add_argument(\"--bar\", \"-b\", help=\"bar help\") subcommand_2 = _CLI._add_subparser(\"subcommand_2\", help=\"subcommand_2 help\") subcommand_2.add_argument(\"--doo\", \"-d\", help=\"doo help\") subcommand_2.add_argument(\"--baz\", \"-z\", help=\"baz help\") expected_subcommand_1_help_message = f\"\"\"subcommand_1 [-h] [--foo FOO] [--bar BAR] {argparse_options_str} -h, --help show this help message and exit --foo FOO, -f FOO foo help --bar BAR, -b BAR bar help \"\"\" subcommand_1.print_help() stdout, _ = capfd.readouterr() assert preprocess_stdout(expected_subcommand_1_help_message) in preprocess_stdout(stdout) expected_subcommand_2_help_message = f\"\"\"subcommand_2 [-h] [--doo DOO] [--baz BAZ] {argparse_options_str} -h, --help show this help message and exit --doo DOO, -d DOO doo help --baz BAZ, -z BAZ baz help \"\"\" subcommand_2.print_help() stdout, _ = capfd.readouterr() assert preprocess_stdout(expected_subcommand_2_help_message) in preprocess_stdout(stdout) def test_duplicate_subcommand(): subcommand_1 = _CLI._add_subparser(\"subcommand_1\", help=\"subcommand_1 help\") subcommand_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") subcommand_2 = _CLI._add_subparser(\"subcommand_1\", help=\"subcommand_2 help\") subcommand_2.add_argument(\"--bar\", \"-b\", help=\"bar help\") # The title of subcommand_2 is duplicated with subcommand_1, and therefore # there will be no new subcommand created assert len(_CLI._sub_taipyparsers) == 1 def test_groupparser(capfd): group_1 = _CLI._add_groupparser(\"group_1\", \"group_1 desc\") group_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") group_1.add_argument(\"--bar\", \"-b\", help=\"bar help\") group_2 = _CLI._add_groupparser(\"group_2\", \"group_2 desc\") group_2.add_argument(\"--doo\", \"-d\", help=\"doo help\") group_2.add_argument(\"--baz\", \"-z\", help=\"baz help\") expected_help_message = \"\"\" group_1: group_1 desc --foo FOO, -f FOO foo help --bar BAR, -b BAR bar help group_2: group_2 desc --doo DOO, -d DOO doo help --baz BAZ, -z BAZ baz help \"\"\".strip() _CLI._parser.print_help() stdout, _ = capfd.readouterr() assert expected_help_message in stdout def test_duplicate_group(): group_1 = _CLI._add_groupparser(\"group_1\", \"group_1 desc\") group_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") group_2 = _CLI._add_groupparser(\"group_1\", \"group_2 desc\") group_2.add_argument(\"--bar\", 
\"-b\", help=\"bar help\") # The title of group_2 is duplicated with group_1, and therefore # there will be no new group created assert len(_CLI._arg_groups) == 1 "} {"text": "import pytest from src.taipy.config.common._classproperty import _Classproperty class TestClassProperty: def test_class_property(self): class TestClass: @_Classproperty def test_property(cls): return \"test_property\" assert TestClass.test_property == \"test_property\" assert TestClass().test_property == \"test_property\" with pytest.raises(TypeError): TestClass.test_property() "} {"text": "import os from unittest import mock import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked def test_global_config_with_env_variable_value(): with mock.patch.dict(os.environ, {\"FOO\": \"bar\", \"BAZ\": \"qux\"}): Config.configure_global_app(foo=\"ENV[FOO]\", bar=\"ENV[BAZ]\") assert Config.global_config.foo == \"bar\" assert Config.global_config.bar == \"qux\" def test_default_global_app_config(): global_config = Config.global_config assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_block_update_global_app_config(): Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_global_app(foo=\"bar\") with pytest.raises(ConfigurationUpdateBlocked): Config.global_config.properties = {\"foo\": \"bar\"} # Test if the global_config stay as default assert Config.global_config.foo is None assert len(Config.global_config.properties) == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import argparse import re from unittest.mock import patch import pytest from src.taipy._entrypoint import _entrypoint from taipy._cli._base_cli import _CLI def preprocess_stdout(stdout): stdout = stdout.replace(\"\\n\", \" \").replace(\"\\t\", \" \") return re.sub(\" +\", \" \", stdout) def remove_subparser(name: str): \"\"\"Remove a subparser from the _CLI class.\"\"\" _CLI._sub_taipyparsers.pop(name, None) if _CLI._subparser_action: _CLI._subparser_action._name_parser_map.pop(name, None) for action in _CLI._subparser_action._choices_actions: if action.dest == name: _CLI._subparser_action._choices_actions.remove(action) @pytest.fixture(autouse=True, scope=\"function\") def clean_argparser(): _CLI._parser = argparse.ArgumentParser(conflict_handler=\"resolve\") _CLI._subparser_action = None _CLI._arg_groups = {} subcommands = list(_CLI._sub_taipyparsers.keys()) for subcommand in subcommands: remove_subparser(subcommand) yield expected_help = \"\"\"{run,manage-versions,create,migrate,help} ... positional arguments: {run,manage-versions,create,migrate,help} run Run a Taipy application. manage-versions Taipy version control system. create Create a new Taipy application. migrate Migrate entities created from old taipy versions to be compatible with the current taipy version. The entity migration should be performed only after updating taipy code to the current version. help Show the Taipy help message. \"\"\" def test_taipy_command_alone_print_help(capsys): with patch(\"sys.argv\", [\"prog\"]): _entrypoint() out, _ = capsys.readouterr() assert preprocess_stdout(expected_help) in preprocess_stdout(out) def test_taipy_help_command(capsys): with patch(\"sys.argv\", [\"prog\", \"help\"]): with pytest.raises(SystemExit): _entrypoint() out, _ = capsys.readouterr() assert preprocess_stdout(expected_help) in preprocess_stdout(out) def test_help_non_existed_command(caplog): with patch(\"sys.argv\", [\"prog\", \"help\", \"non_existed_command\"]): with pytest.raises(SystemExit): _entrypoint() assert \"non_existed_command is not a valid command.\" in caplog.text def test_taipy_create_help(capsys): expected_help = \"create [-h] [--template\" with patch(\"sys.argv\", [\"prog\", \"help\", \"create\"]): with pytest.raises(SystemExit): _entrypoint() out, _ = capsys.readouterr() assert preprocess_stdout(expected_help) in preprocess_stdout(out) "} {"text": "import os import sys from importlib.util import find_spec from pathlib import Path import pandas as pd # type: ignore import pytest from flask import Flask, g def pytest_configure(config): if (find_spec(\"src\") and find_spec(\"src.taipy\")) and (not find_spec(\"taipy\") or not find_spec(\"taipy.gui\")): import src.taipy.gui import src.taipy.gui._renderers.builder import src.taipy.gui._warnings import src.taipy.gui.builder import src.taipy.gui.data.decimator.lttb import src.taipy.gui.data.decimator.minmax import src.taipy.gui.data.decimator.rdp import src.taipy.gui.data.decimator.scatter_decimator import src.taipy.gui.data.utils import src.taipy.gui.extension import src.taipy.gui.utils._map_dict import src.taipy.gui.utils._variable_directory import src.taipy.gui.utils.expr_var_name sys.modules[\"taipy.gui._warnings\"] = sys.modules[\"src.taipy.gui._warnings\"] sys.modules[\"taipy.gui._renderers.builder\"] = sys.modules[\"src.taipy.gui._renderers.builder\"] sys.modules[\"taipy.gui.utils._variable_directory\"] = sys.modules[\"src.taipy.gui.utils._variable_directory\"] sys.modules[\"taipy.gui.utils.expr_var_name\"] = 
sys.modules[\"src.taipy.gui.utils.expr_var_name\"] sys.modules[\"taipy.gui.utils._map_dict\"] = sys.modules[\"src.taipy.gui.utils._map_dict\"] sys.modules[\"taipy.gui.extension\"] = sys.modules[\"src.taipy.gui.extension\"] sys.modules[\"taipy.gui.data.utils\"] = sys.modules[\"src.taipy.gui.data.utils\"] sys.modules[\"taipy.gui.data.decimator.lttb\"] = sys.modules[\"src.taipy.gui.data.decimator.lttb\"] sys.modules[\"taipy.gui.data.decimator.rdp\"] = sys.modules[\"src.taipy.gui.data.decimator.rdp\"] sys.modules[\"taipy.gui.data.decimator.minmax\"] = sys.modules[\"src.taipy.gui.data.decimator.minmax\"] sys.modules[\"taipy.gui.data.decimator.scatter_decimator\"] = sys.modules[ \"src.taipy.gui.data.decimator.scatter_decimator\" ] sys.modules[\"taipy.gui\"] = sys.modules[\"src.taipy.gui\"] sys.modules[\"taipy.gui.builder\"] = sys.modules[\"src.taipy.gui.builder\"] csv = pd.read_csv( f\"{Path(Path(__file__).parent.resolve())}{os.path.sep}current-covid-patients-hospital.csv\", parse_dates=[\"Day\"] ) small_dataframe_data = {\"name\": [\"A\", \"B\", \"C\"], \"value\": [1, 2, 3]} @pytest.fixture(scope=\"function\") def csvdata(): yield csv @pytest.fixture(scope=\"function\") def small_dataframe(): yield small_dataframe_data @pytest.fixture(scope=\"function\") def gui(helpers): from taipy.gui import Gui gui = Gui() yield gui # Delete Gui instance and state of some classes after each test gui.stop() helpers.test_cleanup() @pytest.fixture def helpers(): from .helpers import Helpers return Helpers @pytest.fixture def test_client(): flask_app = Flask(\"Test App\") # Create a test client using the Flask application configured for testing with flask_app.test_client() as testing_client: # Establish an application context with flask_app.app_context(): g.client_id = \"test client id\" yield testing_client # this is where the testing happens! "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import inspect from taipy.gui import Gui, Html def test_simple_html(gui: Gui, helpers): # html_string = \"
<html><body><h1>test</h1></body></html>\" html_string = \"<html><body><h1>test</h1></body></html>\" gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Html(html_string)) gui.run(run_server=False) client = gui._server.test_client() jsx = client.get(\"/taipy-jsx/test\").json[\"jsx\"] assert jsx == \"<h1>test</h1>\" "}
{"text": "import pytest from taipy.gui import Gui def test_invalid_control_name(gui: Gui, helpers): md_string = \"<|invalid|invalid|>\" expected_list = [\"INVALID SYNTAX - Control is 'invalid'\"] helpers.test_control_md(gui, md_string, expected_list) def test_value_to_negated_property(gui: Gui, helpers): md_string = \"<|button|not active=true|>\" expected_list = [\"...\"] helpers.test_control_md(gui, md_string, expected_list) def test_opening_unknown_block(gui: Gui, helpers): md_string = \"<|unknown\" expected_list = [\"...\"] helpers.test_control_md(gui, md_string, expected_list) ... expected_list = [\"...\", \"No matching opened tag\", \"...\"] helpers.test_control_md(gui, md_string, expected_list) def test_md_link(gui: Gui, helpers): md_string = \"[content](link)\" expected_list = [\"...\"] helpers.test_control_md(gui, md_string, expected_list) "}
\"] helpers.test_control_md(gui, md_string, expected_list) def test_md_link(gui: Gui, helpers): md_string = \"[content](link)\" expected_list = [\"\"] helpers.test_control_md(gui, md_string, expected_list) "} {"text": "import pytest from taipy.gui.utils._bindings import _Bindings def test_exception_binding_twice(gui, test_client): bind = _Bindings(gui) bind._new_scopes() bind._bind(\"x\", 10) with pytest.raises(ValueError): bind._bind(\"x\", 10) def test_exception_binding_invalid_name(gui): bind = _Bindings(gui) bind._new_scopes() with pytest.raises(ValueError): bind._bind(\"invalid identifier\", 10) "} {"text": "from email import message import pytest from taipy.gui._page import _Page def test_exception_page(gui): page = _Page() page._route = \"page1\" with pytest.raises(RuntimeError, match=\"Can't render page page1: no renderer found\"): page.render(gui) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import tempfile from unittest.mock import patch import pytest from taipy.config import Config from taipy.config._config import _Config from taipy.config._serializer._toml_serializer import _TomlSerializer from taipy.config.checker._checker import _Checker from taipy.config.checker.issue_collector import IssueCollector from taipy.gui import Gui class NamedTemporaryFile: def __init__(self, content=None): with tempfile.NamedTemporaryFile(\"w\", delete=False) as fd: if content: fd.write(content) self.filename = fd.name def read(self): with open(self.filename, \"r\") as fp: return fp.read() def __del__(self): os.unlink(self.filename) def init_config(): Config.unblock_update() Config._default_config = _Config()._default_config() Config._python_config = _Config() Config._file_config = None Config._env_file_config = None Config._applied_config = _Config._default_config() Config._collector = IssueCollector() Config._serializer = _TomlSerializer() _Checker._checkers = [] from taipy.config import _inject_section from taipy.gui._default_config import default_config from taipy.gui._gui_section import _GuiSection _inject_section( _GuiSection, \"gui_config\", _GuiSection(property_list=list(default_config)), [(\"configure_gui\", _GuiSection._configure)], add_to_unconflicted_sections=True, ) @pytest.fixture(scope=\"function\", autouse=True) def cleanup_test(helpers): init_config() helpers.test_cleanup() yield init_config() helpers.test_cleanup() def test_gui_service_arguments_hierarchy(): # Test default configuration gui = Gui() gui.run(run_server=False) service_config = gui._config.config assert not service_config[\"allow_unsafe_werkzeug\"] assert service_config[\"async_mode\"] == \"gevent\" assert service_config[\"change_delay\"] is None assert service_config[\"chart_dark_template\"] is None assert service_config[\"dark_mode\"] assert service_config[\"dark_theme\"] is None assert not service_config[\"debug\"] assert not service_config[\"extended_status\"] assert service_config[\"favicon\"] is None assert not service_config[\"flask_log\"] assert service_config[\"host\"] == \"127.0.0.1\" 
assert service_config[\"light_theme\"] is None assert service_config[\"margin\"] is None assert service_config[\"ngrok_token\"] == \"\" assert service_config[\"notification_duration\"] == 3000 assert service_config[\"propagate\"] assert service_config[\"run_browser\"] assert not service_config[\"run_in_thread\"] assert not service_config[\"run_server\"] assert not service_config[\"single_client\"] assert not service_config[\"system_notification\"] assert service_config[\"theme\"] is None assert service_config[\"time_zone\"] is None assert service_config[\"title\"] is None assert service_config[\"upload_folder\"] is None assert not service_config[\"use_arrow\"] assert not service_config[\"use_reloader\"] assert service_config[\"watermark\"] == \"Taipy inside\" assert service_config[\"webapp_path\"] is None assert service_config[\"port\"] == 5000 gui.stop() # Override default configuration by explicit defined arguments in Gui.run() gui = Gui() gui.run(run_server=False, watermark=\"\", host=\"my_host\", port=5001) service_config = gui._config.config assert service_config[\"watermark\"] == \"\" assert service_config[\"host\"] == \"my_host\" assert service_config[\"port\"] == 5001 gui.stop() # Override Gui.run() arguments by explicit defined arguments in Config.configure_gui() Config.configure_gui(dark_mode=False, host=\"my_2nd_host\", port=5002) gui = Gui() gui.run(run_server=False, watermark=\"\", host=\"my_host\", port=5001) service_config = gui._config.config assert not service_config[\"dark_mode\"] assert service_config[\"host\"] == \"my_2nd_host\" assert service_config[\"watermark\"] == \"\" assert service_config[\"port\"] == 5002 gui.stop() # Override Config.configure_gui() arguments by loading a TOML file with [gui] section toml_config = NamedTemporaryFile( content=\"\"\" [TAIPY] [gui] host = \"my_3rd_host\" port = 5003 use_reloader = \"true:bool\" \"\"\" ) Config.load(toml_config.filename) gui = Gui() gui.run(run_server=False, host=\"my_host\", port=5001) service_config = gui._config.config assert service_config[\"host\"] == \"my_3rd_host\" assert service_config[\"port\"] == 5003 assert service_config[\"use_reloader\"] gui.stop() # Override TOML configuration file with CLI arguments with patch(\"sys.argv\", [\"prog\", \"--host\", \"my_4th_host\", \"--port\", \"5004\", \"--no-reloader\", \"--debug\"]): gui = Gui() gui.run(run_server=False, host=\"my_host\", port=5001) service_config = gui._config.config assert service_config[\"host\"] == \"my_4th_host\" assert service_config[\"port\"] == 5004 assert not service_config[\"use_reloader\"] assert service_config[\"debug\"] gui.stop() def test_clean_config(): gui_config = Config.configure_gui(dark_mode=False) assert Config.gui_config is gui_config gui_config._clean() # Check if the instance before and after _clean() is the same assert Config.gui_config is gui_config assert gui_config.dark_mode is None assert gui_config.properties == {} "} {"text": "import inspect import warnings import pytest from taipy.gui import Gui def test_no_ignore_file(gui: Gui): with warnings.catch_warnings(record=True): gui._set_frame(inspect.currentframe()) gui.run(run_server=False) client = gui._server.test_client() response = client.get(\"/resource.txt\") assert ( response.status_code == 200 ), f\"file resource.txt request status should be 200 but is {response.status_code}\" "} {"text": "import inspect import warnings import pytest from taipy.gui import Gui def test_ignore_file_found(gui: Gui): with warnings.catch_warnings(record=True): 
gui._set_frame(inspect.currentframe()) gui.run(run_server=False) client = gui._server.test_client() response = client.get(\"/resource.txt\") assert ( response.status_code == 404 ), f\"file resource.txt request status should be 404 but is {response.status_code}\" def test_ignore_file_not_found(gui: Gui): with warnings.catch_warnings(record=True): gui._set_frame(inspect.currentframe()) gui.run(run_server=False) client = gui._server.test_client() response = client.get(\"/resource2.txt\") assert ( response.status_code == 200 ), f\"file resource2.txt request status should be 200 but is {response.status_code}\" "} {"text": "import inspect import time from urllib.request import urlopen from taipy.gui import Gui # this hangs in github def test_run_thread(gui: Gui, helpers): gui._set_frame(inspect.currentframe()) gui.add_page(\"page1\", \"# first page\") gui.run(run_in_thread=True, run_browser=False) while not helpers.port_check(): time.sleep(0.1) assert \">first page\" in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") gui.stop() while helpers.port_check(): time.sleep(0.1) gui.run(run_in_thread=True, run_browser=False) while not helpers.port_check(): time.sleep(0.1) assert \">first page\" in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") "} {"text": "import pytest from taipy.gui import Gui def test_add_shared_variables(gui: Gui): Gui.add_shared_variable(\"var1\", \"var2\") assert isinstance(gui._Gui__shared_variables, list) assert len(gui._Gui__shared_variables) == 2 Gui.add_shared_variables(\"var1\", \"var2\") assert len(gui._Gui__shared_variables) == 2 "} {"text": "import json from taipy.gui.gui import Gui def test_multiple_instance(): gui1 = Gui(\"<|gui1|>\") gui2 = Gui(\"<|gui2|>\") gui1.run(run_server=False) gui2.run(run_server=False) client1 = gui1._server.test_client() client2 = gui2._server.test_client() assert_multiple_instance(client1, 'value=\"gui1\"') assert_multiple_instance(client2, 'value=\"gui2\"') def assert_multiple_instance(client, expected_value): response = client.get(\"/taipy-jsx/TaiPy_root_page\") response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert response.status_code == 200 assert isinstance(response_data, dict) assert \"jsx\" in response_data assert expected_value in response_data[\"jsx\"] "} {"text": "from taipy.gui.utils._variable_directory import _MODULE_NAME_MAP, _variable_decode, _variable_encode def test_variable_encode_decode(): assert _variable_encode(\"x\", \"module\") == \"x_TPMDL_0\" assert _MODULE_NAME_MAP[0] == \"module\" assert _variable_decode(\"x_TPMDL_0\") == (\"x\", \"module\") assert _variable_encode(\"x\", None) == \"x\" assert _variable_decode(\"x\") == (\"x\", None) assert _variable_encode(\"TpExPr_x\", \"module1\") == \"TpExPr_x_TPMDL_1\" assert _MODULE_NAME_MAP[1] == \"module1\" assert _variable_decode(\"TpExPr_x_TPMDL_1\") == (\"x\", \"module1\") "} {"text": "import inspect import warnings from taipy.gui import Gui, Markdown, State, navigate def test_navigate(gui: Gui, helpers): def navigate_to(state: State): navigate(state, \"test\") with warnings.catch_warnings(record=True): gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"#This is a page\")) gui.run(run_server=False) client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) 
client.get(f\"/taipy-jsx/test/?client_id={sid}\") ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"A\", \"name\": \"my_button\", \"payload\": \"navigate_to\"}) # assert for received message (message that would be sent to the front-end client) assert ws_client.get_received() def test_navigate_to_no_route(gui: Gui, helpers): def navigate_to(state: State): navigate(state, \"toto\") with warnings.catch_warnings(record=True): gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"#This is a page\")) gui.run(run_server=False) client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) client.get(f\"/taipy-jsx/test/?client_id={sid}\") ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"A\", \"name\": \"my_button\", \"payload\": \"navigate_to\"}) # assert for received message (message that would be sent to the front-end client) assert not ws_client.get_received() def test_on_navigate_to_inexistant(gui: Gui, helpers): def on_navigate(state: State, page: str): return \"test2\" if page == \"test\" else page with warnings.catch_warnings(record=True) as records: gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"#This is a page\")) gui.run(run_server=False) client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) client.get(f\"/taipy-jsx/test?client_id={sid}\") warns = helpers.get_taipy_warnings(records) assert len(warns) == 1 text = warns[0].message.args[0] if isinstance(warns[0].message, Warning) else warns[0].message assert text == 'Cannot navigate to \"test2\": unknown page.' 
def test_on_navigate_to_existant(gui: Gui, helpers): def on_navigate(state: State, page: str): return \"test2\" if page == \"test1\" else page with warnings.catch_warnings(record=True): gui._set_frame(inspect.currentframe()) gui.add_page(\"test1\", Markdown(\"#This is a page test1\")) gui.add_page(\"test2\", Markdown(\"#This is a page test2\")) gui.run(run_server=False) client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) content = client.get(f\"/taipy-jsx/test1?client_id={sid}\") assert content.status_code == 302 "} {"text": "import inspect import pandas as pd # type: ignore from taipy.gui import Gui def test_expression_text_control_str(gui: Gui, test_client, helpers): gui._bind_var_val(\"x\", \"Hello World!\") md_string = \"<|{x}|>\" expected_list = [\"...\"] helpers.test_control_md(gui, md_string, expected_list) ... # 2 files -> 2 routes + 1 default route assert len(gui._config.pages) == 3 # 2 files -> 2 pages + 1 default page "} {"text": "import inspect import json import warnings from taipy.gui import Gui def test_render_route(gui: Gui): gui._set_frame(inspect.currentframe()) gui.add_page(\"page1\", \"# first page\") gui.add_page(\"page2\", \"# second page\") gui.run(run_server=False) with warnings.catch_warnings(record=True): client = gui._server.test_client() response = client.get(\"/taipy-init\") response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert response.status_code == 200 assert isinstance(response_data, dict) assert isinstance(response_data[\"locations\"], dict) assert \"/page1\" in response_data[\"locations\"] assert \"/page2\" in response_data[\"locations\"] assert \"/\" in response_data[\"locations\"] assert response_data[\"locations\"] == {\"/\": \"/TaiPy_root_page\", \"/page1\": \"/page1\", \"/page2\": \"/page2\"} "}
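{"text": "# Hedged sketch, mirroring test_render_route above: /taipy-init exposes the page-to-route map.
from taipy.gui import Gui

gui = Gui()
gui.add_page(\"page1\", \"# first page\")
gui.run(run_server=False)
client = gui._server.test_client()
locations = client.get(\"/taipy-init\").json[\"locations\"]
assert locations[\"/page1\"] == \"/page1\"  # \"/\" maps to \"/TaiPy_root_page\""}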
\"\", \"\") assert repr(res) == \"Taipy: Do not update\" def test__chart_conf(gui: Gui): data = pd.DataFrame({\"col1\": [0, 1, 2], \"col2\": [True, True, False]}) gui.run(run_server=False) with gui.get_flask_app().app_context(): res = gui._chart_conf(True, None, json.dumps({}), json.dumps({\"data\": \"data\"}), data=data) d = json.loads(res) assert isinstance(d, dict) assert d[\"columns\"][\"col1\"][\"type\"] == \"int\" res = gui._chart_conf(False, None, \"\", \"\") assert repr(res) == \"Taipy: Do not update\" with pytest.warns(UserWarning): res = gui._chart_conf(True, None, \"\", \"\") assert repr(res) == \"Taipy: Do not update\" def test__get_valid_adapter_result(gui: Gui): gui.run(run_server=False) with gui.get_flask_app().app_context(): res = gui._get_valid_adapter_result((\"id\", \"label\")) assert isinstance(res, tuple) assert res[0] == \"id\" "} {"text": "import json import warnings from types import SimpleNamespace from taipy.gui import Gui, Markdown def test_partial(gui: Gui): with warnings.catch_warnings(record=True): gui.add_partial(Markdown(\"#This is a partial\")) gui.run(run_server=False) client = gui._server.test_client() response = client.get(f\"/taipy-jsx/{gui._config.partial_routes[0]}\") response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert response.status_code == 200 assert \"jsx\" in response_data and \"This is a partial\" in response_data[\"jsx\"] def test_partial_update(gui: Gui): with warnings.catch_warnings(record=True): partial = gui.add_partial(Markdown(\"#This is a partial\")) gui.run(run_server=False, single_client=True) client = gui._server.test_client() response = client.get(f\"/taipy-jsx/{gui._config.partial_routes[0]}\") response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert response.status_code == 200 assert \"jsx\" in response_data and \"This is a partial\" in response_data[\"jsx\"] # update partial fake_state = SimpleNamespace() fake_state._gui = gui partial.update_content(fake_state, \"#partial updated\") # type: ignore response = client.get(f\"/taipy-jsx/{gui._config.partial_routes[0]}\") response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert response.status_code == 200 assert \"jsx\" in response_data and \"partial updated\" in response_data[\"jsx\"] "} {"text": "from taipy.gui import Gui, Markdown def test_variable_binding(helpers): \"\"\" Tests the binding of a few variables and a function \"\"\" def another_function(gui): pass x = 10 y = 20 z = \"button label\" gui = Gui() gui.add_page(\"test\", Markdown(\"<|{x}|> | <|{y}|> | <|{z}|button|on_action=another_function|>\")) gui.run(run_server=False, single_client=True) client = gui._server.test_client() jsx = client.get(\"/taipy-jsx/test\").json[\"jsx\"] for expected in [\"\")) gui.run(run_server=False) client = gui._server.test_client() jsx = client.get(\"/taipy-jsx/test\").json[\"jsx\"] for expected in [\"\") gui.run(run_server=False) client = gui._server.test_client() jsx = client.get(\"/taipy-jsx/TaiPy_root_page\").json[\"jsx\"] for expected in [\" \"\"\" ) "} {"text": "import inspect import pytest from taipy.gui import Gui from taipy.gui.extension import Element, ElementLibrary class MyLibrary(ElementLibrary): def get_name(self) -> str: return \"taipy_extension_example\" def get_elements(self): return dict() def test_extension_no_config(gui: Gui, helpers): gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() with pytest.warns(UserWarning): ret = 
flask_client.get(\"/taipy-extension/toto/titi\") assert ret.status_code == 404 def test_extension_config_wrong_path(gui: Gui, helpers): Gui.add_library(MyLibrary()) gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() with pytest.warns(UserWarning): ret = flask_client.get(\"/taipy-extension/taipy_extension_example/titi\") assert ret.status_code == 404 "} {"text": "import inspect import pytest from flask import g from taipy.gui import Gui def test_get_status(gui: Gui): gui.run(run_server=False) flask_client = gui._server.test_client() ret = flask_client.get(\"/taipy.status.json\") assert ret.status_code == 200, f\"status_code => {ret.status_code} != 200\" assert ret.mimetype == \"application/json\", f\"mimetype => {ret.mimetype} != application/json\" assert ret.json, \"json is not defined\" assert \"gui\" in ret.json, \"json has no key gui\" gui = ret.json.get(\"gui\") assert isinstance(gui, dict), \"json.gui is not a dict\" assert \"user_status\" in gui, \"json.gui has no key user_status\" assert gui.get(\"user_status\") == \"\", \"json.gui.user_status is not empty\" def test_get_extended_status(gui: Gui): gui.run(run_server=False, extended_status=True) flask_client = gui._server.test_client() ret = flask_client.get(\"/taipy.status.json\") assert ret.status_code == 200, f\"status_code => {ret.status_code} != 200\" assert ret.mimetype == \"application/json\", f\"mimetype => {ret.mimetype} != application/json\" assert ret.json, \"json is not defined\" gui = ret.json.get(\"gui\") assert \"backend_version\" in gui, \"json.gui has no key backend_version\" assert \"flask_version\" in gui, \"json.gui has no key flask_version\" assert \"frontend_version\" in gui, \"json.gui has no key frontend_version\" assert \"host\" in gui, \"json.gui has no key host\" assert \"python_version\" in gui, \"json.gui has no key python_version\" assert \"user_status\" in gui, \"json.gui has no key user_status\" assert gui.get(\"user_status\") == \"\", \"json.gui.user_status is not empty\" def test_get_status_with_user_status(gui: Gui): user_status = \"user_status\" def on_status(state): return user_status gui._set_frame(inspect.currentframe()) gui.run(run_server=False) flask_client = gui._server.test_client() ret = flask_client.get(\"/taipy.status.json\") assert ret.status_code == 200, f\"status_code => {ret.status_code} != 200\" assert ret.json, \"json is not defined\" gui = ret.json.get(\"gui\") assert \"user_status\" in gui, \"json.gui has no key user_status\" assert gui.get(\"user_status\") == user_status, f'json.gui.user_status => {gui.get(\"user_status\")} != {user_status}' "} {"text": "import inspect import io import pathlib import tempfile import pytest from taipy.gui import Gui from taipy.gui.data.data_scope import _DataScopes from taipy.gui.utils import _get_non_existent_file_path def test_file_upload_no_varname(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) with pytest.warns(UserWarning): ret = flask_client.post(f\"/taipy-uploads?client_id={sid}\") assert ret.status_code == 400 def test_file_upload_no_blob(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) with pytest.warns(UserWarning): ret = 
flask_client.post(f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": \"varname\"}) assert ret.status_code == 400 def test_file_upload_no_filename(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() file = (io.BytesIO(b\"abcdef\"), \"\") # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) with pytest.warns(UserWarning): ret = flask_client.post(f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": \"varname\", \"blob\": file}) assert ret.status_code == 400 def test_file_upload_simple(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) file_name = \"test.jpg\" file = (io.BytesIO(b\"abcdef\"), file_name) upload_path = pathlib.Path(gui._get_config(\"upload_folder\", tempfile.gettempdir())) file_name = _get_non_existent_file_path(upload_path, file_name).name ret = flask_client.post( f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": \"varname\", \"blob\": file}, content_type=\"multipart/form-data\", ) assert ret.status_code == 200 created_file = upload_path / file_name assert created_file.exists() def test_file_upload_multi_part(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) file_name = \"test2.jpg\" file0 = (io.BytesIO(b\"abcdef\"), file_name) file1 = (io.BytesIO(b\"abcdef\"), file_name) upload_path = pathlib.Path(gui._get_config(\"upload_folder\", tempfile.gettempdir())) file_name = _get_non_existent_file_path(upload_path, file_name).name ret = flask_client.post( f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": \"varname\", \"blob\": file0, \"total\": \"2\", \"part\": \"0\"}, content_type=\"multipart/form-data\", ) assert ret.status_code == 200 file0_path = upload_path / f\"{file_name}.part.0\" assert file0_path.exists() ret = flask_client.post( f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": \"varname\", \"blob\": file1, \"total\": \"2\", \"part\": \"1\"}, content_type=\"multipart/form-data\", ) assert ret.status_code == 200 file1_path = upload_path / f\"{file_name}.part.1\" assert file1_path.exists() file_path = upload_path / file_name assert file_path.exists() def test_file_upload_multiple(gui: Gui, helpers): var_name = \"varname\" gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() with gui.get_flask_app().app_context(): gui._bind_var_val(var_name, None) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = _DataScopes._GLOBAL_ID file = (io.BytesIO(b\"abcdef\"), \"test.jpg\") ret = flask_client.post( f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": var_name, \"blob\": file}, content_type=\"multipart/form-data\" ) assert ret.status_code == 200 created_file = pathlib.Path(gui._get_config(\"upload_folder\", tempfile.gettempdir())) / \"test.jpg\" assert created_file.exists() file2 = (io.BytesIO(b\"abcdef\"), \"test2.jpg\") ret = flask_client.post( f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": var_name, \"blob\": file2, \"multiple\": \"True\"}, content_type=\"multipart/form-data\", ) assert ret.status_code == 200 created_file = pathlib.Path(gui._get_config(\"upload_folder\", tempfile.gettempdir())) 
/ \"test2.jpg\" assert created_file.exists() value = getattr(gui._bindings()._get_all_scopes()[sid], var_name) assert len(value) == 2 "} {"text": "import pathlib import pytest from taipy.gui import Gui def test_image_path_not_found(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) ret = flask_client.get(f\"/taipy-images/images/img.png?client_id={sid}\") assert ret.status_code == 404 def test_image_path_found(gui: Gui, helpers): url = gui._get_content( \"img\", str((pathlib.Path(__file__).parent.parent.parent / \"resources\" / \"fred.png\").resolve()), True ) gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) ret = flask_client.get(f\"{url}?client_id={sid}\") assert ret.status_code == 200 def test_image_data_too_big(gui: Gui, helpers): with open((pathlib.Path(__file__).parent.parent.parent / \"resources\" / \"taipan.jpg\"), \"rb\") as big_file: url = gui._get_content(\"img\", big_file.read(), True) assert not url.startswith(\"data:\") "} {"text": "import inspect import pytest from taipy.gui import Gui def test_user_content_without_callback(gui: Gui, helpers): gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() with pytest.warns(UserWarning): ret = flask_client.get(gui._get_user_content_url(\"path\")) assert ret.status_code == 404 def test_user_content_with_wrong_callback(gui: Gui, helpers): def on_user_content_cb(state, path, args): return None on_user_content = on_user_content_cb gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() with pytest.warns(UserWarning): ret = flask_client.get(gui._get_user_content_url(\"path\", {\"a\": \"b\"})) assert ret.status_code == 404 def test_user_content_with_callback(gui: Gui, helpers): def on_user_content_cb(state, path, args): return \"\" on_user_content = on_user_content_cb gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() ret = flask_client.get(gui._get_user_content_url(\"path\")) assert ret.status_code == 200 "} {"text": "import inspect from taipy.gui import Gui, Markdown from taipy.gui.data.data_scope import _DataScopes def test_sending_messages_in_group(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) cid = _DataScopes._GLOBAL_ID # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") assert gui._bindings()._get_all_scopes()[cid].name == \"World!\" # type: ignore assert gui._bindings()._get_all_scopes()[cid].btn_id == \"button1\" # type: ignore with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}): with gui as aGui: aGui._Gui__state.name = \"Monde!\" aGui._Gui__state.btn_id = \"button2\" assert gui._bindings()._get_all_scopes()[cid].name == \"Monde!\" assert 
gui._bindings()._get_all_scopes()[cid].btn_id == \"button2\" # type: ignore received_messages = ws_client.get_received() helpers.assert_outward_ws_multiple_message(received_messages[0], \"MS\", 2) "} {"text": "import inspect import logging import pathlib import pytest from taipy.gui import Gui, download def test_download_file(gui: Gui, helpers): def do_something(state, id): download(state, (pathlib.Path(__file__).parent.parent.parent / \"resources\" / \"taipan.jpg\")) # Bind a page so that the function will be called # gui.add_page( # \"test\", Markdown(\"<|Do something!|button|on_action=do_something|id=my_button|>\") # ) # set gui frame gui._set_frame(inspect.currentframe()) gui.run(run_server=False) # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"A\", \"name\": \"my_button\", \"payload\": \"do_something\"}) # assert for received message (message that would be sent to the front-end client) received_messages = ws_client.get_received() assert len(received_messages) == 1 assert isinstance(received_messages[0], dict) assert \"name\" in received_messages[0] and received_messages[0][\"name\"] == \"message\" assert \"args\" in received_messages[0] args = received_messages[0][\"args\"] assert \"type\" in args and args[\"type\"] == \"DF\" assert \"content\" in args and args[\"content\"] == \"/taipy-content/taipyStatic0/taipan.jpg\" logging.getLogger().debug(args[\"content\"]) "} {"text": "import inspect from taipy.gui import Gui, Markdown def ws_u_assert_template(gui: Gui, helpers, value_before_update, value_after_update, payload): # Bind test variable var = value_before_update # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) # Bind a page so that the variable will be evaluated as expression gui.add_page(\"test\", Markdown(\"<|{var}|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") assert gui._bindings()._get_all_scopes()[sid].var == value_before_update ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"U\", \"name\": \"tpec_TpExPr_var_TPMDL_0\", \"payload\": payload}) assert gui._bindings()._get_all_scopes()[sid].var == value_after_update # assert for received message (message that would be sent to the front-end client) received_message = ws_client.get_received() assert len(received_message) helpers.assert_outward_ws_message(received_message[0], \"MU\", \"tpec_TpExPr_var_TPMDL_0\", value_after_update) def test_ws_u_string(gui: Gui, helpers): value_before_update = \"a random string\" value_after_update = \"a random string is added\" payload = {\"value\": value_after_update} # set gui frame gui._set_frame(inspect.currentframe()) ws_u_assert_template(gui, helpers, value_before_update, value_after_update, payload) def test_ws_u_number(gui: Gui, helpers): value_before_update = 10 value_after_update = \"11\" payload = {\"value\": value_after_update} # set gui frame gui._set_frame(inspect.currentframe()) ws_u_assert_template(gui, helpers, value_before_update, value_after_update, payload) "} {"text": "import inspect from taipy.gui import Gui, Markdown def 
test_du_table_data_fetched(gui: Gui, helpers, csvdata): # Bind test variables csvdata = csvdata # set gui frame gui._set_frame(inspect.currentframe()) Gui._set_timezone(\"UTC\") # Bind a page so that the variable will be evaluated as expression gui.add_page( \"test\", Markdown( \"<|{csvdata}|table|page_size=10|page_size_options=10;30;100|columns=Day;Entity;Code;Daily hospital occupancy|date_format=eee dd MMM yyyy|>\" ), ) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) sid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") ws_client.emit( \"message\", { \"client_id\": sid, \"type\": \"DU\", \"name\": \"_TpD_tpec_TpExPr_csvdata_TPMDL_0\", \"payload\": { \"columns\": [\"Day\", \"Entity\", \"Code\", \"Daily hospital occupancy\"], \"pagekey\": \"0-100--asc\", \"start\": 0, \"end\": 9, \"orderby\": \"\", \"sort\": \"asc\", }, }, ) # assert for received message (message that would be sent to the front-end client) received_messages = ws_client.get_received() assert received_messages helpers.assert_outward_ws_message( received_messages[0], \"MU\", \"_TpD_tpec_TpExPr_csvdata_TPMDL_0\", { \"data\": [ { \"Code\": \"AUT\", \"Day_str\": \"2020-04-01T00:00:00.000000Z\", \"Daily hospital occupancy\": 856, \"Entity\": \"Austria\", \"_tp_index\": 0, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-02T00:00:00.000000Z\", \"Daily hospital occupancy\": 823, \"Entity\": \"Austria\", \"_tp_index\": 1, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-03T00:00:00.000000Z\", \"Daily hospital occupancy\": 829, \"Entity\": \"Austria\", \"_tp_index\": 2, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-04T00:00:00.000000Z\", \"Daily hospital occupancy\": 826, \"Entity\": \"Austria\", \"_tp_index\": 3, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-05T00:00:00.000000Z\", \"Daily hospital occupancy\": 712, \"Entity\": \"Austria\", \"_tp_index\": 4, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-06T00:00:00.000000Z\", \"Daily hospital occupancy\": 824, \"Entity\": \"Austria\", \"_tp_index\": 5, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-07T00:00:00.000000Z\", \"Daily hospital occupancy\": 857, \"Entity\": \"Austria\", \"_tp_index\": 6, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-08T00:00:00.000000Z\", \"Daily hospital occupancy\": 829, \"Entity\": \"Austria\", \"_tp_index\": 7, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-09T00:00:00.000000Z\", \"Daily hospital occupancy\": 820, \"Entity\": \"Austria\", \"_tp_index\": 8, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-10T00:00:00.000000Z\", \"Daily hospital occupancy\": 771, \"Entity\": \"Austria\", \"_tp_index\": 9, }, ], \"rowcount\": 14477, \"start\": 0, \"format\": \"JSON\", }, ) "} {"text": "import inspect import pytest from taipy.gui import Gui, Markdown def test_default_on_change(gui: Gui, helpers): st = {\"d\": False} def on_change(state, var, value): st[\"d\"] = True x = 10 # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|{x}|input|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") # fake 
var update ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"U\", \"name\": \"x\", \"payload\": {\"value\": \"20\"}}) assert ws_client.get_received() assert st[\"d\"] is True def test_specific_on_change(gui: Gui, helpers): st = {\"d\": False, \"s\": False} def on_change(state, var, value): st[\"d\"] = True def on_input_change(state, var, value): st[\"s\"] = True x = 10 # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|{x}|input|on_change=on_input_change|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") # fake var update ws_client.emit( \"message\", {\"client_id\": sid, \"type\": \"U\", \"name\": \"x\", \"payload\": {\"value\": \"20\", \"on_change\": \"on_input_change\"}}, ) assert ws_client.get_received() assert st[\"s\"] is True assert st[\"d\"] is False "} {"text": "import inspect import pytest from taipy.gui import Gui, Markdown def test_ru_selector(gui: Gui, helpers, csvdata): # Bind test variables selected_val = [\"value1\", \"value2\"] # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) # Bind a page so that the variable will be evaluated as expression gui.add_page( \"test\", Markdown(\"<|{selected_val}|selector|multiple|>\"), ) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) sid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"RU\", \"name\": \"\", \"payload\": {\"names\": [\"selected_val\"]}}) # assert for received message (message that would be sent to the front-end client) received_messages = ws_client.get_received() assert len(received_messages) helpers.assert_outward_ws_message(received_messages[0], \"MU\", \"selected_val\", [\"value1\", \"value2\"]) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import inspect import pytest from taipy.gui import Gui, Markdown def test_broadcast(gui: Gui, helpers): # Bind test variables selected_val = [\"value1\", \"value2\"] # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) # Bind a page so that the variable will be evaluated as expression gui.add_page( \"test\", Markdown(\"<|{selected_val}|selector|multiple|>\"), ) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) sid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") gui._broadcast(\"broadcast_name\", \"broadcast_value\") received_messages = ws_client.get_received() assert len(received_messages) helpers.assert_outward_simple_ws_message(received_messages[0], \"U\", \"_bc_broadcast_name\", \"broadcast_value\") "} {"text": "import inspect import time from taipy.gui import Gui, Markdown def test_a_button_pressed(gui: Gui, helpers): def do_something(state, id): state.x = state.x + 10 state.text = \"a random text\" x = 10 # noqa: F841 text = \"hi\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) # Bind a page so that the variable will be evaluated as expression gui.add_page( \"test\", Markdown(\"<|Do something!|button|on_action=do_something|id=my_button|> | <|{x}|> | <|{text}|>\") ) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") assert gui._bindings()._get_all_scopes()[sid].x == 10 # type: ignore assert gui._bindings()._get_all_scopes()[sid].text == \"hi\" # type: ignore ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"A\", \"name\": \"my_button\", \"payload\": \"do_something\"}) assert gui._bindings()._get_all_scopes()[sid].text == \"a random text\" assert gui._bindings()._get_all_scopes()[sid].x == 20 # type: ignore # assert for received message (message that would be sent to the front-end client) received_messages = ws_client.get_received() helpers.assert_outward_ws_message(received_messages[0], \"MU\", \"x\", 20) helpers.assert_outward_ws_message(received_messages[1], \"MU\", \"text\", \"a random text\") "} {"text": "import inspect import warnings from flask import g from taipy.gui import Gui from taipy.gui.utils.types import _TaipyNumber def test_unbind_variable_in_expression(gui: Gui, helpers): gui.run(run_server=False, single_client=True) with warnings.catch_warnings(record=True) as records: with gui.get_flask_app().app_context(): gui._evaluate_expr(\"{x}\") warns = helpers.get_taipy_warnings(records) assert len(warns) == 3 assert \"Variable 'x' is not available in\" in str(warns[0].message) assert \"Variable 'x' is not defined\" in str(warns[1].message) assert \"Cannot evaluate expression 'x'\" in str(warns[2].message) assert \"name 'x' is not defined\" in str(warns[2].message) def test_evaluate_same_expression_multiple_times(gui: Gui): x = 10 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) with gui.get_flask_app().app_context(): s1 = gui._evaluate_expr(\"x + 10 = {x + 10}\") s2 = gui._evaluate_expr(\"x + 10 = {x + 10}\") assert s1 == s2 def 
test_evaluate_expressions_same_variable(gui: Gui): x = 10 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) with gui.get_flask_app().app_context(): s1 = gui._evaluate_expr(\"x + 10 = {x + 10}\") s2 = gui._evaluate_expr(\"x = {x}\") assert \"tp_TpExPr_x\" in s1 and \"tp_TpExPr_x\" in s2 def test_evaluate_holder(gui: Gui): x = 10 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) with warnings.catch_warnings(record=True): with gui.get_flask_app().app_context(): gui._evaluate_expr(\"{x + 10}\") hash = gui._evaluate_bind_holder(_TaipyNumber, \"TpExPr_x + 10_TPMDL_0\") assert \"_TpN_tp_TpExPr_x_10_TPMDL_0_0\" in hash lst = gui._evaluate_holders(\"TpExPr_x + 10_TPMDL_0\") assert len(lst) == 1 assert \"_TpN_tp_TpExPr_x_10_TPMDL_0_0\" in lst[0] # test re-evaluate holders gui._bindings().x = 20 gui._re_evaluate_expr(lst[0]) def test_evaluate_not_expression_type(gui: Gui): gui.run(run_server=False) with gui.get_flask_app().app_context(): assert \"x + 10\" == gui._evaluate_expr(\"x + 10\") def test_evaluate_expression_2_clients(gui: Gui): x = 10 # noqa: F841 y = 20 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.run(run_server=False) with gui.get_flask_app().app_context(): gui._bindings()._get_or_create_scope(\"A\") gui._bindings()._get_or_create_scope(\"B\") g.client_id = \"A\" gui._evaluate_expr(\"x + y = {x + y}\") g.client_id = \"B\" gui._evaluate_expr(\"x\") gui._re_evaluate_expr(\"x\") "} {"text": "import inspect import pytest from taipy.gui.gui import Gui from taipy.gui.utils import _MapDict def test_map_dict(): d = {\"a\": 1, \"b\": 2, \"c\": 3} md = _MapDict(d) md_copy = _MapDict(d).copy() assert len(md) == 3 assert md.__getitem__(\"a\") == d[\"a\"] md.__setitem__(\"a\", 4) assert md.__getitem__(\"a\") == 4 assert d[\"a\"] == 4 v1 = d[\"b\"] v2 = md.pop(\"b\") assert v1 == v2 assert \"b\" not in d.keys() assert \"c\" in md assert len(md) == 2 v1 = d[\"c\"] v2 = md.popitem() assert v2 == (\"c\", v1) assert len(md) == 1 md.clear() assert len(md) == 0 assert len(d) == 0 assert len(md_copy) == 3 v1 = \"\" for k in md_copy: v1 += k assert v1 == \"abc\" v1 = \"\" for k in md_copy.keys(): v1 += k assert v1 == \"abc\" v1 = \"\" for k in md_copy.__reversed__(): v1 += k assert v1 == \"cba\" v1 = 0 for k in md_copy.values(): v1 += k assert v1 == 6 # 1+2+3 v1 = md_copy.setdefault(\"a\", 5) assert v1 == 1 v1 = md_copy.setdefault(\"d\", 5) assert v1 == 5 try: md = _MapDict(\"not_a_dict\") assert False except Exception: assert True pass def test_map_dict_update(): update_values = {} def update(k, v): update_values[0] = k update_values[1] = v pass d = {\"a\": 1, \"b\": \"2\"} md = _MapDict(d, update) md.__setitem__(\"a\", 3) assert update_values[0] == \"a\" assert update_values[1] == 3 pass def test_map_dict_update_full_dictionary_1(): values = {\"a\": 1, \"b\": 2} update_values = {\"a\": 3, \"b\": 5} md = _MapDict(values) assert md[\"a\"] == 1 assert md[\"b\"] == 2 md.update(update_values) assert md[\"a\"] == 3 assert md[\"b\"] == 5 def test_map_dict_update_full_dictionary_2(): temp_values = {} def update(k, v): temp_values[k] = v values = {\"a\": 1, \"b\": 2} update_values = {\"a\": 3, \"b\": 5} md = _MapDict(values, update) assert md[\"a\"] == 1 assert md[\"b\"] == 2 md.update(update_values) assert temp_values[\"a\"] == 3 assert temp_values[\"b\"] == 5 def test_map_dict_set(gui: Gui, test_client): d = {\"a\": 1} # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) 
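# Aside: a minimal public-API sketch of the behavior the _MapDict tests above
# pin down. The demo_* names are illustrative assumptions, not part of this
# test module; the final run() is left commented so the snippet stays inert.
from taipy.gui import Markdown

demo_settings = {"size": 12}  # a plain dict; Taipy wraps it in a _MapDict once bound

def demo_grow(state):
    # item access on the bound dict goes through the _MapDict proxy, so this
    # both mutates the underlying dict and pushes the new value to clients
    state.demo_settings["size"] = state.demo_settings["size"] + 1

demo_page = Markdown("<|{demo_settings['size']}|>\n<|Grow|button|on_action=demo_grow|>")
# Gui(demo_page).run()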
gui.run(run_server=False, single_client=True) with gui.get_flask_app().app_context(): assert isinstance(gui._Gui__state.d, _MapDict) gui._Gui__state.d = {\"b\": 2} assert isinstance(gui._Gui__state.d, _MapDict) assert len(gui._Gui__state.d) == 1 assert gui._Gui__state.d.get(\"a\", None) is None assert gui._Gui__state.d.get(\"b\", None) == 2 def test_map_dict_items(): def update(k, v): pass values = {\"a\": 1, \"b\": {\"c\": \"list c\"}} md = _MapDict(values) mdu = _MapDict(values, update) assert md[\"a\"] == 1 assert isinstance(md[\"b\"], _MapDict) assert isinstance(mdu[\"b\"], _MapDict) assert md[\"b\"][\"c\"] == \"list c\" assert mdu[\"b\"][\"c\"] == \"list c\" del md[\"a\"] with pytest.raises(KeyError): md[\"e\"] setattr(md, \"a\", 1) assert md[\"a\"] == 1 "} {"text": "import pathlib import tempfile from taipy.gui import Gui from taipy.gui.utils import _get_non_existent_file_path def test_empty_file_name(gui: Gui, helpers): assert _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), \"\").name def test_non_existent_file(gui: Gui, helpers): assert not _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), \"\").exists() def test_existent_file(gui: Gui, helpers): file_path = _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), \"\") with open(file_path, \"w\") as file_handler: file_handler.write(\"hello\") assert file_path.exists() file_stem = file_path.stem.split(\".\", 1)[0] file_suffix = file_path.suffixes[-1] index = int(file_path.suffixes[0][1:]) if len(file_path.suffixes) > 1 else -1 file_path = _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), \"\") assert file_path.name == f\"{file_stem}.{index + 1}{file_suffix}\" with open(file_path, \"w\") as file_handler: file_handler.write(\"hello 2\") assert file_path.exists() file_path = _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), \"\") assert file_path.name == f\"{file_stem}.{index + 2}{file_suffix}\" "} {"text": " import warnings import pytest from taipy.gui.utils.date import _string_to_date from taipy.gui.utils.types import _TaipyBase, _TaipyBool, _TaipyDate, _TaipyNumber def test_taipy_base(): tb = _TaipyBase(\"value\", \"hash\") assert tb.get() == \"value\" assert tb.get_name() == \"hash\" tb.set(\"a value\") assert tb.get() == \"a value\" assert tb.get_hash() == NotImplementedError def test_taipy_bool(): assert _TaipyBool(0, \"v\").get() is False assert _TaipyBool(1, \"v\").get() is True assert _TaipyBool(False, \"v\").get() is False assert _TaipyBool(True, \"v\").get() is True assert _TaipyBool(\"\", \"v\").get() is False assert _TaipyBool(\"hey\", \"v\").get() is True assert _TaipyBool([], \"v\").get() is False assert _TaipyBool([\"an item\"], \"v\").get() is True def test_taipy_number(): with pytest.raises(TypeError): _TaipyNumber(\"a string\", \"x\").get() with warnings.catch_warnings(record=True): _TaipyNumber(\"a string\", \"x\").cast_value(\"a string\") _TaipyNumber(0, \"x\").cast_value(0) def test_taipy_date(): assert _TaipyDate(_string_to_date(\"2022-03-03 00:00:00 UTC\"), \"x\").get() == \"2022-03-03T00:00:00+00:00\" assert _TaipyDate(\"2022-03-03 00:00:00 UTC\", \"x\").get() == \"2022-03-03 00:00:00 UTC\" assert _TaipyDate(None, \"x\").get() is None _TaipyDate(\"\", \"x\").cast_value(\"2022-03-03 00:00:00 UTC\") _TaipyDate(\"\", \"x\").cast_value(_string_to_date(\"2022-03-03 00:00:00 UTC\")) "} {"text": "import inspect from time import sleep import pytest from taipy.gui import Gui, State, invoke_long_callback def test_long_callback(gui: Gui): status = None 
# noqa: F841 def heavy_function(delay=1): sleep(delay) def heavy_function_with_exception(delay=1): sleep(delay) raise Exception(\"Heavy function Exception\") def heavy_function_status(state: State, status: int): state.status = status def on_exception(state: State, function_name: str, e: Exception): state.status = -1 gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) state = gui._Gui__state with gui.get_flask_app().app_context(): assert state.status is None invoke_long_callback(state, heavy_function) invoke_long_callback(state, heavy_function_with_exception) invoke_long_callback(state, heavy_function, (), heavy_function_status) invoke_long_callback(state, heavy_function, (2), heavy_function_status, (), 1000) invoke_long_callback(state, heavy_function_with_exception, (), heavy_function_status) "} {"text": "import inspect from flask import g from taipy.gui import Gui, Markdown, get_state_id def test_get_state_id(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().app_context(): g.client_id = cid assert cid == get_state_id(gui._Gui__state) "} {"text": "import inspect from flask import g from taipy.gui import Gui, Markdown, State, download def test_download(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 def on_download_action(state: State): pass # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}): g.client_id = cid download(gui._Gui__state, \"some text\", \"filename.txt\", \"on_download_action\") received_messages = ws_client.get_received() helpers.assert_outward_ws_simple_message( received_messages[0], \"DF\", {\"name\": \"filename.txt\", \"onAction\": \"on_download_action\"} ) "} {"text": "import inspect from flask import g from taipy.gui import Gui, Markdown, navigate def test_navigate(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}): g.client_id = cid navigate(gui._Gui__state, \"test\") received_messages = ws_client.get_received() 
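# Aside: the navigate() call under test, as it would appear in an ordinary
# button callback. The page names and demo_* identifiers are assumptions for
# illustration, and run() is commented out so nothing executes here.
from taipy.gui import Markdown, State, navigate

def demo_go_home(state: State):
    navigate(state, "home")  # emits the same "NA" websocket message asserted below

demo_nav_pages = {
    "home": Markdown("# Home"),
    "other": Markdown("<|Go home|button|on_action=demo_go_home|>"),
}
# Gui(pages=demo_nav_pages).run()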
helpers.assert_outward_ws_simple_message(received_messages[0], \"NA\", {\"to\": \"test\"}) "} {"text": "import inspect from flask import g from taipy.gui import Gui, Markdown, State, invoke_callback def test_invoke_callback(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 val = 1 # noqa: F841 def user_callback(state: State): state.val = 10 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\\n<|{val}|>\")) gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() # client id cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().app_context(): g.client_id = cid invoke_callback(gui, cid, user_callback, []) assert gui._Gui__state.val == 10 "} {"text": "import inspect from flask import g from taipy.gui import Gui, Markdown, hold_control def test_hold_control(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}): g.client_id = cid hold_control(gui._Gui__state) received_messages = ws_client.get_received() helpers.assert_outward_ws_simple_message( received_messages[0], \"BL\", {\"action\": \"_taipy_on_cancel_block_ui\", \"message\": \"Work in Progress...\"} ) "} {"text": "import inspect from flask import g from taipy.gui import Gui, Markdown, resume_control def test_resume_control(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}): g.client_id = cid resume_control(gui._Gui__state) received_messages = ws_client.get_received() helpers.assert_outward_ws_simple_message(received_messages[0], \"BL\", {\"message\": None}) "} {"text": "import inspect from flask import g from taipy.gui import Gui, Markdown, notify def test_notify(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered 
flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}): g.client_id = cid notify(gui._Gui__state, \"Info\", \"Message\") received_messages = ws_client.get_received() helpers.assert_outward_ws_simple_message(received_messages[0], \"AL\", {\"atype\": \"Info\", \"message\": \"Message\"}) "} {"text": "import contextlib import time from urllib.request import urlopen import pytest from testbook import testbook @pytest.mark.filterwarnings(\"ignore::RuntimeWarning\") @testbook(\"tests/gui/notebook/simple_gui.ipynb\") def test_notebook_simple_gui(tb, helpers): tb.execute_cell(\"import\") tb.execute_cell(\"page_declaration\") tb.execute_cell(\"gui_init\") tb.execute_cell(\"gui_run\") while not helpers.port_check(): time.sleep(0.1) assert \">Hello\" in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") assert 'defaultValue=\\\\\"10\\\\\"' in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") # Test state manipulation within notebook tb.execute_cell(\"get_variable\") assert \"10\" in tb.cell_output_text(\"get_variable\") assert 'defaultValue=\\\\\"10\\\\\"' in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") tb.execute_cell(\"set_variable\") assert 'defaultValue=\\\\\"20\\\\\"' in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") tb.execute_cell(\"re_get_variable\") assert \"20\" in tb.cell_output_text(\"re_get_variable\") # Test page reload tb.execute_cell(\"gui_stop\") with pytest.raises(Exception) as exc_info: urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\") assert \"501: Gateway error\" in str(exc_info.value) tb.execute_cell(\"gui_re_run\") while True: with contextlib.suppress(Exception): urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\") break assert \">Hello\" in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") tb.execute_cell(\"gui_reload\") while True: with contextlib.suppress(Exception): urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\") break assert \">Hello\" in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") tb.execute_cell(\"gui_re_stop\") with pytest.raises(Exception) as exc_info: urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\") assert \"501: Gateway error\" in str(exc_info.value) "} {"text": "from taipy.gui import Gui, Markdown "} {"text": "import inspect from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_redirect(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Redirect Successfully|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"Redirect Successfully\" "} {"text": "import pytest @pytest.fixture(scope=\"session\") def browser_context_args(browser_context_args, e2e_port, e2e_base_url): return { **browser_context_args, \"base_url\": f\"http://127.0.0.1:{e2e_port}{e2e_base_url}\", \"timezone_id\": \"Europe/Paris\", } @pytest.fixture(scope=\"function\") def gui(helpers, e2e_base_url): from taipy.gui import Gui gui = Gui() gui.load_config({\"base_url\": e2e_base_url, \"host\": \"0.0.0.0\" if e2e_base_url != \"/\" else \"127.0.0.1\"}) yield gui # Delete Gui instance and state of 
some classes after each test gui.stop() helpers.test_cleanup() "} {"text": "import inspect import re from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from playwright.sync_api import expect from taipy.gui import Gui @pytest.mark.teste2e def test_navbar_navigate(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) gui.add_page(name=\"Data\", page=\"<|navbar|id=nav1|> <|Data|id=text-data|>\") gui.add_page(name=\"Test\", page=\"<|navbar|id=nav1|> <|Test|id=text-test|>\") helpers.run_e2e(gui) page.goto(\"./Data\") page.expect_websocket() page.wait_for_selector(\"#text-data\") page.click(\"#nav1 button:nth-child(2)\") page.wait_for_selector(\"#text-test\") expect(page).to_have_url(re.compile(\".*Test\")) page.click(\"#nav1 button:nth-child(1)\") page.wait_for_selector(\"#text-data\") expect(page).to_have_url(re.compile(\".*Data\")) "} {"text": "import inspect from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui from taipy.gui.utils.date import _string_to_date @pytest.mark.teste2e def test_timzone_specified_1(page: \"Page\", gui: Gui, helpers): _timezone_test_template(page, gui, helpers, \"Etc/GMT\", [\"2022-03-03 00:00:00 UTC\"]) @pytest.mark.teste2e def test_timzone_specified_2(page: \"Page\", gui: Gui, helpers): _timezone_test_template( page, gui, helpers, \"Europe/Paris\", [\"2022-03-03 01:00:00 GMT+1\", \"2022-03-03 01:00:00 UTC+1\"] ) @pytest.mark.teste2e def test_timzone_specified_3(page: \"Page\", gui: Gui, helpers): _timezone_test_template( page, gui, helpers, \"Asia/Ho_Chi_Minh\", [\"2022-03-03 07:00:00 GMT+7\", \"2022-03-03 07:00:00 UTC+7\"] ) @pytest.mark.teste2e def test_timzone_specified_4(page: \"Page\", gui: Gui, helpers): _timezone_test_template( page, gui, helpers, \"America/Sao_Paulo\", [\"2022-03-02 21:00:00 GMT-3\", \"2022-03-02 21:00:00 UTC\u22123\"] ) @pytest.mark.teste2e def test_timezone_client_side(page: \"Page\", gui: Gui, helpers): _timezone_test_template(page, gui, helpers, \"client\", [\"2022-03-03 01:00:00 GMT+1\", \"2022-03-03 01:00:00 UTC+1\"]) def _timezone_test_template(page: \"Page\", gui: Gui, helpers, time_zone, texts): page_md = \"\"\" <|{t}|id=text1|> \"\"\" t = _string_to_date(\"2022-03-03T00:00:00.000Z\") # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, time_zone=time_zone) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() in texts def test_date_only(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{t}|id=text1|> \"\"\" t = _string_to_date(\"Wed Jul 28 1993\") # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() in [\"1993-07-28\"] "} {"text": "import inspect from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_accessor_json(page: \"Page\", gui: Gui, csvdata, helpers): table_data = csvdata # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page( name=\"test\", page=\"<|{table_data}|table|columns=Day;Entity;Code;Daily hospital occupancy|date_format=eee dd MMM yyyy|id=table1|>\", ) 
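# Aside: the table control under test, in a self-contained form. The DataFrame
# is fabricated sample data standing in for the csvdata fixture (values echo
# the rows checked in assert_table_content below); run() stays commented out.
import pandas as pd
from taipy.gui import Markdown

demo_data = pd.DataFrame(
    {
        "Day": pd.to_datetime(["2020-04-01", "2020-04-02"]),
        "Entity": ["Austria", "Austria"],
        "Code": ["AUT", "AUT"],
        "Daily hospital occupancy": [856, 823],
    }
)
demo_table_page = Markdown(
    "<|{demo_data}|table|columns=Day;Entity;Code;Daily hospital occupancy|date_format=eee dd MMM yyyy|>"
)
# Gui(demo_table_page).run()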
helpers.run_e2e(gui, use_arrow=False) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#table1 tr:nth-child(32)\") # wait for data to be loaded (30 rows of skeleton while loading) assert_table_content(page) @pytest.mark.teste2e def test_accessor_arrow(page: \"Page\", gui: Gui, csvdata, helpers): if util.find_spec(\"pyarrow\"): table_data = csvdata # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page( name=\"test\", page=\"<|{table_data}|table|columns=Day;Entity;Code;Daily hospital occupancy|date_format=eee dd MMM yyyy|id=table1|>\", ) helpers.run_e2e(gui, use_arrow=True) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector( \"#table1 tr:nth-child(32)\" ) # wait for data to be loaded (30 rows of skeleton while loading) assert_table_content(page) def assert_table_content(page: \"Page\"): # assert page.query_selector(\"#table1 tbody tr:nth-child(1) td:nth-child(1)\").inner_text() == \"Wed 01 Apr 2020\" assert page.query_selector(\"#table1 tbody tr:nth-child(1) td:nth-child(2)\").inner_text() == \"Austria\" assert page.query_selector(\"#table1 tbody tr:nth-child(1) td:nth-child(4)\").inner_text() == \"856\" "} {"text": "import inspect from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_theme_light(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") background_color = page.evaluate( 'window.getComputedStyle(document.querySelector(\"main\"), null).getPropertyValue(\"background-color\")' ) assert background_color == \"rgb(255, 255, 255)\" @pytest.mark.teste2e def test_theme_dark(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=True) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") background_color = page.evaluate( 'window.getComputedStyle(document.querySelector(\"main\"), null).getPropertyValue(\"background-color\")' ) assert background_color == \"rgb(18, 18, 18)\" "} {"text": "import inspect from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_margin_1(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False, margin=\"10rem\") page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") margin = page.evaluate('window.getComputedStyle(document.querySelector(\"#root\"), null).getPropertyValue(\"margin\")') assert margin == \"160px\" @pytest.mark.teste2e def test_margin_2(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") margin = page.evaluate('window.getComputedStyle(document.querySelector(\"#root\"), null).getPropertyValue(\"margin\")') assert margin == \"16px\" @pytest.mark.teste2e def test_margin_3(page: \"Page\", gui: Gui, 
helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False, margin=\"10rem\", stylekit=True) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") margin = page.evaluate('window.getComputedStyle(document.querySelector(\"#root\"), null).getPropertyValue(\"margin\")') assert margin == \"160px\" @pytest.mark.teste2e def test_margin_4(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False, stylekit={\"root_margin\": \"20rem\"}) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") margin = page.evaluate('window.getComputedStyle(document.querySelector(\"#root\"), null).getPropertyValue(\"margin\")') assert margin == \"320px\" @pytest.mark.teste2e def test_margin_5(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False, stylekit={\"root_margin\": \"20rem\"}, margin=\"10rem\") page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") margin = page.evaluate('window.getComputedStyle(document.querySelector(\"#root\"), null).getPropertyValue(\"margin\")') assert margin == \"320px\" "} {"text": "import inspect import os import time from importlib import util from pathlib import Path from urllib.request import urlopen import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui, Html from taipy.gui.server import _Server @pytest.mark.teste2e def test_html_render_with_style(page: \"Page\", gui: Gui, helpers): html_content = \"\"\" Hey There \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(\"page1\", Html(html_content)) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#text1\") retry = 0 while ( retry < 10 and page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') != \"rgb(0, 128, 0)\" ): retry += 1 time.sleep(0.2) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') == \"rgb(0, 128, 0)\" ) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text2\"), null).getPropertyValue(\"color\")') == \"rgb(0, 0, 255)\" ) @pytest.mark.teste2e def test_html_render_bind_assets(page: \"Page\", gui: Gui, helpers, e2e_base_url, e2e_port): gui._set_frame(inspect.currentframe()) gui.add_pages(pages=f\"{Path(Path(__file__).parent.resolve())}{os.path.sep}test-assets\") helpers.run_e2e(gui) assert \".taipy-text\" in urlopen( f\"http://127.0.0.1:{e2e_port}{e2e_base_url}test-assets/style/style.css\" ).read().decode(\"utf-8\") page.goto(\"./test-assets/page1\") page.expect_websocket() page.wait_for_selector(\"#text1\") retry = 0 while ( retry < 10 and page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') != \"rgb(0, 128, 0)\" ): retry += 1 time.sleep(0.1) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') == \"rgb(0, 128, 0)\" ) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text2\"), null).getPropertyValue(\"color\")') == \"rgb(0, 0, 255)\" ) 
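# Aside: a compact sketch of the two mechanisms exercised in this file — an
# Html page built from an inline string, and (via the public API, where the
# test below drives _Server directly) a path_mapping entry passed to the Gui
# constructor to expose a local folder under a URL prefix. The folder name is
# an illustrative assumption; run() is commented out.
from taipy.gui import Gui, Html

demo_html_page = Html("<h1 id='text1'>Hey</h1> <p id='text2'>There</p>")
# Gui(pages={"page1": demo_html_page}, path_mapping={"style": "./demo-assets/style"}).run()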
@pytest.mark.teste2e def test_html_render_path_mapping(page: \"Page\", gui: Gui, helpers, e2e_base_url, e2e_port): gui._server = _Server( gui, path_mapping={\"style\": f\"{Path(Path(__file__).parent.resolve())}{os.path.sep}test-assets{os.path.sep}style\"}, flask=gui._flask, async_mode=\"gevent\", ) gui.add_page(\"page1\", Html(f\"{Path(Path(__file__).parent.resolve())}{os.path.sep}page1.html\")) helpers.run_e2e(gui) assert \".taipy-text\" in urlopen(f\"http://127.0.0.1:{e2e_port}{e2e_base_url}/style/style.css\").read().decode(\"utf-8\") page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#text1\") retry = 0 while ( retry < 10 and page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') != \"rgb(0, 128, 0)\" ): retry += 1 time.sleep(0.1) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') == \"rgb(0, 128, 0)\" ) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text2\"), null).getPropertyValue(\"color\")') == \"rgb(0, 0, 255)\" ) "} {"text": "import inspect import logging from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_markdown_render_with_style(page: \"Page\", gui: Gui, helpers): markdown_content = \"\"\" <|Hey|id=text1|> <|There|id=text2|class_name=custom-text|> \"\"\" style = \"\"\" .taipy-text { color: green; } .custom-text { color: blue; } \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(\"page1\", markdown_content, style=style) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#text1\") page.wait_for_selector(\"#Taipy_style\", state=\"attached\") function_evaluated = True try: page.wait_for_function( 'window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\") !== \"rgb(255, 255, 255)\"' ) except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') == \"rgb(0, 128, 0)\" ) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text2\"), null).getPropertyValue(\"color\")') == \"rgb(0, 0, 255)\" ) "} {"text": "import inspect import logging from importlib import util import pytest from taipy.gui import Gui if util.find_spec(\"playwright\"): from playwright._impl._page import Page from .assets2_class_scopes.page1 import Page1 from .assets2_class_scopes.page2 import Page2 def helpers_assert_value(page, s1, s2, v1): s1_val = page.input_value(\"#s1 input\") assert str(s1_val).startswith(s1) s2_val = page.input_value(\"#s2 input\") assert str(s2_val).startswith(s2) val1 = page.query_selector(\"#v1\").inner_text() assert str(val1).startswith(v1) @pytest.mark.timeout(300) @pytest.mark.teste2e @pytest.mark.filterwarnings(\"ignore::Warning\") def test_class_scopes_binding(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) operand_1 = 0 # noqa: F841 gui.add_page(\"page1\", Page1()) gui.add_page(\"page2\", Page2()) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"0\", \"0\", \"0\") page.fill(\"#s1 input\", \"15\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== 
'0'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"0\", \"15\") page.fill(\"#s2 input\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '15'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"20\", \"35\") page.goto(\"./page2\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"15\", \"0\", \"0\") page.fill(\"#s2 input\", \"5\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '0'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"5\", \"75\") page.fill(\"#s1 input\", \"17\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '75'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"17\", \"5\", \"85\") page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"17\", \"20\", \"37\") page.click(\"#btn_reset\") try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '37'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"17\", \"0\", \"17\") "} {"text": "import inspect import logging from importlib import util import pytest from taipy.gui import Gui if util.find_spec(\"playwright\"): from playwright._impl._page import Page from .assets.page1 import page as page1 from .assets.page2 import page as page2 from .assets.page3 import page as page3 @pytest.mark.timeout(300) @pytest.mark.teste2e def test_page_scopes(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) def on_change(state, var, val, module): if var == \"x\" and \"page3\" in module: state.y = val * 10 gui.add_page(\"page1\", page1) gui.add_page(\"page2\", page2) gui.add_page(\"page3\", page3) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"10\" assert page.query_selector(\"#x2\").inner_text() == \"20\" assert page.query_selector(\"#y1\").inner_text() == \"20\" assert page.query_selector(\"#y2\").inner_text() == \"40\" page.goto(\"./page2\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"20\" assert page.query_selector(\"#x2\").inner_text() == \"40\" assert page.query_selector(\"#y1\").inner_text() == \"10\" assert page.query_selector(\"#y2\").inner_text() == \"20\" page.goto(\"./page3\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"50\" assert page.query_selector(\"#x2\").inner_text() == \"100\" page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#x1\") page.fill(\"#xinput\", \"15\") function_evaluated = 
True try: page.wait_for_function(\"document.querySelector('#y2').innerText !== '40'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return assert page.query_selector(\"#x1\").inner_text() == \"15\" assert page.query_selector(\"#x2\").inner_text() == \"30\" assert page.query_selector(\"#y1\").inner_text() == \"45\" assert page.query_selector(\"#y2\").inner_text() == \"90\" page.goto(\"./page2\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"45\" assert page.query_selector(\"#x2\").inner_text() == \"90\" assert page.query_selector(\"#y1\").inner_text() == \"15\" assert page.query_selector(\"#y2\").inner_text() == \"30\" page.fill(\"#xinput\", \"37\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#y2').innerText !== '30'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return assert page.query_selector(\"#x1\").inner_text() == \"37\" assert page.query_selector(\"#x2\").inner_text() == \"74\" assert page.query_selector(\"#y1\").inner_text() == \"185\" assert page.query_selector(\"#y2\").inner_text() == \"370\" page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"185\" assert page.query_selector(\"#x2\").inner_text() == \"370\" assert page.query_selector(\"#y1\").inner_text() == \"37\" assert page.query_selector(\"#y2\").inner_text() == \"74\" page.goto(\"./page3\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"50\" assert page.query_selector(\"#x2\").inner_text() == \"100\" "} {"text": "import inspect import logging from importlib import util import pytest from taipy.gui import Gui, Markdown if util.find_spec(\"playwright\"): from playwright._impl._page import Page from .assets3.page1 import page as page1 def helpers_assert_text(page, s): val1 = page.query_selector(\"#t1\").inner_text() assert str(val1).startswith(s) # for issue #583 @pytest.mark.teste2e @pytest.mark.filterwarnings(\"ignore::Warning\") def test_page_scopes_main_var_access(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) n = \"Hello\" # noqa: F841 root_md = Markdown( \"\"\" <|{n}|input|id=i1|> \"\"\" ) gui.add_pages({\"/\": root_md, \"page1\": page1}) helpers.run_e2e(gui) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#t1\") page.wait_for_selector(\"#i1\") helpers_assert_text(page, \"Hello\") page.fill(\"#i1\", \"Hello World\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#t1').innerText !== 'Hello'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_text(page, \"Hello World\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import inspect import logging from importlib import util import pytest from taipy.gui import Gui, Markdown if util.find_spec(\"playwright\"): from playwright._impl._page import Page from .assets3_class_scopes.page1 import Page1 def helpers_assert_text(page, s): val1 = page.query_selector(\"#t1\").inner_text() assert str(val1).startswith(s) # for issue #583 @pytest.mark.teste2e @pytest.mark.filterwarnings(\"ignore::Warning\") def test_class_scopes_main_var_access(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) n = \"Hello\" # noqa: F841 root_md = Markdown( \"\"\" <|{n}|input|id=i1|> \"\"\" ) gui.add_pages({\"/\": root_md, \"page1\": Page1()}) helpers.run_e2e(gui) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#t1\") page.wait_for_selector(\"#i1\") helpers_assert_text(page, \"Hello\") page.fill(\"#i1\", \"Hello World\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#t1').innerText !== 'Hello'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_text(page, \"Hello World\") "} {"text": "import inspect import logging from importlib import util import pytest from taipy.gui import Gui if util.find_spec(\"playwright\"): from playwright._impl._page import Page from .assets4.page1 import page as page1 from .assets4.page1 import reset_d @pytest.mark.timeout(300) @pytest.mark.teste2e @pytest.mark.filterwarnings(\"ignore::Warning\") def test_page_scopes_state_runtime(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) def test(state): reset_d(state) def test2(state): state[\"page1\"].d = 30 page_md = \"\"\" <|button|on_action=test|id=btn1|> <|button|on_action=test2|id=btn2|> \"\"\" gui.add_page(\"page1\", page1) gui.add_page(name=Gui._get_root_page_name(), page=page_md) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#n1\") text1 = page.query_selector(\"#t1\") assert text1.inner_text() == \"20\" page.fill(\"#n1\", \"21\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#t1').innerText !== '20'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return text1 = page.query_selector(\"#t1\") assert text1.inner_text() == \"21\" page.click(\"#btn1\") try: page.wait_for_function(\"document.querySelector('#t1').innerText !== '21'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return text1 = page.query_selector(\"#t1\") assert text1.inner_text() == \"20\" page.click(\"#btn2\") try: page.wait_for_function(\"document.querySelector('#t1').innerText !== '20'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return text1 = page.query_selector(\"#t1\") assert text1.inner_text() == \"30\" "} {"text": "import inspect import logging from importlib import util import pytest from taipy.gui import Gui if util.find_spec(\"playwright\"): from playwright._impl._page import Page from .assets2.page1 import page as page1 from 
.assets2.page2 import page as page2 def helpers_assert_value(page, s1, s2, v1): s1_val = page.input_value(\"#s1 input\") assert str(s1_val).startswith(s1) s2_val = page.input_value(\"#s2 input\") assert str(s2_val).startswith(s2) val1 = page.query_selector(\"#v1\").inner_text() assert str(val1).startswith(v1) @pytest.mark.timeout(300) @pytest.mark.teste2e @pytest.mark.filterwarnings(\"ignore::Warning\") def test_page_scopes_binding(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) operand_1 = 0 # noqa: F841 gui.add_page(\"page1\", page1) gui.add_page(\"page2\", page2) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"0\", \"0\", \"0\") page.fill(\"#s1 input\", \"15\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '0'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"0\", \"15\") page.fill(\"#s2 input\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '15'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"20\", \"35\") page.goto(\"./page2\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"15\", \"0\", \"0\") page.fill(\"#s2 input\", \"5\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '0'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"5\", \"75\") page.fill(\"#s1 input\", \"17\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '75'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"17\", \"5\", \"85\") page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"17\", \"20\", \"37\") "} {"text": "from taipy.gui import Markdown, Page class Page1(Page): def __init__(self): self.operand_2 = 0 super().__init__() def create_page(self): return Markdown(\"page1.md\") def reset(state): state.operand_2 = 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from taipy.gui import Markdown, Page class Page2(Page): def __init__(self): self.operand_2 = 0 super().__init__() def create_page(self): return Markdown(\"page2.md\") "} {"text": "from taipy.gui import Markdown, Page class Page1(Page): def create_page(self): return Markdown( \"\"\" <|{n}|id=t1|> \"\"\" ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from taipy.gui import Markdown x = 10 y = 20 def on_change(state, var, val): if var == \"x\": state.y = val * 3 page = Markdown( \"\"\" x = <|{x}|id=x1|> x * 2 = <|{x*2}|id=x2|> x number: <|{x}|number|id=xinput|> y = <|{y}|id=y1|> y * 2 = <|{y*2}|id=y2|> \"\"\" ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from taipy.gui import Markdown from .page1 import x as y from .page1 import y as x def on_change(state, var, val): if var == \"x\": state.y = val * 5 page = Markdown( \"\"\" y = <|{x}|id=x1|> y * 2 = <|{x*2}|id=x2|> y number: <|{x}|number|id=xinput|> x = <|{y}|id=y1|> x * 2 = <|{y*2}|id=y2|> \"\"\" ) "} {"text": "from taipy.gui import Markdown x = 50 page = Markdown( \"\"\" <|{x}|id=x1|> x * 2 = <|{x*2}|id=x2|> <|{x}|number|id=xinput|> \"\"\" ) "} {"text": "from taipy.gui import Markdown page = Markdown( \"\"\" # Page1 - Add Operand 1: <|{operand_1}|slider|id=s1|> Operand 2: <|{operand_2}|slider|id=s2|> Operand 1 + Operand 2 = <|{operand_1 + operand_2}|id=v1|> \"\"\" ) operand_2 = 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from taipy.gui import Markdown page = Markdown( \"\"\" # Page2 - Multiply Operand 1: <|{operand_1}|slider|id=s1|> Operand 2: <|{operand_2}|slider|id=s2|> Operand 1 * Operand 2 = <|{operand_1 * operand_2}|id=v1|> \"\"\" ) operand_2 = 0 "} {"text": "from taipy.gui import Markdown d = 20 def reset_d(state): state.d = d # a page = Markdown( \"\"\" <|{d}|text|id=t1|> <|{d}|number|id=n1|> \"\"\" ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from taipy.gui import Markdown page = Markdown( \"\"\" <|{n}|id=t1|> \"\"\" ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import inspect import logging from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_slider_action(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{x}|id=text1|> <|{x}|slider|id=slider1|> \"\"\" x = 10 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"10\" page.wait_for_selector(\"#slider1\") page.fill(\"#slider1 input\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#text1').innerText !== '10'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: text1_2 = page.query_selector(\"#text1\") assert text1_2.inner_text() == \"20\" @pytest.mark.teste2e def test_slider_action_on_change(page: \"Page\", gui: Gui, helpers): d = {\"v1\": 10, \"v2\": 10} # noqa: F841 def on_change(state, var, val): if var == \"d.v2\": d = {\"v1\": 2 * val} state.d.update(d) page_md = \"\"\" Value: <|{d.v1}|id=text1|> Slider: <|{d.v2}|slider|id=slider1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"10\" page.wait_for_selector(\"#slider1\") page.fill(\"#slider1 input\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#text1').innerText !== '10'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: text1_2 = page.query_selector(\"#text1\") assert text1_2.inner_text() == \"40\" "} {"text": "import inspect import logging from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_button_action(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{x}|id=text1|> <|Action|button|on_action=do_something_fn|id=button1|> \"\"\" x = 10 # noqa: F841 def do_something_fn(state): state.x = state.x * 2 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) 
helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"10\" page.click(\"#button1\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#text1').innerText !== '10'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: text1_2 = page.query_selector(\"#text1\") assert text1_2.inner_text() == \"20\" "} {"text": "import inspect import time from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui, State @pytest.mark.teste2e def test_selector_action(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{x}|selector|lov=Item 1;Item 2;Item 3|id=selector1|> \"\"\" x = \"Item 1\" # noqa: F841 def on_init(state: State): assert state.x == \"Item 1\" def on_change(state: State, var, val): if var == \"x\": assert val == \"Item 3\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"ul#selector1\") page.click('#selector1 > div[data-id=\"Item 3\"]') page.wait_for_function( \"document.querySelector('#selector1 > div[data-id=\\\"Item 3\\\"]').classList.contains('Mui-selected')\" ) "} {"text": "import inspect import logging from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui def edit_and_assert_page(page: \"Page\"): assert_input(page, \"0\") page.fill(\"#input2 input\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#val1').innerText !== '0'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return assert_input(page, \"20\") page.fill(\"#input1\", \"30\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#val1').innerText !== '20'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return assert_input(page, \"30\") def assert_input(page: \"Page\", val: str): val1 = page.query_selector(\"#val1\").inner_text() assert str(val1).startswith(val) val2 = page.query_selector(\"#val2\").inner_text() assert str(val2).startswith(f\"Val: {val}\") inp1 = page.input_value(\"input#input1\") assert str(inp1).startswith(val) inp2 = page.input_value(\"#input2 input\") assert str(inp2).startswith(val) @pytest.mark.filterwarnings(\"ignore::Warning\") @pytest.mark.teste2e def test_slider_input_reload(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" #Test Multi Number <|{val}|id=val1|> <|Val: {val}|id=val2|> <|{val}|number|id=input1|> <|{val}|slider|id=input2|> \"\"\" val = 0 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"page1\", page=page_md) helpers.run_e2e_multi_client(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#val1\") edit_and_assert_page(page) page.reload() page.expect_websocket() page.wait_for_selector(\"#val1\") assert_input(page, \"30\") page.evaluate(\"window.localStorage.removeItem('TaipyClientId')\") page.reload() page.expect_websocket() page.wait_for_selector(\"#val1\") assert_input(page, \"0\") "} {"text": "import inspect import logging from 
importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_dict(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{a_dict[a_key]}|input|id=inp1|> <|{a_dict.key}|input|id=inp2|> <|test|button|on_action=on_action_1|id=btn1|> <|test|button|on_action=on_action_2|id=btn2|> \"\"\" a_key = \"key\" a_dict = {a_key: \"Taipy\"} # noqa: F841 def on_action_1(state): state.a_dict.key = \"Hello\" def on_action_2(state): state.a_dict[state.a_key] = \"World\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#inp1\") assert_text(page, \"Taipy\", \"Taipy\") page.fill(\"input#inp1\", \"Taipy is the best\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#inp2').value !== 'Taipy'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: assert_text(page, \"Taipy is the best\", \"Taipy is the best\") page.fill(\"#inp2\", \"Taipy-Gui\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#inp1').value !== 'Taipy is the best'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: assert_text(page, \"Taipy-Gui\", \"Taipy-Gui\") page.click(\"#btn1\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#inp1').value !== 'Taipy-Gui'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: assert_text(page, \"Hello\", \"Hello\") page.click(\"#btn2\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#inp1').value !== 'Hello'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: assert_text(page, \"World\", \"World\") def assert_text(page, inp1, inp2): assert page.input_value(\"input#inp1\") == inp1 assert page.input_value(\"input#inp2\") == inp2 "} {"text": "import inspect import logging from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_text_edit(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{x}|text|id=text1|> <|{x}|input|id=input1|> \"\"\" x = \"Hey\" # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"Hey\" page.wait_for_selector(\"#input1\") page.fill(\"#input1\", \"There\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#text1').innerText !== 'Hey'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: text1_2 = page.query_selector(\"#text1\") assert text1_2.inner_text() == \"There\" @pytest.mark.teste2e def test_number_edit(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{x}|text|id=text1|> <|{x}|number|id=number1|> \"\"\" x = 10 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) 
page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"10\" page.wait_for_selector(\"#number1\") page.fill(\"#number1\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#text1').innerText !== '10'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: text1_2 = page.query_selector(\"#text1\") assert text1_2.inner_text() == \"20\" "} {"text": "from importlib import util from taipy.gui import Gui from taipy.gui.data.array_dict_data_accessor import _ArrayDictDataAccessor from taipy.gui.data.data_format import _DataFormat from taipy.gui.utils import _MapDict an_array = [1, 2, 3] def test_simple_data(gui: Gui, helpers): accessor = _ArrayDictDataAccessor() ret_data = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": -1}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"rowcount\"] == 3 data = value[\"data\"] assert len(data) == 3 def test_simple_data_with_arrow(gui: Gui, helpers): if util.find_spec(\"pyarrow\"): accessor = _ArrayDictDataAccessor() ret_data = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": -1}, _DataFormat.APACHE_ARROW) assert ret_data value = ret_data[\"value\"] assert value assert value[\"rowcount\"] == 3 data = value[\"data\"] assert isinstance(data, bytes) def test_slice(gui: Gui, helpers): accessor = _ArrayDictDataAccessor() value = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": 1}, _DataFormat.JSON)[\"value\"] assert value[\"rowcount\"] == 3 data = value[\"data\"] assert len(data) == 2 value = accessor.get_data(gui, \"x\", an_array, {\"start\": \"0\", \"end\": \"1\"}, _DataFormat.JSON)[\"value\"] data = value[\"data\"] assert len(data) == 2 def test_sort(gui: Gui, helpers): accessor = _ArrayDictDataAccessor() a_dict = {\"name\": [\"A\", \"B\", \"C\"], \"value\": [3, 2, 1]} query = {\"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"orderby\": \"name\", \"sort\": \"desc\"} data = accessor.get_data(gui, \"x\", a_dict, query, _DataFormat.JSON)[\"value\"][\"data\"] assert data[0][\"name\"] == \"C\" def test_aggregate(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() a_dict = {\"name\": [\"A\", \"B\", \"C\", \"A\"], \"value\": [3, 2, 1, 2]} query = {\"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"aggregates\": [\"name\"], \"applies\": {\"value\": \"sum\"}} value = accessor.get_data(gui, \"x\", a_dict, query, _DataFormat.JSON)[\"value\"] assert value[\"rowcount\"] == 3 data = value[\"data\"] agregValue = next(v.get(\"value\") for v in data if v.get(\"name\") == \"A\") assert agregValue == 5 def test_array_of_array(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() an_array = [[1, 2, 3], [2, 4, 6]] ret_data = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": -1}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"rowcount\"] == 2 data = value[\"data\"] assert len(data) == 2 assert len(data[0]) == 4 # including _tp_index def test_empty_array(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() an_array: list[str] = [] ret_data = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": -1}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"rowcount\"] == 0 
data = value[\"data\"] assert len(data) == 0 def test_array_of_diff_array(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() an_array = [[1, 2, 3], [2, 4]] ret_data = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": -1, \"alldata\": True}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"multi\"] is True data = value[\"data\"] assert len(data) == 2 assert len(data[0][\"0/0\"]) == 3 assert len(data[1][\"1/0\"]) == 2 def test_array_of_dicts(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() an_array_of_dicts = [ { \"temperatures\": [ [17.2, 27.4, 28.6, 21.5], [5.6, 15.1, 20.2, 8.1], [26.6, 22.8, 21.8, 24.0], [22.3, 15.5, 13.4, 19.6], [3.9, 18.9, 25.7, 9.8], ], \"cities\": [\"Hanoi\", \"Paris\", \"Rio de Janeiro\", \"Sydney\", \"Washington\"], }, {\"seasons\": [\"Winter\", \"Summer\", \"Spring\", \"Autumn\"]}, ] ret_data = accessor.get_data( gui, \"x\", an_array_of_dicts, {\"start\": 0, \"end\": -1, \"alldata\": True}, _DataFormat.JSON ) assert ret_data value = ret_data[\"value\"] assert value assert value[\"multi\"] is True data = value[\"data\"] assert len(data) == 2 assert len(data[0][\"temperatures\"]) == 5 assert len(data[1][\"seasons\"]) == 4 def test_array_of_Mapdicts(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() dict1 = _MapDict( { \"temperatures\": [ [17.2, 27.4, 28.6, 21.5], [5.6, 15.1, 20.2, 8.1], [26.6, 22.8, 21.8, 24.0], [22.3, 15.5, 13.4, 19.6], [3.9, 18.9, 25.7, 9.8], ], \"cities\": [\"Hanoi\", \"Paris\", \"Rio de Janeiro\", \"Sydney\", \"Washington\"], } ) dict2 = _MapDict({\"seasons\": [\"Winter\", \"Summer\", \"Spring\", \"Autumn\"]}) ret_data = accessor.get_data(gui, \"x\", [dict1, dict2], {\"start\": 0, \"end\": -1, \"alldata\": True}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"multi\"] is True data = value[\"data\"] assert len(data) == 2 assert len(data[0][\"temperatures\"]) == 5 assert len(data[1][\"seasons\"]) == 4 "} {"text": "import inspect import taipy.gui.builder as tgb from taipy.gui import Gui def test_slider_builder(gui: Gui, test_client, helpers): gui._bind_var_val(\"x\", 10) with tgb.Page(frame=None) as page: tgb.slider(value=\"{x}\") expected_list = [\"<Slider\"] helpers.test_control_builder(gui, page, expected_list) "}
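{"text": "# A minimal Builder-API sketch (illustrative, not part of the test suite around it): the same tgb.Page/tgb.slider pattern used in an application; only documented taipy.gui.builder APIs are assumed.\nimport taipy.gui.builder as tgb\nfrom taipy.gui import Gui\n\nx = 50\n\n# Build the page programmatically instead of writing Markdown\nwith tgb.Page() as page:\n    tgb.text(\"Move the slider:\")\n    tgb.slider(value=\"{x}\")\n\nGui(page).run()"}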
{"text": "import inspect import taipy.gui.builder as tgb from taipy.gui import Gui def test_status_builder(gui: Gui, helpers): status = [{\"status\": \"info\", \"message\": \"Info Message\"}] # noqa: F841 with tgb.Page(frame=None) as page: tgb.status(value=\"{status}\") expected_list = [\"<Status\"] helpers.test_control_builder(gui, page, expected_list) "} {"text": "from taipy.gui import Gui def test_pane_md(gui: Gui, test_client, helpers): gui._bind_var_val(\"show_pane\", False) md_string = \"\"\" <|{show_pane}|pane| # This is a Pane |> \"\"\" expected_list = [ \"<Pane\", ] helpers.test_control_md(gui, md_string, expected_list) def test_pane_persistent_md(gui: Gui, test_client, helpers): gui._bind_var_val(\"show_pane\", False) md_string = \"\"\" <|{show_pane}|pane|persistent| # This is a Pane |> \"\"\" expected_list = [ \"<Pane\", \"persistent={true}\", ] helpers.test_control_md(gui, md_string, expected_list) def test_pane_html(gui: Gui, test_client, helpers): gui._bind_var_val(\"show_pane\", False) html_string = '<taipy:pane open=\"{show_pane}\">This is a Pane</taipy:pane>' expected_list = [ \"<Pane\", ] helpers.test_control_html(gui, html_string, expected_list) "}
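{"text": "# Create a pane bound to show_pane (sketch mirroring the pane control syntax used in the tests above)\n<|{show_pane}|pane|\n# This is a Pane\n|>"}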
{"text": "from taipy.gui import Gui def test_text_md_1(gui: Gui, test_client, helpers): gui._bind_var_val(\"x\", 10) md_string = \"<|{x}|>\" expected_list = [\"<Field\", \"value={x}\"] helpers.test_control_md(gui, md_string, expected_list) "} {"text": "from unittest.mock import Mock, patch import pytest from src.taipy.gui_core._context import _GuiCoreContext from taipy.config.common.scope import Scope from taipy.core import Job, Scenario, Task from taipy.core.data.pickle import PickleDataNode a_scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) a_task = Task(\"task_config_id\", {}, print) a_job = Job(\"JOB_job_id\", a_task, \"submit_id\", a_scenario.id) a_job.isfinished = lambda s: True a_datanode = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) def mock_core_get(entity_id): if entity_id == a_scenario.id: return a_scenario if entity_id == a_job.id: return a_job if entity_id == a_datanode.id: return a_datanode return a_task def mock_is_submittable_false(entity_id): return False def mock_is_true(entity_id): return True class MockState: def __init__(self, **kwargs) -> None: self.assign = kwargs.get(\"assign\") class TestGuiCoreContext_is_submittable: def test_submit_entity(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch( \"src.taipy.gui_core._context.is_submittable\", side_effect=mock_is_true ): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.submit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert str(assign.call_args.args[1]).startswith(\"Error submitting entity.\") with patch(\"src.taipy.gui_core._context.is_submittable\", side_effect=mock_is_submittable_false): assign.reset_mock() gui_core_context.submit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert str(assign.call_args.args[1]).endswith(\"is not submittable.\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "}
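{"text": "# Illustrative sketch of the patch/Mock pattern shared by the _GuiCoreContext tests around it; mock_core_get, mock_is_true, MockState and a_scenario are the helpers those tests define.\nfrom unittest.mock import Mock, patch\n\nwith patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch(\"src.taipy.gui_core._context.is_submittable\", side_effect=mock_is_true):\n    gui_core_context = _GuiCoreContext(Mock())\n    # Entity lookups inside submit_entity/crud_scenario now resolve through mock_core_get\n    gui_core_context.submit_entity(MockState(assign=Mock()), \"\", {\"args\": [{\"name\": \"name\", \"id\": a_scenario.id}]})"}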
"} {"text": "from unittest.mock import Mock, patch import pytest from src.taipy.gui_core._context import _GuiCoreContext from taipy.config.common.scope import Scope from taipy.core import Job, Scenario, Task from taipy.core.data.pickle import PickleDataNode a_scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) a_task = Task(\"task_config_id\", {}, print) a_job = Job(\"JOB_job_id\", a_task, \"submit_id\", a_scenario.id) a_job.isfinished = lambda s: True a_datanode = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) def mock_core_get(entity_id): if entity_id == a_scenario.id: return a_scenario if entity_id == a_job.id: return a_job if entity_id == a_datanode.id: return a_datanode return a_task def mock_is_deletable_false(entity_id): return False def mock_is_true(entity_id): return True class MockState: def __init__(self, **kwargs) -> None: self.assign = kwargs.get(\"assign\") class TestGuiCoreContext_is_deletable: def test_crud_scenario(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch( \"src.taipy.gui_core._context.is_deletable\", side_effect=mock_is_true ): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.crud_scenario( MockState(assign=assign), \"\", { \"args\": [ True, True, {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sc_error\" assert str(assign.call_args.args[1]).startswith(\"Error deleting Scenario.\") with patch(\"src.taipy.gui_core._context.is_deletable\", side_effect=mock_is_deletable_false): assign.reset_mock() gui_core_context.crud_scenario( MockState(assign=assign), \"\", { \"args\": [ True, True, {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sc_error\" assert str(assign.call_args.args[1]).endswith(\"is not deletable.\") def test_act_on_jobs(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch( \"src.taipy.gui_core._context.is_deletable\", side_effect=mock_is_true ): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.act_on_jobs( MockState(assign=assign), \"\", { \"args\": [ {\"id\": [a_job.id], \"action\": \"delete\"}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_js_error\" assert str(assign.call_args.args[1]).find(\"is not deletable.\") == -1 assign.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_deletable_false): gui_core_context.act_on_jobs( MockState(assign=assign), \"\", { \"args\": [ {\"id\": [a_job.id], \"action\": \"delete\"}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_js_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") "} {"text": "from unittest.mock import Mock, patch import pytest from src.taipy.gui_core._context import _GuiCoreContext from taipy.config.common.scope import Scope from taipy.core import Job, Scenario, Task from taipy.core.data.pickle import PickleDataNode a_scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) a_task = Task(\"task_config_id\", {}, print) a_job = Job(\"JOB_job_id\", a_task, \"submit_id\", a_scenario.id) a_job.isfinished = lambda s: True a_datanode = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) def mock_core_get(entity_id): if entity_id == a_scenario.id: return a_scenario if entity_id == a_job.id: return a_job if entity_id == 
a_datanode.id: return a_datanode return a_task def mock_is_promotable_false(entity_id): return False def mock_is_true(entity_id): return True class MockState: def __init__(self, **kwargs) -> None: self.assign = kwargs.get(\"assign\") class TestGuiCoreContext_is_promotable: def test_edit_entity(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch( \"src.taipy.gui_core._context.is_promotable\", side_effect=mock_is_true ): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.edit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id, \"primary\": True}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert str(assign.call_args.args[1]).endswith(\"to primary because it doesn't belong to a cycle.\") assign.reset_mock() with patch(\"src.taipy.gui_core._context.is_promotable\", side_effect=mock_is_promotable_false): gui_core_context.edit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id, \"primary\": True}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert str(assign.call_args.args[1]).endswith(\"is not promotable.\") "} {"text": "from unittest import mock import pytest from flask import url_for from src.taipy.rest.api.exceptions.exceptions import ScenarioIdMissingException, SequenceNameMissingException from taipy.core.exceptions.exceptions import NonExistingScenario from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory def test_get_sequence(client, default_sequence): # test 404 user_url = url_for(\"api.sequence_by_id\", sequence_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.sequence._sequence_manager._SequenceManager._get\") as manager_mock: manager_mock.return_value = default_sequence # test get_sequence rep = client.get(url_for(\"api.sequence_by_id\", sequence_id=\"foo\")) assert rep.status_code == 200 def test_delete_sequence(client): # test 404 user_url = url_for(\"api.sequence_by_id\", sequence_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.sequence._sequence_manager._SequenceManager._delete\"), mock.patch( \"taipy.core.sequence._sequence_manager._SequenceManager._get\" ): # test get_sequence rep = client.delete(url_for(\"api.sequence_by_id\", sequence_id=\"foo\")) assert rep.status_code == 200 def test_create_sequence(client, default_scenario): sequences_url = url_for(\"api.sequences\") rep = client.post(sequences_url, json={}) assert rep.status_code == 400 assert rep.json == {\"message\": \"Scenario id is missing.\"} sequences_url = url_for(\"api.sequences\") rep = client.post(sequences_url, json={\"scenario_id\": \"SCENARIO_scenario_id\"}) assert rep.status_code == 400 assert rep.json == {\"message\": \"Sequence name is missing.\"} sequences_url = url_for(\"api.sequences\") rep = client.post(sequences_url, json={\"scenario_id\": \"SCENARIO_scenario_id\", \"sequence_name\": \"sequence\"}) assert rep.status_code == 404 _ScenarioManagerFactory._build_manager()._set(default_scenario) with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._get\") as config_mock: config_mock.return_value = default_scenario sequences_url = url_for(\"api.sequences\") rep = client.post( sequences_url, json={\"scenario_id\": default_scenario.id, \"sequence_name\": \"sequence\", \"tasks\": []} ) assert rep.status_code == 201 
def test_get_all_sequences(client, default_scenario_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config\") as config_mock: config_mock.return_value = default_scenario_config_list[ds] scenario_url = url_for(\"api.scenarios\", config_id=config_mock.name) client.post(scenario_url) sequences_url = url_for(\"api.sequences\") rep = client.get(sequences_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 @pytest.mark.xfail() def test_execute_sequence(client, default_sequence): # test 404 user_url = url_for(\"api.sequence_submit\", sequence_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.sequence._sequence_manager._SequenceManager._get\") as manager_mock: manager_mock.return_value = default_sequence # test get_sequence rep = client.post(url_for(\"api.sequence_submit\", sequence_id=\"foo\")) assert rep.status_code == 200 "} {"text": "from unittest import mock from flask import url_for def test_get_job(client, default_job): # test 404 user_url = url_for(\"api.job_by_id\", job_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.job._job_manager._JobManager._get\") as manager_mock: manager_mock.return_value = default_job # test get_job rep = client.get(url_for(\"api.job_by_id\", job_id=\"foo\")) assert rep.status_code == 200 def test_delete_job(client): # test 404 user_url = url_for(\"api.job_by_id\", job_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.job._job_manager._JobManager._delete\"), mock.patch( \"taipy.core.job._job_manager._JobManager._get\" ): # test get_job rep = client.delete(url_for(\"api.job_by_id\", job_id=\"foo\")) assert rep.status_code == 200 def test_create_job(client, default_task_config): # without config param jobs_url = url_for(\"api.jobs\") rep = client.post(jobs_url) assert rep.status_code == 400 with mock.patch(\"src.taipy.rest.api.resources.job.JobList.fetch_config\") as config_mock: config_mock.return_value = default_task_config jobs_url = url_for(\"api.jobs\", task_id=\"foo\") rep = client.post(jobs_url) assert rep.status_code == 201 def test_get_all_jobs(client, create_job_list): jobs_url = url_for(\"api.jobs\") rep = client.get(jobs_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 def test_cancel_job(client, default_job): # test 404 from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory _OrchestratorFactory._build_orchestrator() _OrchestratorFactory._build_dispatcher() user_url = url_for(\"api.job_cancel\", job_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.job._job_manager._JobManager._get\") as manager_mock: manager_mock.return_value = default_job # test get_job rep = client.post(url_for(\"api.job_cancel\", job_id=\"foo\")) assert rep.status_code == 200 "} {"text": "from unittest import mock from flask import url_for def test_get_task(client, default_task): # test 404 user_url = url_for(\"api.task_by_id\", task_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.task._task_manager._TaskManager._get\") as manager_mock: manager_mock.return_value = default_task # test get_task rep = client.get(url_for(\"api.task_by_id\", task_id=\"foo\")) assert rep.status_code == 200 def test_delete_task(client): # test 404 user_url = url_for(\"api.task_by_id\", task_id=\"foo\") rep = 
client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.task._task_manager._TaskManager._delete\"), mock.patch( \"taipy.core.task._task_manager._TaskManager._get\" ): # test get_task rep = client.delete(url_for(\"api.task_by_id\", task_id=\"foo\")) assert rep.status_code == 200 def test_create_task(client, default_task_config): # without config param tasks_url = url_for(\"api.tasks\") rep = client.post(tasks_url) assert rep.status_code == 400 # config does not exist tasks_url = url_for(\"api.tasks\", config_id=\"foo\") rep = client.post(tasks_url) assert rep.status_code == 404 with mock.patch(\"src.taipy.rest.api.resources.task.TaskList.fetch_config\") as config_mock: config_mock.return_value = default_task_config tasks_url = url_for(\"api.tasks\", config_id=\"bar\") rep = client.post(tasks_url) assert rep.status_code == 201 def test_get_all_tasks(client, task_data, default_task_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.task.TaskList.fetch_config\") as config_mock: config_mock.return_value = default_task_config_list[ds] tasks_url = url_for(\"api.tasks\", config_id=config_mock.name) client.post(tasks_url) rep = client.get(tasks_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 def test_execute_task(client, default_task): # test 404 user_url = url_for(\"api.task_submit\", task_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.task._task_manager._TaskManager._get\") as manager_mock: manager_mock.return_value = default_task # test get_task rep = client.post(url_for(\"api.task_submit\", task_id=\"foo\")) assert rep.status_code == 200 "} {"text": "from functools import wraps from unittest.mock import MagicMock, patch from src.taipy.rest.api.middlewares._middleware import _middleware def mock_enterprise_middleware(f): @wraps(f) def wrapper(*args, **kwargs): return f(*args, **kwargs) return wrapper @patch(\"src.taipy.rest.api.middlewares._middleware._using_enterprise\") @patch(\"src.taipy.rest.api.middlewares._middleware._enterprise_middleware\") def test_enterprise_middleware_applied_when_enterprise_is_installed( enterprise_middleware: MagicMock, using_enterprise: MagicMock ): enterprise_middleware.return_value = mock_enterprise_middleware using_enterprise.return_value = True @_middleware def f(): return \"f\" rv = f() assert rv == \"f\" using_enterprise.assert_called_once() enterprise_middleware.assert_called_once() @patch(\"src.taipy.rest.api.middlewares._middleware._using_enterprise\") @patch(\"src.taipy.rest.api.middlewares._middleware._enterprise_middleware\") def test_enterprise_middleware_not_applied_when_enterprise_is_not_installed( enterprise_middleware: MagicMock, using_enterprise: MagicMock ): enterprise_middleware.return_value = mock_enterprise_middleware using_enterprise.return_value = False @_middleware def f(): return \"f\" rv = f() assert rv == \"f\" using_enterprise.assert_called_once() enterprise_middleware.assert_not_called() "} {"text": "from unittest import mock import pytest from flask import url_for def test_get_datanode(client, default_datanode): # test 404 user_url = url_for(\"api.datanode_by_id\", datanode_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.data._data_manager._DataManager._get\") as manager_mock: manager_mock.return_value = default_datanode # test get_datanode rep = client.get(url_for(\"api.datanode_by_id\", datanode_id=\"foo\")) assert rep.status_code == 200 def 
test_delete_datanode(client): # test 404 user_url = url_for(\"api.datanode_by_id\", datanode_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.data._data_manager._DataManager._delete\"), mock.patch( \"taipy.core.data._data_manager._DataManager._get\" ): # test get_datanode rep = client.delete(url_for(\"api.datanode_by_id\", datanode_id=\"foo\")) assert rep.status_code == 200 def test_create_datanode(client, default_datanode_config): # without config param datanodes_url = url_for(\"api.datanodes\") rep = client.post(datanodes_url) assert rep.status_code == 400 # config does not exist datanodes_url = url_for(\"api.datanodes\", config_id=\"foo\") rep = client.post(datanodes_url) assert rep.status_code == 404 with mock.patch(\"src.taipy.rest.api.resources.datanode.DataNodeList.fetch_config\") as config_mock: config_mock.return_value = default_datanode_config datanodes_url = url_for(\"api.datanodes\", config_id=\"bar\") rep = client.post(datanodes_url) assert rep.status_code == 201 def test_get_all_datanodes(client, default_datanode_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.datanode.DataNodeList.fetch_config\") as config_mock: config_mock.return_value = default_datanode_config_list[ds] datanodes_url = url_for(\"api.datanodes\", config_id=config_mock.name) client.post(datanodes_url) rep = client.get(datanodes_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 def test_read_datanode(client, default_df_datanode): with mock.patch(\"taipy.core.data._data_manager._DataManager._get\") as config_mock: config_mock.return_value = default_df_datanode # without operators datanodes_url = url_for(\"api.datanode_reader\", datanode_id=\"foo\") rep = client.get(datanodes_url, json={}) assert rep.status_code == 200 # Without operators and body rep = client.get(datanodes_url) assert rep.status_code == 200 # TODO: Revisit filter test # operators = {\"operators\": [{\"key\": \"a\", \"value\": 5, \"operator\": \"LESS_THAN\"}]} # rep = client.get(datanodes_url, json=operators) # assert rep.status_code == 200 def test_write_datanode(client, default_datanode): with mock.patch(\"taipy.core.data._data_manager._DataManager._get\") as config_mock: config_mock.return_value = default_datanode # Get DataNode datanodes_read_url = url_for(\"api.datanode_reader\", datanode_id=default_datanode.id) rep = client.get(datanodes_read_url, json={}) assert rep.status_code == 200 assert rep.json == {\"data\": [1, 2, 3, 4, 5, 6]} datanodes_write_url = url_for(\"api.datanode_writer\", datanode_id=default_datanode.id) rep = client.put(datanodes_write_url, json=[1, 2, 3]) assert rep.status_code == 200 rep = client.get(datanodes_read_url, json={}) assert rep.status_code == 200 assert rep.json == {\"data\": [1, 2, 3]} "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
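"} {"text": "# Sketch of the data node read/write round-trip exercised by the REST tests above; 'client' is the Flask test client fixture and the data node id is illustrative.\nfrom flask import url_for\n\nwrite_url = url_for(\"api.datanode_writer\", datanode_id=\"DATANODE_example_id\")\nread_url = url_for(\"api.datanode_reader\", datanode_id=\"DATANODE_example_id\")\n\n# Write new content, then read it back through the reader endpoint\nrep = client.put(write_url, json=[1, 2, 3])\nassert rep.status_code == 200\nrep = client.get(read_url, json={})\nassert rep.json == {\"data\": [1, 2, 3]}"}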
"} {"text": "import json from typing import Dict from flask import url_for def create_and_submit_scenario(config_id: str, client) -> Dict: response = client.post(url_for(\"api.scenarios\", config_id=config_id)) assert response.status_code == 201 scenario = response.json.get(\"scenario\") assert (set(scenario) - set(json.load(open(\"tests/rest/json/expected/scenario.json\")))) == set() response = client.post(url_for(\"api.scenario_submit\", scenario_id=scenario.get(\"id\"))) assert response.status_code == 200 return scenario def get(url, name, client) -> Dict: response = client.get(url) returned_data = response.json.get(name) assert (set(returned_data) - set(json.load(open(f\"tests/rest/json/expected/{name}.json\")))) == set() return returned_data def get_assert_status(url, client, status_code) -> None: response = client.get(url) assert response.status_code == status_code def get_all(url, expected_quantity, client): response = client.get(url) assert len(response.json) == expected_quantity def delete(url, client): response = client.delete(url) assert response.status_code == 200 def test_end_to_end(client, setup_end_to_end): # Create Scenario: Should also create all of its dependencies(sequences, tasks, datanodes, etc) scenario = create_and_submit_scenario(\"scenario\", client) # Get other models and verify if they return the necessary fields cycle = get(url_for(\"api.cycle_by_id\", cycle_id=scenario.get(\"cycle\")), \"cycle\", client) sequence = get( url_for(\"api.sequence_by_id\", sequence_id=f\"SEQUENCE_sequence_{scenario['id']}\"), \"sequence\", client, ) task = get(url_for(\"api.task_by_id\", task_id=sequence.get(\"tasks\")[0]), \"task\", client) datanode = get( url_for(\"api.datanode_by_id\", datanode_id=task.get(\"input_ids\")[0]), \"datanode\", client, ) # Get All get_all(url_for(\"api.scenarios\"), 1, client) get_all(url_for(\"api.cycles\"), 1, client) get_all(url_for(\"api.sequences\"), 1, client) get_all(url_for(\"api.tasks\"), 2, client) get_all(url_for(\"api.datanodes\"), 5, client) get_all(url_for(\"api.jobs\"), 2, client) # Delete entities delete(url_for(\"api.cycle_by_id\", cycle_id=cycle.get(\"id\")), client) delete(url_for(\"api.sequence_by_id\", sequence_id=sequence.get(\"id\")), client) delete(url_for(\"api.task_by_id\", task_id=task.get(\"id\")), client) delete(url_for(\"api.datanode_by_id\", datanode_id=datanode.get(\"id\")), client) # Check status code # Non-existing entities should return 404 get_assert_status(url_for(\"api.cycle_by_id\", cycle_id=9999999), client, 404) get_assert_status(url_for(\"api.scenario_by_id\", scenario_id=9999999), client, 404) get_assert_status(url_for(\"api.sequence_by_id\", sequence_id=9999999), client, 404) get_assert_status(url_for(\"api.task_by_id\", task_id=9999999), client, 404) get_assert_status(url_for(\"api.datanode_by_id\", datanode_id=9999999), client, 404) # Check URL with and without trailing slashes url_with_slash = url_for(\"api.scenarios\") url_without_slash = url_for(\"api.scenarios\")[:-1] get_all(url_with_slash, 1, client) get_all(url_without_slash, 1, client) "} {"text": "from unittest import mock from flask import url_for def test_get_cycle(client, default_cycle): # test 404 cycle_url = url_for(\"api.cycle_by_id\", cycle_id=\"foo\") rep = client.get(cycle_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.cycle._cycle_manager._CycleManager._get\") as manager_mock: manager_mock.return_value = default_cycle # test get_cycle rep = client.get(url_for(\"api.cycle_by_id\", cycle_id=\"foo\")) assert rep.status_code 
== 200 def test_delete_cycle(client): # test 404 cycle_url = url_for(\"api.cycle_by_id\", cycle_id=\"foo\") rep = client.get(cycle_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.cycle._cycle_manager._CycleManager._delete\"), mock.patch( \"taipy.core.cycle._cycle_manager._CycleManager._get\" ): # test get_cycle rep = client.delete(url_for(\"api.cycle_by_id\", cycle_id=\"foo\")) assert rep.status_code == 200 def test_create_cycle(client, cycle_data): # without config param cycles_url = url_for(\"api.cycles\") data = {\"bad\": \"data\"} rep = client.post(cycles_url, json=data) assert rep.status_code == 400 rep = client.post(cycles_url, json=cycle_data) assert rep.status_code == 201 def test_get_all_cycles(client, create_cycle_list): cycles_url = url_for(\"api.cycles\") rep = client.get(cycles_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 "} {"text": "from unittest import mock import pytest from flask import url_for def test_get_scenario(client, default_scenario): # test 404 user_url = url_for(\"api.scenario_by_id\", scenario_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._get\") as manager_mock: manager_mock.return_value = default_scenario # test get_scenario rep = client.get(url_for(\"api.scenario_by_id\", scenario_id=\"foo\")) assert rep.status_code == 200 def test_delete_scenario(client): # test 404 user_url = url_for(\"api.scenario_by_id\", scenario_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._delete\"), mock.patch( \"taipy.core.scenario._scenario_manager._ScenarioManager._get\" ): # test get_scenario rep = client.delete(url_for(\"api.scenario_by_id\", scenario_id=\"foo\")) assert rep.status_code == 200 def test_create_scenario(client, default_scenario_config): # without config param scenarios_url = url_for(\"api.scenarios\") rep = client.post(scenarios_url) assert rep.status_code == 400 # config does not exist scenarios_url = url_for(\"api.scenarios\", config_id=\"foo\") rep = client.post(scenarios_url) assert rep.status_code == 404 with mock.patch(\"src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config\") as config_mock: config_mock.return_value = default_scenario_config scenarios_url = url_for(\"api.scenarios\", config_id=\"bar\") rep = client.post(scenarios_url) assert rep.status_code == 201 def test_get_all_scenarios(client, default_sequence, default_scenario_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config\") as config_mock: config_mock.return_value = default_scenario_config_list[ds] scenarios_url = url_for(\"api.scenarios\", config_id=config_mock.name) client.post(scenarios_url) rep = client.get(scenarios_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 @pytest.mark.xfail() def test_execute_scenario(client, default_scenario): # test 404 user_url = url_for(\"api.scenario_submit\", scenario_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._get\") as manager_mock: manager_mock.return_value = default_scenario # test get_scenario rep = client.post(url_for(\"api.scenario_submit\", scenario_id=\"foo\")) assert rep.status_code == 200 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this 
{"text": "import pickle\nimport random\nfrom datetime import datetime, timedelta\nfrom typing import Any, Dict\n\nimport pandas as pd\n\nn_predictions = 14\n\n\ndef forecast(model, date: datetime):\n    # Predict the next n_predictions days, adding a little noise to each value\n    dates = [date + timedelta(days=i) for i in range(n_predictions)]\n    forecasts = [f + random.uniform(0, 2) for f in model.forecast(len(dates))]\n    days = [str(dt.date()) for dt in dates]\n    res = {\"Date\": days, \"Forecast\": forecasts}\n    return pd.DataFrame.from_dict(res)\n\n\ndef evaluate(cleaned: pd.DataFrame, forecasts: pd.DataFrame, date: datetime) -> Dict[str, Any]:\n    # Compare the forecasts with the historical values over the forecasted dates\n    cleaned = cleaned[cleaned[\"Date\"].isin(forecasts[\"Date\"].tolist())]\n    forecasts_as_series = pd.Series(forecasts[\"Forecast\"].tolist(), name=\"Forecast\")\n    res = pd.concat([cleaned.reset_index(), forecasts_as_series], axis=1)\n    res[\"Delta\"] = abs(res[\"Forecast\"] - res[\"Value\"])\n    return {\n        \"Date\": date,\n        \"Dataframe\": res,\n        \"Mean_absolute_error\": res[\"Delta\"].mean(),\n        \"Relative_error\": (res[\"Delta\"].mean() * 100) / res[\"Value\"].mean(),\n    }\n\n\nif __name__ == \"__main__\":\n    with open(\"../my_model.p\", \"rb\") as f:\n        model = pickle.load(f)\n    day = datetime(2020, 1, 25)\n\n    forecasts = forecast(model, day)\n    historical_temperature = pd.read_csv(\"../historical_temperature.csv\")\n    evaluation = evaluate(historical_temperature, forecasts, day)\n\n    print(evaluation[\"Dataframe\"])\n    print()\n    print(f'Mean absolute error: {evaluation[\"Mean_absolute_error\"]}')\n    print(f'Relative error in %: {evaluation[\"Relative_error\"]}')"}
{"text": "from taipy.core import Config, Frequency\n\nfrom .algorithms import evaluate, forecast\n\nmodel_cfg = Config.configure_data_node(\"model\", path=\"my_model.p\", storage_type=\"pickle\")\n\nday_cfg = Config.configure_data_node(id=\"day\")\nforecasts_cfg = Config.configure_data_node(id=\"forecasts\")\nforecast_task_cfg = Config.configure_task(\n    id=\"forecast_task\",\n    input=[model_cfg, day_cfg],\n    function=forecast,\n    output=forecasts_cfg,\n)\n\nhistorical_temperature_cfg = Config.configure_data_node(\n    \"historical_temperature\",\n    storage_type=\"csv\",\n    path=\"historical_temperature.csv\",\n    has_header=True,\n)\nevaluation_cfg = Config.configure_data_node(\"evaluation\")\nevaluate_task_cfg = Config.configure_task(\n    \"evaluate_task\",\n    input=[historical_temperature_cfg, forecasts_cfg, day_cfg],\n    function=evaluate,\n    output=evaluation_cfg,\n)\n\nscenario_cfg = Config.configure_scenario(\"scenario\", [forecast_task_cfg, evaluate_task_cfg], frequency=Frequency.DAILY)\nscenario_cfg.add_sequences({\"sequence\": [forecast_task_cfg, evaluate_task_cfg]})"}
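{"text": "# Hedged sketch (not part of the original snippets): create and submit the daily forecast scenario configured above, assuming scenario_cfg from that module is in scope; writing 'day' before submitting is an assumption since that data node has no default value:\nfrom datetime import datetime\n\nimport taipy as tp\n\ntp.Core().run()\nscenario = tp.create_scenario(scenario_cfg)\nscenario.day.write(datetime(2020, 1, 25))\ntp.submit(scenario)\nprint(scenario.evaluation.read()[\"Mean_absolute_error\"])"}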
"} {"text": "import random import string from example_library import ExampleLibrary from taipy.gui import Gui # Initial value label = \"Here is some text\" page = \"\"\" # Custom elements example ## Fraction: No denominator: <|123|example.fraction|> Denominator is 0: <|321|example.fraction|denominator=0|> Regular: <|355|example.fraction|denominator=113|> ## Custom label: Colored text: <|{label}|example.label|> <|Add a character|button|id=addChar|> <|Remove a character|button|id=removeChar|> \"\"\" def on_action(state, id): if id == \"addChar\": # Add a random character to the end of 'label' state.label += random.choice(string.ascii_letters) elif id == \"removeChar\": # Remove the first character of 'label' if len(state.label) > 0: state.label = state.label[1:] Gui(page, libraries=[ExampleLibrary()]).run(debug=True) "} {"text": "from .example_library import ExampleLibrary "} {"text": "from taipy.gui.extension import Element, ElementLibrary, ElementProperty, PropertyType class ExampleLibrary(ElementLibrary): def __init__(self) -> None: # Initialize the set of visual elements for this extension library self.elements = { # A static element that displays its properties in a fraction \"fraction\": Element( \"numerator\", { \"numerator\": ElementProperty(PropertyType.number), \"denominator\": ElementProperty(PropertyType.number), }, render_xhtml=ExampleLibrary._fraction_render, ), # A dynamic element that decorates its value \"label\": Element( \"value\", {\"value\": ElementProperty(PropertyType.dynamic_string)}, # The name of the React component (ColoredLabel) that implements this custom # element, exported as ExampleLabel in front-end/src/index.ts react_component=\"ExampleLabel\", ), } # The implementation of the rendering for the \"fraction\" static element @staticmethod def _fraction_render(props: dict) -> str: # Get the property values numerator = props.get(\"numerator\") denominator = props.get(\"denominator\") # No denominator or numerator is 0: display the numerator if denominator is None or int(numerator) == 0: return f\"{numerator}\" # Denominator is zero: display infinity if int(denominator) == 0: return '' # 'Normal' case return f\"{numerator}/{denominator}\" def get_name(self) -> str: return \"example\" def get_elements(self) -> dict: return self.elements def get_scripts(self) -> list[str]: # Only one JavaScript bundle for this library. return [\"front-end/dist/exampleLibrary.js\"] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # ----------------------------------------------------------------------------------------- # To execute this script, make sure that the taipy-gui package is installed in your # Python environment and run: # python
\"\"\")"} {"text": "from geopy.geocoders import Nominatim import folium user_agent = \"geoapiExercises/1.0 AIzaSyBIeklfsRu1yz97lY2gJzWHJcmrd7lx2zU\" # Initialize the geocoder with the user agent geolocator = Nominatim(user_agent=user_agent, timeout=10) # List of locations to geocode locations = [\"Denver, CO, United States\", \"New York, NY, United States\", \"Los Angeles, CA, United States\"] # Create an empty map map_location = folium.Map(location=[0, 0], zoom_start=5) # Iterate through the list of locations for location in locations: # Perform geocoding location_info = geolocator.geocode(location) if location_info: # Extract latitude and longitude latitude = location_info.latitude longitude = location_info.longitude # Add a marker for the geocoded location folium.Marker([latitude, longitude], popup=location).add_to(map_location) else: print(f\"Geocoding was not successful for the location: {location}\") # Save or display the map (as an HTML file) map_location.save(\"geocoded_locations_map.html\") print(\"Map created and saved as 'geocoded_locations_map.html'\") "} {"text": "from taipy.gui import Gui, notify import pandas as pd import yfinance as yf from taipy.config import Config import taipy as tp import datetime as dt from taipy import Core from show_hospitals_map import html_page from flask import Flask, request, session, jsonify, redirect, render_template from flask_restful import Api, Resource import requests Config.load(\"config_model_train.toml\") scenario_cfg = Config.scenarios['stock'] tickers = yf.Tickers(\"msft aapl goog\") root_md = \"<|navbar|>\" property_chart = { \"type\": \"lines\", \"x\": \"Date\", \"y[1]\": \"Open\", \"y[2]\": \"Close\", \"y[3]\": \"High\", \"y[4]\": \"Low\", \"color[1]\": \"green\", \"color[2]\": \"grey\", \"color[3]\": \"red\", \"color[4]\": \"yellow\", } df = pd.DataFrame([], columns=[\"Date\", \"High\", \"Low\", \"Open\", \"Close\"]) df_pred = pd.DataFrame([], columns = ['Date','Close_Prediction']) stock = \"\" stock_text = \"No Stock to Show\" chart_text = \"No Chart to Show\" stocks = [] page = \"\"\" # Stock Portfolio ### Choose the stock to show <|toggle|theme|> <|layout|columns=1 1| <| <|{stock_text}|> <|{stock}|selector|lov=MSFT;AAPL;GOOG;Reset|dropdown|> <|Press for Stock|button|on_action=on_button_action|> <|Get the future predictions|button|on_action=get_predictions|> |> <|{stock} <|{chart_text}|> <|{df}|chart|properties={property_chart}|> |> |> \"\"\" pages = { \"/\" : root_md, \"home\" : page, \"claim\": \"empty page\" } def on_button_action(state): if state.stock == \"Reset\": state.stock_text = \"No Stock to Show\" state.chart_text = \"No Chart to Show\" state.df = pd.DataFrame([], columns=[\"Date\", \"High\", \"Low\", \"Open\", \"Close\"]) state.df_pred = pd.DataFrame([], columns = ['Date','Close_Prediction']) state.pred_text = \"No Prediction to Show\" else: state.stock_text = f\"The stock is {state.stock}\" state.chart_text = f\"Monthly history of stock {state.stock}\" state.df = tickers.tickers[state.stock].history().reset_index() state.df.to_csv(f\"{stock}.csv\", index=False) def get_predictions(state): scenario_stock = tp.create_scenario(scenario_cfg) scenario_stock.initial_dataset.path = f\"{stock}\".csv notify(state, 'success', 'camehere') scenario_stock.write(state.df) tp.submit(scenario_stock) state.df_pred = scenario_stock.predictions.read() state.df_pred.to_csv(\"pred.csv\", index=False) tp.Core().run() # Gui(pages=pages).run(use_reloader=True) app = Flask(__name__) # app = Flask(__name__) app.secret_key = 
\"your_secret_key\" # Set a secret key for session management api = Api(app) class SignupResource(Resource): def get(self): return redirect(\"/signup.html\") def post(self): SIGNUP_API_URL = \"https://health-insurance-rest-apis.onrender.com/api/signup\" signup_data = { 'username': request.form['username'], 'password': request.form['password'], 'email': request.form['email'] } headers = { 'Content-Type': 'application/json' } print(signup_data) response = requests.post(SIGNUP_API_URL, headers=headers, json=signup_data) print(\"response\", response) if response.status_code == 200: return redirect(\"/login.html\") else: return 'Signup Failed' # Login Resource class LoginResource(Resource): def get(self): \"\"\" Return a simple login page HTML \"\"\" return redirect(\"/login.html\") def post(self): email = request.form['email'] password = request.form['password'] auth_data = { 'username': email, 'password': password } AUTH_API_URL = \"https://health-insurance-rest-apis.onrender.com/api/login\" response = requests.post(AUTH_API_URL, json=auth_data) if response.status_code == 200: auth_data = response.json() access_token = auth_data.get('access_token') refresh_token = auth_data.get('refresh_token') # Store tokens in the session session['access_token'] = access_token session['refresh_token'] = refresh_token return redirect(\"/home\") else: return 'Login failed', 401 # Protected Resource class ProtectedResource(Resource): def get(self): # Check if the JWT token is present in the session if 'jwt_token' in session: jwt_token = session['jwt_token'] # You can add logic here to verify the JWT token if needed # For simplicity, we assume the token is valid return {'message': 'Access granted for protected route', 'jwt_token': jwt_token}, 200 else: return {'message': 'Access denied'}, 401 print(\"registered the apis\") # Add resources to the API api.add_resource(LoginResource, '/login') api.add_resource(ProtectedResource, '/protected') api.add_resource(SignupResource, '/signup') @app.before_request def check_access_token(): # print ('access_token' in session, \"checkIt\") if request.endpoint != 'login' and 'access_token' not in session: # # Redirect to the login page if not on the login route and no access_token is in the session # print(request.endpoint, \"endpoint\") return redirect(\"/login\") gui = Gui(pages=pages, flask=app).run(debug=False) "} {"text": "from taipy import Config, Scope import pandas as pd from prophet import Prophet from functions import * # Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id=\"initial_dataset\", storage_type=\"csv\", default_path='df.csv') cleaned_dataset_cfg = Config.configure_data_node(id=\"cleaned_dataset\") clean_data_task_cfg = Config.configure_task(id=\"clean_data_task\", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg, skippable=True) model_training_cfg = Config.configure_data_node(id=\"model_output\") predictions_cfg = Config.configure_data_node(id=\"predictions\") model_training_task_cfg = Config.configure_task(id=\"model_retraining_task\", function=retrained_model, input=cleaned_dataset_cfg, output=model_training_cfg, skippable=True) predict_task_cfg = Config.configure_task(id=\"predict_task\", function=predict, input=model_training_cfg, output=predictions_cfg, skippable=True) # Create the first pipeline configuration # retraining_model_pipeline_cfg = Config.configure_pipeline( # id=\"model_retraining_pipeline\", # task_configs=[clean_data_task_cfg, model_training_task_cfg], # ) # Run the Taipy Core service # 
{"text": "from taipy import Config\n\nfrom functions import build_message\n\nname_data_node_cfg = Config.configure_data_node(id=\"name\")\nmessage_data_node_cfg = Config.configure_data_node(id=\"message\")\nbuild_msg_task_cfg = Config.configure_task(\"build_msg\", build_message, name_data_node_cfg, message_data_node_cfg)\nscenario_cfg = Config.configure_scenario_from_tasks(\"scenario\", task_configs=[build_msg_task_cfg])\n\nConfig.export(\"my_config.toml\")"}
{"text": "from functools import wraps\n\nimport jwt\nfrom flask import abort, current_app, request\n\n\ndef token_required(f):\n    @wraps(f)\n    def decorated(*args, **kwargs):\n        token = None\n        if \"Authorization\" in request.headers:\n            token = request.headers[\"Authorization\"].split(\" \")[1]\n        if not token:\n            return {\n                \"message\": \"Authentication Token is missing!\",\n                \"data\": None,\n                \"error\": \"Unauthorized\"\n            }, 401\n        try:\n            # data = jwt.decode(token, current_app.config[\"SECRET_KEY\"], algorithms=[\"RS256\"])\n            # current_user = models.User().get_by_id(data[\"user_id\"])\n            # Placeholder user while the real lookup above is commented out\n            current_user = {\"user_id\": 12, \"active\": True}\n            if current_user is None:\n                return {\n                    \"message\": \"Invalid Authentication token!\",\n                    \"data\": None,\n                    \"error\": \"Unauthorized\"\n                }, 401\n            if not current_user[\"active\"]:\n                abort(403)\n        except Exception as e:\n            return {\n                \"message\": \"Something went wrong\",\n                \"data\": None,\n                \"error\": str(e)\n            }, 500\n\n        return f(current_user, *args, **kwargs)\n\n    return decorated"}
{"text": "from flask import Flask, request, session\nfrom flask_restful import Api, Resource\n\napp = Flask(__name__)\napp.secret_key = \"your_secret_key\"  # Set a secret key for session management\napi = Api(app)\n\n# Dummy user data for demonstration\nusers = {\n    \"maneesh\": {\"password\": \"securepassword\"}\n}\n\n\n# Login Resource\nclass LoginResource(Resource):\n    def post(self):\n        data = request.get_json()\n        username = data.get(\"username\")\n        password = data.get(\"password\")\n\n        # Check if the user exists and the password is correct\n        if username in users and users[username][\"password\"] == password:\n            # Simulate receiving a JWT token from a third-party API\n            jwt_token = \"your_received_jwt_token\"\n\n            # Store the JWT token in the session\n            session[\"jwt_token\"] = jwt_token\n            return {\"message\": \"Login successful\"}, 200\n        else:\n            return {\"message\": \"Invalid credentials\"}, 401\n\n\n# Protected Resource\nclass ProtectedResource(Resource):\n    def get(self):\n        # Check if the JWT token is present in the session\n        if \"jwt_token\" in session:\n            jwt_token = session[\"jwt_token\"]\n            # You can add logic here to verify the JWT token if needed\n            # For simplicity, we assume the token is valid\n            return {\"message\": \"Access granted for protected route\", \"jwt_token\": jwt_token}, 200\n        else:\n            return {\"message\": \"Access denied\"}, 401\n\n\n# Add resources to the API\napi.add_resource(LoginResource, \"/login\")\napi.add_resource(ProtectedResource, \"/protected\")\n\nif __name__ == \"__main__\":\n    app.run(debug=True)"}
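{"text": "# Illustrative client (not part of the original snippets) for the session-based demo API above: log in with the dummy credentials, reuse the session cookie, then call the protected route:\nimport requests\n\nBASE = \"http://127.0.0.1:5000\"\n\nwith requests.Session() as s:\n    rep = s.post(f\"{BASE}/login\", json={\"username\": \"maneesh\", \"password\": \"securepassword\"})\n    print(rep.status_code, rep.json())  # 200, login successful\n\n    rep = s.get(f\"{BASE}/protected\")\n    print(rep.status_code, rep.json())  # 200, token returned from the session"}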