{"text": "# Display df_selection in an expandable\n<|Sales Table|expandable|expanded=False|"} {"text": "# Create a Generate text button\n<|Generate text|button|on_action=generate_text|label=Generate text|>"} {"text": "# Create a text input for the tweet\n<|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|>"} {"text": "# Break line\n
"} {"text": "# Display image\n
<|{image}|image|height=400px|>
"} {"text": "# Display image from path\n<|{path}|image|width=500px|height=500px|class_name=img|>"} {"text": "# Create a slider to select iterations\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>"} {"text": "# Slider dx_start\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>"} {"text": "# Create a toggle button\n<|{value}|toggle|lov=Item 1;Item 2;Item 3|>"} {"text": "# Create a dropdown to select between A B and C\n<|{value}|selector|lov=A;B;C|dropdown|>"} {"text": "# Create a date selector\n<|{dt}|date|>"} {"text": "# Date selection\n<|{dt}|date|>"} {"text": "# Create a gauge to visualize progress\n<|message|indicator|value={progress}|min=0|max=100|>"} {"text": "# Create and register a page:\nfrom taipy import Gui\nGui(\"# This is my page title\")"} {"text": "# Create a page variable and register it:\nmd = \"# This is my page title\"\nGui(md)"} {"text": "# Create 2 pages and register them:\npages = {\n 'page1': Markdown(\"# My first page\"),\n 'page2': Markdown(\"# My second page\")\n}\nGui(pages=pages)"} {"text": "# Create a multi-page application:\nfrom taipy import Gui\n\n root_md=\"# Multi-page application\"\n page1_md=\"# This is page 1\"\n page2_md=\"# This is page 2\"\n\n pages = {\n \"/\": root_md,\n \"page1\": page1_md,\n \"page2\": page2_md\n }\n Gui(pages=pages).run()"} {"text": "# Create a multi-page application with content placeholder:\n from taipy import Gui\n\n root_md=\"\"\"#\n Multi-page application\n\n\n This application was created with [Taipy](http://taipy.avaiga.com).\n \"\"\"\n page1_md=\"# This is page 1\"\n page2_md=\"# This is page 2\"\n\n pages = {\n \"/\": root_md,\n \"page1\": page1_md,\n \"page2\": page2_md\n }\n Gui(pages=pages).run()"} {"text": "# Create a dialog in Markdown content:\n page=\"\"\"\n <|{dialog_is_visible}|dialog|\n Enter a name:\n <|{name}|input|>\n |>\"\"\"\n Gui(page).run()"} {"text": "# Create a partial page:\n gui = Gui()\n prompt_user = gui.add_partial(\n \"\"\"\n Enter a name:\n <|{name}|input|>\n \"\"\"\n )\n gui.run()"} {"text": "# Display a value:\n<|{value}|>"} {"text": "# Format the value with 2 decimal places:\n<|{value}|text|format=%.2f|>"} {"text": "# Create a button with Button Label:\n<|Button Label|button|>"} {"text": "# Create a Save button:\n<|Button Label|button|>"} {"text": "# Create a Cancel button and button_action_function_name:\n<|Cancel|button|on_action=button_action_function_name|>"} {"text": "# Create a Cancel button with cancel function:\n<|Cancel|button|on_action=cancel|>"} {"text": "# Create input field for name:\n<|{name}|input|>"} {"text": "# Create input field for address:\n<|{address}|input|>"} {"text": "# Create number field for age:\n<|{age}|number|>"} {"text": "# Create a slider for value between 1 and 10:\n<|{value}|slider|min=1|max=10|>"} {"text": "# Create a list of toggle buttons for Item 1, Item 2, Item 3:\n<|{value}|toggle|lov=Item 1;Item 2;Item 3|>"} {"text": "# Create a toggle control that lets you pick a specific user:\n<|{user_sel}|toggle|lov={users}|type=User|adapter={lambda u: (u.id, u.name)}|>"} {"text": "# Create a date selector:\n<|{dt}|date|>"} {"text": "# Create a date selector without time:\n<|{dt}|date|not with_time|>"} {"text": "# Create a date selector with only date:\n<|{dt}|date|not with_time|>"} {"text": "# Create a file download:\n<|{content}|file_download|>"} {"text": "# Create a file download with action:\n<|{content}|file_download|label=Download File|on_action=function_name|name=filename|>"} {"text": "# Create a file download 
with no review:\n<|{content}|file_download|bypass_preview=False|>"} {"text": "# Create an auto download file download:\n<|{content}|file_download|auto|>"} {"text": "# Create a file selector:\n<|{content}|file_selector|>"} {"text": "# Create a file selector with label and action:\n<|{content}|file_selector|label=Download File|on_action=function_name|extensions=.csv,.xlsx|drop_message=Drop Message|>"} {"text": "# Create a multiple file uploader:\n<|{content}|file_selector|multiple|>"} {"text": "# Show an image:\n<|{content}|image|>"} {"text": "# Show an image with label and callback:\n<|{content}|image|label=this is an image|on_action=function_name|>"} {"text": "# Defining a simple static menu:\n<|menu|lov=menu 1;menu 2|>"} {"text": "# Show a table:\n<|{data}|table|>"} {"text": "# Display a list of string:\n<|{value}|tree|lov=Item 1;Item 2;Item 3|>"} {"text": "# Display df_display in an expandable\n<|Display Table|expandable|expanded=False|"} {"text": "# Create a text input for the note\n<|{note}|input|multiline|label=Note|class_name=fullwidth|>"} {"text": "# Insert a line break\n
"} {"text": "# Display plot\n
<|{plot}|image|height=400px|>
"} {"text": "# Display plot from file location\n<|{file_location}|image|width=500px|height=500px|class_name=img|>"} {"text": "# Create a slider to adjust volume\n<|{volume}|slider|min=0|max=100|continuous=False|on_change=adjust_volume|>"} {"text": "# Slider dx_threshold\n<|{dx_threshold}|slider|min=0|max=10|continuous=False|on_change=update_threshold|>"} {"text": "# Create a toggle button\n<|{is_enabled}|toggle|lov=Off;On|>"} {"text": "# Create a dropdown to select between Option X, Option Y, and Option Z\n<|{option}|selector|lov=Option X;Option Y;Option Z|dropdown|>"} {"text": "# Create a date picker\n<|{selected_date}|date|>"} {"text": "# Date picker\n<|{selected_date}|date|>"} {"text": "# Create a Cancel button with cancel_handler function:\n<|Cancel|button|on_action=cancel_handler|>"} {"text": "# Create an input field for username:\n<|{username}|input|>"} {"text": "# Create an input field for location:\n<|{location}|input|>"} {"text": "# Create a numeric field for age:\n<|{age}|number|>"} {"text": "# Create a slider for value between 1 and 10:\n<|{rating}|slider|min=1|max=10|>"} {"text": "# Create a set of toggle buttons for Option 1, Option 2, Option 3:\n<|{choice}|toggle|lov=Option 1;Option 2;Option 3|>"} {"text": "# Create a toggle control to select a specific category:\n<|{category_sel}|toggle|lov={categories}|type=Category|adapter={lambda c: (c.id, c.name)}|>"} {"text": "# Create a date picker:\n<|{event_date}|date|>"} {"text": "# Create a date picker without time:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a date picker with only date:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a file download link:\n<|{document}|file_download|>"} {"text": "# Create a file download link with action:\n<|{document}|file_download|label=Download Document|on_action=download_file|name=file_name|>"} {"text": "# Create a file download link without preview:\n<|{document}|file_download|bypass_preview=False|>"} {"text": "# Create an auto download file link:\n<|{document}|file_download|auto|>"} {"text": "# Create a file selector:\n<|{selected_file}|file_selector|>"} {"text": "# Create a file selector with label and action:\n<|{selected_file}|file_selector|label=Select File|on_action=file_selected|extensions=.csv,.xlsx|drop_message=Drop file here|>"} {"text": "# Create a multiple file uploader:\n<|{selected_files}|file_selector|multiple|>"} {"text": "# Show an illustration:\n<|{illustration}|image|>"} {"text": "# Show an image with description and callback:\n<|{picture}|image|label=This is a picture|on_action=image_clicked|>"} {"text": "# Display a message at a specified position between min and max:\n<|status|indicator|value={percentage}|min=0|max=100|>"} {"text": "# Define a basic static menu:\n<|menu|lov=menu_item1;menu_item2|>"} {"text": "# Display df_results in an expandable\n<|Results Table|expandable|expanded=False|"} {"text": "# Create a Generate summary button\n<|Generate summary|button|on_action=generate_summary|label=Generate summary|>"} {"text": "# Create a text input for the article\n<|{article}|input|multiline|label=Resulting article|class_name=fullwidth|>"} {"text": "# Insert a line break\n
"} {"text": "# Display chart\n
<|{chart}|image|height=400px|>
"} {"text": "# Display chart from file path\n<|{file_path}|image|width=500px|height=500px|class_name=img|>"} {"text": "# Create a slider to adjust zoom level\n<|{zoom_level}|slider|min=1|max=5|continuous=False|on_change=adjust_zoom|>"} {"text": "# Slider dx_threshold\n<|{dx_threshold}|slider|min=0|max=10|continuous=False|on_change=update_threshold|>"} {"text": "# Create a toggle switch\n<|{state}|toggle|lov=Off;On;Auto|>"} {"text": "# Create a dropdown to select between X, Y, and Z\n<|{axis}|selector|lov=X;Y;Z|dropdown|>"} {"text": "# Date picker\n<|{selected_date}|date|>"} {"text": "# Create a gauge to visualize value\n<|status|indicator|value={progress}|min=0|max=100|>"} {"text": "# Define a page variable and initialize it:\nmd_content = \"# My Page Title\"\nGui(md_content)"} {"text": "# Define 2 pages and set them up:\ncontent_pages = {\n 'first_page': Markdown(\"# Introduction\"),\n 'second_page': Markdown(\"# Advanced Topics\")\n}\nGui(pages=content_pages)"} {"text": "# Set up a multi-page application:\nfrom taipy import Gui\n\n root_content=\"# Welcome to the App\"\n page1_content=\"# This is Page 1\"\n page2_content=\"# This is Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Set up a multi-page application with content placeholder:\n from taipy import Gui\n\n root_content=\"\"\"#\n Multi-page application\n\n\n This app was built using Taipy.\n \"\"\"\n page1_content=\"# Page 1\"\n page2_content=\"# Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Create a dialog with Markdown content:\n dialog_content=\"\"\"\n <|{is_visible}|dialog|\n Please enter your name:\n <|{user_name}|input|>\n |>\"\"\"\n Gui(dialog_content).run()"} {"text": "# Set up a partial page:\n gui_instance = Gui()\n user_prompt = gui_instance.add_partial(\n \"\"\"\n Please enter your name:\n <|{user_name}|input|>\n \"\"\"\n )\n gui_instance.run()"} {"text": "# Display a calculated result:\n<|{result}|>"} {"text": "# Format the value with 2 decimal points:\n<|{value}|text|format=%.2f|>"} {"text": "# Create a button with the label 'Click Me':\n<|Click Me|button|>"} {"text": "# Create a Save button:\n<|Save|button|>"} {"text": "# Create a Cancel button with cancel_handler function:\n<|Cancel|button|on_action=cancel_handler|>"} {"text": "# Create an input field for username:\n<|{username}|input|>"} {"text": "# Create an input field for location:\n<|{location}|input|>"} {"text": "# Create a numeric field for age:\n<|{age}|number|>"} {"text": "# Create a slider for value between 1 and 10:\n<|{rating}|slider|min=1|max=10|>"} {"text": "# Create a set of toggle buttons for Option 1, Option 2, Option 3:\n<|{choice}|toggle|lov=Option 1;Option 2;Option 3|>"} {"text": "# Create a toggle control to select a specific category:\n<|{category_sel}|toggle|lov={categories}|type=Category|adapter={lambda c: (c.id, c.name)}|>"} {"text": "# Create a date picker:\n<|{event_date}|date|>"} {"text": "# Create a date picker without time:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a date picker with only date:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a file download link:\n<|{document}|file_download|>"} {"text": "# Create a file download link with action:\n<|{document}|file_download|label=Download Document|on_action=download_file|name=file_name|>"} {"text": "# Create a file download link without 
preview:\n<|{document}|file_download|bypass_preview=False|>"} {"text": "# Create an auto download file link:\n<|{document}|file_download|auto|>"} {"text": "# Create a file selector:\n<|{selected_file}|file_selector|>"} {"text": "# Create a file selector with label and action:\n<|{selected_file}|file_selector|label=Select File|on_action=file_selected|extensions=.csv,.xlsx|drop_message=Drop file here|>"} {"text": "# Show an illustration:\n<|{illustration}|image|>"} {"text": "# Show an image with description and callback:\n<|{picture}|image|label=This is a picture|on_action=image_clicked|>"} {"text": "# Display a message at a specified position between min and max:\n<|status|indicator|value={percentage}|min=0|max=100|>"} {"text": "# Define a basic static menu:\n<|menu|lov=menu_item1;menu_item2|>"} {"text": "# Display df_output in an expandable\n<|Output Table|expandable|expanded=False|"} {"text": "# Create a Visualize button\n<|Visualize|button|on_action=visualize_data|label=Visualize|>"} {"text": "# Create a text input for the description\n<|{description}|input|multiline|label=Description|class_name=fullwidth|>"} {"text": "# Insert a line break\n
"} {"text": "# Display diagram\n
<|{diagram}|image|height=400px|>
"} {"text": "# Display diagram from file path\n<|{diagram_path}|image|width=500px|height=500px|class_name=img|>"} {"text": "# Create a slider to adjust brightness\n<|{brightness}|slider|min=0|max=100|continuous=False|on_change=adjust_brightness|>"} {"text": "# Slider threshold\n<|{threshold}|slider|min=0|max=10|continuous=False|on_change=update_threshold|>"} {"text": "# Create a toggle button\n<|{is_enabled}|toggle|lov=Off;On|>"} {"text": "# Create a dropdown to select between Choice A, Choice B, and Choice C\n<|{choice}|selector|lov=Choice A;Choice B;Choice C|dropdown|>"} {"text": "# Create a date picker\n<|{selected_date}|date|>"} {"text": "# Date picker\n<|{selected_date}|date|>"} {"text": "# Create a gauge to visualize value\n<|status|indicator|value={progress}|min=0|max=100|>"} {"text": "# Create and initialize a page:\nfrom taipy import Gui\nGui(\"# Dashboard\")"} {"text": "# Define a page variable and initialize it:\nmd_content = \"# Dashboard\"\nGui(md_content)"} {"text": "# Define 2 pages and set them up:\ncontent_pages = {\n 'overview': Markdown(\"# Overview\"),\n 'details': Markdown(\"# Details\")\n}\nGui(pages=content_pages)"} {"text": "# Set up a multi-page application:\nfrom taipy import Gui\n\n root_content=\"# Welcome to the App\"\n page1_content=\"# Page 1\"\n page2_content=\"# Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Set up a multi-page application with content placeholder:\n from taipy import Gui\n\n root_content=\"\"\"#\n Multi-page application\n\n\n This app was built using Taipy.\n \"\"\"\n page1_content=\"# Page 1\"\n page2_content=\"# Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Create a dialog with Markdown content:\n dialog_content=\"\"\"\n <|{is_visible}|dialog|\n Please enter your name:\n <|{user_name}|input|>\n |>\"\"\"\n Gui(dialog_content).run()"} {"text": "# Set up a partial page:\n gui_instance = Gui()\n user_prompt = gui_instance.add_partial(\n \"\"\"\n Please enter your name:\n <|{user_name}|input|>\n \"\"\"\n )\n gui_instance.run()"} {"text": "# Display a calculated result:\n<|{result}|>"} {"text": "# Format the value with 2 decimal points:\n<|{value}|text|format=%.2f|>"} {"text": "# Create a button with the label 'Click Me':\n<|Click Me|button|>"} {"text": "# Create a Save button:\n<|Save|button|>"} {"text": "# Create a Cancel button and set the action function name:\n<|Cancel|button|on_action=cancel_action_function|>"} {"text": "# Create a Cancel button with cancel_handler function:\n<|Cancel|button|on_action=cancel_handler|>"} {"text": "# Create an input field for username:\n<|{username}|input|>"} {"text": "# Create an input field for location:\n<|{location}|input|>"} {"text": "# Create a numeric field for age:\n<|{age}|number|>"} {"text": "# Create a slider for value between 1 and 10:\n<|{rating}|slider|min=1|max=10|>"} {"text": "# Create a set of toggle buttons for Option 1, Option 2, Option 3:\n<|{choice}|toggle|lov=Option 1;Option 2;Option 3|>"} {"text": "# Create a toggle control to select a specific category:\n<|{category_sel}|toggle|lov={categories}|type=Category|adapter={lambda c: (c.id, c.name)}|>"} {"text": "# Create a date picker:\n<|{event_date}|date|>"} {"text": "# Create a date picker without time:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a date picker with only date:\n<|{event_date}|date|not 
with_time|>"} {"text": "# Create a file download link:\n<|{document}|file_download|>"} {"text": "# Create a file download link with action:\n<|{document}|file_download|label=Download Document|on_action=download_file|name=file_name|>"} {"text": "# Create a file download link without preview:\n<|{document}|file_download|bypass_preview=False|>"} {"text": "# Create an auto download file link:\n<|{document}|file_download|auto|>"} {"text": "# Create a file selector:\n<|{selected_file}|file_selector|>"} {"text": "# Create a file selector with label and action:\n<|{selected_file}|file_selector|label=Select File|on_action=file_selected|extensions=.csv,.xlsx|drop_message=Drop file here|>"} {"text": "# Create a multiple file uploader:\n<|{selected_files}|file_selector|multiple|>"} {"text": "# Show an illustration:\n<|{illustration}|image|>"} {"text": "# Show an image with description and callback:\n<|{picture}|image|label=This is a picture|on_action=image_clicked|>"} {"text": "# Display a message at a specified position between min and max:\n<|status|indicator|value={percentage}|min=0|max=100|>"} {"text": "# Define a basic static menu:\n<|menu|lov=menu_item1;menu_item2|>"} {"text": "# Display df_display in an expandable\n<|Display Data|expandable|expanded=False|"} {"text": "# Create an Explore button\n<|Explore|button|on_action=explore_data|label=Explore|>"} {"text": "# Create a text input for the notes\n<|{notes}|input|multiline|label=Notes|class_name=fullwidth|>"} {"text": "# Insert a line break\n
"} {"text": "# Display visualization\n
<|{visualization}|image|height=400px|>
"} {"text": "# Display visualization from file path\n<|{file_path}|image|width=500px|height=500px|class_name=img|>"} {"text": "# Create a slider to adjust contrast\n<|{contrast}|slider|min=0|max=100|continuous=False|on_change=adjust_contrast|>"} {"text": "# Slider threshold\n<|{threshold}|slider|min=0|max=10|continuous=False|on_change=update_threshold|>"} {"text": "# Create and initialize a page:\nfrom taipy import Gui\nGui(\"# Data Overview\")"} {"text": "# Define a page variable and initialize it:\nmd_content = \"# Data Overview\"\nGui(md_content)"} {"text": "# Define 2 pages and set them up:\ncontent_pages = {\n 'overview': Markdown(\"# Overview\"),\n 'details': Markdown(\"# Details\")\n}\nGui(pages=content_pages)"} {"text": "# Set up a multi-page application:\nfrom taipy import Gui\n\n root_content=\"# Welcome to the App\"\n page1_content=\"# Page 1\"\n page2_content=\"# Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Set up a multi-page application with content placeholder:\n from taipy import Gui\n\n root_content=\"\"\"#\n Multi-page application\n\n\n This app was built using Taipy.\n \"\"\"\n page1_content=\"# Page 1\"\n page2_content=\"# Page 2\"\n\n pages_dict = {\n \"/\": root_content,\n \"page1\": page1_content,\n \"page2\": page2_content\n }\n Gui(pages=pages_dict).run()"} {"text": "# Create a dialog with Markdown content:\n dialog_content=\"\"\"\n <|{is_visible}|dialog|\n Please enter your name:\n <|{user_name}|input|>\n |>\"\"\"\n Gui(dialog_content).run()"} {"text": "# Set up a partial page:\n gui_instance = Gui()\n user_prompt = gui_instance.add_partial(\n \"\"\"\n Please enter your name:\n <|{user_name}|input|>\n \"\"\"\n )\n gui_instance.run()"} {"text": "# Display a calculated result:\n<|{result}|>"} {"text": "# Format the value with 2 decimal points:\n<|{value}|text|format=%.2f|>"} {"text": "# Create a button with the label 'Click Me':\n<|Click Me|button|>"} {"text": "# Create a Save button:\n<|Save|button|>"} {"text": "# Create a Cancel button and set the action function name:\n<|Cancel|button|on_action=cancel_action_function|>"} {"text": "# Create a Cancel button with cancel_handler function:\n<|Cancel|button|on_action=cancel_handler|>"} {"text": "# Create an input field for username:\n<|{username}|input|>"} {"text": "# Create an input field for location:\n<|{location}|input|>"} {"text": "# Create a numeric field for age:\n<|{age}|number|>"} {"text": "# Create a slider for value between 1 and 10:\n<|{rating}|slider|min=1|max=10|>"} {"text": "# Create a set of toggle buttons for Option 1, Option 2, Option 3:\n<|{choice}|toggle|lov=Option 1;Option 2;Option 3|>"} {"text": "# Create a toggle control to select a specific category:\n<|{category_sel}|toggle|lov={categories}|type=Category|adapter={lambda c: (c.id, c.name)}|>"} {"text": "# Create a date picker:\n<|{event_date}|date|>"} {"text": "# Create a date picker without time:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a date picker with only date:\n<|{event_date}|date|not with_time|>"} {"text": "# Create a file download link:\n<|{document}|file_download|>"} {"text": "# Create a file download link with action:\n<|{document}|file_download|label=Download Document|on_action=download_file|name=file_name|>"} {"text": "# Create a file download link without preview:\n<|{document}|file_download|bypass_preview=False|>"} {"text": "# Create an auto download file link:\n<|{document}|file_download|auto|>"} {"text": 
"# Create a file selector:\n<|{selected_file}|file_selector|>"} {"text": "# Create a file selector with label and action:\n<|{selected_file}|file_selector|label=Select File|on_action=file_selected|extensions=.csv,.xlsx|drop_message=Drop file here|>"} {"text": "# Create a multiple file uploader:\n<|{selected_files}|file_selector|multiple|>"} {"text": "# Show an illustration:\n<|{illustration}|image|>"} {"text": "# Show an image with description and callback:\n<|{picture}|image|label=This is a picture|on_action=image_clicked|>"} {"text": "# Display a message at a specified position between min and max:\n<|status|indicator|value={percentage}|min=0|max=100|>"} {"text": "# Define a basic static menu:\n<|menu|lov=menu_item1;menu_item2|>"} {"text": "# Plot Sales according to Date in a line chart:\n<|{data}|chart|type=lines|x=DATE|y=SALES|>"} {"text": "# Plot Sales according to Date in a line chart titled \"Sales according to Revenue\":\n<|{data}|chart|type=lines|x=DATE|x=SALES|title=SALES according to Revenue|>"} {"text": "# Plot Sales and Revenue according to Date:\n<|{data}|chart|type=lines|x=DATE|y[1]=SALES|y[2]=REVENUE|>"} {"text": "# Plot Sales according to Date on a Dashed line:\n<|{data}|chart|type=lines|x=DATE|x=SALES|line=dash|>"} {"text": "# Plot Revenue by Date on a dotted line:\n<|{data}|chart|type=lines|x=DATE|x=SALES|line=dot|>"} {"text": "# Plot Sales by Date in Red:\n<|{data}|chart|type=lines|x=DATE|x=SALES|color=Red|>"} {"text": "# Plot Revenue according to Date in yellow:\n<|{data}|chart|type=lines|x=DATE|x=SALES|color=Yellow|>"} {"text": "# Plot Revenue according to Date in yellow titled Revenue Plot:\n<|{data}|chart|type=lines|x=DATE|x=SALES|color=Yellow|title=REVENUE Plot>"} {"text": "# Plot Sales in blue and Revenue in green according to Date:\n<|{data}|chart|type=lines|x=DATE|y[1]=SALES|y[2]=REVENUE|color[1]=blue|color[2]=green|>"} {"text": "# Plot Revenue by Date in a red dashed line and Sales in a yellow Dotted line:\n<|{data}|chart|type=lines|x=DATE|y[1]=REVENUE|y[2]=SALES|line[1]=dash|line[2]=dot|color[1]=red|color[2]=yellow|>"} {"text": "# Display Date according to Sales:\n<|{data}|chart|type=lines|x=DATE|x=SALES|>"} {"text": "# Plot in a bar chart the Sales according to Date:\n<|{data}|chart|type=bar|x=DATE|x=SALES|>"} {"text": "# Plot in a bar chart the Sales according to Date and Revenue according to Date:\n<|{data}|chart|type=bar|x=DATE|y[1]=SALES|y[2]=REVENUE|>"} {"text": "# Plot Sales and Revenue by Date in a bar chart:\n<|{data}|chart|type=bar|x=DATE|y[1]=SALES|y[2]=REVENUE|>"} {"text": "# Plot in a bar chart the Sales according to Date and Revenue according to Date titled Finance:\n<|{data}|chart|type=bar|x=DATE|y[1]=SALES|y[2]=REVENUE|title=Finance|>"} {"text": "# Plot in a scatter plot Sales according to Date:\n<|{data}|chart|type=scatter|mode=markers|x=DATE|x=SALES|>"} {"text": "# Draw Sales and Revenue by Date in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=DATE|y[1]=SALES|y[2]=REVENUE|>"} {"text": "# Plot Revenue in green points and Sales in yellow points by Date:\n<|{data}|chart|type=scatter|mode=markers|x=DATE|y[1]=REVENUE|y[2]=SALES|color[1]=green|color[2]=yellow|>"} {"text": "# Plot a histogram of Sales:\n<|{data}|chart|type=histogram|x=SALES|>"} {"text": "# Display a horizontal histogram of Sales:\n<|{data}|chart|type=histogram|x=SALES|>"} {"text": "# Plot the distribution of Sales and Revenue:\n<|{data}|chart|type=histogram|x[1]=SALES|x[2]=REVENUE|>"} {"text": "# Plot the distribution of Sales and Revenue titled \"Sales and Revenue 
Distribution\":\n<|{data}|chart|type=histogram|x[1]=SALES|x[2]=REVENUE|title=SALES and Revenue Distribution|>"} {"text": "# Display a horizontal distribution of Sales and Revenue titled \"Sales and Revenue Distribution\":\n<|{data}|chart|type=histogram|y[1]=SALES|y[2]=REVENUE|title=SALES and Revenue Distribution|>"} {"text": "# Plot a pie chart of Sales by Date:\n<|{data}|chart|type=pie|values=SALES|labels=Date|>"} {"text": "# Draw a pie chart of Sales by Date titled \"Sales Pie Chart\":\n<|{data}|chart|type=pie|values=SALES|labels=Date|title=SALES Pie Chart|>"} {"text": "# Plot a pie chart of Revenue by Date:\n<|{data}|chart|type=pie|values=REVENUE|labels=Date|>"} {"text": "# Visualize Profit over Time in a line chart:\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|>"} {"text": "# Showcase Profit over Time in a line chart titled \"Profit Trend\":\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|title=Profit Trend|>"} {"text": "# Depict Profit and Loss over Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=PROFIT|y[2]=LOSS|>"} {"text": "# Illustrate Profit over Time with a Dashed line:\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|line=dash|>"} {"text": "# Present Loss by Time on a Dotted line:\n<|{data}|chart|type=lines|x=TIME|y=LOSS|line=dot|>"} {"text": "# Plot Profit over Time in Red:\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|color=Red|>"} {"text": "# Exhibit Loss over Time in yellow:\n<|{data}|chart|type=lines|x=TIME|y=LOSS|color=Yellow|>"} {"text": "# Show Profit over Time in yellow titled Profit Overview:\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|color=Yellow|title=Profit Overview|>"} {"text": "# Display Profit in blue and Loss in green over Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=PROFIT|y[2]=LOSS|color[1]=blue|color[2]=green|>"} {"text": "# Visualize Loss by Time in a red dashed line and Profit in a yellow Dotted line:\n<|{data}|chart|type=lines|x=TIME|y[1]=LOSS|y[2]=PROFIT|line[1]=dash|line[2]=dot|color[1]=red|color[2]=yellow|>"} {"text": "# Highlight Time according to Profit:\n<|{data}|chart|type=lines|x=TIME|y=PROFIT|>"} {"text": "# Depict in a bar chart the Profit over Time:\n<|{data}|chart|type=bar|x=TIME|y=PROFIT|>"} {"text": "# Depict in a bar chart the Profit over Time and Loss over Time:\n<|{data}|chart|type=bar|x=TIME|y[1]=PROFIT|y[2]=LOSS|>"} {"text": "# Showcase Profit and Loss by Time in a bar chart:\n<|{data}|chart|type=bar|x=TIME|y[1]=PROFIT|y[2]=LOSS|>"} {"text": "# Depict in a bar chart the Profit over Time and Loss over Time titled Financial Overview:\n<|{data}|chart|type=bar|x=TIME|y[1]=PROFIT|y[2]=LOSS|title=Financial Overview|>"} {"text": "# Depict in a scatter plot Profit over Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y=PROFIT|>"} {"text": "# Illustrate Profit and Loss by Time in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=PROFIT|y[2]=LOSS|>"} {"text": "# Plot Loss in green points and Profit in yellow points by Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=LOSS|y[2]=PROFIT|color[1]=green|color[2]=yellow|>"} {"text": "# Display a histogram of Profit:\n<|{data}|chart|type=histogram|x=PROFIT|>"} {"text": "# Showcase a horizontal histogram of Profit:\n<|{data}|chart|type=histogram|x=PROFIT|>"} {"text": "# Illustrate the distribution of Profit and Loss:\n<|{data}|chart|type=histogram|x[1]=PROFIT|x[2]=LOSS|>"} {"text": "# Illustrate the distribution of Profit and Loss titled \"Profit and Loss Distribution\":\n<|{data}|chart|type=histogram|x[1]=PROFIT|x[2]=LOSS|title=Profit and Loss Distribution|>"} {"text": "# Present a 
horizontal distribution of Profit and Loss titled \"Profit and Loss Distribution\":\n<|{data}|chart|type=histogram|y[1]=PROFIT|y[2]=LOSS|title=Profit and Loss Distribution|>"} {"text": "# Depict a pie chart of Profit by Time:\n<|{data}|chart|type=pie|values=PROFIT|labels=Time|>"} {"text": "# Illustrate a pie chart of Profit by Time titled \"Profit Pie Chart\":\n<|{data}|chart|type=pie|values=PROFIT|labels=Time|title=Profit Pie Chart|>"} {"text": "# Depict a pie chart of Loss by Time:\n<|{data}|chart|type=pie|values=LOSS|labels=Time|>"} {"text": "# Visualize Quantity over Time in a line chart:\n<|{data}|chart|type=lines|x=TIME|y=QUANTITY|>"} {"text": "# Showcase Quantity over Time in a line chart titled \"Quantity Trend\":\n<|{data}|chart|type=lines|x=TIME|y=QUANTITY|title=Quantity Trend|>"} {"text": "# Depict Quantity and Price over Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=QUANTITY|y[2]=PRICE|>"} {"text": "# Illustrate Quantity over Time with a Dashed line:\n<|{data}|chart|type=lines|x=TIME|y=QUANTITY|line=dash|>"} {"text": "# Present Price by Time on a Dotted line:\n<|{data}|chart|type=lines|x=TIME|y=PRICE|line=dot|>"} {"text": "# Plot Quantity over Time in Green:\n<|{data}|chart|type=lines|x=TIME|y=QUANTITY|color=Green|>"} {"text": "# Exhibit Price over Time in Blue:\n<|{data}|chart|type=lines|x=TIME|y=PRICE|color=Blue|>"} {"text": "# Show Price over Time in Blue titled Price Overview:\n<|{data}|chart|type=lines|x=TIME|y=PRICE|color=Blue|title=Price Overview|>"} {"text": "# Display Quantity in Red and Price in Yellow over Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=QUANTITY|y[2]=PRICE|color[1]=Red|color[2]=Yellow|>"} {"text": "# Visualize Price by Time in a Green dashed line and Quantity in a Yellow Dotted line:\n<|{data}|chart|type=lines|x=TIME|y[1]=PRICE|y[2]=QUANTITY|line[1]=dash|line[2]=dot|color[1]=Green|color[2]=Yellow|>"} {"text": "# Highlight Time according to Quantity:\n<|{data}|chart|type=lines|x=TIME|y=QUANTITY|>"} {"text": "# Depict in a bar chart the Quantity over Time:\n<|{data}|chart|type=bar|x=TIME|y=QUANTITY|>"} {"text": "# Depict in a bar chart the Quantity over Time and Price over Time:\n<|{data}|chart|type=bar|x=TIME|y[1]=QUANTITY|y[2]=PRICE|>"} {"text": "# Showcase Quantity and Price by Time in a bar chart:\n<|{data}|chart|type=bar|x=TIME|y[1]=QUANTITY|y[2]=PRICE|>"} {"text": "# Depict in a bar chart the Quantity over Time and Price over Time titled Product Overview:\n<|{data}|chart|type=bar|x=TIME|y[1]=QUANTITY|y[2]=PRICE|title=Product Overview|>"} {"text": "# Depict in a scatter plot Quantity over Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y=QUANTITY|>"} {"text": "# Illustrate Quantity and Price by Time in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=QUANTITY|y[2]=PRICE|>"} {"text": "# Plot Price in Green points and Quantity in Yellow points by Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=PRICE|y[2]=QUANTITY|color[1]=Green|color[2]=Yellow|>"} {"text": "# Display a histogram of Quantity:\n<|{data}|chart|type=histogram|x=QUANTITY|>"} {"text": "# Showcase a horizontal histogram of Quantity:\n<|{data}|chart|type=histogram|x=QUANTITY|>"} {"text": "# Illustrate the distribution of Quantity and Price:\n<|{data}|chart|type=histogram|x[1]=QUANTITY|x[2]=PRICE|>"} {"text": "# Illustrate the distribution of Quantity and Price titled \"Quantity and Price Distribution\":\n<|{data}|chart|type=histogram|x[1]=QUANTITY|x[2]=PRICE|title=Quantity and Price Distribution|>"} {"text": "# Present a horizontal distribution of 
Quantity and Price titled \"Quantity and Price Distribution\":\n<|{data}|chart|type=histogram|y[1]=QUANTITY|y[2]=PRICE|title=Quantity and Price Distribution|>"} {"text": "# Depict a pie chart of Quantity by Time:\n<|{data}|chart|type=pie|values=QUANTITY|labels=Time|>"} {"text": "# Illustrate a pie chart of Quantity by Time titled \"Quantity Pie Chart\":\n<|{data}|chart|type=pie|values=QUANTITY|labels=Time|title=Quantity Pie Chart|>"} {"text": "# Depict a pie chart of Price by Time:\n<|{data}|chart|type=pie|values=PRICE|labels=Time|>"} {"text": "# Plot Temperature against Time in a line chart:\n<|{data}|chart|type=lines|x=TIME|y=TEMPERATURE|>"} {"text": "# Showcase Temperature against Time in a line chart titled \"Temperature Trend\":\n<|{data}|chart|type=lines|x=TIME|y=TEMPERATURE|title=Temperature Trend|>"} {"text": "# Depict Temperature and Humidity against Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|>"} {"text": "# Illustrate Temperature against Time with a Dashed line:\n<|{data}|chart|type=lines|x=TIME|y=TEMPERATURE|line=dash|>"} {"text": "# Present Humidity by Time on a Dotted line:\n<|{data}|chart|type=lines|x=TIME|y=HUMIDITY|line=dot|>"} {"text": "# Plot Temperature against Time in Blue:\n<|{data}|chart|type=lines|x=TIME|y=TEMPERATURE|color=Blue|>"} {"text": "# Exhibit Humidity against Time in Green:\n<|{data}|chart|type=lines|x=TIME|y=HUMIDITY|color=Green|>"} {"text": "# Show Humidity against Time in Green titled Humidity Overview:\n<|{data}|chart|type=lines|x=TIME|y=HUMIDITY|color=Green|title=Humidity Overview|>"} {"text": "# Display Temperature in Red and Humidity in Yellow against Time:\n<|{data}|chart|type=lines|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|color[1]=Red|color[2]=Yellow|>"} {"text": "# Visualize Humidity against Time in a Red dashed line and Temperature in a Yellow Dotted line:\n<|{data}|chart|type=lines|x=TIME|y[1]=HUMIDITY|y[2]=TEMPERATURE|line[1]=dash|line[2]=dot|color[1]=Red|color[2]=Yellow|>"} {"text": "# Highlight Time according to Temperature:\n<|{data}|chart|type=lines|x=TIME|y=TEMPERATURE|>"} {"text": "# Depict in a bar chart the Temperature against Time:\n<|{data}|chart|type=bar|x=TIME|y=TEMPERATURE|>"} {"text": "# Depict in a bar chart the Temperature against Time and Humidity against Time:\n<|{data}|chart|type=bar|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|>"} {"text": "# Showcase Temperature and Humidity against Time in a bar chart:\n<|{data}|chart|type=bar|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|>"} {"text": "# Depict in a bar chart the Temperature against Time and Humidity against Time titled Climate Overview:\n<|{data}|chart|type=bar|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|title=Climate Overview|>"} {"text": "# Depict in a scatter plot Temperature against Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y=TEMPERATURE|>"} {"text": "# Illustrate Temperature and Humidity against Time in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=TEMPERATURE|y[2]=HUMIDITY|>"} {"text": "# Plot Humidity in Green points and Temperature in Yellow points against Time:\n<|{data}|chart|type=scatter|mode=markers|x=TIME|y[1]=HUMIDITY|y[2]=TEMPERATURE|color[1]=Green|color[2]=Yellow|>"} {"text": "# Display a histogram of Temperature:\n<|{data}|chart|type=histogram|x=TEMPERATURE|>"} {"text": "# Showcase a horizontal histogram of Temperature:\n<|{data}|chart|type=histogram|x=TEMPERATURE|>"} {"text": "# Illustrate the distribution of Temperature and Humidity:\n<|{data}|chart|type=histogram|x[1]=TEMPERATURE|x[2]=HUMIDITY|>"} {"text": "# 
Illustrate the distribution of Temperature and Humidity titled \"Temperature and Humidity Distribution\":\n<|{data}|chart|type=histogram|x[1]=TEMPERATURE|x[2]=HUMIDITY|title=Temperature and Humidity Distribution|>"} {"text": "# Present a horizontal distribution of Temperature and Humidity titled \"Temperature and Humidity Distribution\":\n<|{data}|chart|type=histogram|y[1]=TEMPERATURE|y[2]=HUMIDITY|title=Temperature and Humidity Distribution|>"} {"text": "# Depict a pie chart of Temperature against Time:\n<|{data}|chart|type=pie|values=TEMPERATURE|labels=Time|>"} {"text": "# Illustrate a pie chart of Temperature against Time titled \"Temperature Pie Chart\":\n<|{data}|chart|type=pie|values=TEMPERATURE|labels=Time|title=Temperature Pie Chart|>"} {"text": "# Depict a pie chart of Humidity against Time:\n<|{data}|chart|type=pie|values=HUMIDITY|labels=Time|>"} {"text": "# Plot Sales against Region in a line chart:\n<|{data}|chart|type=lines|x=REGION|y=SALES|>"} {"text": "# Showcase Sales against Region in a line chart titled \"Sales by Region\":\n<|{data}|chart|type=lines|x=REGION|y=SALES|title=Sales by Region|>"} {"text": "# Depict Sales and Profit against Region:\n<|{data}|chart|type=lines|x=REGION|y[1]=SALES|y[2]=PROFIT|>"} {"text": "# Illustrate Sales against Region with a Dashed line:\n<|{data}|chart|type=lines|x=REGION|y=SALES|line=dash|>"} {"text": "# Present Profit by Region on a Dotted line:\n<|{data}|chart|type=lines|x=REGION|y=PROFIT|line=dot|>"} {"text": "# Plot Sales against Region in Blue:\n<|{data}|chart|type=lines|x=REGION|y=SALES|color=Blue|>"} {"text": "# Exhibit Profit against Region in Green:\n<|{data}|chart|type=lines|x=REGION|y=PROFIT|color=Green|>"} {"text": "# Show Profit against Region in Green titled Profit Overview:\n<|{data}|chart|type=lines|x=REGION|y=PROFIT|color=Green|title=Profit Overview|>"} {"text": "# Display Sales in Red and Profit in Yellow against Region:\n<|{data}|chart|type=lines|x=REGION|y[1]=SALES|y[2]=PROFIT|color[1]=Red|color[2]=Yellow|>"} {"text": "# Visualize Profit by Region in a Red dashed line and Sales in a Yellow Dotted line:\n<|{data}|chart|type=lines|x=REGION|y[1]=PROFIT|y[2]=SALES|line[1]=dash|line[2]=dot|color[1]=Red|color[2]=Yellow|>"} {"text": "# Highlight Region according to Sales:\n<|{data}|chart|type=lines|x=REGION|y=SALES|>"} {"text": "# Depict in a bar chart the Sales against Region:\n<|{data}|chart|type=bar|x=REGION|y=SALES|>"} {"text": "# Depict in a bar chart the Sales against Region and Profit against Region:\n<|{data}|chart|type=bar|x=REGION|y[1]=SALES|y[2]=PROFIT|>"} {"text": "# Showcase Sales and Profit against Region in a bar chart:\n<|{data}|chart|type=bar|x=REGION|y[1]=SALES|y[2]=PROFIT|>"} {"text": "# Depict in a bar chart the Sales against Region and Profit against Region titled Financial Overview:\n<|{data}|chart|type=bar|x=REGION|y[1]=SALES|y[2]=PROFIT|title=Financial Overview|>"} {"text": "# Depict in a scatter plot Sales against Region:\n<|{data}|chart|type=scatter|mode=markers|x=REGION|y=SALES|>"} {"text": "# Illustrate Sales and Profit against Region in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=REGION|y[1]=SALES|y[2]=PROFIT|>"} {"text": "# Plot Profit in Green points and Sales in Yellow points against Region:\n<|{data}|chart|type=scatter|mode=markers|x=REGION|y[1]=PROFIT|y[2]=SALES|color[1]=Green|color[2]=Yellow|>"} {"text": "# Display a histogram of Sales against Region:\n<|{data}|chart|type=histogram|x=SALES|>"} {"text": "# Showcase a horizontal histogram of Sales against 
Region:\n<|{data}|chart|type=histogram|x=SALES|>"} {"text": "# Illustrate the distribution of Sales and Profit against Region:\n<|{data}|chart|type=histogram|x[1]=SALES|x[2]=PROFIT|>"} {"text": "# Illustrate the distribution of Sales and Profit against Region titled \"Sales and Profit Distribution\":\n<|{data}|chart|type=histogram|x[1]=SALES|x[2]=PROFIT|title=Sales and Profit Distribution|>"} {"text": "# Present a horizontal distribution of Sales and Profit against Region titled \"Sales and Profit Distribution\":\n<|{data}|chart|type=histogram|y[1]=SALES|y[2]=PROFIT|title=Sales and Profit Distribution|>"} {"text": "# Depict a pie chart of Sales against Region:\n<|{data}|chart|type=pie|values=SALES|labels=Region|>"} {"text": "# Illustrate a pie chart of Sales against Region titled \"Sales Pie Chart\":\n<|{data}|chart|type=pie|values=SALES|labels=Region|title=Sales Pie Chart|>"} {"text": "# Depict a pie chart of Profit against Region:\n<|{data}|chart|type=pie|values=PROFIT|labels=Region|>"} {"text": "# Visualize Productivity against Employee in a line chart:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=PRODUCTIVITY|>"} {"text": "# Showcase Productivity against Employee in a line chart titled \"Employee Productivity Trend\":\n<|{data}|chart|type=lines|x=EMPLOYEE|y=PRODUCTIVITY|title=Employee Productivity Trend|>"} {"text": "# Depict Productivity and Satisfaction against Employee:\n<|{data}|chart|type=lines|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|>"} {"text": "# Illustrate Productivity against Employee with a Dashed line:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=PRODUCTIVITY|line=dash|>"} {"text": "# Present Satisfaction by Employee on a Dotted line:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=SATISFACTION|line=dot|>"} {"text": "# Plot Productivity against Employee in Blue:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=PRODUCTIVITY|color=Blue|>"} {"text": "# Exhibit Satisfaction against Employee in Green:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=SATISFACTION|color=Green|>"} {"text": "# Show Satisfaction against Employee in Green titled Satisfaction Overview:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=SATISFACTION|color=Green|title=Satisfaction Overview|>"} {"text": "# Display Productivity in Red and Satisfaction in Yellow against Employee:\n<|{data}|chart|type=lines|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|color[1]=Red|color[2]=Yellow|>"} {"text": "# Visualize Satisfaction by Employee in a Red dashed line and Productivity in a Yellow Dotted line:\n<|{data}|chart|type=lines|x=EMPLOYEE|y[1]=SATISFACTION|y[2]=PRODUCTIVITY|line[1]=dash|line[2]=dot|color[1]=Red|color[2]=Yellow|>"} {"text": "# Highlight Employee according to Productivity:\n<|{data}|chart|type=lines|x=EMPLOYEE|y=PRODUCTIVITY|>"} {"text": "# Depict in a bar chart the Productivity against Employee:\n<|{data}|chart|type=bar|x=EMPLOYEE|y=PRODUCTIVITY|>"} {"text": "# Depict in a bar chart the Productivity against Employee and Satisfaction against Employee:\n<|{data}|chart|type=bar|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|>"} {"text": "# Showcase Productivity and Satisfaction against Employee in a bar chart:\n<|{data}|chart|type=bar|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|>"} {"text": "# Depict in a bar chart the Productivity against Employee and Satisfaction against Employee titled Work Overview:\n<|{data}|chart|type=bar|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|title=Work Overview|>"} {"text": "# Depict in a scatter plot Productivity against Employee:\n<|{data}|chart|type=scatter|mode=markers|x=EMPLOYEE|y=PRODUCTIVITY|>"} 
{"text": "# Illustrate Productivity and Satisfaction against Employee in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=EMPLOYEE|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|>"} {"text": "# Plot Satisfaction in Green points and Productivity in Yellow points against Employee:\n<|{data}|chart|type=scatter|mode=markers|x=EMPLOYEE|y[1]=SATISFACTION|y[2]=PRODUCTIVITY|color[1]=Green|color[2]=Yellow|>"} {"text": "# Display a histogram of Productivity against Employee:\n<|{data}|chart|type=histogram|x=PRODUCTIVITY|>"} {"text": "# Showcase a horizontal histogram of Productivity against Employee:\n<|{data}|chart|type=histogram|x=PRODUCTIVITY|>"} {"text": "# Illustrate the distribution of Productivity and Satisfaction against Employee:\n<|{data}|chart|type=histogram|x[1]=PRODUCTIVITY|x[2]=SATISFACTION|>"} {"text": "# Illustrate the distribution of Productivity and Satisfaction against Employee titled \"Productivity and Satisfaction Distribution\":\n<|{data}|chart|type=histogram|x[1]=PRODUCTIVITY|x[2]=SATISFACTION|title=Productivity and Satisfaction Distribution|>"} {"text": "# Present a horizontal distribution of Productivity and Satisfaction against Employee titled \"Productivity and Satisfaction Distribution\":\n<|{data}|chart|type=histogram|y[1]=PRODUCTIVITY|y[2]=SATISFACTION|title=Productivity and Satisfaction Distribution|>"} {"text": "# Depict a pie chart of Productivity against Employee:\n<|{data}|chart|type=pie|values=PRODUCTIVITY|labels=Employee|>"} {"text": "# Illustrate a pie chart of Productivity against Employee titled \"Productivity Pie Chart\":\n<|{data}|chart|type=pie|values=PRODUCTIVITY|labels=Employee|title=Productivity Pie Chart|>"} {"text": "# Depict a pie chart of Satisfaction against Employee:\n<|{data}|chart|type=pie|values=SATISFACTION|labels=Employee|>"} {"text": "# Plot Population against Country in a line chart:\n<|{data}|chart|type=lines|x=COUNTRY|y=POPULATION|>"} {"text": "# Showcase Population against Country in a line chart titled \"Population Trends\":\n<|{data}|chart|type=lines|x=COUNTRY|y=POPULATION|title=Population Trends|>"} {"text": "# Depict Population and GDP against Country:\n<|{data}|chart|type=lines|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|>"} {"text": "# Illustrate Population against Country with a Dashed line:\n<|{data}|chart|type=lines|x=COUNTRY|y=POPULATION|line=dash|>"} {"text": "# Present GDP by Country on a Dotted line:\n<|{data}|chart|type=lines|x=COUNTRY|y=GDP|line=dot|>"} {"text": "# Plot Population against Country in Blue:\n<|{data}|chart|type=lines|x=COUNTRY|y=POPULATION|color=Blue|>"} {"text": "# Exhibit GDP against Country in Green:\n<|{data}|chart|type=lines|x=COUNTRY|y=GDP|color=Green|>"} {"text": "# Show GDP against Country in Green titled GDP Overview:\n<|{data}|chart|type=lines|x=COUNTRY|y=GDP|color=Green|title=GDP Overview|>"} {"text": "# Display Population in Red and GDP in Yellow against Country:\n<|{data}|chart|type=lines|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|color[1]=Red|color[2]=Yellow|>"} {"text": "# Visualize GDP by Country in a Red dashed line and Population in a Yellow Dotted line:\n<|{data}|chart|type=lines|x=COUNTRY|y[1]=GDP|y[2]=POPULATION|line[1]=dash|line[2]=dot|color[1]=Red|color[2]=Yellow|>"} {"text": "# Highlight Country according to Population:\n<|{data}|chart|type=lines|x=COUNTRY|y=POPULATION|>"} {"text": "# Depict in a bar chart the Population against Country:\n<|{data}|chart|type=bar|x=COUNTRY|y=POPULATION|>"} {"text": "# Depict in a bar chart the Population against Country and GDP against 
Country:\n<|{data}|chart|type=bar|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|>"} {"text": "# Showcase Population and GDP against Country in a bar chart:\n<|{data}|chart|type=bar|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|>"} {"text": "# Depict in a bar chart the Population against Country and GDP against Country titled Economic Overview:\n<|{data}|chart|type=bar|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|title=Economic Overview|>"} {"text": "# Depict in a scatter plot Population against Country:\n<|{data}|chart|type=scatter|mode=markers|x=COUNTRY|y=POPULATION|>"} {"text": "# Illustrate Population and GDP against Country in a scatter plot:\n<|{data}|chart|type=scatter|mode=markers|x=COUNTRY|y[1]=POPULATION|y[2]=GDP|>"} {"text": "# Plot GDP in Green points and Population in Yellow points against Country:\n<|{data}|chart|type=scatter|mode=markers|x=COUNTRY|y[1]=GDP|y[2]=POPULATION|color[1]=Green|color[2]=Yellow|>"} {"text": "# Display a histogram of Population against Country:\n<|{data}|chart|type=histogram|x=POPULATION|>"} {"text": "# Showcase a horizontal histogram of Population against Country:\n<|{data}|chart|type=histogram|x=POPULATION|>"} {"text": "# Illustrate the distribution of Population and GDP against Country:\n<|{data}|chart|type=histogram|x[1]=POPULATION|x[2]=GDP|>"} {"text": "# Illustrate the distribution of Population and GDP against Country titled \"Population and GDP Distribution\":\n<|{data}|chart|type=histogram|x[1]=POPULATION|x[2]=GDP|title=Population and GDP Distribution|>"} {"text": "# Present a horizontal distribution of Population and GDP against Country titled \"Population and GDP Distribution\":\n<|{data}|chart|type=histogram|y[1]=POPULATION|y[2]=GDP|title=Population and GDP Distribution|>"} {"text": "# Depict a pie chart of Population against Country:\n<|{data}|chart|type=pie|values=POPULATION|labels=Country|>"} {"text": "# Illustrate a pie chart of Population against Country titled \"Population Pie Chart\":\n<|{data}|chart|type=pie|values=POPULATION|labels=Country|title=Population Pie Chart|>"} {"text": "# Depict a pie chart of GDP against Country:\n<|{data}|chart|type=pie|values=GDP|labels=Country|>"} {"text": "# **Worldwide**{: .color-primary} Health and Fitness Trends\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Average Life Expectancy**{: .color-primary}\n<|{'{:.1f}'.format(np.average(data_world_health['Life Expectancy']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Obesity Rate**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_health['Obesity Rate']))}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Gym Memberships**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_health['Gym Memberships']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_health_metric}|toggle|lov={health_metric_selector}|>\n\n<|part|render={selected_health_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_health_pie_absolute}|chart|type=pie|labels=Country|values=Life Expectancy|title=Global Life Expectancy Distribution|>\n\n<|{data_world_health_evolution_absolute}|chart|properties={data_world_health_evolution_properties}|title=Health and Fitness Evolution Worldwide|>\n|>\n|>\n\n<|part|render={selected_health_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_health_pie_relative}|chart|type=pie|labels=Country|values=Obesity Rate|>\n\n<|{data_world_health_evolution_relative}|chart|properties={data_world_health_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load task data\ntask_data = pd.read_csv(\"task_data.csv\")\n\n# Initialize variables\npriorities = list(task_data[\"Priority\"].unique())\ncategories = list(task_data[\"Category\"].unique())\npriority = priorities\ncategory = categories\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{category}|selector|lov={categories}|multiple|label=Select Category|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_tasks>\n\n hours\n|average_completion_time>\n\n\n|task_table>\n|main_page>\n|>\n\nCode adapted from [Task Management](https://github.com/task_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/task-management-app)\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\nfrom PIL import Image\nimport io\n\nimage_data = None\nimage_path = \"\"\n\ndef image_upload(state):\n if state.image_path:\n with open(state.image_path, \"rb\") as file:\n state.image_data = Image.open(io.BytesIO(file.read()))\n\nimage_page = \"\"\"\n<|{image_path}|file_selector|accept=image/*|on_action=image_upload|>\n<|{image_data}|image|>\n\"\"\"\n\nGui(image_page).run()\n"} {"text": "<|{all_reservations}|table|columns={reservation_columns}|width='100%'|on_action={on_reservation_select}|style=reservation_style|>\n<|Create Reservation|button|on_action={open_create_reservation_dialog}|>\n<|Refresh Reservations|button|on_action={refresh_reservation_list}|>\n\n<|{show_create_reservation_dialog}|dialog|title=Create Reservation|\n<|{customer_name}|input|placeholder='Customer Name'|\n<|{reservation_date}|datetime_picker|>\n<|{table_number}|number_input|min=1|placeholder='Table Number'|\n<|Create|button|on_action={create_reservation}|>\n<|Cancel|button|on_action={close_create_reservation_dialog}|>\n|>\n\n<|{show_reservation_details}|pane|\n\n# Reservation Details <|Edit|button|on_action=edit_selected_reservation|> <|Cancel|button|on_action=cancel_selected_reservation|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Customer Name\n<|{selected_reservation.customer_name}|>\n|>\n\n<|part|class_name=card|\n## Date and Time\n<|{selected_reservation.date.strftime(\"%b %d, %Y at %H:%M\")}|>\n|>\n\n<|part|class_name=card|\n## Table Number\n<|{selected_reservation.table_number}|>\n|>\n\n----\n|>\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Product Category
\n<|{product_category_selected}|selector|lov=category_electronics;category_clothing;category_food|dropdown|on_change=on_product_category_change|>\n|>\n\n<|part|class_name=card|\n### Select Store Location
\n<|{store_location_selected}|selector|lov=location_downtown;location_suburb;location_rural|dropdown|on_change=on_store_location_change|>\n|>\n\n|>\n\n<|Inventory Data Overview|expandable|expanded=True|\nDisplay category_data and location_data\n<|layout|columns=1 1|\n<|{category_data}|table|page_size=5|>\n\n<|{location_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{stock_levels_chart}|chart|type=bar|x=Product|y=Stock Level|title=Stock Levels by Category|>\n|>\n\n<|part|class_name=card|\n<|{sales_by_location_chart}|chart|type=pie|options={sales_options}|layout={sales_layout}|title=Sales by Location|>\n|>\n|>\n\n
\n### Analyze Inventory Efficiency:\n<|{inventory_efficiency_analysis}|scenario|on_submission_change=on_inventory_efficiency_status_change|expandable=False|expanded=False|>\n\n<|{inventory_efficiency_analysis}|scenario_dag|>\n\n
\n### View inventory efficiency results:\n<|{inventory_efficiency_analysis.results if inventory_efficiency_analysis else None}|data_node|>\n"} {"text": "from taipy import Gui\n\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Art Generator\n\n<|layout|columns=35 65|\nDisplay Mandelbrot Art from path\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nCreate a slider to select iterations\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:
\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:
\n\nSlider dx_start\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Art Generator\")\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Stock
\n<|{stock_selected}|selector|lov=stock_apple;stock_google;stock_amazon|dropdown|on_change=on_stock_change|>\n|>\n\n<|part|class_name=card|\n### Select Comparison Market Index
\n<|{market_index_selected}|selector|lov=index_nasdaq;index_s&p500;index_dowjones|dropdown|on_change=on_market_index_change|>\n|>\n\n|>\n\n<|Stock and Market Data|expandable|expanded=True|\nDisplay stock_data and market_index_data\n<|layout|columns=1 1|\n<|{stock_data}|table|page_size=5|>\n\n<|{market_index_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{stock_price_chart}|chart|type=line|x=Date|y=Price|title=Stock Price Trend|>\n|>\n\n<|part|class_name=card|\n<|{market_index_chart}|chart|type=line|x=Date|y=Index Value|title=Market Index Trend|>\n|>\n|>\n\n
\n### Run Financial Analysis:\n<|{financial_analysis}|scenario|on_submission_change=on_financial_analysis_status_change|expandable=False|expanded=False|>\n\n<|{financial_analysis}|scenario_dag|>\n\n
\n### View financial analysis results:\n<|{financial_analysis.results if financial_analysis else None}|data_node|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load sales data\nsales_data = pd.read_csv(\"sales_data.csv\")\n\n# Initialize variables\nregions = list(sales_data[\"Region\"].unique())\nproducts = list(sales_data[\"Product\"].unique())\nregion = regions\nproduct = products\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{product}|selector|lov={products}|multiple|label=Select Product|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_sales>\n\n\n|average_profit>\n\n\n\n<|{profit_chart}|chart|x=Month|y=Profit|type=line|title=Profit by Month|color=#ff462b|width=100%|>\n|sales_chart>\n|main_page>\n|>\n\nCode adapted from [Sales Analysis](https://github.com/sales_analysis_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/sales-analysis-app)\n"} {"text": "<|{all_events}|table|columns={event_columns}|width='100%'|on_action={on_event_click}|style=event_style|>\n<|Create Event|button|on_action={open_create_event_dialog}|>\n<|Refresh Events|button|on_action={refresh_event_list}|>\n\n<|{show_create_event_dialog}|dialog|title=Create New Event|\n<|{event_title}|input|placeholder='Event Title'|\n<|{event_date}|date_picker|>\n<|Create Event|button|on_action={create_event}|>\n<|Cancel|button|on_action={close_create_event_dialog}|>\n|>\n\n<|{show_event_details}|pane|\n\n# Event Details <|Edit|button|on_action=edit_selected_event|> <|Cancel|button|on_action=cancel_selected_event|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Title\n<|{selected_event.title}|>\n|>\n\n<|part|class_name=card|\n## Date\n<|{selected_event.date.strftime(\"%b %d, %Y\")}|>\n|>\n\n<|part|class_name=card|\n## Description\n<|{selected_event.description}|textarea|disabled=True|>\n|>\n\n----\n|>\n"} {"text": "# **Country**{: .color-primary} Energy Consumption\n\n<|layout|columns=1 1 1|\n<|{selected_country_energy}|selector|lov={selector_country_energy}|on_change=on_change_country_energy|dropdown|label=Country|>\n\n<|{selected_energy_source}|toggle|lov={energy_source_selector}|on_change=update_energy_source_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Consumption**{: .color-primary}\n<|{'{:,}'.format(int(energy_data.iloc[-1]['Total']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Renewable Sources**{: .color-primary}\n<|{'{:,}'.format(int(energy_data.iloc[-1]['Renewable']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Non-Renewable Sources**{: .color-primary}\n<|{'{:,}'.format(int(energy_data.iloc[-1]['Non-Renewable']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{energy_data}|chart|type=line|x=Year|y[3]=Total|y[2]=Renewable|y[1]=Non-Renewable|layout={layout}|options={options}|title=Energy Consumption Trends|>\n\n<|{energy_source_distribution_chart}|chart|type=pie|values=energy_source_values|labels=energy_source_labels|title=Energy Source Distribution|>\n|>\n"} {"text": "<|{inventory_items}|table|columns={inventory_columns}|width='100%'|on_action={on_inventory_item_select}|style=inventory_style|>\n<|Add Item|button|on_action={open_add_item_dialog}|>\n<|Refresh Inventory|button|on_action={refresh_inventory}|>\n\n<|{show_add_item_dialog}|dialog|title=Add Inventory Item|\n<|{item_name}|input|placeholder='Item Name'|\n<|{item_quantity}|number_input|min=0|>\n<|Add Item|button|on_action={add_inventory_item}|>\n<|Cancel|button|on_action={close_add_item_dialog}|>\n|>\n\n<|{show_item_details}|pane|\n\n# Item Details <|Remove|button|on_action=remove_selected_item|> <|Update|button|on_action=update_selected_item|>\n\n<|layout|columns=2|\n<|part|class_name=card|\n## Name\n<|{selected_item.name}|>\n|>\n\n<|part|class_name=card|\n## Quantity\n<|{selected_item.quantity}|>\n|>\n\n<|part|class_name=card|\n## ID\n<|{selected_item.id}|>\n|>\n\n<|part|class_name=card|\n## Last Updated\n<|{selected_item.last_updated.strftime(\"%b %d, %Y at %H:%M:%S\")}|>\n|>\n\n----\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Technology\"\n mood = \"tech\"\n style = \"techexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Technology Tweets\n\n This mini-app generates Tweets related to Technology using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n \n |topic>\n\n \n |mood>\n\n \n |style>\n\n Create a Generate text button\n <|Generate Tech Tweet|button|on_action=generate_text|label=Generate text|>\n\n <|{image}|image|height=400px|>\n |image>\n\n Break line\n
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load event registration data\nevent_data = pd.read_csv(\"event_registration_data.csv\")\n\n# Initialize variables\nevents = list(event_data[\"Event\"].unique())\nregistrants = list(event_data[\"Registrant\"].unique())\nevent = events\nregistrant = registrants\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{registrant}|selector|lov={registrants}|multiple|label=Select Registrant|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_registrations>\n\n attendees\n|average_attendance>\n\n\n|event_registration_table>\n|main_page>\n|>\n\nCode adapted from [Event Registration](https://github.com/event_registration_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/event-registration-app)\n"} {"text": "<|{all_courses}|table|columns={course_columns}|width='100%'|on_action={on_course_select}|style=course_style|>\n<|Add Course|button|on_action={open_add_course_dialog}|>\n<|Refresh Courses|button|on_action={refresh_course_list}|>\n\n<|{show_add_course_dialog}|dialog|title=Add New Course|\n<|{course_title}|input|placeholder='Course Title'|\n<|{course_instructor}|input|placeholder='Instructor Name'|\n<|{course_duration}|number_input|placeholder='Duration in Hours'|\n<|{course_category}|selector|lov={get_all_categories()}|>\n<|Add Course|button|on_action={add_course}|>\n<|Cancel|button|on_action={close_add_course_dialog}|>\n|>\n\n<|{show_course_details}|pane|\n\n# Course Details <|Edit|button|on_action=edit_selected_course|> <|Remove|button|on_action=remove_selected_course|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Title\n<|{selected_course.title}|>\n|>\n\n<|part|class_name=card|\n## Instructor\n<|{selected_course.instructor}|>\n|>\n\n<|part|class_name=card|\n## Duration\n<|{selected_course.duration}|>\n|>\n\n<|part|class_name=card|\n## Category\n<|{selected_course.category}|>\n|>\n\n----\n|>\n"} {"text": "<|{all_itineraries}|table|columns={itinerary_columns}|width='100%'|on_action={on_itinerary_select}|style=itinerary_style|>\n<|Create Itinerary|button|on_action={open_create_itinerary_dialog}|>\n<|Refresh Itineraries|button|on_action={refresh_itinerary_list}|>\n\n<|{show_create_itinerary_dialog}|dialog|title=Create Travel Itinerary|\n<|{destination}|input|placeholder='Destination'|\n<|{start_date}|date_picker|>\n<|{end_date}|date_picker|>\n<|Create Itinerary|button|on_action={create_itinerary}|>\n<|Cancel|button|on_action={close_create_itinerary_dialog}|>\n|>\n\n<|{show_itinerary_details}|pane|\n\n# Itinerary Details <|Edit|button|on_action=edit_selected_itinerary|> <|Remove|button|on_action=remove_selected_itinerary|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Destination\n<|{selected_itinerary.destination}|>\n|>\n\n<|part|class_name=card|\n## Start Date\n<|{selected_itinerary.start_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n<|part|class_name=card|\n## End Date\n<|{selected_itinerary.end_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n----\n|>\n"} {"text": "<|{all_exhibits}|table|columns={exhibit_columns}|width='100%'|on_action={on_exhibit_select}|style=exhibit_style|>\n<|Add Exhibit|button|on_action={open_add_exhibit_dialog}|>\n<|Refresh Exhibits|button|on_action={refresh_exhibit_list}|>\n\n<|{show_add_exhibit_dialog}|dialog|title=Add New 
Exhibit|\n<|{exhibit_name}|input|placeholder='Exhibit Name'|\n<|{exhibit_artist}|input|placeholder='Artist Name'|\n<|{exhibit_start_date}|date_picker|>\n<|{exhibit_end_date}|date_picker|>\n<|Add Exhibit|button|on_action={add_exhibit}|>\n<|Cancel|button|on_action={close_add_exhibit_dialog}|>\n|>\n\n<|{show_exhibit_details}|pane|\n\n# Exhibit Details <|Edit|button|on_action=edit_selected_exhibit|> <|Remove|button|on_action=remove_selected_exhibit|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Name\n<|{selected_exhibit.name}|>\n|>\n\n<|part|class_name=card|\n## Artist\n<|{selected_exhibit.artist}|>\n|>\n\n<|part|class_name=card|\n## Start Date\n<|{selected_exhibit.start_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n<|part|class_name=card|\n## End Date\n<|{selected_exhibit.end_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n----\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load employee data\nemployee_data = pd.read_csv(\"employee_data.csv\")\n\n# Initialize variables\ndepartments = list(employee_data[\"Department\"].unique())\npositions = list(employee_data[\"Position\"].unique())\ndepartment = departments\nposition = positions\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{position}|selector|lov={positions}|multiple|label=Select Position|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_employees>\n\n\n|average_salary>\n\n\n|employee_table>\n|main_page>\n|>\n\nCode adapted from [Employee Management](https://github.com/employee_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/employee-management-app)\n\"\"\"\n\ndef filter(department, position):\n df_selection = employee_data[\n employee_data[\"Department\"].isin(department)\n & employee_data[\"Position\"].isin(position)\n ]\n return df_selection\n\ndef on_filter(state):\n state.df_selection = filter(state.department, state.position)\n\nif __name__ == \"__main__\":\n # Initialize dataframe\n df_selection = filter(department, position)\n\n # Run the app\n Gui(page).run()\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load customer feedback data\nfeedback_data = pd.read_csv(\"customer_feedback_data.csv\")\n\n# Initialize variables\nproducts = list(feedback_data[\"Product\"].unique())\nratings = list(feedback_data[\"Rating\"].unique())\nproduct = products\nrating = ratings\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{rating}|selector|lov={ratings}|multiple|label=Select Rating|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_feedback>\n\n\n|average_rating>\n\n\n|feedback_table>\n|main_page>\n|>\n\nCode adapted from [Customer Feedback](https://github.com/customer_feedback_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/customer-feedback-app)\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages related to the finance sector (replace with actual imports)\nfrom pages.finance.finance import finance_md\n\n# Define the pages\npages = {\n '/': root, # Replace with the root page if you have one\n \"Finance\": finance_md,\n}\n\n# Create a Gui instance with the pages\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n\n # Run the multi-page app\n gui_multi_pages.run(title=\"Finance Dashboard\")\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load inventory data\ninventory_data = pd.read_csv(\"inventory_data.csv\")\n\n# Initialize variables\ncategories = 
list(inventory_data[\"Category\"].unique())\nlocations = list(inventory_data[\"Location\"].unique())\ncategory = categories\nlocation = locations\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{location}|selector|lov={locations}|multiple|label=Select Location|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_items>\n\n units\n|average_quantity>\n\n\n|inventory_table>\n|main_page>\n|>\n\nCode adapted from [Inventory Management](https://github.com/inventory_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/inventory-management-app)\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|region>\n|>\n\n<|{selected_market_analysis}|market_analysis|on_submission_change=on_submission_change_market_analysis|not expanded|>\n\n---------------------------------------\n\n## **Market Predictions**{: .color-primary} and Data Exploration\n\n<|{selected_market_analysis.result.read() if selected_market_analysis and selected_market_analysis.result.read() is not None else default_market_result}|chart|x=Date|y[1]=Average Price|y[2]=Volume|y[3]=Trend Analysis|type[1]=line|title=Real Estate Market Trends|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|market_analysis>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load product inventory data\nproduct_inventory = pd.read_csv(\"product_inventory.csv\")\n\n# Initialize variables\ncategories = list(product_inventory[\"Category\"].unique())\nbrands = list(product_inventory[\"Brand\"].unique())\ncategory = categories\nbrand = brands\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{brand}|selector|lov={brands}|multiple|label=Select Brand|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_products>\n\n\n|average_price>\n\n\n|product_table>\n|main_page>\n|>\n\nCode adapted from [Product Inventory](https://github.com/product_inventory_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/product-inventory-app)\n"} {"text": "# **Country**{: .color-primary} Agricultural Production\n\n<|layout|columns=1 1 1|\n<|{selected_country_agriculture}|selector|lov={selector_country_agriculture}|on_change=on_change_country_agriculture|dropdown|label=Country|>\n\n<|{selected_crop}|toggle|lov={crop_selector}|on_change=update_crop_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Production**{: .color-primary}\n<|{'{:,}'.format(int(agriculture_data.iloc[-1]['Total']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Crop Yield**{: .color-primary}\n<|{'{:,}'.format(int(agriculture_data.iloc[-1]['Yield']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Export Volume**{: .color-primary}\n<|{'{:,}'.format(int(agriculture_data.iloc[-1]['Export']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{agriculture_data}|chart|type=area|x=Year|y[3]=Total|y[2]=Yield|y[1]=Export|layout={layout}|options={options}|title=Agricultural Trends|>\n\n<|{crop_distribution_chart}|chart|type=pie|values=crop_values|labels=crop_labels|title=Crop Distribution|>\n|>\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages related to the e-commerce sector (replace with actual imports)\nfrom pages.ecommerce.ecommerce import ecommerce_md\n\n# Define the pages\npages = {\n '/': root, # Replace with the root page if you have one\n \"E-commerce\": ecommerce_md,\n}\n\n# Create a Gui instance with the pages\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n\n # Run the multi-page app\n gui_multi_pages.run(title=\"E-commerce Dashboard\")\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load employee data\nemployee_data = pd.read_csv(\"employee_data.csv\")\n\n# Initialize variables\ndepartments = list(employee_data[\"Department\"].unique())\npositions = list(employee_data[\"Position\"].unique())\ndepartment = departments\nposition = positions\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{position}|selector|lov={positions}|multiple|label=Select Position|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_employees>\n\n\n|average_salary>\n\n\n|employee_table>\n|main_page>\n|>\n\nCode adapted from [Employee Management](https://github.com/employee_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/employee-management-app)\n\"\"\"\n\ndef filter(department, position):\n df_selection = employee_data[\n employee_data[\"Department\"].isin(department)\n & employee_data[\"Position\"].isin(position)\n ]\n return df_selection\n\ndef on_filter(state):\n state.df_selection = filter(state.department, state.position)\n\nif __name__ == \"__main__\":\n # Initialize dataframe\n df_selection = filter(department, position)\n\n # Run the app\n Gui(page).run()\n"} {"text": "<|{all_users}|table|columns={user_columns}|width='100%'|on_action={on_user_table_click}|style=user_style|>\n<|Add User|button|on_action={open_add_user_dialog}|>\n<|Refresh Users|button|on_action={refresh_user_list}|>\n\n<|{show_dialog_add_user}|dialog|title=Add new user|\n<|{new_user_name}|input|placeholder='Enter user name'|\n<|{new_user_role}|selector|lov={get_all_roles()}|>\n<|Add|button|on_action={add_user}|>\n<|Cancel|button|on_action={close_add_user_dialog}|>\n|>\n\n<|{show_user_details}|pane|\n\n# User Details <|Delete|button|on_action=delete_selected_user|> <|Disable|button|on_action=disable_selected_user|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n## Name\n<|{selected_user.name}|>\n|>\n\n<|part|class_name=card|\n## Role\n<|{selected_user.role}|>\n|>\n\n<|part|class_name=card|\n## ID\n<|{selected_user.id}|>\n|>\n\n<|part|class_name=card|\n## Creation Date\n<|{selected_user.creation_date.strftime(\"%b %d %y %H:%M:%S\")}|>\n|>\n\n<|part|class_name=card|\n## Status\n<|{get_status(selected_user)}|>\n|>\n\n----\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load sales data\nsales_data = pd.read_csv(\"sales_data.csv\")\n\n# Initialize variables\nregions = list(sales_data[\"Region\"].unique())\ncategories = list(sales_data[\"Category\"].unique())\nregion = regions\ncategory = categories\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 
80|gap=30px|\n\n\n<|{category}|selector|lov={categories}|multiple|label=Select Category|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_sales>\n\n\n|average_profit>\n\n\n|sales_dashboard>\n|main_page>\n|>\n\nCode adapted from [Sales Performance Dashboard](https://github.com/sales_performance_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/sales-performance-dashboard-app)\n"} {"text": "<|{all_resources}|table|columns={resource_columns}|width='100%'|on_action={on_resource_select}|style=resource_style|>\n<|Allocate Resource|button|on_action={open_allocate_resource_dialog}|>\n<|Refresh Resources|button|on_action={refresh_resource_list}|>\n\n<|{show_allocate_resource_dialog}|dialog|title=Allocate Resource|\n<|{resource_name}|selector|lov={get_all_resources()}|>\n<|{resource_quantity}|number_input|min=0|placeholder='Quantity'|\n<|Allocate|button|on_action={allocate_resource}|>\n<|Cancel|button|on_action={close_allocate_resource_dialog}|>\n|>\n\n<|{show_resource_details}|pane|\n\n# Resource Details <|Release|button|on_action=release_selected_resource|> <|Update|button|on_action=update_selected_resource|>\n\n<|layout|columns=2|\n<|part|class_name=card|\n## Name\n<|{selected_resource.name}|>\n|>\n\n<|part|class_name=card|\n## Allocated Quantity\n<|{selected_resource.allocated_quantity}|>\n|>\n\n<|part|class_name=card|\n## Total Quantity\n<|{selected_resource.total_quantity}|>\n|>\n\n<|part|class_name=card|\n## Last Allocation Date\n<|{selected_resource.last_allocation_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n----\n|>\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal Education\n\n<|layout|columns=35 65|\nDisplay Mandelbrot Fractal for Educational Purposes\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nCreate a slider to select iterations\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:
\n\nSlider dy_start\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal Education\")\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Economics\"\n mood = \"economicanalysis\"\n style = \"economicspro\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Economics Tweets\n\n This mini-app generates Tweets related to Economics using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n <|layout|columns=1 1 1|gap=30px|class_name=card|\n \n |topic>\n\n \n |mood>\n\n \n |style>\n\n Create a Generate text button\n <|Generate Economics Tweet|button|on_action=generate_text|label=Generate text|>\n\n <|{image}|image|height=400px|>\n |image>\n\n Break line\n
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Traffic Zone
\n<|{traffic_zone_selected}|selector|lov=zone_downtown;zone_suburbs;zone_industrial|dropdown|on_change=on_traffic_zone_change|>\n|>\n\n<|part|class_name=card|\n### Select Time of Day
\n<|{time_of_day_selected}|selector|lov=time_morning;time_afternoon;time_evening|dropdown|on_change=on_time_of_day_change|>\n|>\n\n|>\n\n<|Traffic Data Overview|expandable|expanded=True|\nDisplay traffic_zone_data and time_of_day_data\n<|layout|columns=1 1|\n<|{traffic_zone_data}|table|page_size=5|>\n\n<|{time_of_day_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{traffic_volume_chart}|chart|type=bar|x=Zone|y=Traffic Volume|title=Traffic Volume by Zone|>\n|>\n\n<|part|class_name=card|\n<|{peak_hours_chart}|chart|type=line|x=Time|y=Vehicles|title=Peak Traffic Hours|>\n|>\n|>\n\n
\n### Analyze Traffic Patterns:\n<|{traffic_pattern_analysis}|scenario|on_submission_change=on_traffic_pattern_status_change|expandable=False|expanded=False|>\n\n<|{traffic_pattern_analysis}|scenario_dag|>\n\n
\n### View traffic pattern analysis results:\n<|{traffic_pattern_analysis.results if traffic_pattern_analysis else None}|data_node|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\ntext_data = \"\"\ntext_path = \"\"\n\ndef text_upload(state):\n if state.text_path:\n with open(state.text_path, \"r\") as file:\n state.text_data = file.read()\n\ntext_page = \"\"\"\n<|{text_path}|file_selector|accept=.txt|on_action=text_upload|>\n<|{text_data}|textarea|rows=10|>\n\"\"\"\n\nGui(text_page).run()\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Agriculture Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:
\n\nSlider dy_start\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Agriculture Visualization\")\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|energy_type>\n|>\n\n<|{selected_demand_forecast}|demand_forecast|on_submission_change=on_submission_change_demand_forecast|not expanded|>\n\n---------------------------------------\n\n## **Energy Demand Projections**{: .color-primary} and Data Analysis\n\n<|{selected_demand_forecast.result.read() if selected_demand_forecast and selected_demand_forecast.result.read() is not None else default_demand_result}|chart|x=Date|y[1]=Projected Demand|y[2]=Historical Demand|type[1]=bar|title=Energy Demand Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|demand_forecast>\n|>\n"} {"text": "# **Country**{: .color-primary} Economic Indicators\n\n<|layout|columns=1 1 1|\n<|{selected_country_economy}|selector|lov={selector_country_economy}|on_change=on_change_country_economy|dropdown|label=Country|>\n\n<|{selected_economic_indicator}|toggle|lov={economic_indicator_selector}|on_change=update_economic_indicator|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**GDP**{: .color-primary}\n<|{'${:,.2f}'.format(economy_data.iloc[-1]['GDP'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Inflation Rate**{: .color-primary}\n<|{'{:.2f}%'.format(economy_data.iloc[-1]['Inflation Rate'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Unemployment Rate**{: .color-primary}\n<|{'{:.2f}%'.format(economy_data.iloc[-1]['Unemployment Rate'])}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{economy_data}|chart|type=line|x=Year|y[3]=GDP|y[2]=Inflation Rate|y[1]=Unemployment Rate|>\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Energy Source
\n<|{energy_source_selected}|selector|lov=source_solar;source_wind;source_hydro|dropdown|on_change=on_energy_source_change|>\n|>\n\n<|part|class_name=card|\n### Select Region
\n<|{region_selected}|selector|lov=region_north;region_south;region_east;region_west|dropdown|on_change=on_region_change|>\n|>\n\n|>\n\n<|Energy Data Overview|expandable|expanded=True|\nDisplay energy_data and region_data\n<|layout|columns=1 1|\n<|{energy_data}|table|page_size=5|>\n\n<|{region_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{consumption_chart}|chart|type=bar|x=Month|y=Consumption|title=Energy Consumption by Source|>\n|>\n\n<|part|class_name=card|\n<|{region_comparison_chart}|chart|type=line|x=Month|y=Region Consumption|title=Regional Energy Consumption|>\n|>\n|>\n\n
\n### Analyze Energy Trends:\n<|{energy_trend_analysis}|scenario|on_submission_change=on_energy_trend_status_change|expandable=False|expanded=False|>\n\n<|{energy_trend_analysis}|scenario_dag|>\n\n
\n### View energy trend analysis results:\n<|{energy_trend_analysis.results if energy_trend_analysis else None}|data_node|>\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\nfrom pages.country.country import country_md\nfrom pages.world.world import world_md\nfrom pages.map.map import map_md\nfrom pages.predictions.predictions import predictions_md, selected_scenario\nfrom pages.root import root, selected_country, selector_country\n\nfrom config.config import Config\n\n# Define the pages\npages = {\n '/': root,\n \"Country\": country_md,\n \"World\": world_md,\n \"Map\": map_md,\n \"Predictions\": predictions_md\n}\n\n# Create a Gui instance with the pages\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n\n # Run the multi-page app\n gui_multi_pages.run(title=\"Covid Dashboard\")\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Research Area
\n<|{research_area_selected}|selector|lov=area_physics;area_chemistry;area_biology|dropdown|on_change=on_research_area_change|>\n|>\n\n<|part|class_name=card|\n### Select Year
\n<|{publication_year_selected}|selector|lov=year_2020;year_2021;year_2022|dropdown|on_change=on_publication_year_change|>\n|>\n\n|>\n\n<|Research Publications Overview|expandable|expanded=True|\nDisplay research_data and year_data\n<|layout|columns=1 1|\n<|{research_data}|table|page_size=5|>\n\n<|{year_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{publication_count_chart}|chart|type=bar|x=Research Area|y=Publications|title=Publications Count|>\n|>\n\n<|part|class_name=card|\n<|{citation_chart}|chart|type=line|x=Year|y=Citations|title=Citation Trends|>\n|>\n|>\n\n
\n### Analyze Research Impact:\n<|{research_impact_analysis}|scenario|on_submission_change=on_research_impact_status_change|expandable=False|expanded=False|>\n\n<|{research_impact_analysis}|scenario_dag|>\n\n
\n### View research impact results:\n<|{research_impact_analysis.results if research_impact_analysis else None}|data_node|>\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Department
\n<|{department_selected}|selector|lov=dept_sales;dept_marketing;dept_technical|dropdown|on_change=on_department_change|>\n|>\n\n<|part|class_name=card|\n### Select Time Period
\n<|{time_period_selected}|selector|lov=period_this_month;period_last_month;period_this_quarter|dropdown|on_change=on_time_period_change|>\n|>\n\n|>\n\n<|Employee Performance Overview|expandable|expanded=True|\nDisplay department_data and time_period_data\n<|layout|columns=1 1|\n<|{department_data}|table|page_size=5|>\n\n<|{time_period_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{performance_chart}|chart|type=bar|x=Employee|y=Performance Score|title=Department Performance|>\n|>\n\n<|part|class_name=card|\n<|{attendance_chart}|chart|type=line|options={attendance_options}|layout={attendance_layout}|>\n|>\n|>\n\n
\n### Run Employee Analysis:\n<|{employee_analysis}|scenario|on_submission_change=on_employee_analysis_status_change|expandable=False|expanded=False|>\n\n<|{employee_analysis}|scenario_dag|>\n\n
\n### View the employee analysis results:\n<|{employee_analysis.results if employee_analysis else None}|data_node|>\n"} {"text": "from taipy.gui import Gui, notify, state\nimport random\nimport re\nimport logging\nimport taipy\n\n# Import OpenAI module\nimport oai\n\n# Configure logger\nlogging.basicConfig(format=\"\\n%(asctime)s\\n%(message)s\", level=logging.INFO, force=True)\n\n\n# Define functions\ndef error_prompt_flagged(state, prompt):\n \"\"\"Notify user that a prompt has been flagged.\"\"\"\n notify(state, \"error\", \"Prompt flagged as inappropriate.\")\n logging.info(f\"Prompt flagged as inappropriate: {prompt}\")\n\n\ndef error_too_many_requests(state):\n \"\"\"Notify user that too many requests have been made.\"\"\"\n notify(\n state,\n \"error\",\n \"Too many requests. Please wait a few seconds before generating another text or image.\",\n )\n logging.info(f\"Session request limit reached: {state.n_requests}\")\n state.n_requests = 1\n\n\ndef generate_text(state):\n \"\"\"Generate Tweet text.\"\"\"\n state.tweet = \"\"\n state.image = None\n\n # Check the number of requests done by the user\n if state.n_requests >= 5:\n error_too_many_requests(state)\n return\n\n # Check if the user has put a topic\n if state.topic == \"\":\n notify(state, \"error\", \"Please enter a topic\")\n return\n\n # Create the prompt and add a style or not\n if state.style == \"\":\n state.prompt = (\n f\"Write a {state.mood}Tweet about {state.topic} in less than 120 characters \"\n f\"and with the style of {state.style}:\\n\\n\\n\\n\"\n )\n else:\n state.prompt = f\"Write a {state.mood}Tweet about {state.topic} in less than 120 characters:\\n\\n\"\n\n # openai configured and check if text is flagged\n openai = oai.Openai()\n flagged = openai.moderate(state.prompt)\n\n if flagged:\n error_prompt_flagged(state, f\"Prompt: {state.prompt}\\n\")\n return\n else:\n # Generate the tweet\n state.n_requests += 1\n state.tweet = openai.complete(state.prompt).strip().replace('\"', \"\")\n\n # Notify the user in console and in the GUI\n logging.info(\n f\"Topic: {state.prompt}{state.mood}{state.style}\\n\" f\"Tweet: {state.tweet}\"\n )\n notify(state, \"success\", \"Tweet created!\")\n\n\ndef generate_image(state):\n \"\"\"Generate Tweet image.\"\"\"\n notify(state, \"info\", \"Generating image...\")\n\n # Check the number of requests done by the user\n if state.n_requests >= 5:\n error_too_many_requests(state)\n return\n\n state.image = None\n\n # Creates the prompt\n prompt_wo_hashtags = re.sub(\"#[A-Za-z0-9_]+\", \"\", state.prompt)\n processing_prompt = (\n \"Create a detailed but brief description of an image that captures \"\n f\"the essence of the following text:\\n{prompt_wo_hashtags}\\n\\n\"\n )\n\n # Openai configured and check if text is flagged\n openai = oai.Openai()\n flagged = openai.moderate(processing_prompt)\n\n if flagged:\n error_prompt_flagged(state, processing_prompt)\n return\n else:\n state.n_requests += 1\n # Generate the prompt that will create the image\n processed_prompt = (\n openai.complete(prompt=processing_prompt, temperature=0.5, max_tokens=40)\n .strip()\n .replace('\"', \"\")\n .split(\".\")[0]\n + \".\"\n )\n\n # Generate the image\n state.image = openai.image(processed_prompt)\n\n # Notify the user in console and in the GUI\n logging.info(f\"Tweet: {state.prompt}\\nImage prompt: {processed_prompt}\")\n notify(state, \"success\", f\"Image created!\")\n\n\ndef feeling_lucky(state):\n \"\"\"Generate a feeling-lucky tweet.\"\"\"\n with open(\"moods.txt\") as f:\n sample_moods = 
f.read().splitlines()\n state.topic = \"an interesting topic\"\n state.mood = random.choice(sample_moods)\n state.style = \"\"\n generate_text(state)\n\n\n# Variables\ntweet = \"\"\nprompt = \"\"\nn_requests = 0\n\ntopic = \"AI\"\nmood = \"inspirational\"\nstyle = \"elonmusk\"\n\nimage = None\n\n\n# Called whenever there is a problem\ndef on_exception(state, function_name: str, ex: Exception):\n logging.error(f\"Problem {ex} \\nin {function_name}\")\n notify(state, \"error\", f\"Problem {ex} \\nin {function_name}\")\n\n\n# Markdown for the entire page\npage = \"\"\"\n<|container|\n# **Generate**{: .color-primary} Tweets\n\nThis mini-app generates Tweets using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n<|layout|columns=1 1 1|gap=30px|class_name=card|\n\n|topic>\n\n\n|mood>\n\n\n|style>\n\nCreate a Generate text button\n<|Generate text|button|on_action=generate_text|label=Generate text|>\n\n<|Feeling lucky|button|on_action=feeling_lucky|label=Feeling Lucky|>\n|>\n\n
\n\n---\n\n
\n\n### Generated **Tweet**{: .color-primary}\n\nCreate a text input for the tweet\n<|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Scientific Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:
\n\nSlider dy_start\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Scientific Visualization\")\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages related to the automotive sector (replace with actual imports)\nfrom pages.automotive.automotive import automotive_md\n\n# Define the pages\npages = {\n '/': root, # Replace with the root page if you have one\n \"Automotive\": automotive_md,\n}\n\n# Create a Gui instance with the pages\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n\n # Run the multi-page app\n gui_multi_pages.run(title=\"Automotive Dashboard\")\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\nimport json\n\njson_data = []\njson_path = \"\"\n\ndef json_upload(state):\n with open(state.json_path, 'r') as file:\n state.json_data = pd.read_json(file)\n\njson_page = \"\"\"\n<|{json_path}|file_selector|accept=.json|on_action=json_upload|>\n<|{json_data}|table|>\n\"\"\"\n\nGui(json_page).run()\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Primary City for Weather Data
\n<|{primary_city_selected}|selector|lov=city_new_york;city_london;city_tokyo|dropdown|on_change=on_primary_city_change|>\n|>\n\n<|part|class_name=card|\n### Select City to Compare
\n<|{compare_city_selected}|selector|lov=city_new_york;city_london;city_tokyo|dropdown|on_change=on_compare_city_change|>\n|>\n\n|>\n\n<|Weather Data Overview|expandable|expanded=True|\nDisplay primary_city_data and compare_city_data\n<|layout|columns=1 1|\n<|{primary_city_data}|table|page_size=5|>\n\n<|{compare_city_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{temperature_chart}|chart|type=line|x=Day|y[1]=Primary City Temp|y[2]=Compare City Temp|title=Temperature Comparison|>\n|>\n\n<|part|class_name=card|\n<|{humidity_chart}|chart|type=bar|x=Day|y[1]=Primary City Humidity|y[2]=Compare City Humidity|title=Humidity Comparison|>\n|>\n|>\n\n
\n### Analyze Weather Patterns:\n<|{weather_pattern_analysis}|scenario|on_submission_change=on_weather_pattern_status_change|expandable=False|expanded=False|>\n\n<|{weather_pattern_analysis}|scenario_dag|>\n\n
\n### View weather analysis results:\n<|{weather_pattern_analysis.results if weather_pattern_analysis else None}|data_node|>\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages for the logistics sector\nfrom pages.logistics.dashboard import dashboard_md\nfrom pages.logistics.orders import orders_md\nfrom pages.logistics.inventory import inventory_md\nfrom pages.logistics.shipping import shipping_md\n\n# Define your pages dictionary\npages = {\n '/dashboard': dashboard_md,\n '/orders': orders_md,\n '/inventory': inventory_md,\n '/shipping': shipping_md\n}\n\n# Create a Gui with your pages\ngui_logistics = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n # Run the multi-page app\n gui_logistics.run(title=\"Logistics Dashboard\")\n"} {"text": "<|{all_projects}|table|columns={project_columns}|width='100%'|on_action={on_project_table_click}|style=project_style|>\n<|Create Project|button|on_action={open_create_project_dialog}|>\n<|Refresh Projects|button|on_action={refresh_project_list}|>\n\n<|{show_dialog_create_project}|dialog|title=Create new project|\n<|{project_name}|input|placeholder='Enter project name'|\n<|Create|button|on_action={create_project}|>\n<|Cancel|button|on_action={close_create_project_dialog}|>\n|>\n\n<|{show_project_details}|pane|\n\n# Project Details <|Delete|button|on_action=delete_selected_project|> <|Archive|button|on_action=archive_selected_project|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n## Project Name\n<|{selected_project.name}|>\n|>\n\n<|part|class_name=card|\n## Project Manager\n<|{selected_project.manager}|>\n|>\n\n<|part|class_name=card|\n## ID\n<|{selected_project.id}|>\n|>\n\n<|part|class_name=card|\n## Start Date\n<|{selected_project.start_date.strftime(\"%b %d %y\")}|>\n|>\n\n<|part|class_name=card|\n## Status\n<|{get_project_status(selected_project)}|>\n|>\n\n----\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load employee time tracking data\ntime_tracking_data = pd.read_csv(\"time_tracking_data.csv\")\n\n# Initialize variables\nemployees = list(time_tracking_data[\"Employee\"].unique())\nprojects = list(time_tracking_data[\"Project\"].unique())\nemployee = employees\nproject = projects\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{project}|selector|lov={projects}|multiple|label=Select Project|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_entries>\n\n hours\n|average_hours>\n\n\n|time_tracking_table>\n|main_page>\n|>\n\nCode adapted from [Employee Time Tracking](https://github.com/time_tracking_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/employee-time-tracking-app)\n"} {"text": "# **Worldwide**{: .color-primary} Education Statistics\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Literacy Rate**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_education['Literacy Rate']))}|text|class_name=h2|>\n|>\n\n<|card|\n**School Enrollment**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_education['School Enrollment']))}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Education Spending**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_education['Education Spending']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_education_metric}|toggle|lov={education_metric_selector}|>\n\n<|part|render={selected_education_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_education_pie_absolute}|chart|type=pie|labels=Country|values=Literacy Rate|title=Global Literacy Rate Distribution|>\n\n<|{data_world_education_evolution_absolute}|chart|properties={data_world_education_evolution_properties}|title=Education Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_education_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_education_pie_relative}|chart|type=pie|labels=Country|values=School Enrollment|>\n\n<|{data_world_education_evolution_relative}|chart|properties={data_world_education_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|route>\n|>\n\n<|{selected_transport_scenario}|transport_scenario|on_submission_change=on_transport_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Transport Efficiency Metrics**{: .color-primary} and Trends\n\n<|{selected_transport_scenario.result.read() if selected_transport_scenario and selected_transport_scenario.result.read() is not None else default_transport_result}|chart|x=Time|y[1]=Passenger Count|y[2]=Average Delay|type[1]=bar|title=Route Efficiency Analysis|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|transport_scenario>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\naudio_data = None\naudio_path = \"\"\n\ndef audio_upload(state):\n if state.audio_path:\n audio_data = state.audio_path # Directly use the path for audio elements\n\naudio_page = \"\"\"\n<|{audio_path}|file_selector|accept=audio/*|on_action=audio_upload|>\n<|{audio_data}|audio|controls=True|>\n\"\"\"\n\nGui(audio_page).run()\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport io\n\ncsv_data = None\ncsv_path = \"\"\n\ndef csv_upload_analyze(state):\n if state.csv_path:\n state.csv_data = pd.read_csv(state.csv_path)\n plt.figure()\n state.csv_data.hist()\n plt.xlabel(\"Values\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Data Distribution\")\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n state.plot_image = buf.read()\n\ncsv_analyze_page = \"\"\"\n<|{csv_path}|file_selector|accept=.csv|on_action=csv_upload_analyze|>\n<|{csv_data}|table|>\n<|{plot_image}|image|>\n\"\"\"\n\nGui(csv_analyze_page).run()\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load project task data\ntask_data = pd.read_csv(\"project_task_data.csv\")\n\n# Initialize variables\nprojects = list(task_data[\"Project\"].unique())\npriorities = list(task_data[\"Priority\"].unique())\nproject = projects\npriority = priorities\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{priority}|selector|lov={priorities}|multiple|label=Select Priority|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_tasks>\n\n days\n|average_completion>\n\n\n|task_management_table>\n|main_page>\n|>\n\nCode adapted from [Project Task Management](https://github.com/task_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/project-task-management-app)\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages for the homecare sector\nfrom pages.homecare.home import home_md\nfrom pages.homecare.services import services_md\nfrom 
pages.homecare.appointments import appointments_md\nfrom pages.homecare.contacts import contacts_md\n\n# Define your pages dictionary\npages = {\n '/home': home_md,\n '/services': services_md,\n '/appointments': appointments_md,\n '/contacts': contacts_md\n}\n\n# Create a Gui with your pages\ngui_homecare = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n # Run the multi-page app\n gui_homecare.run(title=\"Homecare Dashboard\")\n"} {"text": "# **Global**{: .color-primary} Technology Adoption\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Internet Users**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_tech['Internet Users']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Smartphone Penetration**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_tech['Smartphone Penetration']))}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**AI Adoption**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_tech['AI Adoption']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_tech_metric}|toggle|lov={tech_metric_selector}|>\n\n<|part|render={selected_tech_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_tech_pie_absolute}|chart|type=pie|labels=Country|values=Internet Users|title=Global Internet Usage|>\n\n<|{data_world_tech_evolution_absolute}|chart|properties={data_world_tech_evolution_properties}|title=Technology Evolution Worldwide|>\n|>\n|>\n\n<|part|render={selected_tech_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_tech_pie_relative}|chart|type=pie|labels=Country|values=Smartphone Penetration|>\n\n<|{data_world_tech_evolution_relative}|chart|properties={data_world_tech_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|product>\n|>\n\n<|{selected_supply_chain_scenario}|supply_chain_scenario|on_submission_change=on_supply_chain_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Supply Chain Dynamics**{: .color-primary} and Forecast\n\n<|{selected_supply_chain_scenario.result.read() if selected_supply_chain_scenario and selected_supply_chain_scenario.result.read() is not None else default_supply_chain_result}|chart|x=Date|y[1]=Demand|y[2]=Supply|type[1]=line|title=Supply vs. Demand Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|supply_chain_scenario>\n|>\n"} {"text": "# **Global**{: .color-primary} Tourism Statistics\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**International Tourist Arrivals**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_tourism['Tourist Arrivals']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Tourism Revenue**{: .color-primary}\n<|{'${:,.2f}'.format(np.sum(data_world_tourism['Revenue']))}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Hotel Occupancy Rates**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_tourism['Occupancy']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_tourism_metric}|toggle|lov={tourism_metric_selector}|>\n\n<|part|render={selected_tourism_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_tourism_pie_absolute}|chart|type=pie|labels=Country|values=Tourist Arrivals|title=Global Tourist Arrivals|>\n\n<|{data_world_tourism_evolution_absolute}|chart|properties={data_world_tourism_evolution_properties}|title=Tourism Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_tourism_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_tourism_pie_relative}|chart|type=pie|labels=Country|values=Occupancy|>\n\n<|{data_world_tourism_evolution_relative}|chart|properties={data_world_tourism_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "# **Global**{: .color-primary} E-Commerce Trends\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Online Sales Volume**{: .color-primary}\n<|{'${:,.2f}'.format(np.sum(data_world_ecommerce['Sales Volume']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Active Online Users**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_ecommerce['Active Users']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Average Purchase Value**{: .color-primary}\n<|{'${:,.2f}'.format(np.average(data_world_ecommerce['Purchase Value']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_ecommerce_metric}|toggle|lov={ecommerce_metric_selector}|>\n\n<|part|render={selected_ecommerce_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_ecommerce_pie_absolute}|chart|type=pie|labels=Country|values=Sales Volume|title=Global Online Sales Volume|>\n\n<|{data_world_ecommerce_evolution_absolute}|chart|properties={data_world_ecommerce_evolution_properties}|title=E-Commerce Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_ecommerce_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_ecommerce_pie_relative}|chart|type=pie|labels=Country|values=Active Users|>\n\n<|{data_world_ecommerce_evolution_relative}|chart|properties={data_world_ecommerce_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "# **Global**{: .color-primary} Automotive Industry\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Vehicle Production**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_automotive['Vehicle Production']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Electric Vehicle Sales**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_automotive['EV Sales']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Market Share**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_automotive['Market Share']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_automotive_metric}|toggle|lov={automotive_metric_selector}|>\n\n<|part|render={selected_automotive_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_automotive_pie_absolute}|chart|type=pie|labels=Country|values=Vehicle Production|title=Global Vehicle Production|>\n\n<|{data_world_automotive_evolution_absolute}|chart|properties={data_world_automotive_evolution_properties}|title=Automotive Industry Trends|>\n|>\n|>\n\n<|part|render={selected_automotive_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_automotive_pie_relative}|chart|type=pie|labels=Country|values=EV Sales|>\n\n<|{data_world_automotive_evolution_relative}|chart|properties={data_world_automotive_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "from taipy import Gui\n\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Generator\n\n<|layout|columns=35 65|\nDisplay image from path\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nCreate a slider to select iterations\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:
\n\nSlider dy_start\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Generator\")\n"} {"text": "<|{all_appointments}|table|columns={appointment_columns}|width='100%'|on_action={on_appointment_select}|style=appointment_style|>\n<|Book Appointment|button|on_action={open_book_appointment_dialog}|>\n<|Refresh Appointments|button|on_action={refresh_appointment_list}|>\n\n<|{show_book_appointment_dialog}|dialog|title=Book New Appointment|\n<|{patient_name}|input|placeholder='Patient Name'|\n<|{appointment_date}|datetime_picker|>\n<|{doctor_selector}|selector|lov={get_all_doctors()}|>\n<|Book|button|on_action={book_appointment}|>\n<|Cancel|button|on_action={close_book_appointment_dialog}|>\n|>\n\n<|{show_appointment_details}|pane|\n\n# Appointment Details <|Edit|button|on_action=edit_selected_appointment|> <|Cancel|button|on_action=cancel_selected_appointment|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Patient Name\n<|{selected_appointment.patient_name}|>\n|>\n\n<|part|class_name=card|\n## Date and Time\n<|{selected_appointment.date.strftime(\"%b %d, %Y at %H:%M\")}|>\n|>\n\n<|part|class_name=card|\n## Doctor\n<|{selected_appointment.doctor}|>\n|>\n\n<|part|class_name=card|\n## Status\n<|{get_appointment_status(selected_appointment)}|>\n|>\n\n----\n|>\n"} {"text": "# **Global**{: .color-primary} Food Security and Agriculture\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Cereal Production**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_agri['Cereal Production']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Agricultural Land**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_agri['Agricultural Land']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Undernourishment Rate**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_agri['Undernourishment Rate']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_agri_metric}|toggle|lov={agri_metric_selector}|>\n\n<|part|render={selected_agri_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_agri_pie_absolute}|chart|type=pie|labels=Country|values=Cereal Production|title=Global Cereal Production|>\n\n<|{data_world_agri_evolution_absolute}|chart|properties={data_world_agri_evolution_properties}|title=Agricultural Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_agri_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_agri_pie_relative}|chart|type=pie|labels=Country|values=Undernourishment Rate|>\n\n<|{data_world_agri_evolution_relative}|chart|properties={data_world_agri_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"E-commerce\"\n mood = \"shopping\"\n style = \"ecommerceexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} E-commerce Tweets\n\n This mini-app generates Tweets related to E-commerce using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n    <|layout|columns=1 1 1|gap=30px|class_name=card|\n    <topic|\n    <|{topic}|input|label=Topic|>\n    |topic>\n\n    <mood|\n    <|{mood}|input|label=Mood|>\n    |mood>\n\n    <style|\n    <|{style}|input|label=Style|>\n    |style>\n    |>\n\n    <|Generate E-commerce Tweet|button|on_action=generate_text|label=Generate text|>\n\n    <image|\n    <|{image}|image|height=400px|>\n    |image>\n    
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages related to education and healthcare (replace with actual imports)\nfrom pages.education.education import education_md\nfrom pages.healthcare.healthcare import healthcare_md\n\n# Define the pages\npages = {\n '/': root, # Replace with the root page if you have one\n \"Education\": education_md,\n \"Healthcare\": healthcare_md,\n}\n\n# Create a Gui instance with the pages\ngui_multi_pages = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n\n # Run the multi-page app\n gui_multi_pages.run(title=\"Education & Healthcare Dashboard\")\n"} {"text": "<|{all_properties}|table|columns={property_columns}|width='100%'|on_action={on_property_select}|style=property_style|>\n<|Add Property|button|on_action={open_add_property_dialog}|>\n<|Refresh Properties|button|on_action={refresh_property_list}|>\n\n<|{show_add_property_dialog}|dialog|title=Add New Property|\n<|{property_address}|input|placeholder='Address'|\n<|{property_price}|number_input|placeholder='Price'|\n<|Add Property|button|on_action={add_property}|>\n<|Cancel|button|on_action={close_add_property_dialog}|>\n|>\n\n<|{show_property_details}|pane|\n\n# Property Details <|Edit|button|on_action=edit_selected_property|> <|Remove|button|on_action=remove_selected_property|>\n\n<|layout|columns=2|\n<|part|class_name=card|\n## Address\n<|{selected_property.address}|>\n|>\n\n<|part|class_name=card|\n## Price\n<|{selected_property.price}|>\n|>\n\n<|part|class_name=card|\n## Listed Date\n<|{selected_property.listed_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n----\n|>\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages for the education sector\nfrom pages.education.home import home_md\nfrom pages.education.courses import courses_md\nfrom pages.education.students import students_md\nfrom pages.education.teachers import teachers_md\n\n# Define your pages dictionary\npages = {\n '/home': home_md,\n '/courses': courses_md,\n '/students': students_md,\n '/teachers': teachers_md\n}\n\n# Create a Gui with your pages\ngui_education = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n # Run the multi-page app\n gui_education.run(title=\"Education Dashboard\")\n"} {"text": "# **Country**{: .color-primary} Education Statistics\n\n<|layout|columns=1 1 1|\n<|{selected_country_education}|selector|lov={selector_country_education}|on_change=on_change_country_education|dropdown|label=Country|>\n\n<|{selected_education_level}|toggle|lov={education_level_selector}|on_change=update_education_level_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Literacy Rate**{: .color-primary}\n<|{'{:.2f}%'.format(education_data.iloc[-1]['Literacy Rate'])}|text|class_name=h2|>\n|>\n\n<|card|\n**School Enrollment**{: .color-primary}\n<|{'{:.2f}%'.format(education_data.iloc[-1]['School Enrollment'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Average Education Years**{: .color-primary}\n<|{'{:.1f}'.format(education_data.iloc[-1]['Average Education Years'])}|text|class_name=h2|>\n|>\n|>\n\n
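The country selector at the top of this page is wired to an `on_change_country_education` callback that is not shown. A minimal sketch under the assumption that `education_data` is filtered from a module-level DataFrame (the `all_education_data` name and its loading are hypothetical, not taken from these snippets):

```python
import pandas as pd

# Hypothetical source table; a real app would load this from a file or database.
all_education_data = pd.DataFrame(
    columns=["Country", "Year", "Literacy Rate", "School Enrollment", "Average Education Years"]
)

def on_change_country_education(state, var_name, value):
    # Taipy on_change callbacks receive the state, the changed variable's name, and its new value.
    state.education_data = all_education_data[all_education_data["Country"] == value]
```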
\n\n<|layout|columns=2 1|\n<|{education_data}|chart|type=bar|x=Year|y[3]=Literacy Rate|y[2]=School Enrollment|y[1]=Average Education Years|layout={layout}|options={options}|title=Education Progress|>\n\n<|{education_level_chart}|chart|type=pie|values=education_level_values|labels=education_level_labels|title=Education Level Distribution|>\n|>\n"} {"text": "# **Worldwide**{: .color-primary} Renewable Energy Usage\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Solar Energy Production**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy['Solar Energy']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Wind Energy Production**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy['Wind Energy']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Hydropower Energy Production**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy['Hydropower']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_energy_metric}|toggle|lov={energy_metric_selector}|>\n\n<|part|render={selected_energy_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_energy_pie_absolute}|chart|type=pie|labels=Country|values=Solar Energy|title=Global Solar Energy Production|>\n\n<|{data_world_energy_evolution_absolute}|chart|properties={data_world_energy_evolution_properties}|title=Renewable Energy Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_energy_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_energy_pie_relative}|chart|type=pie|labels=Country|values=Wind Energy|>\n\n<|{data_world_energy_evolution_relative}|chart|properties={data_world_energy_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\nexcel_data = []\nexcel_path = \"\"\n\ndef excel_upload(state):\n state.excel_data = pd.read_excel(state.excel_path)\n\nexcel_page = \"\"\"\n<|{excel_path}|file_selector|accept=.xlsx|on_action=excel_upload|>\n<|{excel_data}|table|>\n\"\"\"\n\nGui(excel_page).run()\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages for the retail sector\nfrom pages.retail.home import home_md\nfrom pages.retail.products import products_md\nfrom pages.retail.customers import customers_md\nfrom pages.retail.sales import sales_md\n\n# Define your pages dictionary\npages = {\n '/home': home_md,\n '/products': products_md,\n '/customers': customers_md,\n '/sales': sales_md\n}\n\n# Create a Gui with your pages\ngui_retail = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n # Run the multi-page app\n gui_retail.run(title=\"Retail Dashboard\")\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load customer feedback data\nfeedback_data = pd.read_csv(\"customer_feedback_data.csv\")\n\n# Initialize variables\nproducts = list(feedback_data[\"Product\"].unique())\nsentiments = list(feedback_data[\"Sentiment\"].unique())\nproduct = products\nsentiment = sentiments\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{sentiment}|selector|lov={sentiments}|multiple|label=Select Sentiment|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_feedback>\n\n out of 5\n|average_rating>\n\n\n|feedback_table>\n|main_page>\n|>\n\nCode adapted from [Customer Feedback Analysis](https://github.com/feedback_analysis_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/customer-feedback-analysis-app)\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|market>\n|>\n\n<|{selected_forecast}|forecast|on_submission_change=on_submission_change_forecast|not expanded|>\n\n---------------------------------------\n\n## **Market Predictions**{: .color-primary} and Data Explorer\n\n<|{selected_forecast.result.read() if selected_forecast and selected_forecast.result.read() is not None else default_market_result}|chart|x=Date|y[1]=Market Value|y[2]=Trend Prediction|type[1]=line|title=Market Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|forecast>\n|>\n"} {"text": "<|{all_classes}|table|columns={class_columns}|width='100%'|on_action={on_class_select}|style=class_style|>\n<|Schedule Class|button|on_action={open_schedule_class_dialog}|>\n<|Refresh Classes|button|on_action={refresh_class_list}|>\n\n<|{show_schedule_class_dialog}|dialog|title=Schedule New Class|\n<|{class_subject}|input|placeholder='Class 
Subject'|\n<|{class_date}|date_picker|>\n<|Schedule|button|on_action={schedule_class}|>\n<|Cancel|button|on_action={close_schedule_class_dialog}|>\n|>\n\n<|{show_class_details}|pane|\n\n# Class Details <|Edit|button|on_action=edit_selected_class|> <|Cancel|button|on_action=cancel_selected_class|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Subject\n<|{selected_class.subject}|>\n|>\n\n<|part|class_name=card|\n## Date\n<|{selected_class.date.strftime(\"%b %d, %Y at %H:%M\")}|>\n|>\n\n<|part|class_name=card|\n## Instructor\n<|{selected_class.instructor}|>\n|>\n\n<|part|class_name=card|\n## Enrollment\n<|{selected_class.enrollment}|number_input|disabled=True|>\n|>\n\n----\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Healthcare\"\n mood = \"medical\"\n style = \"healthexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Healthcare Tweets\n\n This mini-app generates Tweets related to Healthcare using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n    <|layout|columns=1 1 1|gap=30px|class_name=card|\n    <topic|\n    <|{topic}|input|label=Topic|>\n    |topic>\n\n    <mood|\n    <|{mood}|input|label=Mood|>\n    |mood>\n\n    <style|\n    <|{style}|input|label=Style|>\n    |style>\n    |>\n\n    <|Generate Healthcare Tweet|button|on_action=generate_text|label=Generate text|>\n\n    <image|\n    <|{image}|image|height=400px|>\n    |image>\n    
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\nvideo_data = None\nvideo_path = \"\"\n\ndef video_upload(state):\n if state.video_path:\n video_data = state.video_path # Directly use the path for video elements\n\nvideo_page = \"\"\"\n<|{video_path}|file_selector|accept=video/*|on_action=video_upload|>\n<|{video_data}|video|controls=True|>\n\"\"\"\n\nGui(video_page).run()\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|region>\n|>\n\n<|{selected_environmental_scenario}|environmental_scenario|on_submission_change=on_environmental_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Environmental Metrics**{: .color-primary} and Trends\n\n<|{selected_environmental_scenario.result.read() if selected_environmental_scenario and selected_environmental_scenario.result.read() is not None else default_environmental_result}|chart|x=Date|y[1]=Air Quality Index|y[2]=Water Purity Level|type[1]=line|title=Environmental Conditions|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|environmental_scenario>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Healthcare\"\n mood = \"healthtech\"\n style = \"healthcarepro\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Healthcare Tweets\n\n This mini-app generates Tweets related to Healthcare using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n    <|layout|columns=1 1 1|gap=30px|class_name=card|\n    <topic|\n    <|{topic}|input|label=Topic|>\n    |topic>\n\n    <mood|\n    <|{mood}|input|label=Mood|>\n    |mood>\n\n    <style|\n    <|{style}|input|label=Style|>\n    |style>\n    |>\n\n    <|Generate Healthcare Tweet|button|on_action=generate_text|label=Generate text|>\n\n    <image|\n    <|{image}|image|height=400px|>\n    |image>\n    
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport io\n\ncsv_data = pd.DataFrame()\ncsv_path = \"\"\n\ndef csv_upload_plot(state):\n if state.csv_path:\n state.csv_data = pd.read_csv(state.csv_path)\n plt.figure()\n state.csv_data.plot(kind='line')\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n state.plot_image = buf.read()\n\ncsv_plot_page = \"\"\"\n<|{csv_path}|file_selector|accept=.csv|on_action=csv_upload_plot|>\n<|{plot_image}|image|>\n\"\"\"\n\nGui(csv_plot_page).run()\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|location>\n|>\n\n<|{selected_weather_forecast}|weather_forecast|on_submission_change=on_submission_change_weather|not expanded|>\n\n---------------------------------------\n\n## **Weather Predictions**{: .color-primary} and Data Analysis\n\n<|{selected_weather_forecast.result.read() if selected_weather_forecast and selected_weather_forecast.result.read() is not None else default_weather_result}|chart|x=Date|y[1]=Temperature|y[2]=Humidity|y[3]=Precipitation|type[1]=line|title=Weather Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|weather_forecast>\n|>\n"} {"text": "<|{all_books}|table|columns={book_columns}|width='100%'|on_action={on_book_select}|style=book_style|>\n<|Add Book|button|on_action={open_add_book_dialog}|>\n<|Refresh Books|button|on_action={refresh_book_list}|>\n\n<|{show_add_book_dialog}|dialog|title=Add New Book|\n<|{book_title}|input|placeholder='Title'|\n<|{book_author}|input|placeholder='Author'|\n<|{book_genre}|selector|lov={get_all_genres()}|>\n<|Add Book|button|on_action={add_book}|>\n<|Cancel|button|on_action={close_add_book_dialog}|>\n|>\n\n<|{show_book_details}|pane|\n\n# Book Details <|Edit|button|on_action=edit_selected_book|> <|Remove|button|on_action=remove_selected_book|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Title\n<|{selected_book.title}|>\n|>\n\n<|part|class_name=card|\n## Author\n<|{selected_book.author}|>\n|>\n\n<|part|class_name=card|\n## Genre\n<|{selected_book.genre}|>\n|>\n\n<|part|class_name=card|\n## ISBN\n<|{selected_book.isbn}|>\n|>\n\n----\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Transportation\"\n mood = \"travel\"\n style = \"transitexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Transportation Tweets\n\n This mini-app generates Tweets related to Transportation using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n    <|layout|columns=1 1 1|gap=30px|class_name=card|\n    <topic|\n    <|{topic}|input|label=Topic|>\n    |topic>\n\n    <mood|\n    <|{mood}|input|label=Mood|>\n    |mood>\n\n    <style|\n    <|{style}|input|label=Style|>\n    |style>\n    |>\n\n    <|Generate Transportation Tweet|button|on_action=generate_text|label=Generate text|>\n\n    <image|\n    <|{image}|image|height=400px|>\n    |image>\n    
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "# **Worldwide**{: .color-primary} Energy Consumption\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Energy Consumption**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy_consumption['Total Energy']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Renewable Energy Consumption**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy_consumption['Renewable Energy']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Fossil Fuel Consumption**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_energy_consumption['Fossil Fuels']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_energy_consumption_metric}|toggle|lov={energy_consumption_metric_selector}|>\n\n<|part|render={selected_energy_consumption_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_energy_consumption_pie_absolute}|chart|type=pie|labels=Country|values=Total Energy|title=Global Energy Consumption|>\n\n<|{data_world_energy_consumption_evolution_absolute}|chart|properties={data_world_energy_consumption_evolution_properties}|title=Energy Consumption Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_energy_consumption_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_energy_consumption_pie_relative}|chart|type=pie|labels=Country|values=Renewable Energy|>\n\n<|{data_world_energy_consumption_evolution_relative}|chart|properties={data_world_energy_consumption_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni =\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Automotive Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:
\n\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Automotive Visualization\")\n"} {"text": "<|{all_fitness_classes}|table|columns={fitness_class_columns}|width='100%'|on_action={on_fitness_class_select}|style=fitness_class_style|>\n<|Schedule Fitness Class|button|on_action={open_schedule_fitness_class_dialog}|>\n<|Refresh Classes|button|on_action={refresh_fitness_classes}|>\n\n<|{show_schedule_fitness_class_dialog}|dialog|title=Schedule Fitness Class|\n<|{class_type}|selector|lov={get_all_class_types()}|>\n<|{class_instructor}|input|placeholder='Instructor Name'|\n<|{class_time}|time_picker|>\n<|Schedule Class|button|on_action={schedule_fitness_class}|>\n<|Cancel|button|on_action={close_schedule_fitness_class_dialog}|>\n|>\n\n<|{show_fitness_class_details}|pane|\n\n# Class Details <|Edit|button|on_action=edit_selected_class|> <|Cancel|button|on_action=cancel_selected_class|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Class Type\n<|{selected_fitness_class.type}|>\n|>\n\n<|part|class_name=card|\n## Instructor\n<|{selected_fitness_class.instructor}|>\n|>\n\n<|part|class_name=card|\n## Time\n<|{selected_fitness_class.time.strftime(\"%H:%M\")}|>\n|>\n\n----\n|>\n"} {"text": "# **Global**{: .color-primary} Digital Connectivity\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Internet Connectivity Rate**{: .color-primary}\n<|{'{:.2f}%'.format(np.average(data_world_digital['Connectivity Rate']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Smart Device Usage**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_digital['Smart Devices']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Broadband Subscriptions**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_digital['Broadband Subscriptions']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_digital_metric}|toggle|lov={digital_metric_selector}|>\n\n<|part|render={selected_digital_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_digital_pie_absolute}|chart|type=pie|labels=Country|values=Connectivity Rate|title=Global Internet Connectivity|>\n\n<|{data_world_digital_evolution_absolute}|chart|properties={data_world_digital_evolution_properties}|title=Digital Connectivity Trends|>\n|>\n|>\n\n<|part|render={selected_digital_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_digital_pie_relative}|chart|type=pie|labels=Country|values=Smart Devices|>\n\n<|{data_world_digital_evolution_relative}|chart|properties={data_world_digital_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Automotive\"\n mood = \"automotivenews\"\n style = \"automotivepro\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Automotive Tweets\n\n This mini-app generates Tweets related to Automotive using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n    <|layout|columns=1 1 1|gap=30px|class_name=card|\n    <topic|\n    <|{topic}|input|label=Topic|>\n    |topic>\n\n    <mood|\n    <|{mood}|input|label=Mood|>\n    |mood>\n\n    <style|\n    <|{style}|input|label=Style|>\n    |style>\n    |>\n\n    <|Generate Automotive Tweet|button|on_action=generate_text|label=Generate text|>\n\n    <image|\n    <|{image}|image|height=400px|>\n    |image>\n    
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Logistics Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:
\n\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Logistics Visualization\")\n"} {"text": "# **Country**{: .color-primary} Population Growth\n\n<|layout|columns=1 1 1|\n<|{selected_country_population}|selector|lov={selector_country_population}|on_change=on_change_country_population|dropdown|label=Country|>\n\n<|{selected_population_metric}|toggle|lov={population_metric_selector}|on_change=update_population_metric|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Population**{: .color-primary}\n<|{'{:,}'.format(int(population_data.iloc[-1]['Total Population']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Urban Population**{: .color-primary}\n<|{'{:,}'.format(int(population_data.iloc[-1]['Urban Population']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Rural Population**{: .color-primary}\n<|{'{:,}'.format(int(population_data.iloc[-1]['Rural Population']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{population_data}|chart|type=bar|x=Year|y[3]=Total Population|y[2]=Urban Population|y[1]=Rural Population|layout={layout}|options={options}|title=Population Trends|>\n\n<|{population_distribution_chart}|chart|type=pie|values=distribution_values|labels=distribution_labels|title=Urban vs Rural Population|>\n|>\n"} {"text": "# **Stock**{: .color-primary} Market Overview\n\n<|layout|columns=1 1 1|\n<|{selected_stock}|selector|lov={selector_stock}|on_change=on_change_stock|dropdown|label=Stock|>\n\n<|{selected_indicator}|toggle|lov={indicator_selector}|on_change=update_indicator_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Market Value**{: .color-primary}\n<|{'${:,.2f}'.format(stock_data.iloc[-1]['Market Value'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Volume**{: .color-primary}\n<|{'{:,}'.format(stock_data.iloc[-1]['Volume'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Change (%)**{: .color-primary}\n<|{'{:+.2f}%'.format(stock_data.iloc[-1]['Change'])}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{stock_data}|chart|type=line|x=Date|y[3]=Market Value|y[2]=Volume|y[1]=Change|layout={layout}|options={options}|title=Stock Performance|>\n\n<|{sector_distribution_chart}|chart|type=pie|values=sector_values|labels=sector_labels|title=Market Sector Distribution|>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load the sales data\ndf = pd.read_excel(\n io=\"data/supermarkt_sales.xlsx\",\n engine=\"openpyxl\",\n sheet_name=\"Sales\",\n skiprows=3,\n usecols=\"B:R\",\n nrows=1000,\n)\n\n# Add 'hour' column to the dataframe\ndf[\"hour\"] = pd.to_datetime(df[\"Time\"], format=\"%H:%M:%S\").dt.hour\n\n# Initialize variables\ncities = list(df[\"City\"].unique())\ntypes = list(df[\"Customer_type\"].unique())\ngenders = list(df[\"Gender\"].unique())\ncity = cities\ncustomer_type = types\ngender = genders\n\nlayout = {\"margin\": {\"l\": 220}}\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{customer_type}|selector|lov={types}|multiple|label=Select the Customer Type|dropdown|on_change=on_filter|width=100%|>\n\n<|{gender}|selector|lov={genders}|multiple|label=Select the Gender|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_sales>\n\n <|{\"\u2b50\" * int(round(round(df_selection[\"Rating\"].mean(), 1), 0))}|>\n|average_rating>\n\n\n|average_sale>\n|>\n\n
\n\nDisplay df_selection in an expandable\n<|Sales Table|expandable|expanded=False|\n<|{df_selection}|table|width=100%|page_size=5|rebuild|class_name=table|>\n|>\n\n\n\n<|{sales_by_product_line}|chart|x=Total|y=Product|type=bar|orientation=h|title=Sales by Product|layout={layout}|color=#ff462b|>\n|charts>\n|main_page>\n|>\n\nCode from [Coding is Fun](https://github.com/Sven-Bo)\n\nGet the Taipy Code [here](https://github.com/Avaiga/demo-sales-dashboard) and the original code [here](https://github.com/Sven-Bo/streamlit-sales-dashboard)\n\"\"\"\n\n\ndef filter(city, customer_type, gender):\n df_selection = df[\n df[\"City\"].isin(city)\n & df[\"Customer_type\"].isin(customer_type)\n & df[\"Gender\"].isin(gender)\n ]\n\n # SALES BY PRODUCT LINE [BAR CHART]\n sales_by_product_line = (\n df_selection[[\"Product line\", \"Total\"]]\n .groupby(by=[\"Product line\"])\n .sum()[[\"Total\"]]\n .sort_values(by=\"Total\")\n )\n sales_by_product_line[\"Product\"] = sales_by_product_line.index\n\n # SALES BY HOUR [BAR CHART]\n sales_by_hour = (\n df_selection[[\"hour\", \"Total\"]].groupby(by=[\"hour\"]).sum()[[\"Total\"]]\n )\n sales_by_hour[\"Hour\"] = sales_by_hour.index\n return df_selection, sales_by_product_line, sales_by_hour\n\n\ndef on_filter(state):\n state.df_selection, state.sales_by_product_line, state.sales_by_hour = filter(\n state.city, state.customer_type, state.gender\n )\n\n\nif __name__ == \"__main__\":\n # Initialize dataframes\n df_selection, sales_by_product_line, sales_by_hour = filter(\n city, customer_type, gender\n )\n\n # Run the app\n Gui(page).run()\n"} {"text": "# **Country**{: .color-primary} Health Indicators\n\n<|layout|columns=1 1 1|\n<|{selected_country_health}|selector|lov={selector_country_health}|on_change=on_change_country_health|dropdown|label=Country|>\n\n<|{selected_health_metric}|toggle|lov={health_metric_selector}|on_change=update_health_metric_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Life Expectancy**{: .color-primary}\n<|{'{:.2f}'.format(health_data.iloc[-1]['Life Expectancy'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Infant Mortality Rate**{: .color-primary}\n<|{'{:.2f}'.format(health_data.iloc[-1]['Infant Mortality'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Healthcare Expenditure**{: .color-primary}\n<|{'${:,.2f}'.format(health_data.iloc[-1]['Expenditure'])}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{health_data}|chart|type=line|x=Year|y[3]=Life Expectancy|y[2]=Infant Mortality|y[1]=Expenditure|layout={layout}|options={options}|title=Healthcare Trends|>\n\n<|{healthcare_distribution_chart}|chart|type=pie|values=healthcare_values|labels=healthcare_labels|title=Healthcare Distribution|>\n|>\n"} {"text": "# **Worldwide**{: .color-primary} Space Exploration\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Space Missions**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_space['Missions']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Satellites Launched**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_world_space['Satellites']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Budget for Space Programs**{: .color-primary}\n<|{'${:,.2f}'.format(np.sum(data_world_space['Budget']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_space_metric}|toggle|lov={space_metric_selector}|>\n\n<|part|render={selected_space_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_world_space_pie_absolute}|chart|type=pie|labels=Country|values=Missions|title=Global Space Missions|>\n\n<|{data_world_space_evolution_absolute}|chart|properties={data_world_space_evolution_properties}|title=Space Exploration Trends|>\n|>\n|>\n\n<|part|render={selected_space_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_world_space_pie_relative}|chart|type=pie|labels=Country|values=Satellites|>\n\n<|{data_world_space_evolution_relative}|chart|properties={data_world_space_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Education\"\n mood = \"educational\"\n style = \"educationexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Education Tweets\n\n This mini-app generates Tweets related to Education using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n    <|layout|columns=1 1 1|gap=30px|class_name=card|\n    <topic|\n    <|{topic}|input|label=Topic|>\n    |topic>\n\n    <mood|\n    <|{mood}|input|label=Mood|>\n    |mood>\n\n    <style|\n    <|{style}|input|label=Style|>\n    |style>\n    |>\n\n    <|Generate Education Tweet|button|on_action=generate_text|label=Generate text|>\n\n    <image|\n    <|{image}|image|height=400px|>\n    |image>\n    
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "from taipy.gui import Gui\nimport taipy as tp\n\n# Import pages for the energy sector\nfrom pages.energy.dashboard import dashboard_md\nfrom pages.energy.consumption import consumption_md\nfrom pages.energy.production import production_md\nfrom pages.energy.renewables import renewables_md\n\n# Define your pages dictionary\npages = {\n '/dashboard': dashboard_md,\n '/consumption': consumption_md,\n '/production': production_md,\n '/renewables': renewables_md\n}\n\n# Create a Gui with your pages\ngui_energy = Gui(pages=pages)\n\nif __name__ == '__main__':\n tp.Core().run()\n \n # Run the multi-page app\n gui_energy.run(title=\"Energy Dashboard\")\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|area>\n|>\n\n<|{selected_traffic_scenario}|traffic_scenario|on_submission_change=on_traffic_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Traffic Predictions**{: .color-primary} and Data Visualization\n\n<|{selected_traffic_scenario.result.read() if selected_traffic_scenario and selected_traffic_scenario.result.read() is not None else default_traffic_result}|chart|x=Time|y[1]=Vehicle Count|y[2]=Congestion Level|type[1]=line|title=Traffic Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|traffic_scenario>\n|>\n"} {"text": "# **City**{: .color-primary} Urban Development Index\n\n<|layout|columns=1 1 1|\n<|{selected_city_development}|selector|lov={selector_city_development}|on_change=on_change_city_development|dropdown|label=City|>\n\n<|{selected_development_aspect}|toggle|lov={development_aspect_selector}|on_change=update_development_aspect_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Population Growth**{: .color-primary}\n<|{'{:.2f}%'.format(development_data.iloc[-1]['Population Growth'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Infrastructure Rating**{: .color-primary}\n<|{'{:.2f}'.format(development_data.iloc[-1]['Infrastructure'])}|text|class_name=h2|>\n|>\n\n<|card|\n**Economic Activity**{: .color-primary}\n<|{'{:.2f}'.format(development_data.iloc[-1]['Economic Activity'])}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{development_data}|chart|type=bar|x=Year|y[3]=Population Growth|y[2]=Infrastructure|y[1]=Economic Activity|layout={layout}|options={options}|title=Urban Development Trends|>\n\n<|{development_aspect_chart}|chart|type=pie|values=development_aspect_values|labels=development_aspect_labels|title=Aspect Distribution|>\n|>\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|school>\n|>\n\n<|{selected_educational_scenario}|educational_scenario|on_submission_change=on_educational_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Student Performance Trends**{: .color-primary} and Insights\n\n<|{selected_educational_scenario.result.read() if selected_educational_scenario and selected_educational_scenario.result.read() is not None else default_educational_result}|chart|x=Subject|y[1]=Average Score|y[2]=Grade Level|type[1]=bar|title=Academic Performance|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|educational_scenario>\n|>\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Patient Group
\n<|{patient_group_selected}|selector|lov=group_diabetes;group_cardio;group_respiratory|dropdown|on_change=on_patient_group_change|>\n|>\n\n<|part|class_name=card|\n### Select Comparison Group
\n<|{comparison_group_selected}|selector|lov=group_diabetes;group_cardio;group_respiratory|dropdown|on_change=on_comparison_group_change|>\n|>\n\n|>\n\n<|Patient Data Overview|expandable|expanded=True|\nDisplay patient_group_data and comparison_group_data\n<|layout|columns=1 1|\n<|{patient_group_data}|table|page_size=5|>\n\n<|{comparison_group_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{age_distribution_chart}|chart|type=bar|x=Age Group|y=Patients|title=Age Distribution|>\n|>\n\n<|part|class_name=card|\n<|{disease_prevalence_chart}|chart|type=pie|options={disease_options}|\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Logistics\"\n mood = \"supplychain\"\n style = \"logisticsexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Logistics Tweets\n\n This mini-app generates Tweets related to Logistics using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n    <|layout|columns=1 1 1|gap=30px|class_name=card|\n    <topic|\n    <|{topic}|input|label=Topic|>\n    |topic>\n\n    <mood|\n    <|{mood}|input|label=Mood|>\n    |mood>\n\n    <style|\n    <|{style}|input|label=Style|>\n    |style>\n    |>\n\n    <|Generate Logistics Tweet|button|on_action=generate_text|label=Generate text|>\n\n    <image|\n    <|{image}|image|height=400px|>\n    |image>\n    
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Team
\n<|{team_selected}|selector|lov=team_lakers;team_warriors;team_celtics|dropdown|on_change=on_team_change|>\n|>\n\n<|part|class_name=card|\n### Select Season
\n<|{season_selected}|selector|lov=season_2020;season_2021;season_2022|dropdown|on_change=on_season_change|>\n|>\n\n|>\n\n<|Team Performance Overview|expandable|expanded=True|\nDisplay team_data and season_data\n<|layout|columns=1 1|\n<|{team_data}|table|page_size=5|>\n\n<|{season_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{player_stats_chart}|chart|type=bar|x=Player|y=Points|title=Player Performance|>\n|>\n\n<|part|class_name=card|\n<|{win_loss_chart}|chart|type=line|x=Game|y=Win/Loss|title=Win-Loss Record|>\n|>\n|>\n\n
\n### Analyze Team Dynamics:\n<|{team_dynamics_analysis}|scenario|on_submission_change=on_team_dynamics_status_change|expandable=False|expanded=False|>\n\n<|{team_dynamics_analysis}|scenario_dag|>\n\n
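The scenario control above names an `on_team_dynamics_status_change` handler that the snippet does not define. A rough sketch is given below; the `(state, submittable, details)` signature and the `submission_status` key mirror Taipy's documented `on_submission_change` callback and should be verified against the installed Taipy version.

```python
from taipy.gui import notify

# Hypothetical sketch of the submission-status handler referenced above.
def on_team_dynamics_status_change(state, submittable, details):
    # `details` is expected to carry the new submission status (an assumption to verify).
    status = details.get("submission_status")
    notify(state, "info", f"Team dynamics analysis: {status}")
```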
\n### View team dynamics results:\n<|{team_dynamics_analysis.results if team_dynamics_analysis else None}|data_node|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Technology\"\n mood = \"innovative\"\n style = \"techgiant\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Technology Tweets\n\n This mini-app generates Tweets related to Technology using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n    <|layout|columns=1 1 1|gap=30px|class_name=card|\n    <topic|\n    <|{topic}|input|label=Topic|>\n    |topic>\n\n    <mood|\n    <|{mood}|input|label=Mood|>\n    |mood>\n\n    <style|\n    <|{style}|input|label=Style|>\n    |style>\n    |>\n\n    <|Generate Technology Tweet|button|on_action=generate_text|label=Generate text|>\n\n    <image|\n    <|{image}|image|height=400px|>\n    |image>\n    
\n\n **Code from [@kinosal](https://twitter.com/kinosal)**\n\n Original code can be found [here](https://github.com/kinosal/tweet)\n |>\n \"\"\"\n\n Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "# **Global**{: .color-primary} Environmental Statistics\n\n
\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Carbon Emissions**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_global_environment['Emissions']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Deforestation**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_global_environment['Deforestation']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|part|class_name=card|\n**Renewable Energy Usage**{: .color-primary}\n<|{'{:,}'.format(int(np.sum(data_global_environment['Renewable Energy']))).replace(',', ' ')}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|{selected_metric}|toggle|lov={metric_selector}|>\n\n<|part|render={selected_metric=='Absolute'}|\n<|layout|columns=1 2|\n<|{data_global_pie_absolute}|chart|type=pie|labels=Country|values=Emissions|title=Emissions Distribution Worldwide|>\n\n<|{data_global_environment_evolution_absolute}|chart|properties={data_global_environment_evolution_properties}|title=Environmental Trends Worldwide|>\n|>\n|>\n\n<|part|render={selected_metric=='Relative'}|\n<|layout|columns=1 2|\n<|{data_global_pie_relative}|chart|type=pie|labels=Country|values=Deforestation|>\n\n<|{data_global_environment_evolution_relative}|chart|properties={data_global_environment_evolution_relative_properties}|>\n|>\n|>\n"} {"text": "from taipy.gui import Gui\nimport open3d as o3d\nimport numpy as np\n\nmodel_data = None\nmodel_path = \"\"\n\ndef model_upload_view(state):\n if state.model_path:\n model = o3d.io.read_triangle_mesh(state.model_path)\n state.model_data = np.asarray(model.vertices)\n\nmodel_view_page = \"\"\"\n<|{model_path}|file_selector|accept=.ply,.stl|on_action=model_upload_view|>\n<|{model_data}|viewer3d|>\n\"\"\"\n\nGui(model_view_page).run()\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Financial Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:
\n\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Financial Visualization\")\n"} {"text": "<|{all_feedback}|table|columns={feedback_columns}|width='100%'|on_action={on_feedback_select}|style=feedback_style|>\n<|Add Feedback|button|on_action={open_add_feedback_dialog}|>\n<|Refresh Feedback|button|on_action={refresh_feedback_list}|>\n\n<|{show_add_feedback_dialog}|dialog|title=Submit New Feedback|\n<|{customer_name}|input|placeholder='Customer Name'|\n<|{feedback_content}|textarea|placeholder='Enter feedback here...'|\n<|Submit|button|on_action={submit_feedback}|>\n<|Cancel|button|on_action={close_add_feedback_dialog}|>\n|>\n\n<|{show_feedback_details}|pane|\n\n# Feedback Details <|Archive|button|on_action=archive_selected_feedback|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Customer Name\n<|{selected_feedback.customer_name}|>\n|>\n\n<|part|class_name=card|\n## Feedback\n<|{selected_feedback.content}|textarea|disabled=True|>\n|>\n\n<|part|class_name=card|\n## Submission Date\n<|{selected_feedback.date.strftime(\"%b %d, %Y at %H:%M:%S\")}|>\n|>\n\n----\n|>\n"} {"text": "from taipy import Gui\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nWINDOW_SIZE = 500\n\ncm = plt.cm.get_cmap(\"viridis\")\n\n\ndef generate_mandelbrot(\n center: int = WINDOW_SIZE / 2,\n dx_range: int = 1000,\n dx_start: float = -0.12,\n dy_range: float = 1000,\n dy_start: float = -0.82,\n iterations: int = 50,\n max_value: int = 200,\n i: int = 0,\n) -> str:\n mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))\n for y in range(WINDOW_SIZE):\n for x in range(WINDOW_SIZE):\n dx = (x - center) / dx_range + dx_start\n dy = (y - center) / dy_range + dy_start\n a = dx\n b = dy\n for t in range(iterations):\n d = (a * a) - (b * b) + dx\n b = 2 * (a * b) + dy\n a = d\n h = d > max_value\n if h is True:\n mat[x, y] = t\n\n colored_mat = cm(mat / mat.max())\n im = Image.fromarray((colored_mat * 255).astype(np.uint8))\n path = f\"mandelbrot_{i}.png\"\n im.save(path)\n\n return path\n\n\ndef generate(state):\n state.i = state.i + 1\n state.path = generate_mandelbrot(\n dx_start=-state.dx_start / 100,\n dy_start=(state.dy_start - 100) / 100,\n iterations=state.iterations,\n i=state.i,\n )\n\n\ni = 0\ndx_start = 11\ndy_start = 17\niterations = 50\n\npath = generate_mandelbrot(\n dx_start=-dx_start / 100,\n dy_start=(dy_start - 100) / 100,\n)\n\npage = \"\"\"\n# Mandelbrot Fractal for Sports Visualization\n\n<|layout|columns=35 65|\nVisualize Complex Patterns with Mandelbrot Fractals\n<|{path}|image|width=500px|height=500px|class_name=img|>\n\nIterations:
\nSelect the number of iterations to explore fractal patterns\n<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|>
\nX Position:
\n<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\nY Position:
\n\n<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|>
\n|>\n\"\"\"\n\nGui(page).run(title=\"Mandelbrot Fractal for Sports Visualization\")\n"} {"text": "<|{all_vehicles}|table|columns={vehicle_columns}|width='100%'|on_action={on_vehicle_select}|style=vehicle_style|>\n<|Log Maintenance|button|on_action={open_log_maintenance_dialog}|>\n<|Refresh Vehicles|button|on_action={refresh_vehicle_list}|>\n\n<|{show_log_maintenance_dialog}|dialog|title=Log Vehicle Maintenance|\n<|{vehicle_id}|selector|lov={get_all_vehicle_ids()}|>\n<|{maintenance_type}|input|placeholder='Maintenance Type'|\n<|{maintenance_date}|date_picker|>\n<|Log Maintenance|button|on_action={log_maintenance}|>\n<|Cancel|button|on_action={close_log_maintenance_dialog}|>\n|>\n\n<|{show_vehicle_details}|pane|\n\n# Vehicle Details <|Edit|button|on_action=edit_selected_vehicle|> <|Remove|button|on_action=remove_selected_vehicle|>\n\n<|layout|columns=1|\n<|part|class_name=card|\n## Vehicle ID\n<|{selected_vehicle.id}|>\n|>\n\n<|part|class_name=card|\n## Maintenance Type\n<|{selected_vehicle.maintenance_type}|>\n|>\n\n<|part|class_name=card|\n## Maintenance Date\n<|{selected_vehicle.maintenance_date.strftime(\"%b %d, %Y\")}|>\n|>\n\n----\n|>\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|hospital>\n|>\n\n<|{selected_healthcare_scenario}|healthcare_scenario|on_submission_change=on_healthcare_scenario_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Resource Allocation**{: .color-primary} and Analysis\n\n<|{selected_healthcare_scenario.result.read() if selected_healthcare_scenario and selected_healthcare_scenario.result.read() is not None else default_resource_allocation}|chart|x=Resource|y[1]=Allocated|y[2]=Required|type[1]=bar|title=Healthcare Resource Allocation|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|healthcare_scenario>\n|>\n"} {"text": "from taipy.gui import Gui\nimport pandas as pd\n\n# Load inventory data\ninventory_data = pd.read_csv(\"inventory_data.csv\")\n\n# Initialize variables\ncategories = list(inventory_data[\"Category\"].unique())\nlocations = list(inventory_data[\"Location\"].unique())\ncategory = categories\nlocation = locations\n\n# Markdown for the entire page\npage = \"\"\"<|toggle|theme|>\n\n<|layout|columns=20 80|gap=30px|\n\n\n<|{location}|selector|lov={locations}|multiple|label=Select Location|dropdown|on_change=on_filter|width=100%|>\n|sidebar>\n\n\n|total_items>\n\n\n|average_price>\n\n\n|inventory_table>\n|main_page>\n|>\n\nCode adapted from [Inventory Management](https://github.com/inventory_management_app)\n\nGet the Taipy Code [here](https://github.com/Avaiga/inventory-management-app)\n"} {"text": "from taipy.gui import Gui\nimport fitz # PyMuPDF\nimport io\n\npdf_data = None\npdf_path = \"\"\n\ndef pdf_upload(state):\n if state.pdf_path:\n pdf_doc = fitz.open(state.pdf_path)\n page = pdf_doc.load_page(0) # Display the first page\n state.pdf_data = io.BytesIO(page.get_pixmap().tobytes(\"png\"))\n\npdf_page = \"\"\"\n<|{pdf_path}|file_selector|accept=.pdf|on_action=pdf_upload|>\n<|{pdf_data}|image|>\n\"\"\"\n\nGui(pdf_page).run()\n"} {"text": "# **City**{: .color-primary} Environmental Data\n\n<|layout|columns=1 1 1|\n<|{selected_city}|selector|lov={selector_city}|on_change=on_change_city|dropdown|label=City|>\n\n<|{selected_pollutant}|toggle|lov={pollutant_selector}|on_change=update_pollutant_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Air Quality Index**{: .color-primary}\n<|{'{:,}'.format(int(city_data.iloc[-1]['AQI'])).replace(',', ' ')}|text|class_name=h2|>\n|>\n\n<|card|\n**Pollution Level**{: .color-primary}\n<|{city_data.iloc[-1]['Pollution Level']}|text|class_name=h2|>\n|>\n\n<|card|\n**Temperature**{: .color-primary}\n<|{'{:.1f}\u00b0C'.format(city_data.iloc[-1]['Temperature'])}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{city_data}|chart|type=line|x=Date|y[3]=AQI|y[2]=Pollution Level|y[1]=Temperature|layout={layout}|options={options}|title=Environmental Trends|>\n\n<|{pollution_distribution_chart}|chart|type=pie|values=pollution_values|labels=pollution_labels|title=Pollution Source Distribution|>\n|>\n"} {"text": "if __name__ == \"__main__\":\n # Initialize with custom sector-related values\n topic = \"Finance\"\n mood = \"financial\"\n style = \"financialexpert\"\n\n # Create a GUI page with custom settings\n page = \"\"\"\n <|container|\n # **Generate**{: .color-primary} Finance Tweets\n\n This mini-app generates Tweets related to Finance using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL\u00b7E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).\n\n
\n\n    <|layout|columns=1 1 1|gap=30px|class_name=card|\n    <topic|\n    <|{topic}|input|label=Topic|>\n    |topic>\n\n    <mood|\n    <|{mood}|input|label=Mood|>\n    |mood>\n\n    <style|\n    <|{style}|input|label=Style|>\n    |style>\n    |>\n\n    <|Generate Finance Tweet|button|on_action=generate_text|label=Generate text|>\n\n    <image|\n    <|{image}|image|height=400px|>\n    |image>\n    
\n\n    **Code from [@kinosal](https://twitter.com/kinosal)**\n\n    Original code can be found [here](https://github.com/kinosal/tweet)\n    |>\n    \"\"\"\n\n    Gui(page).run(dark_mode=False, port=5089)\n"} {"text": "# **City**{: .color-primary} Crime Statistics\n\n<|layout|columns=1 1 1|\n<|{selected_city_crime}|selector|lov={selector_city_crime}|on_change=on_change_city_crime|dropdown|label=City|>\n\n<|{selected_crime_type}|toggle|lov={crime_type_selector}|on_change=update_crime_type_display|>\n|>\n\n
\n\n<|layout|columns=1 1 1 1|gap=50px|\n<|card|\n**Total Crimes**{: .color-primary}\n<|{'{:,}'.format(int(crime_data.iloc[-1]['Total']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Violent Crimes**{: .color-primary}\n<|{'{:,}'.format(int(crime_data.iloc[-1]['Violent']))}|text|class_name=h2|>\n|>\n\n<|card|\n**Property Crimes**{: .color-primary}\n<|{'{:,}'.format(int(crime_data.iloc[-1]['Property']))}|text|class_name=h2|>\n|>\n|>\n\n
\n\n<|layout|columns=2 1|\n<|{crime_data}|chart|type=bar|x=Year|y[3]=Total|y[2]=Violent|y[1]=Property|layout={layout}|options={options}|title=Crime Trends|>\n\n<|{crime_type_distribution_chart}|chart|type=pie|values=crime_type_values|labels=crime_type_labels|title=Crime Type Distribution|>\n|>\n"} {"text": "<|layout|columns=2 9|gap=50px|\n\n|sidebar>\n\n\n|date>\n\n\n|crop>\n|>\n\n<|{selected_yield_forecast}|yield_forecast|on_submission_change=on_yield_forecast_submission_change|not expanded|>\n\n---------------------------------------\n\n## **Crop Yield Predictions**{: .color-primary} and Data Analysis\n\n<|{selected_yield_forecast.result.read() if selected_yield_forecast and selected_yield_forecast.result.read() is not None else default_yield_result}|chart|x=Date|y[1]=Predicted Yield|y[2]=Historical Yield|type[1]=bar|title=Crop Yield Forecast|>\n\n<|Data Nodes|expandable|\n<|1 5|layout|\n<|{selected_data_node}|data_node_selector|> \n\n<|{selected_data_node}|data_node|>\n|>\n|>\n\n|yield_forecast>\n|>\n"} {"text": "<|layout|columns=1 1|\n<|part|class_name=card|\n### Select Sales Period
\n<|{sales_period_selected}|selector|lov=period_last_month;period_last_quarter;period_last_year|dropdown|on_change=on_sales_period_change|>\n|>\n\n<|part|class_name=card|\n### Select Product Category
\n<|{product_category_selected}|selector|lov=category_electronics;category_clothing;category_home_goods|dropdown|on_change=on_product_category_change|>\n|>\n\n|>\n\n<|Sales Data Overview|expandable|expanded=True|\nDisplay sales_data and category_data\n<|layout|columns=1 1|\n<|{sales_data}|table|page_size=5|>\n\n<|{category_data}|table|page_size=5|>\n|>\n|>\n\n<|layout|columns=1 1|\n<|part|class_name=card|\n<|{sales_volume_chart}|chart|type=line|x=Month|y=Sales Volume|title=Monthly Sales Volume|>\n|>\n\n<|part|class_name=card|\n<|{product_category_chart}|chart|type=pie|options={category_options}|layout={category_layout}|>\n|>\n|>\n\n
\n### Analyze Sales Performance:\n<|{sales_performance}|scenario|on_submission_change=on_sales_performance_status_change|expandable=False|expanded=False|>\n\n<|{sales_performance}|scenario_dag|>\n\n
\n### View the analysis results:\n<|{sales_performance.results if sales_performance else None}|data_node|>\n"} {"text": "#!/usr/bin/env python \"\"\"The setup script.\"\"\" import json import os from setuptools import find_namespace_packages, find_packages, setup with open(\"README.md\") as readme_file: readme = readme_file.read() with open(f\"src{os.sep}taipy{os.sep}core{os.sep}version.json\") as version_file: version = json.load(version_file) version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}' if vext := version.get(\"ext\"): version_string = f\"{version_string}.{vext}\" requirements = [ \"pyarrow>=10.0.1,<11.0\", \"networkx>=2.6,<3.0\", \"openpyxl>=3.1.2,<3.2\", \"modin[dask]>=0.23.0,<1.0\", \"pymongo[srv]>=4.2.0,<5.0\", \"sqlalchemy>=2.0.16,<2.1\", \"toml>=0.10,<0.11\", \"taipy-config@git+https://git@github.com/Avaiga/taipy-config.git@develop\", ] test_requirements = [\"pytest>=3.8\"] extras_require = { \"fastparquet\": [\"fastparquet==2022.11.0\"], \"mssql\": [\"pyodbc>=4,<4.1\"], \"mysql\": [\"pymysql>1,<1.1\"], \"postgresql\": [\"psycopg2>2.9,<2.10\"], } setup( author=\"Avaiga\", author_email=\"dev@taipy.io\", python_requires=\">=3.8\", classifiers=[ \"Intended Audience :: Developers\", \"License :: OSI Approved :: Apache Software License\", \"Natural Language :: English\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Programming Language :: Python :: 3.10\", \"Programming Language :: Python :: 3.11\", ], description=\"A Python library to build powerful and customized data-driven back-end applications.\", install_requires=requirements, long_description=readme, long_description_content_type=\"text/markdown\", license=\"Apache License 2.0\", keywords=\"taipy-core\", name=\"taipy-core\", package_dir={\"\": \"src\"}, packages=find_namespace_packages(where=\"src\") + find_packages(include=[\"taipy\", \"taipy.core\", \"taipy.core.*\"]), include_package_data=True, test_suite=\"tests\", tests_require=test_requirements, url=\"https://github.com/avaiga/taipy-core\", version=version_string, zip_safe=False, extras_require=extras_require, ) "} {"text": "import os import pickle import shutil from datetime import datetime from queue import Queue import pandas as pd import pytest from sqlalchemy import create_engine, text from src.taipy.core._core import Core from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._repository.db._sql_connection import _SQLConnection from src.taipy.core._version._version import _Version from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.config import ( CoreSection, DataNodeConfig, JobConfig, MigrationConfig, ScenarioConfig, TaskConfig, _ConfigIdChecker, _CoreSectionChecker, _DataNodeConfigChecker, _JobConfigChecker, _ScenarioConfigChecker, _TaskConfigChecker, ) from src.taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory from src.taipy.core.cycle._cycle_model import _CycleModel from src.taipy.core.cycle.cycle import Cycle from src.taipy.core.cycle.cycle_id import CycleId from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data._data_model import _DataNodeModel from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.job._job_manager_factory import _JobManagerFactory from src.taipy.core.job.job import Job from src.taipy.core.job.job_id import JobId from 
src.taipy.core.notification.notifier import Notifier from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.scenario._scenario_model import _ScenarioModel from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.scenario.scenario_id import ScenarioId from src.taipy.core.sequence._sequence_manager_factory import _SequenceManagerFactory from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission._submission_model import _SubmissionModel from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task from taipy.config import _inject_section from taipy.config._config import _Config from taipy.config._serializer._toml_serializer import _TomlSerializer from taipy.config.checker._checker import _Checker from taipy.config.checker.issue_collector import IssueCollector from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config current_time = datetime.now() _OrchestratorFactory._build_orchestrator() @pytest.fixture(scope=\"function\") def csv_file(tmpdir_factory) -> str: csv = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.csv\") csv.to_csv(str(fn), index=False) return fn.strpath @pytest.fixture(scope=\"function\") def excel_file(tmpdir_factory) -> str: excel = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.xlsx\") excel.to_excel(str(fn), index=False) return fn.strpath @pytest.fixture(scope=\"function\") def excel_file_with_sheet_name(tmpdir_factory) -> str: excel = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.xlsx\") excel.to_excel(str(fn), sheet_name=\"sheet_name\", index=False) return fn.strpath @pytest.fixture(scope=\"function\") def json_file(tmpdir_factory) -> str: json_data = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.json\") json_data.to_json(str(fn), orient=\"records\") return fn.strpath @pytest.fixture(scope=\"function\") def excel_file_with_multi_sheet(tmpdir_factory) -> str: excel_multi_sheet = { \"Sheet1\": pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]), \"Sheet2\": pd.DataFrame([{\"a\": 7, \"b\": 8, \"c\": 9}, {\"a\": 10, \"b\": 11, \"c\": 12}]), } fn = tmpdir_factory.mktemp(\"data\").join(\"df.xlsx\") with pd.ExcelWriter(str(fn)) as writer: for key in excel_multi_sheet.keys(): excel_multi_sheet[key].to_excel(writer, key, index=False) return fn.strpath @pytest.fixture(scope=\"function\") def pickle_file_path(tmpdir_factory) -> str: data = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.p\") with open(str(fn), \"wb\") as f: pickle.dump(data, f) return fn.strpath @pytest.fixture(scope=\"function\") def parquet_file_path(tmpdir_factory) -> str: data = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.parquet\") data.to_parquet(str(fn)) return fn.strpath @pytest.fixture(scope=\"function\") def 
tmp_sqlite_db_file_path(tmpdir_factory): fn = tmpdir_factory.mktemp(\"data\") db_name = \"df\" file_extension = \".db\" db = create_engine(\"sqlite:///\" + os.path.join(fn.strpath, f\"{db_name}{file_extension}\")) conn = db.connect() conn.execute(text(\"CREATE TABLE example (foo int, bar int);\")) conn.execute(text(\"INSERT INTO example (foo, bar) VALUES (1, 2);\")) conn.execute(text(\"INSERT INTO example (foo, bar) VALUES (3, 4);\")) conn.commit() conn.close() db.dispose() return fn.strpath, db_name, file_extension @pytest.fixture(scope=\"function\") def tmp_sqlite_sqlite3_file_path(tmpdir_factory): fn = tmpdir_factory.mktemp(\"data\") db_name = \"df\" file_extension = \".sqlite3\" db = create_engine(\"sqlite:///\" + os.path.join(fn.strpath, f\"{db_name}{file_extension}\")) conn = db.connect() conn.execute(text(\"CREATE TABLE example (foo int, bar int);\")) conn.execute(text(\"INSERT INTO example (foo, bar) VALUES (1, 2);\")) conn.execute(text(\"INSERT INTO example (foo, bar) VALUES (3, 4);\")) conn.commit() conn.close() db.dispose() return fn.strpath, db_name, file_extension @pytest.fixture(scope=\"function\") def default_data_frame(): return pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) @pytest.fixture(scope=\"function\") def default_multi_sheet_data_frame(): return { \"Sheet1\": pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]), \"Sheet2\": pd.DataFrame([{\"a\": 7, \"b\": 8, \"c\": 9}, {\"a\": 10, \"b\": 11, \"c\": 12}]), } @pytest.fixture(scope=\"session\", autouse=True) def cleanup_files(): yield if os.path.exists(\".data\"): shutil.rmtree(\".data\", ignore_errors=True) if os.path.exists(\".my_data\"): shutil.rmtree(\".my_data\", ignore_errors=True) @pytest.fixture(scope=\"function\") def current_datetime(): return current_time @pytest.fixture(scope=\"function\") def scenario(cycle): return Scenario( \"sc\", set(), {}, set(), ScenarioId(\"sc_id\"), current_time, is_primary=False, tags={\"foo\"}, version=\"random_version_number\", cycle=None, ) @pytest.fixture(scope=\"function\") def data_node(): return InMemoryDataNode(\"data_node_config_id\", Scope.SCENARIO, version=\"random_version_number\") @pytest.fixture(scope=\"function\") def data_node_model(): return _DataNodeModel( \"my_dn_id\", \"test_data_node\", Scope.SCENARIO, \"csv\", \"name\", \"owner_id\", list({\"parent_id_1\", \"parent_id_2\"}), datetime(1985, 10, 14, 2, 30, 0).isoformat(), [dict(timestamp=datetime(1985, 10, 14, 2, 30, 0).isoformat(), job_id=\"job_id\")], \"latest\", None, None, False, {\"path\": \"/path\", \"has_header\": True, \"prop\": \"ENV[FOO]\", \"exposed_type\": \"pandas\"}, ) @pytest.fixture(scope=\"function\") def task(data_node): dn = InMemoryDataNode(\"dn_config_id\", Scope.SCENARIO, version=\"random_version_number\") return Task(\"task_config_id\", {}, print, [data_node], [dn]) @pytest.fixture(scope=\"function\") def scenario_model(cycle): return _ScenarioModel( ScenarioId(\"sc_id\"), \"sc\", set(), set(), {}, creation_date=current_time.isoformat(), primary_scenario=False, subscribers=[], tags=[\"foo\"], version=\"random_version_number\", cycle=None, ) @pytest.fixture(scope=\"function\") def cycle(): example_date = datetime.fromisoformat(\"2021-11-11T11:11:01.000001\") return Cycle( Frequency.DAILY, {}, creation_date=example_date, start_date=example_date, end_date=example_date, name=\"cc\", id=CycleId(\"cc_id\"), ) @pytest.fixture(scope=\"class\") def sequence(): return Sequence( {}, [], SequenceId(\"sequence_id\"), owner_id=\"owner_id\", 
parent_ids=set([\"parent_id_1\", \"parent_id_2\"]), version=\"random_version_number\", ) @pytest.fixture(scope=\"function\") def job(task): return Job(JobId(\"job\"), task, \"foo\", \"bar\", version=\"random_version_number\") @pytest.fixture(scope=\"function\") def _version(): return _Version(id=\"foo\", config=Config._applied_config) @pytest.fixture(scope=\"function\") def cycle_model(): return _CycleModel( CycleId(\"cc_id\"), \"cc\", Frequency.DAILY, {}, creation_date=\"2021-11-11T11:11:01.000001\", start_date=\"2021-11-11T11:11:01.000001\", end_date=\"2021-11-11T11:11:01.000001\", ) @pytest.fixture(scope=\"function\") def tmp_sqlite(tmpdir_factory): fn = tmpdir_factory.mktemp(\"db\") return os.path.join(fn.strpath, \"test.db\") @pytest.fixture(scope=\"function\", autouse=True) def clean_repository(): from sqlalchemy.orm import close_all_sessions close_all_sessions() init_config() init_orchestrator() init_managers() init_config() init_notifier() yield def init_config(): Config.unblock_update() Config._default_config = _Config()._default_config() Config._python_config = _Config() Config._file_config = _Config() Config._env_file_config = _Config() Config._applied_config = _Config() Config._collector = IssueCollector() Config._serializer = _TomlSerializer() _Checker._checkers = [] _inject_section( JobConfig, \"job_config\", JobConfig(\"development\"), [(\"configure_job_executions\", JobConfig._configure)], True ) _inject_section( CoreSection, \"core\", CoreSection.default_config(), [(\"configure_core\", CoreSection._configure)], add_to_unconflicted_sections=True, ) _inject_section( DataNodeConfig, \"data_nodes\", DataNodeConfig.default_config(), [ (\"configure_data_node\", DataNodeConfig._configure), (\"configure_data_node_from\", DataNodeConfig._configure_from), (\"set_default_data_node_configuration\", DataNodeConfig._set_default_configuration), (\"configure_csv_data_node\", DataNodeConfig._configure_csv), (\"configure_json_data_node\", DataNodeConfig._configure_json), (\"configure_sql_table_data_node\", DataNodeConfig._configure_sql_table), (\"configure_sql_data_node\", DataNodeConfig._configure_sql), (\"configure_mongo_collection_data_node\", DataNodeConfig._configure_mongo_collection), (\"configure_in_memory_data_node\", DataNodeConfig._configure_in_memory), (\"configure_pickle_data_node\", DataNodeConfig._configure_pickle), (\"configure_excel_data_node\", DataNodeConfig._configure_excel), (\"configure_generic_data_node\", DataNodeConfig._configure_generic), ], ) _inject_section( TaskConfig, \"tasks\", TaskConfig.default_config(), [ (\"configure_task\", TaskConfig._configure), (\"set_default_task_configuration\", TaskConfig._set_default_configuration), ], ) _inject_section( ScenarioConfig, \"scenarios\", ScenarioConfig.default_config(), [ (\"configure_scenario\", ScenarioConfig._configure), (\"set_default_scenario_configuration\", ScenarioConfig._set_default_configuration), ], ) _inject_section( MigrationConfig, \"migration_functions\", MigrationConfig.default_config(), [(\"add_migration_function\", MigrationConfig._add_migration_function)], True, ) _Checker.add_checker(_ConfigIdChecker) _Checker.add_checker(_CoreSectionChecker) _Checker.add_checker(_DataNodeConfigChecker) _Checker.add_checker(_JobConfigChecker) # We don't need to add _MigrationConfigChecker because it is run only when the Core service is run. 
_Checker.add_checker(_TaskConfigChecker) _Checker.add_checker(_ScenarioConfigChecker) Config.configure_core(read_entity_retry=0) Core._is_running = False def init_managers(): _CycleManagerFactory._build_manager()._delete_all() _ScenarioManagerFactory._build_manager()._delete_all() _SequenceManagerFactory._build_manager()._delete_all() _JobManagerFactory._build_manager()._delete_all() _TaskManagerFactory._build_manager()._delete_all() _DataManagerFactory._build_manager()._delete_all() _VersionManagerFactory._build_manager()._delete_all() _SubmissionManagerFactory._build_manager()._delete_all() def init_orchestrator(): if _OrchestratorFactory._orchestrator is None: _OrchestratorFactory._build_orchestrator() _OrchestratorFactory._build_dispatcher() _OrchestratorFactory._orchestrator.jobs_to_run = Queue() _OrchestratorFactory._orchestrator.blocked_jobs = [] def init_notifier(): Notifier._topics_registrations_list = {} @pytest.fixture def sql_engine(): return create_engine(\"sqlite:///:memory:\") @pytest.fixture def init_sql_repo(tmp_sqlite): Config.configure_core(repository_type=\"sql\", repository_properties={\"db_location\": tmp_sqlite}) # Clean SQLite database if _SQLConnection._connection: _SQLConnection._connection.close() _SQLConnection._connection = None _SQLConnection.init_db() return tmp_sqlite "} {"text": "import os import pathlib from time import sleep from unittest.mock import patch import pandas as pd import src.taipy.core.taipy as tp from src.taipy.core import Core, Status from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from taipy.config import Config # ################################ USER FUNCTIONS ################################## def sum(a, b): a = a[\"number\"] b = b[\"number\"] return a + b def subtract(a, b): a = a[\"number\"] b = b[\"number\"] return a - b def mult(a, b): return a * b def mult_by_2(a): return a def divide(a, b): return a / b def average(a): return [a.sum() / len(a)] def div_constant_with_sleep(a): sleep(1) return a[\"number\"] / 10 def return_a_number(): return 10 def return_a_number_with_sleep(): sleep(1) return 10 # ################################ TEST METHODS ################################## def test_skipped_jobs(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_orchestrator() input_config = Config.configure_data_node(\"input\") intermediate_config = Config.configure_data_node(\"intermediate\") output_config = Config.configure_data_node(\"output\") task_config_1 = Config.configure_task(\"first\", mult_by_2, input_config, intermediate_config, skippable=True) task_config_2 = Config.configure_task(\"second\", mult_by_2, intermediate_config, output_config, skippable=True) scenario_config = Config.configure_scenario(\"scenario\", [task_config_1, task_config_2]) with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = tp.create_scenario(scenario_config) scenario.input.write(2) scenario.submit() assert len(tp.get_jobs()) == 2 for job in tp.get_jobs(): assert job.status == Status.COMPLETED scenario.submit() assert len(tp.get_jobs()) == 4 skipped = [] for job in tp.get_jobs(): if job.status != Status.COMPLETED: assert job.status == Status.SKIPPED skipped.append(job) assert len(skipped) == 2 core.stop() def test_complex(): # d1 --- t1 # | # | --- t2 --- d5 --- | t10 --- d12 # | | | # | | | # d2 | --- t5 --- d7 --- t7 --- d9 --- t8 --- d10 --- t9 --- d11 # | | | # d3 --- | | | | # | | | t6 --- d8 
------------------- # | t3 --- d6 ---| # | | # | | # t4 d4 Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_orchestrator() csv_path_inp = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") excel_path_inp = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") csv_path_sum = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/sum.csv\") excel_path_sum = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/sum.xlsx\") excel_path_out = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/res.xlsx\") csv_path_out = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/res.csv\") inp_csv_dn_1 = Config.configure_csv_data_node(\"dn_csv_in_1\", default_path=csv_path_inp) inp_csv_dn_2 = Config.configure_csv_data_node(\"dn_csv_in_2\", default_path=csv_path_inp) inp_excel_dn_1 = Config.configure_excel_data_node(\"dn_excel_in_1\", default_path=excel_path_inp, sheet_name=\"Sheet1\") inp_excel_dn_2 = Config.configure_excel_data_node(\"dn_excel_in_2\", default_path=excel_path_inp, sheet_name=\"Sheet1\") placeholder = Config.configure_data_node(\"dn_placeholder\", default_data=10) dn_csv_sum = Config.configure_csv_data_node(\"dn_sum_csv\", default_path=csv_path_sum) dn_excel_sum = Config.configure_excel_data_node(\"dn_sum_excel\", default_path=excel_path_sum, sheet_name=\"Sheet1\") dn_subtract_csv_excel = Config.configure_pickle_data_node(\"dn_subtract_csv_excel\") dn_mult = Config.configure_pickle_data_node(\"dn_mult\") dn_div = Config.configure_pickle_data_node(\"dn_div\") output_csv_dn = Config.configure_csv_data_node(\"csv_out\", csv_path_out) output_excel_dn = Config.configure_excel_data_node(\"excel_out\", excel_path_out) task_print_csv = Config.configure_task(\"task_print_csv\", print, input=inp_csv_dn_1) task_print_excel = Config.configure_task(\"task_print_excel\", print, input=inp_excel_dn_1) task_sum_csv = Config.configure_task(\"task_sum_csv\", sum, input=[inp_csv_dn_2, inp_csv_dn_1], output=dn_csv_sum) task_sum_excel = Config.configure_task( \"task_sum_excel\", sum, input=[inp_excel_dn_2, inp_excel_dn_1], output=dn_excel_sum ) task_subtract_csv_excel = Config.configure_task( \"task_subtract_csv_excel\", subtract, input=[dn_csv_sum, dn_excel_sum], output=dn_subtract_csv_excel ) task_insert_placeholder = Config.configure_task(\"task_insert_placeholder\", return_a_number, output=[placeholder]) task_mult = Config.configure_task( \"task_mult_by_placeholder\", mult, input=[dn_subtract_csv_excel, placeholder], output=dn_mult ) task_div = Config.configure_task(\"task_div_by_placeholder\", divide, input=[dn_mult, placeholder], output=dn_div) task_avg_div = Config.configure_task(\"task_avg_div\", average, input=dn_div, output=output_csv_dn) task_avg_mult = Config.configure_task(\"task_avg_mult\", average, input=dn_mult, output=output_excel_dn) scenario_config = Config.configure_scenario( \"scenario\", [ task_print_csv, task_print_excel, task_sum_csv, task_sum_excel, task_subtract_csv_excel, task_insert_placeholder, task_mult, task_div, task_avg_div, task_avg_mult, ], ) with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = tp.create_scenario(scenario_config) tp.submit(scenario) core.stop() csv_sum_res = pd.read_csv(csv_path_sum) excel_sum_res = pd.read_excel(excel_path_sum) csv_out = pd.read_csv(csv_path_out) excel_out = pd.read_excel(excel_path_out) assert csv_sum_res.to_numpy().flatten().tolist() == [i * 20 for i in 
range(1, 11)] assert excel_sum_res.to_numpy().flatten().tolist() == [i * 2 for i in range(1, 11)] assert average(csv_sum_res[\"number\"] - excel_sum_res[\"number\"]) == csv_out.to_numpy()[0] assert average((csv_sum_res[\"number\"] - excel_sum_res[\"number\"]) * 10) == excel_out.to_numpy()[0] for path in [csv_path_sum, excel_path_sum, csv_path_out, excel_path_out]: os.remove(path) "} {"text": "\"\"\"Unit test package for taipy.\"\"\" "} {"text": "import datetime import os import pathlib import shutil from unittest import mock import pytest import src.taipy.core.taipy as tp from src.taipy.core import ( Core, Cycle, CycleId, DataNodeId, JobId, Scenario, ScenarioId, Sequence, SequenceId, Task, TaskId, ) from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.config.job_config import JobConfig from src.taipy.core.config.scenario_config import ScenarioConfig from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.pickle import PickleDataNode from src.taipy.core.exceptions.exceptions import DataNodeConfigIsNotGlobal, InvalidExportPath from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.job.job import Job from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked class TestTaipy: def test_set(self, scenario, cycle, sequence, data_node, task): with mock.patch(\"src.taipy.core.data._data_manager._DataManager._set\") as mck: tp.set(data_node) mck.assert_called_once_with(data_node) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._set\") as mck: tp.set(task) mck.assert_called_once_with(task) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._set\") as mck: tp.set(sequence) mck.assert_called_once_with(sequence) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._set\") as mck: tp.set(scenario) mck.assert_called_once_with(scenario) with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._set\") as mck: tp.set(cycle) mck.assert_called_once_with(cycle) def test_is_editable_is_called(self, cycle, job, data_node): with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._is_editable\") as mck: cycle_id = CycleId(\"CYCLE_id\") tp.is_editable(cycle_id) mck.assert_called_once_with(cycle_id) with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._is_editable\") as mck: tp.is_editable(cycle) mck.assert_called_once_with(cycle) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_editable\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.is_editable(scenario_id) mck.assert_called_once_with(scenario_id) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_editable\") as mck: scenario = Scenario(\"scenario_config_id\", [], {}) tp.is_editable(scenario) mck.assert_called_once_with(scenario) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_editable\") as mck: sequence_id = 
SequenceId(\"SEQUENCE_id\") tp.is_editable(sequence_id) mck.assert_called_once_with(sequence_id) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_editable\") as mck: sequence = Sequence({}, [], \"sequence_id\") tp.is_editable(sequence) mck.assert_called_once_with(sequence) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_editable\") as mck: task_id = TaskId(\"TASK_id\") tp.is_editable(task_id) mck.assert_called_once_with(task_id) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_editable\") as mck: task = Task(\"task_config_id\", {}, print) tp.is_editable(task) mck.assert_called_once_with(task) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_editable\") as mck: job_id = JobId(\"JOB_id\") tp.is_editable(job_id) mck.assert_called_once_with(job_id) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_editable\") as mck: tp.is_editable(job) mck.assert_called_once_with(job) with mock.patch(\"src.taipy.core.data._data_manager._DataManager._is_editable\") as mck: dn_id = DataNodeId(\"DATANODE_id\") tp.is_editable(dn_id) mck.assert_called_once_with(dn_id) with mock.patch(\"src.taipy.core.data._data_manager._DataManager._is_editable\") as mck: tp.is_editable(data_node) mck.assert_called_once_with(data_node) def test_is_editable(self): current_date = datetime.datetime.now() cycle = Cycle(Frequency.DAILY, {}, current_date, current_date, current_date) scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) task = Task(\"task_config_id\", {}, print) job = Job(\"job_id\", task, \"submit_id\", scenario.id) dn = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) _CycleManager._set(cycle) _ScenarioManager._set(scenario) _TaskManager._set(task) _JobManager._set(job) _DataManager._set(dn) sequence = scenario.sequences[\"sequence\"] assert tp.is_editable(scenario) assert tp.is_editable(sequence) assert tp.is_editable(task) assert tp.is_editable(cycle) assert tp.is_editable(job) assert tp.is_editable(dn) def test_is_readable_is_called(self, cycle, job, data_node): with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._is_readable\") as mck: cycle_id = CycleId(\"CYCLE_id\") tp.is_readable(cycle_id) mck.assert_called_once_with(cycle_id) with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._is_readable\") as mck: tp.is_readable(cycle) mck.assert_called_once_with(cycle) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_readable\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.is_readable(scenario_id) mck.assert_called_once_with(scenario_id) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_readable\") as mck: scenario = Scenario(\"scenario_config_id\", [], {}) tp.is_readable(scenario) mck.assert_called_once_with(scenario) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_readable\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.is_readable(sequence_id) mck.assert_called_once_with(sequence_id) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_readable\") as mck: sequence = Sequence({}, [], \"sequence_id\") tp.is_readable(sequence) mck.assert_called_once_with(sequence) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_readable\") as mck: task_id = TaskId(\"TASK_id\") tp.is_readable(task_id) mck.assert_called_once_with(task_id) with 
mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_readable\") as mck: task = Task(\"task_config_id\", {}, print) tp.is_readable(task) mck.assert_called_once_with(task) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_readable\") as mck: job_id = JobId(\"JOB_id\") tp.is_readable(job_id) mck.assert_called_once_with(job_id) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_readable\") as mck: tp.is_readable(job) mck.assert_called_once_with(job) with mock.patch(\"src.taipy.core.data._data_manager._DataManager._is_readable\") as mck: dn_id = DataNodeId(\"DATANODE_id\") tp.is_readable(dn_id) mck.assert_called_once_with(dn_id) with mock.patch(\"src.taipy.core.data._data_manager._DataManager._is_readable\") as mck: tp.is_readable(data_node) mck.assert_called_once_with(data_node) def test_is_readable(self): current_date = datetime.datetime.now() cycle = Cycle(Frequency.DAILY, {}, current_date, current_date, current_date) scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) task = Task(\"task_config_id\", {}, print) job = Job(\"job_id\", task, \"submit_id\", scenario.id) dn = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) _CycleManager._set(cycle) _ScenarioManager._set(scenario) _TaskManager._set(task) _JobManager._set(job) _DataManager._set(dn) sequence = scenario.sequences[\"sequence\"] assert tp.is_readable(scenario) assert tp.is_readable(sequence) assert tp.is_readable(task) assert tp.is_readable(cycle) assert tp.is_readable(job) assert tp.is_readable(dn) def test_is_submittable_is_called(self): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_submittable\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.is_submittable(scenario_id) mck.assert_called_once_with(scenario_id) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_submittable\") as mck: scenario = Scenario(\"scenario_config_id\", [], {}) tp.is_submittable(scenario) mck.assert_called_once_with(scenario) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_submittable\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.is_submittable(sequence_id) mck.assert_called_once_with(sequence_id) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_submittable\") as mck: sequence = Sequence({}, [], \"sequence_id\") tp.is_submittable(sequence) mck.assert_called_once_with(sequence) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_submittable\") as mck: task_id = TaskId(\"TASK_id\") tp.is_submittable(task_id) mck.assert_called_once_with(task_id) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_submittable\") as mck: task = Task(\"task_config_id\", {}, print) tp.is_submittable(task) mck.assert_called_once_with(task) def test_is_submittable(self): current_date = datetime.datetime.now() cycle = Cycle(Frequency.DAILY, {}, current_date, current_date, current_date) scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) task = Task(\"task_config_id\", {}, print) job = Job(\"job_id\", task, \"submit_id\", scenario.id) dn = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) _CycleManager._set(cycle) _ScenarioManager._set(scenario) _TaskManager._set(task) _JobManager._set(job) _DataManager._set(dn) sequence = scenario.sequences[\"sequence\"] assert tp.is_submittable(scenario) assert tp.is_submittable(sequence) assert tp.is_submittable(task) assert not tp.is_submittable(cycle) assert 
not tp.is_submittable(job) assert not tp.is_submittable(dn) def test_submit(self, scenario, sequence, task): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._submit\") as mck: tp.submit(scenario) mck.assert_called_once_with(scenario, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._submit\") as mck: tp.submit(sequence) mck.assert_called_once_with(sequence, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._submit\") as mck: tp.submit(task) mck.assert_called_once_with(task, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._submit\") as mck: tp.submit(scenario, False, False, None) mck.assert_called_once_with(scenario, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._submit\") as mck: tp.submit(sequence, False, False, None) mck.assert_called_once_with(sequence, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._submit\") as mck: tp.submit(task, False, False, None) mck.assert_called_once_with(task, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._submit\") as mck: tp.submit(scenario, True, True, 60) mck.assert_called_once_with(scenario, force=True, wait=True, timeout=60) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._submit\") as mck: tp.submit(sequence, True, True, 60) mck.assert_called_once_with(sequence, force=True, wait=True, timeout=60) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._submit\") as mck: tp.submit(task, True, True, 60) mck.assert_called_once_with(task, force=True, wait=True, timeout=60) def test_warning_no_core_service_running(self, scenario): _OrchestratorFactory._remove_dispatcher() with pytest.warns(ResourceWarning) as warning: with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._submit\"): tp.submit(scenario) assert len(warning) == 1 assert warning[0].message.args[0] == \"The Core service is NOT running\" def test_get_tasks(self): with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._get_all\") as mck: tp.get_tasks() mck.assert_called_once_with() def test_get_task(self, task): with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._get\") as mck: task_id = TaskId(\"TASK_id\") tp.get(task_id) mck.assert_called_once_with(task_id) def test_task_exists(self): with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._exists\") as mck: task_id = TaskId(\"TASK_id\") tp.exists(task_id) mck.assert_called_once_with(task_id) def test_is_deletable(self, task): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_deletable\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.is_deletable(scenario_id) mck.assert_called_once_with(scenario_id) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_deletable\") as mck: scenario = Scenario(\"config_id\", [], {}) tp.is_deletable(scenario) mck.assert_called_once_with(scenario) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_deletable\") as mck: job_id = JobId(\"JOB_job_id\") tp.is_deletable(job_id) mck.assert_called_once_with(job_id) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_deletable\") as mck: job = Job(\"job_id\", task, \"submit_id\", 
task.id) tp.is_deletable(job) mck.assert_called_once_with(job) def test_is_promotable(self): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_promotable_to_primary\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.is_promotable(scenario_id) mck.assert_called_once_with(scenario_id) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_promotable_to_primary\") as mck: scenario = Scenario(\"config_id\", [], {}) tp.is_promotable(scenario) mck.assert_called_once_with(scenario) def test_delete_scenario(self): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._hard_delete\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.delete(scenario_id) mck.assert_called_once_with(scenario_id) def test_delete(self): with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._hard_delete\") as mck: cycle_id = CycleId(\"CYCLE_id\") tp.delete(cycle_id) mck.assert_called_once_with(cycle_id) def test_get_scenarios(self, cycle): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get_all\") as mck: tp.get_scenarios() mck.assert_called_once_with() with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get_all_by_cycle\") as mck: tp.get_scenarios(cycle) mck.assert_called_once_with(cycle) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get_all_by_tag\") as mck: tp.get_scenarios(tag=\"tag\") mck.assert_called_once_with(\"tag\") def test_get_scenario(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.get(scenario_id) mck.assert_called_once_with(scenario_id) def test_scenario_exists(self): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._exists\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.exists(scenario_id) mck.assert_called_once_with(scenario_id) def test_get_primary(self, cycle): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get_primary\") as mck: tp.get_primary(cycle) mck.assert_called_once_with(cycle) def test_get_primary_scenarios(self): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get_primary_scenarios\") as mck: tp.get_primary_scenarios() mck.assert_called_once_with() def test_set_primary(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._set_primary\") as mck: tp.set_primary(scenario) mck.assert_called_once_with(scenario) def test_tag_and_untag(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._tag\") as mck: tp.tag(scenario, \"tag\") mck.assert_called_once_with(scenario, \"tag\") with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._untag\") as mck: tp.untag(scenario, \"tag\") mck.assert_called_once_with(scenario, \"tag\") def test_compare_scenarios(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._compare\") as mck: tp.compare_scenarios(scenario, scenario, data_node_config_id=\"dn\") mck.assert_called_once_with(scenario, scenario, data_node_config_id=\"dn\") def test_subscribe_scenario(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._subscribe\") as mck: tp.subscribe_scenario(print) mck.assert_called_once_with(print, [], None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._subscribe\") as mck: 
tp.subscribe_scenario(print, scenario=scenario) mck.assert_called_once_with(print, [], scenario) def test_unsubscribe_scenario(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._unsubscribe\") as mck: tp.unsubscribe_scenario(print) mck.assert_called_once_with(print, None, None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._unsubscribe\") as mck: tp.unsubscribe_scenario(print, scenario=scenario) mck.assert_called_once_with(print, None, scenario) def test_subscribe_sequence(self, sequence): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._subscribe\") as mck: tp.subscribe_sequence(print) mck.assert_called_once_with(print, None, None) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._subscribe\") as mck: tp.subscribe_sequence(print, sequence=sequence) mck.assert_called_once_with(print, None, sequence) def test_unsubscribe_sequence(self, sequence): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._unsubscribe\") as mck: tp.unsubscribe_sequence(callback=print) mck.assert_called_once_with(print, None, None) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._unsubscribe\") as mck: tp.unsubscribe_sequence(callback=print, sequence=sequence) mck.assert_called_once_with(print, None, sequence) def test_delete_sequence(self): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._hard_delete\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.delete(sequence_id) mck.assert_called_once_with(sequence_id) def test_get_sequence(self, sequence): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._get\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.get(sequence_id) mck.assert_called_once_with(sequence_id) def test_get_sequences(self): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._get_all\") as mck: tp.get_sequences() mck.assert_called_once_with() def test_sequence_exists(self): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._exists\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.exists(sequence_id) mck.assert_called_once_with(sequence_id) def test_get_job(self): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._get\") as mck: job_id = JobId(\"JOB_id\") tp.get(job_id) mck.assert_called_once_with(job_id) def test_get_jobs(self): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._get_all\") as mck: tp.get_jobs() mck.assert_called_once_with() def test_job_exists(self): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._exists\") as mck: job_id = JobId(\"JOB_id\") tp.exists(job_id) mck.assert_called_once_with(job_id) def test_delete_job(self, task): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._delete\") as mck: job = Job(JobId(\"job_id\"), task, \"submit_id\", \"scenario_id\") tp.delete_job(job) mck.assert_called_once_with(job, False) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._delete\") as mck: job = Job(JobId(\"job_id\"), task, \"submit_id\", \"scenario_id\") tp.delete_job(job, False) mck.assert_called_once_with(job, False) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._delete\") as mck: job = Job(JobId(\"job_id\"), task, \"submit_id\", \"scenario_id\") tp.delete_job(job, True) mck.assert_called_once_with(job, True) def test_delete_jobs(self): with 
mock.patch(\"src.taipy.core.job._job_manager._JobManager._delete_all\") as mck: tp.delete_jobs() mck.assert_called_once_with() def test_get_latest_job(self, task): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._get_latest\") as mck: tp.get_latest_job(task) mck.assert_called_once_with(task) def test_get_latest_submission(self, task): with mock.patch(\"src.taipy.core.submission._submission_manager._SubmissionManager._get_latest\") as mck: tp.get_latest_submission(task) mck.assert_called_once_with(task) def test_cancel_job(self): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._cancel\") as mck: tp.cancel_job(\"job_id\") mck.assert_called_once_with(\"job_id\") def test_block_config_when_core_is_running_in_development_mode(self): input_cfg_1 = Config.configure_data_node(id=\"i1\", storage_type=\"pickle\", default_data=1, scope=Scope.SCENARIO) output_cfg_1 = Config.configure_data_node(id=\"o1\", storage_type=\"pickle\", scope=Scope.SCENARIO) task_cfg_1 = Config.configure_task(\"t1\", print, input_cfg_1, output_cfg_1) scenario_cfg_1 = Config.configure_scenario(\"s1\", [task_cfg_1], [], Frequency.DAILY) core = Core() core.run() scenario_1 = tp.create_scenario(scenario_cfg_1) tp.submit(scenario_1) with pytest.raises(ConfigurationUpdateBlocked): Config.configure_scenario(\"block_scenario\", set([task_cfg_1])) core.stop() def test_block_config_when_core_is_running_in_standalone_mode(self): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE) input_cfg_1 = Config.configure_data_node(id=\"i1\", storage_type=\"pickle\", default_data=1, scope=Scope.SCENARIO) output_cfg_1 = Config.configure_data_node(id=\"o1\", storage_type=\"pickle\", scope=Scope.SCENARIO) task_cfg_1 = Config.configure_task(\"t1\", print, input_cfg_1, output_cfg_1) scenario_cfg_1 = Config.configure_scenario(\"s1\", [task_cfg_1], [], Frequency.DAILY) core = Core() core.run() scenario_1 = tp.create_scenario(scenario_cfg_1) tp.submit(scenario_1, wait=True) with pytest.raises(ConfigurationUpdateBlocked): Config.configure_scenario(\"block_scenario\", set([task_cfg_1])) core.stop() def test_get_data_node(self, data_node): with mock.patch(\"src.taipy.core.data._data_manager._DataManager._get\") as mck: tp.get(data_node.id) mck.assert_called_once_with(data_node.id) def test_get_data_nodes(self): with mock.patch(\"src.taipy.core.data._data_manager._DataManager._get_all\") as mck: tp.get_data_nodes() mck.assert_called_once_with() def test_data_node_exists(self): with mock.patch(\"src.taipy.core.data._data_manager._DataManager._exists\") as mck: data_node_id = DataNodeId(\"DATANODE_id\") tp.exists(data_node_id) mck.assert_called_once_with(data_node_id) def test_get_cycles(self): with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._get_all\") as mck: tp.get_cycles() mck.assert_called_once_with() def test_cycle_exists(self): with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._exists\") as mck: cycle_id = CycleId(\"CYCLE_id\") tp.exists(cycle_id) mck.assert_called_once_with(cycle_id) def test_create_global_data_node(self): dn_cfg = DataNodeConfig(\"id\", \"pickle\", Scope.GLOBAL) with mock.patch(\"src.taipy.core.data._data_manager._DataManager._create_and_set\") as mck: dn = tp.create_global_data_node(dn_cfg) mck.assert_called_once_with(dn_cfg, None, None) dn = tp.create_global_data_node(dn_cfg) assert dn.scope == Scope.GLOBAL assert dn.config_id == dn_cfg.id # Create a global data node from the same configuration should return the same data node dn_2 = 
tp.create_global_data_node(dn_cfg) assert dn_2.id == dn.id dn_cfg.scope = Scope.SCENARIO with pytest.raises(DataNodeConfigIsNotGlobal): tp.create_global_data_node(dn_cfg) def test_create_scenario(self, scenario): scenario_config = ScenarioConfig(\"scenario_config\") with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._create\") as mck: tp.create_scenario(scenario_config) mck.assert_called_once_with(scenario_config, None, None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._create\") as mck: tp.create_scenario(scenario_config, datetime.datetime(2022, 2, 5)) mck.assert_called_once_with(scenario_config, datetime.datetime(2022, 2, 5), None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._create\") as mck: tp.create_scenario(scenario_config, datetime.datetime(2022, 2, 5), \"displayable_name\") mck.assert_called_once_with(scenario_config, datetime.datetime(2022, 2, 5), \"displayable_name\") def test_export_scenario_filesystem(self): shutil.rmtree(\"./tmp\", ignore_errors=True) input_cfg_1 = Config.configure_data_node(id=\"i1\", storage_type=\"pickle\", default_data=1, scope=Scope.SCENARIO) output_cfg_1 = Config.configure_data_node(id=\"o1\", storage_type=\"pickle\", scope=Scope.SCENARIO) task_cfg_1 = Config.configure_task(\"t1\", print, input_cfg_1, output_cfg_1) scenario_cfg_1 = Config.configure_scenario(\"s1\", [task_cfg_1], [], Frequency.DAILY) input_cfg_2 = Config.configure_data_node(id=\"i2\", storage_type=\"pickle\", default_data=2, scope=Scope.SCENARIO) output_cfg_2 = Config.configure_data_node(id=\"o2\", storage_type=\"pickle\", scope=Scope.SCENARIO) task_cfg_2 = Config.configure_task(\"t2\", print, input_cfg_2, output_cfg_2) scenario_cfg_2 = Config.configure_scenario(\"s2\", [task_cfg_2], [], Frequency.DAILY) scenario_1 = tp.create_scenario(scenario_cfg_1) job_1 = tp.submit(scenario_1)[0] # Export scenario 1 tp.export_scenario(scenario_1.id, \"./tmp/exp_scenario_1\") assert sorted(os.listdir(\"./tmp/exp_scenario_1/data_nodes\")) == sorted( [f\"{scenario_1.i1.id}.json\", f\"{scenario_1.o1.id}.json\"] ) assert sorted(os.listdir(\"./tmp/exp_scenario_1/tasks\")) == sorted([f\"{scenario_1.t1.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/scenarios\")) == sorted([f\"{scenario_1.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/jobs\")) == sorted([f\"{job_1.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/cycles\")) == sorted([f\"{scenario_1.cycle.id}.json\"]) scenario_2 = tp.create_scenario(scenario_cfg_2) job_2 = tp.submit(scenario_2)[0] # Export scenario 2 scenario_2.export(pathlib.Path.cwd() / \"./tmp/exp_scenario_2\") assert sorted(os.listdir(\"./tmp/exp_scenario_2/data_nodes\")) == sorted( [f\"{scenario_2.i2.id}.json\", f\"{scenario_2.o2.id}.json\"] ) assert sorted(os.listdir(\"./tmp/exp_scenario_2/tasks\")) == sorted([f\"{scenario_2.t2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_2/scenarios\")) == sorted([f\"{scenario_2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_2/jobs\")) == sorted([f\"{job_2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_2/cycles\")) == sorted([f\"{scenario_2.cycle.id}.json\"]) # Export scenario 2 into the folder containing scenario 1 files tp.export_scenario(scenario_2.id, \"./tmp/exp_scenario_1\") # Should have the files as scenario 1 only assert sorted(os.listdir(\"./tmp/exp_scenario_1/tasks\")) == sorted([f\"{scenario_2.t2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/scenarios\")) == 
sorted([f\"{scenario_2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/jobs\")) == sorted([f\"{job_2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/cycles\")) == sorted([f\"{scenario_2.cycle.id}.json\"]) with pytest.raises(InvalidExportPath): tp.export_scenario(scenario_2.id, Config.core.storage_folder) shutil.rmtree(\"./tmp\", ignore_errors=True) def test_get_parents(self): def assert_result_parents_and_expected_parents(parents, expected_parents): for key, items in expected_parents.items(): assert len(parents[key]) == len(expected_parents[key]) parent_ids = [parent.id for parent in parents[key]] assert all([item.id in parent_ids for item in items]) dn_config_1 = Config.configure_data_node(id=\"d1\", storage_type=\"in_memory\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(id=\"d2\", storage_type=\"in_memory\", scope=Scope.SCENARIO) dn_config_3 = Config.configure_data_node(id=\"d3\", storage_type=\"in_memory\", scope=Scope.SCENARIO) dn_config_4 = Config.configure_data_node(id=\"d4\", storage_type=\"in_memory\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"t1\", print, dn_config_1, dn_config_2) task_config_2 = Config.configure_task(\"t2\", print, dn_config_2, dn_config_3) scenario_cfg_1 = Config.configure_scenario(\"s1\", [task_config_1, task_config_2], [dn_config_4], Frequency.DAILY) scenario = tp.create_scenario(scenario_cfg_1) tasks = scenario.tasks expected_parents = { \"scenario\": {scenario}, \"task\": {tasks[\"t1\"]}, } parents = tp.get_parents(scenario.tasks[\"t1\"].data_nodes[\"d1\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = { \"scenario\": {scenario}, \"task\": {tasks[\"t1\"], tasks[\"t2\"]}, } parents = tp.get_parents(scenario.tasks[\"t1\"].data_nodes[\"d2\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {\"scenario\": {scenario}, \"task\": {tasks[\"t2\"]}} parents = tp.get_parents(scenario.tasks[\"t2\"].data_nodes[\"d3\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {\"scenario\": {scenario}} parents = tp.get_parents(scenario.tasks[\"t1\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {\"scenario\": {scenario}} parents = tp.get_parents(scenario.tasks[\"t2\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {\"scenario\": {scenario}} parents = tp.get_parents(scenario.additional_data_nodes[\"d4\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {} parents = tp.get_parents(scenario) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {} parents = tp.get_parents(scenario.cycle) assert_result_parents_and_expected_parents(parents, expected_parents) def test_get_cycles_scenarios(self): scenario_cfg_1 = Config.configure_scenario( \"s1\", set(), set(), Frequency.DAILY, ) scenario_cfg_2 = Config.configure_scenario(\"s2\", set(), set(), Frequency.WEEKLY) scenario_cfg_3 = Config.configure_scenario(\"s3\", set(), set(), Frequency.MONTHLY) scenario_cfg_4 = Config.configure_scenario(\"s4\", set(), set(), Frequency.YEARLY) scenario_cfg_5 = Config.configure_scenario(\"s5\", set(), set(), None) now = datetime.datetime.now() scenario_1_1 = tp.create_scenario(scenario_cfg_1, now) scenario_1_2 = tp.create_scenario(scenario_cfg_1, datetime.datetime.now()) scenario_1_3 = tp.create_scenario(scenario_cfg_1, now + datetime.timedelta(days=1)) 
scenario_1_4 = tp.create_scenario(scenario_cfg_1, now + datetime.timedelta(days=8)) scenario_1_5 = tp.create_scenario(scenario_cfg_1, now + datetime.timedelta(days=25)) scenario_2 = tp.create_scenario(scenario_cfg_2) scenario_3 = tp.create_scenario(scenario_cfg_3) scenario_4 = tp.create_scenario(scenario_cfg_4) scenario_5_1 = tp.create_scenario(scenario_cfg_5) scenario_5_2 = tp.create_scenario(scenario_cfg_5) scenario_5_3 = tp.create_scenario(scenario_cfg_5) expected_cycles_scenarios = { scenario_1_1.cycle: [scenario_1_1.id, scenario_1_2.id], scenario_1_3.cycle: [scenario_1_3.id], scenario_1_4.cycle: [scenario_1_4.id], scenario_1_5.cycle: [scenario_1_5.id], scenario_2.cycle: [scenario_2.id], scenario_3.cycle: [scenario_3.id], scenario_4.cycle: [scenario_4.id], None: [scenario_5_1.id, scenario_5_2.id, scenario_5_3.id], } cycles_scenarios = tp.get_cycles_scenarios() assert expected_cycles_scenarios.keys() == cycles_scenarios.keys() for cycle, scenarios in cycles_scenarios.items(): expected_scenarios = expected_cycles_scenarios[cycle] assert sorted([scenario.id for scenario in scenarios]) == sorted(expected_scenarios) def test_get_entities_by_config_id(self): scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) s_1_1 = tp.create_scenario(scenario_config_1) s_1_2 = tp.create_scenario(scenario_config_1) s_1_3 = tp.create_scenario(scenario_config_1) assert len(tp.get_scenarios()) == 3 s_2_1 = tp.create_scenario(scenario_config_2) s_2_2 = tp.create_scenario(scenario_config_2) assert len(tp.get_scenarios()) == 5 s1_scenarios = tp.get_entities_by_config_id(scenario_config_1.id) assert len(s1_scenarios) == 3 assert sorted([s_1_1.id, s_1_2.id, s_1_3.id]) == sorted([scenario.id for scenario in s1_scenarios]) s2_scenarios = tp.get_entities_by_config_id(scenario_config_2.id) assert len(s2_scenarios) == 2 assert sorted([s_2_1.id, s_2_2.id]) == sorted([scenario.id for scenario in s2_scenarios]) def test_get_entities_by_config_id_in_multiple_versions_environment(self): scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) _VersionManager._set_experiment_version(\"1.0\") tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_2) tp.create_scenario(scenario_config_2) assert len(tp.get_scenarios()) == 5 assert len(tp.get_entities_by_config_id(scenario_config_1.id)) == 3 assert len(tp.get_entities_by_config_id(scenario_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_2) tp.create_scenario(scenario_config_2) assert len(tp.get_scenarios()) == 5 assert len(tp.get_entities_by_config_id(scenario_config_1.id)) == 3 assert len(tp.get_entities_by_config_id(scenario_config_2.id)) == 2 "} {"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.common._utils import _load_fct from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.exceptions.exceptions import NonExistingVersion from 
src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.conftest import init_config, init_managers from tests.core.utils import assert_true_after_time def test_core_cli_no_arguments(init_sql_repo): with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() assert not Config.core.force core.stop() def test_core_cli_development_mode(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() core.stop() def test_core_cli_dev_mode(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"-dev\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() core.stop() def test_core_cli_experiment_mode(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_latest_version() assert not Config.core.force core.stop() def test_core_cli_experiment_mode_with_version(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"2.1\" assert not Config.core.force core.stop() def test_core_cli_experiment_mode_with_force_version(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\", \"--taipy-force\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"2.1\" assert Config.core.force core.stop() def test_core_cli_production_mode(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--production\"]): core = Core() core.run() assert Config.core.mode == \"production\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_latest_version() assert not Config.core.force core.stop() def test_dev_mode_clean_all_entities_of_the_latest_version(init_sql_repo): scenario_config = config_scenario() init_managers() # Create a scenario in development mode with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Initial assertion assert len(_DataManager._get_all(version_number=\"all\")) == 2 assert len(_TaskManager._get_all(version_number=\"all\")) == 1 assert len(_SequenceManager._get_all(version_number=\"all\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 1 assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 1 # Create a new scenario in experiment mode with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Assert number of entities 
in 2nd version assert len(_DataManager._get_all(version_number=\"all\")) == 4 assert len(_TaskManager._get_all(version_number=\"all\")) == 2 assert len(_SequenceManager._get_all(version_number=\"all\")) == 2 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 2 assert ( len(_CycleManager._get_all(version_number=\"all\")) == 1 ) # No new cycle is created since old dev version use the same cycle assert len(_JobManager._get_all(version_number=\"all\")) == 2 # Run development mode again with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() # The 1st dev version should be deleted run with development mode assert len(_DataManager._get_all(version_number=\"all\")) == 2 assert len(_TaskManager._get_all(version_number=\"all\")) == 1 assert len(_SequenceManager._get_all(version_number=\"all\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 1 assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 1 # Submit new dev version scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) # Assert number of entities with 1 dev version and 1 exp version assert len(_DataManager._get_all(version_number=\"all\")) == 4 assert len(_TaskManager._get_all(version_number=\"all\")) == 2 assert len(_SequenceManager._get_all(version_number=\"all\")) == 2 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 2 assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 2 # Assert number of entities of the latest version only assert len(_DataManager._get_all(version_number=\"latest\")) == 2 assert len(_TaskManager._get_all(version_number=\"latest\")) == 1 assert len(_SequenceManager._get_all(version_number=\"latest\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"latest\")) == 1 assert len(_JobManager._get_all(version_number=\"latest\")) == 1 # Assert number of entities of the development version only assert len(_DataManager._get_all(version_number=\"development\")) == 2 assert len(_TaskManager._get_all(version_number=\"development\")) == 1 assert len(_SequenceManager._get_all(version_number=\"development\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"development\")) == 1 assert len(_JobManager._get_all(version_number=\"development\")) == 1 # Assert number of entities of an unknown version with pytest.raises(NonExistingVersion): assert _DataManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _TaskManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _SequenceManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _ScenarioManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _JobManager._get_all(version_number=\"foo\") core.stop() def twice_doppelganger(a): return a * 2 def test_dev_mode_clean_all_entities_when_config_is_alternated(init_sql_repo): data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice_doppelganger, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) # Create a scenario in development mode 
with the doppelganger function with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Delete the twice_doppelganger function # and clear cache of _load_fct() to simulate a new run del globals()[\"twice_doppelganger\"] _load_fct.cache_clear() # Create a scenario in development mode with another function scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() def test_version_number_when_switching_mode(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() ver_dev = _VersionManager._get_development_version() assert ver_1 == ver_dev assert len(_VersionManager._get_all()) == 1 core.stop() # Run with dev mode, the version number is the same with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_2 = _VersionManager._get_latest_version() assert ver_2 == ver_dev assert len(_VersionManager._get_all()) == 1 core.stop() # When run with experiment mode, a new version is created with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() ver_3 = _VersionManager._get_latest_version() assert ver_3 != ver_dev assert len(_VersionManager._get_all()) == 2 core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() ver_4 = _VersionManager._get_latest_version() assert ver_4 == \"2.1\" assert len(_VersionManager._get_all()) == 3 core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() ver_5 = _VersionManager._get_latest_version() assert ver_5 != ver_3 assert ver_5 != ver_4 assert ver_5 != ver_dev assert len(_VersionManager._get_all()) == 4 core.stop() # When run with production mode, the latest version is used as production with patch(\"sys.argv\", [\"prog\", \"--production\"]): core = Core() core.run() ver_6 = _VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_6 == ver_5 assert production_versions == [ver_6] assert len(_VersionManager._get_all()) == 4 core.stop() # When run with production mode, the \"2.1\" version is used as production with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() ver_7 = _VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_7 == \"2.1\" assert production_versions == [ver_7, ver_6] assert len(_VersionManager._get_all()) == 4 core.stop() # Run with dev mode, the version number is the same as the first dev version to overide it with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_7 = _VersionManager._get_latest_version() assert ver_1 == ver_7 assert len(_VersionManager._get_all()) == 4 core.stop() def test_production_mode_load_all_entities_from_previous_production_version(init_sql_repo): scenario_config = config_scenario() init_managers() with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() production_ver_1 = _VersionManager._get_latest_version() assert _VersionManager._get_production_versions() == [production_ver_1] # 
When run production mode on a new app, a dev version is created alongside assert _VersionManager._get_development_version() not in _VersionManager._get_production_versions() assert len(_VersionManager._get_all()) == 2 scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() production_ver_2 = _VersionManager._get_latest_version() assert _VersionManager._get_production_versions() == [production_ver_1, production_ver_2] assert len(_VersionManager._get_all()) == 3 # All entities from previous production version should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 core.stop() def test_force_override_experiment_version(init_sql_repo): scenario_config = config_scenario() init_managers() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() assert ver_1 == \"1.0\" # When create new experiment version, a development version entity is also created as a placeholder assert len(_VersionManager._get_all()) == 2 # 2 version include 1 experiment 1 development scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() Config.configure_global_app(foo=\"bar\") # Without --taipy-force parameter, a SystemExit will be raised with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() core.stop() # With --taipy-force parameter with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\", \"--taipy-force\"]): core = Core() core.run() core.stop() ver_2 = _VersionManager._get_latest_version() assert ver_2 == \"1.0\" assert len(_VersionManager._get_all()) == 2 # 2 version include 1 experiment 1 development # All entities from previous submit should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 def test_force_override_production_version(init_sql_repo): scenario_config = config_scenario() init_managers() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_1 == \"1.0\" assert production_versions == [\"1.0\"] # When create new production version, a development version entity is also created as a placeholder assert len(_VersionManager._get_all()) == 2 # 
2 version include 1 production 1 development scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() Config.configure_global_app(foo=\"bar\") # Without --taipy-force parameter, a SystemExit will be raised with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() # With --taipy-force parameter with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\", \"--taipy-force\"]): core = Core() core.run() ver_2 = _VersionManager._get_latest_version() assert ver_2 == \"1.0\" assert len(_VersionManager._get_all()) == 2 # 2 version include 1 production 1 development # All entities from previous submit should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 core.stop() def test_modify_config_properties_without_force(caplog, init_sql_repo): scenario_config = config_scenario() init_managers() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() init_config() Config.configure_core(repository_type=\"sql\", repository_properties={\"db_location\": init_sql_repo}) scenario_config_2 = config_scenario_2() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_2) _ScenarioManager._submit(scenario) core.stop() error_message = str(caplog.text) assert 'DATA_NODE \"d3\" was added' in error_message assert 'DATA_NODE \"d0\" was removed' in error_message assert 'DATA_NODE \"d2\" has attribute \"default_path\" modified' in error_message assert 'CORE \"root_folder\" was modified' in error_message assert 'JOB \"mode\" was modified' in error_message assert 'JOB \"max_nb_of_workers\" was modified' in error_message assert 'SCENARIO \"my_scenario\" has attribute \"frequency\" modified' in error_message assert 'SCENARIO \"my_scenario\" has attribute \"tasks\" modified' in error_message assert 'TASK \"my_task\" has attribute \"inputs\" modified' in error_message assert 'TASK \"my_task\" has attribute \"function\" modified' in error_message assert 'TASK \"my_task\" has attribute \"outputs\" modified' in error_message assert 'DATA_NODE \"d2\" has attribute \"has_header\" modified' in error_message assert 'DATA_NODE \"d2\" has attribute \"exposed_type\" modified' in error_message def test_modify_job_configuration_dont_stop_application(caplog, init_sql_repo): scenario_config = config_scenario() init_managers() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): Config.configure_job_executions(mode=\"development\") core = Core() core.run(force_restart=True) scenario = _ScenarioManager._create(scenario_config) jobs = _ScenarioManager._submit(scenario) assert all([job.is_finished() for job in jobs]) core.stop() init_config() Config.configure_core(repository_type=\"sql\", 
repository_properties={\"db_location\": init_sql_repo}) scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) core = Core() core.run(force_restart=True) scenario = _ScenarioManager._create(scenario_config) jobs = _ScenarioManager._submit(scenario) assert_true_after_time(lambda: all(job.is_finished() for job in jobs)) error_message = str(caplog.text) assert 'JOB \"mode\" was modified' in error_message assert 'JOB \"max_nb_of_workers\" was modified' in error_message core.stop() assert_true_after_time(lambda: core._dispatcher is None) def twice(a): return a * 2 def config_scenario(): Config.configure_data_node(id=\"d0\") data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) scenario_config.add_sequences({\"my_sequence\": [task_config]}) return scenario_config def double_twice(a): return a * 2, a * 2 def config_scenario_2(): Config.configure_core( root_folder=\"foo_root\", # Changing the \"storage_folder\" will fail since older versions are stored in older folder # storage_folder=\"foo_storage\", ) Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=5) data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) # Modify properties of \"d2\" data_node_2_config = Config.configure_data_node( id=\"d2\", storage_type=\"csv\", default_path=\"bar.csv\", has_header=False, exposed_type=\"numpy\" ) # Add new data node \"d3\" data_node_3_config = Config.configure_data_node( id=\"d3\", storage_type=\"csv\", default_path=\"baz.csv\", has_header=False, exposed_type=\"numpy\" ) # Modify properties of \"my_task\", including the function and outputs list Config.configure_task(\"my_task\", double_twice, data_node_3_config, [data_node_1_config, data_node_2_config]) task_config_1 = Config.configure_task(\"my_task_1\", double_twice, data_node_3_config, [data_node_2_config]) # Modify properties of \"my_scenario\", where tasks is now my_task_1 scenario_config = Config.configure_scenario(\"my_scenario\", [task_config_1], frequency=Frequency.MONTHLY) scenario_config.add_sequences({\"my_sequence\": [task_config_1]}) return scenario_config "} {"text": "import pytest from src.taipy.core import Core from src.taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.exceptions.exceptions import CoreServiceIsAlreadyRunning from taipy.config import Config from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked class TestCore: def test_run_core_trigger_config_check(self, caplog): Config.configure_data_node(id=\"d0\", storage_type=\"toto\") with pytest.raises(SystemExit): core = Core() core.run() expected_error_message = ( \"`storage_type` field of DataNodeConfig `d0` must be either csv, sql_table,\" \" sql, mongo_collection, pickle, excel, generic, json, parquet, or 
in_memory.\" ' Current value of property `storage_type` is \"toto\".' ) assert expected_error_message in caplog.text core.stop() def test_run_core_as_a_service_development_mode(self): _OrchestratorFactory._dispatcher = None core = Core() assert core._orchestrator is None assert core._dispatcher is None assert _OrchestratorFactory._dispatcher is None core.run() assert core._orchestrator is not None assert core._orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator is not None assert _OrchestratorFactory._orchestrator == _Orchestrator assert core._dispatcher is not None assert isinstance(core._dispatcher, _DevelopmentJobDispatcher) assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) core.stop() def test_run_core_as_a_service_standalone_mode(self): _OrchestratorFactory._dispatcher = None core = Core() assert core._orchestrator is None assert core._dispatcher is None assert _OrchestratorFactory._dispatcher is None Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) core.run() assert core._orchestrator is not None assert core._orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator is not None assert _OrchestratorFactory._orchestrator == _Orchestrator assert core._dispatcher is not None assert isinstance(core._dispatcher, _StandaloneJobDispatcher) assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher) assert core._dispatcher.is_running() assert _OrchestratorFactory._dispatcher.is_running() core.stop() def test_core_service_can_only_be_run_once(self): core_instance_1 = Core() core_instance_2 = Core() core_instance_1.run() with pytest.raises(CoreServiceIsAlreadyRunning): core_instance_1.run() with pytest.raises(CoreServiceIsAlreadyRunning): core_instance_2.run() # Stop the Core service and run it again should work core_instance_1.stop() core_instance_1.run() core_instance_1.stop() core_instance_2.run() core_instance_2.stop() def test_block_config_update_when_core_service_is_running_development_mode(self): _OrchestratorFactory._dispatcher = None core = Core() core.run() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_data_node(id=\"i1\") core.stop() def test_block_config_update_when_core_service_is_running_standalone_mode(self): _OrchestratorFactory._dispatcher = None core = Core() Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) core.run() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_data_node(id=\"i1\") core.stop() "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.common._utils import _load_fct from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.exceptions.exceptions import NonExistingVersion from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.conftest import init_config from tests.core.utils import assert_true_after_time def test_core_cli_no_arguments(): with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() assert not Config.core.force core.stop() def test_core_cli_development_mode(): with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() core.stop() def test_core_cli_dev_mode(): with patch(\"sys.argv\", [\"prog\", \"-dev\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() core.stop() def test_core_cli_experiment_mode(): with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_latest_version() assert not Config.core.force core.stop() def test_core_cli_experiment_mode_with_version(): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"2.1\" assert not Config.core.force core.stop() def test_core_cli_experiment_mode_with_force_version(): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\", \"--taipy-force\"]): init_config() core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"2.1\" assert Config.core.force core.stop() def test_core_cli_production_mode(): with patch(\"sys.argv\", [\"prog\", \"--production\"]): core = Core() core.run() assert Config.core.mode == \"production\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_latest_version() assert not Config.core.force core.stop() def test_dev_mode_clean_all_entities_of_the_latest_version(): scenario_config = config_scenario() # Create a scenario in development mode with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Initial assertion assert len(_DataManager._get_all(version_number=\"all\")) == 2 assert len(_TaskManager._get_all(version_number=\"all\")) == 1 assert len(_SequenceManager._get_all(version_number=\"all\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 1 assert 
len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 1 # Create a new scenario in experiment mode with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Assert number of entities in 2nd version assert len(_DataManager._get_all(version_number=\"all\")) == 4 assert len(_TaskManager._get_all(version_number=\"all\")) == 2 assert len(_SequenceManager._get_all(version_number=\"all\")) == 2 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 2 assert ( len(_CycleManager._get_all(version_number=\"all\")) == 1 ) # No new cycle is created since old dev version use the same cycle assert len(_JobManager._get_all(version_number=\"all\")) == 2 # Run development mode again with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() # The 1st dev version should be deleted run with development mode assert len(_DataManager._get_all(version_number=\"all\")) == 2 assert len(_TaskManager._get_all(version_number=\"all\")) == 1 assert len(_SequenceManager._get_all(version_number=\"all\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 1 assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 1 # Submit new dev version scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Assert number of entities with 1 dev version and 1 exp version assert len(_DataManager._get_all(version_number=\"all\")) == 4 assert len(_TaskManager._get_all(version_number=\"all\")) == 2 assert len(_SequenceManager._get_all(version_number=\"all\")) == 2 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 2 assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 2 # Assert number of entities of the latest version only assert len(_DataManager._get_all(version_number=\"latest\")) == 2 assert len(_TaskManager._get_all(version_number=\"latest\")) == 1 assert len(_SequenceManager._get_all(version_number=\"latest\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"latest\")) == 1 assert len(_JobManager._get_all(version_number=\"latest\")) == 1 # Assert number of entities of the development version only assert len(_DataManager._get_all(version_number=\"development\")) == 2 assert len(_TaskManager._get_all(version_number=\"development\")) == 1 assert len(_SequenceManager._get_all(version_number=\"development\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"development\")) == 1 assert len(_JobManager._get_all(version_number=\"development\")) == 1 # Assert number of entities of an unknown version with pytest.raises(NonExistingVersion): assert _DataManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _TaskManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _SequenceManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _ScenarioManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _JobManager._get_all(version_number=\"foo\") def twice_doppelganger(a): return a * 2 def test_dev_mode_clean_all_entities_when_config_is_alternated(): data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) 
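# Illustrative aside (not part of the original test file): a DataNodeConfig returned by
# Config.configure_data_node() exposes the options it was built with, such as its scope,
# which the default-configuration tests later in this dataset also rely on. This is a
# harmless read-only check on the config created just above; it adds no test behaviour.
assert data_node_1_config.scope == Scope.SCENARIO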
data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice_doppelganger, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) # Create a scenario in development mode with the doppelganger function with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Delete the twice_doppelganger function # and clear cache of _load_fct() to simulate a new run del globals()[\"twice_doppelganger\"] _load_fct.cache_clear() # Create a scenario in development mode with another function scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() def test_version_number_when_switching_mode(): with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() ver_dev = _VersionManager._get_development_version() assert ver_1 == ver_dev assert len(_VersionManager._get_all()) == 1 core.stop() # Run with dev mode, the version number is the same with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_2 = _VersionManager._get_latest_version() assert ver_2 == ver_dev assert len(_VersionManager._get_all()) == 1 core.stop() # When run with experiment mode, a new version is created with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() ver_3 = _VersionManager._get_latest_version() assert ver_3 != ver_dev assert len(_VersionManager._get_all()) == 2 core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() ver_4 = _VersionManager._get_latest_version() assert ver_4 == \"2.1\" assert len(_VersionManager._get_all()) == 3 core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() ver_5 = _VersionManager._get_latest_version() assert ver_5 != ver_3 assert ver_5 != ver_4 assert ver_5 != ver_dev assert len(_VersionManager._get_all()) == 4 core.stop() # When run with production mode, the latest version is used as production with patch(\"sys.argv\", [\"prog\", \"--production\"]): core = Core() core.run() ver_6 = _VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_6 == ver_5 assert production_versions == [ver_6] assert len(_VersionManager._get_all()) == 4 core.stop() # When run with production mode, the \"2.1\" version is used as production with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() ver_7 = _VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_7 == \"2.1\" assert production_versions == [ver_6, ver_7] assert len(_VersionManager._get_all()) == 4 core.stop() # Run with dev mode, the version number is the same as the first dev version to overide it with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_7 = _VersionManager._get_latest_version() assert ver_1 == ver_7 assert len(_VersionManager._get_all()) == 4 core.stop() def test_production_mode_load_all_entities_from_previous_production_version(): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() 
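# Illustrative aside (not part of the original test file): once core.run() has parsed the
# patched sys.argv, the resolved mode is readable on Config.core, as the CLI tests near the
# top of this module assert. This read-only check is a minimal sketch of that behaviour and
# does not alter the production-version scenario exercised below.
assert Config.core.mode == "development"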
scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() production_ver_1 = _VersionManager._get_latest_version() assert _VersionManager._get_production_versions() == [production_ver_1] # When run production mode on a new app, a dev version is created alongside assert _VersionManager._get_development_version() not in _VersionManager._get_production_versions() assert len(_VersionManager._get_all()) == 2 scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() production_ver_2 = _VersionManager._get_latest_version() assert _VersionManager._get_production_versions() == [production_ver_1, production_ver_2] assert len(_VersionManager._get_all()) == 3 # All entities from previous production version should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 core.stop() def test_force_override_experiment_version(): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() assert ver_1 == \"1.0\" # When create new experiment version, a development version entity is also created as a placeholder assert len(_VersionManager._get_all()) == 2 # 2 version include 1 experiment 1 development scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() Config.configure_global_app(foo=\"bar\") # Without --taipy-force parameter, a SystemExit will be raised with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() core.stop() # With --taipy-force parameter with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\", \"--taipy-force\"]): core = Core() core.run() ver_2 = _VersionManager._get_latest_version() assert ver_2 == \"1.0\" assert len(_VersionManager._get_all()) == 2 # 2 version include 1 experiment 1 development # All entities from previous submit should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 core.stop() def test_force_override_production_version(): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() 
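# Illustrative aside (not part of the original test file): mirroring the CLI tests above
# (e.g. test_core_cli_experiment_mode_with_version), the version passed on the command line
# is expected to be reflected on Config.core as well; these read-only checks do not change
# the force-override behaviour exercised next.
assert Config.core.mode == "production"
assert Config.core.version_number == "1.0"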
production_versions = _VersionManager._get_production_versions() assert ver_1 == \"1.0\" assert production_versions == [\"1.0\"] # When create new production version, a development version entity is also created as a placeholder assert len(_VersionManager._get_all()) == 2 # 2 version include 1 production 1 development scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() Config.configure_global_app(foo=\"bar\") # Without --taipy-force parameter, a SystemExit will be raised with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() # With --taipy-force parameter with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\", \"--taipy-force\"]): core = Core() core.run() ver_2 = _VersionManager._get_latest_version() assert ver_2 == \"1.0\" assert len(_VersionManager._get_all()) == 2 # 2 version include 1 production 1 development # All entities from previous submit should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 core.stop() def test_modify_job_configuration_dont_stop_application(caplog): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() Config.configure_job_executions(mode=\"development\") core.run(force_restart=True) scenario = _ScenarioManager._create(scenario_config) jobs = _ScenarioManager._submit(scenario) assert all([job.is_finished() for job in jobs]) core.stop() init_config() scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=5) core.run(force_restart=True) scenario = _ScenarioManager._create(scenario_config) jobs = _ScenarioManager._submit(scenario) assert_true_after_time(lambda: all(job.is_finished() for job in jobs)) error_message = str(caplog.text) assert 'JOB \"mode\" was modified' in error_message assert 'JOB \"max_nb_of_workers\" was modified' in error_message core.stop() def test_modify_config_properties_without_force(caplog): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() init_config() scenario_config_2 = config_scenario_2() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_2) _ScenarioManager._submit(scenario) core.stop() error_message = str(caplog.text) assert 'DATA_NODE \"d3\" was added' in error_message assert 'DATA_NODE \"d0\" was removed' in error_message assert 'DATA_NODE \"d2\" has attribute \"default_path\" modified' in error_message assert 'CORE \"root_folder\" was modified' in error_message assert 'CORE \"repository_type\" was modified' in error_message assert 'JOB \"mode\" was modified' in 
error_message assert 'JOB \"max_nb_of_workers\" was modified' in error_message assert 'SCENARIO \"my_scenario\" has attribute \"frequency\" modified' in error_message assert 'SCENARIO \"my_scenario\" has attribute \"tasks\" modified' in error_message assert 'TASK \"my_task\" has attribute \"inputs\" modified' in error_message assert 'TASK \"my_task\" has attribute \"function\" modified' in error_message assert 'TASK \"my_task\" has attribute \"outputs\" modified' in error_message assert 'DATA_NODE \"d2\" has attribute \"has_header\" modified' in error_message assert 'DATA_NODE \"d2\" has attribute \"exposed_type\" modified' in error_message assert 'CORE \"repository_properties\" was added' in error_message def twice(a): return a * 2 def config_scenario(): Config.configure_data_node(id=\"d0\") data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) scenario_config.add_sequences({\"my_sequence\": [task_config]}) return scenario_config def double_twice(a): return a * 2, a * 2 def config_scenario_2(): Config.configure_core( root_folder=\"foo_root\", # Changing the \"storage_folder\" will fail since older versions are stored in older folder # storage_folder=\"foo_storage\", repository_type=\"bar\", repository_properties={\"foo\": \"bar\"}, ) Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=5) data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) # Modify properties of \"d2\" data_node_2_config = Config.configure_data_node( id=\"d2\", storage_type=\"csv\", default_path=\"bar.csv\", has_header=False, exposed_type=\"numpy\" ) # Add new data node \"d3\" data_node_3_config = Config.configure_data_node( id=\"d3\", storage_type=\"csv\", default_path=\"baz.csv\", has_header=False, exposed_type=\"numpy\" ) # Modify properties of \"my_task\", including the function and outputs list Config.configure_task(\"my_task\", double_twice, data_node_3_config, [data_node_1_config, data_node_2_config]) task_config_1 = Config.configure_task(\"my_task_1\", double_twice, data_node_3_config, [data_node_2_config]) # Modify properties of \"my_scenario\", where tasks is now my_task_1 scenario_config = Config.configure_scenario(\"my_scenario\", [task_config_1], frequency=Frequency.MONTHLY) scenario_config.add_sequences({\"my_sequence\": [task_config_1]}) return scenario_config "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import os import pytest from src.taipy.core import Core from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.csv import CSVDataNode from src.taipy.core.data.excel import ExcelDataNode from src.taipy.core.data.json import JSONDataNode from src.taipy.core.data.parquet import ParquetDataNode from src.taipy.core.data.pickle import PickleDataNode from taipy.config.config import Config def read_backup_file(path): with open(path, \"r\") as f: lines = f.readlines() return lines @pytest.fixture(scope=\"function\", autouse=True) def init_backup_file(): os.environ[\"TAIPY_BACKUP_FILE_PATH\"] = \".taipy_backups\" if os.path.exists(os.environ[\"TAIPY_BACKUP_FILE_PATH\"]): os.remove(os.environ[\"TAIPY_BACKUP_FILE_PATH\"]) yield if os.path.exists(\".taipy_backups\"): os.remove(\".taipy_backups\") del os.environ[\"TAIPY_BACKUP_FILE_PATH\"] backup_file_path = \".taipy_backups\" def test_backup_storage_folder_when_core_run(): core = Core() core.run() backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{Config.core.storage_folder}\\n\"] core.stop() def test_no_new_entry_when_file_is_in_storage_folder(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", path=\"dn_1.pickle\") dn_cfg_2 = Config.configure_data_node(\"dn_cfg_2\") # stored in .data folder dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) dn_1.write(\"DN1_CONTENT\") dn_2.write(\"DN2_CONTENT\") backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{dn_1.path}\\n\"] os.remove(dn_1.path) def test_backup_csv_files(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", \"csv\", path=\"example_1.csv\") dn_cfg_2 = Config.configure_data_node(\"dn_cfg_2\", \"csv\", path=\"example_2.csv\") csv_dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(csv_dn_1, CSVDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_1.path}\\n\"] csv_dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) assert isinstance(csv_dn_2, CSVDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_1.path}\\n\", f\"{csv_dn_2.path}\\n\"] csv_dn_1.path = \"example_3.csv\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_2.path}\\n\", f\"{csv_dn_1.path}\\n\"] csv_dn_2.path = \"example_4.csv\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_1.path}\\n\", f\"{csv_dn_2.path}\\n\"] _DataManager._delete(csv_dn_1.id) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_2.path}\\n\"] csv_dn_3 = _DataManager._create_and_set(dn_cfg_1, None, None) csv_dn_4 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(csv_dn_3, CSVDataNode) assert isinstance(csv_dn_4, CSVDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_2.path}\\n\", f\"{csv_dn_3.path}\\n\", f\"{csv_dn_4.path}\\n\"] csv_dn_4.path = \"example_5.csv\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_2.path}\\n\", f\"{csv_dn_3.path}\\n\", f\"{csv_dn_4.path}\\n\"] _DataManager._delete_all() backup_files = read_backup_file(backup_file_path) assert backup_files == [] def test_backup_excel_files(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", \"excel\", path=\"example_1.xlsx\") dn_cfg_2 = Config.configure_data_node(\"dn_cfg_2\", \"excel\", path=\"example_2.xlsx\") excel_dn_1 = 
_DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(excel_dn_1, ExcelDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_1.path}\\n\"] excel_dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) assert isinstance(excel_dn_2, ExcelDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_1.path}\\n\", f\"{excel_dn_2.path}\\n\"] excel_dn_1.path = \"example_3.excel\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_2.path}\\n\", f\"{excel_dn_1.path}\\n\"] excel_dn_2.path = \"example_4.excel\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_1.path}\\n\", f\"{excel_dn_2.path}\\n\"] _DataManager._delete(excel_dn_1.id) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_2.path}\\n\"] excel_dn_3 = _DataManager._create_and_set(dn_cfg_1, None, None) excel_dn_4 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(excel_dn_3, ExcelDataNode) assert isinstance(excel_dn_4, ExcelDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_2.path}\\n\", f\"{excel_dn_3.path}\\n\", f\"{excel_dn_4.path}\\n\"] excel_dn_4.path = \"example_5.excel\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_2.path}\\n\", f\"{excel_dn_3.path}\\n\", f\"{excel_dn_4.path}\\n\"] _DataManager._delete_all() backup_files = read_backup_file(backup_file_path) assert backup_files == [] def test_backup_pickle_files(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", \"pickle\", path=\"example_1.p\") dn_cfg_2 = Config.configure_data_node(\"dn_cfg_2\", \"pickle\", path=\"example_2.p\") pickle_dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(pickle_dn_1, PickleDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_1.path}\\n\"] pickle_dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) assert isinstance(pickle_dn_2, PickleDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_1.path}\\n\", f\"{pickle_dn_2.path}\\n\"] pickle_dn_1.path = \"example_3.pickle\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_2.path}\\n\", f\"{pickle_dn_1.path}\\n\"] pickle_dn_2.path = \"example_4.pickle\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_1.path}\\n\", f\"{pickle_dn_2.path}\\n\"] _DataManager._delete(pickle_dn_1.id) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_2.path}\\n\"] pickle_dn_3 = _DataManager._create_and_set(dn_cfg_1, None, None) pickle_dn_4 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(pickle_dn_3, PickleDataNode) assert isinstance(pickle_dn_4, PickleDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_2.path}\\n\", f\"{pickle_dn_3.path}\\n\", f\"{pickle_dn_4.path}\\n\"] pickle_dn_4.path = \"example_5.pickle\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_2.path}\\n\", f\"{pickle_dn_3.path}\\n\", f\"{pickle_dn_4.path}\\n\"] _DataManager._delete_all() backup_files = read_backup_file(backup_file_path) assert backup_files == [] def test_backup_json_files(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", \"json\", path=\"example_1.json\") dn_cfg_2 = 
Config.configure_data_node(\"dn_cfg_2\", \"json\", path=\"example_2.json\") json_dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(json_dn_1, JSONDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_1.path}\\n\"] json_dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) assert isinstance(json_dn_2, JSONDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_1.path}\\n\", f\"{json_dn_2.path}\\n\"] json_dn_1.path = \"example_3.json\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_2.path}\\n\", f\"{json_dn_1.path}\\n\"] json_dn_2.path = \"example_4.json\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_1.path}\\n\", f\"{json_dn_2.path}\\n\"] _DataManager._delete(json_dn_1.id) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_2.path}\\n\"] json_dn_3 = _DataManager._create_and_set(dn_cfg_1, None, None) json_dn_4 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(json_dn_3, JSONDataNode) assert isinstance(json_dn_4, JSONDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_2.path}\\n\", f\"{json_dn_3.path}\\n\", f\"{json_dn_4.path}\\n\"] json_dn_4.path = \"example_5.json\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_2.path}\\n\", f\"{json_dn_3.path}\\n\", f\"{json_dn_4.path}\\n\"] _DataManager._delete_all() backup_files = read_backup_file(backup_file_path) assert backup_files == [] def test_backup_parquet_files(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", \"parquet\", path=\"example_1.parquet\") dn_cfg_2 = Config.configure_data_node(\"dn_cfg_2\", \"parquet\", path=\"example_2.parquet\") parquet_dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(parquet_dn_1, ParquetDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_1.path}\\n\"] parquet_dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) assert isinstance(parquet_dn_2, ParquetDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_1.path}\\n\", f\"{parquet_dn_2.path}\\n\"] parquet_dn_1.path = \"example_3.parquet\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_2.path}\\n\", f\"{parquet_dn_1.path}\\n\"] parquet_dn_2.path = \"example_4.parquet\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_1.path}\\n\", f\"{parquet_dn_2.path}\\n\"] _DataManager._delete(parquet_dn_1.id) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_2.path}\\n\"] parquet_dn_3 = _DataManager._create_and_set(dn_cfg_1, None, None) parquet_dn_4 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(parquet_dn_3, ParquetDataNode) assert isinstance(parquet_dn_4, ParquetDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_2.path}\\n\", f\"{parquet_dn_3.path}\\n\", f\"{parquet_dn_4.path}\\n\"] parquet_dn_4.path = \"example_5.parquet\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_2.path}\\n\", f\"{parquet_dn_3.path}\\n\", f\"{parquet_dn_4.path}\\n\"] _DataManager._delete_all() backup_files = read_backup_file(backup_file_path) assert backup_files == [] def test_no_backup_if_no_env_var(): dn_cfg_1 = 
Config.configure_data_node(\"dn_cfg_1\", \"csv\", path=\"example_1.csv\") _DataManager._create_and_set(dn_cfg_1, None, None) "} {"text": "import json import os from datetime import datetime, timedelta import pytest from src.taipy.core._repository._decoder import _Decoder from src.taipy.core._repository._encoder import _Encoder @pytest.fixture(scope=\"function\", autouse=True) def create_and_delete_json_file(): test_json_file = { \"name\": \"testing\", \"date\": datetime(1991, 1, 1), \"default_data\": \"data for testing encoder\", \"validity_period\": timedelta(days=1), } with open(\"data.json\", \"w\") as f: json.dump(test_json_file, f, ensure_ascii=False, indent=4, cls=_Encoder) yield os.unlink(\"data.json\") def test_json_encoder(): with open(\"data.json\") as json_file: data = json.load(json_file) assert data[\"name\"] == \"testing\" assert data[\"default_data\"] == \"data for testing encoder\" assert data[\"date\"] == { \"__type__\": \"Datetime\", \"__value__\": \"1991-01-01T00:00:00\", } assert data[\"date\"].get(\"__type__\") == \"Datetime\" assert data[\"date\"].get(\"__value__\") == \"1991-01-01T00:00:00\" def test_json_decoder(): with open(\"data.json\") as json_file: data = json.load(json_file, cls=_Decoder) assert data[\"name\"] == \"testing\" assert data[\"default_data\"] == \"data for testing encoder\" assert data[\"date\"] == datetime(1991, 1, 1) "} {"text": "import src.taipy.core.taipy as tp from src.taipy.core.config import Config def test_no_special_characters(): scenario_config = Config.configure_scenario(\"scenario_1\") scenario = tp.create_scenario(scenario_config, name=\"martin\") assert scenario.name == \"martin\" scenarios = tp.get_scenarios() assert len(scenarios) == 1 assert scenarios[0].name == \"martin\" def test_many_special_characters(): scenario_config = Config.configure_scenario(\"scenario_1\") special_characters = ( \"!#$%&'()*+,-./:;<=>?@[]^_`\\\\{\" \"\u00bb\u00bc\u00bd\u00be\u00bf\u00c0\u00c1\u00c2\u00c3\u00c4\u00c5\u00c6\u00c7\u00c8\u00c9\u00ca\u00cb\u00cc\u00cd\u00ce\u00cf\u00d0\u00d1\u00d2\u00d3\u00d4\u00d5\u00d6\" \"\u00d7\u00d8\u00d9\u00da\u00db\u00dc\u00dd\u00de\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e5\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f0\u00f1\u00f2\" \"\u00f3\u00f4\u00f5\u00f6\u00f7\u00f8\u00f9\u00fa\u00fb\u00fc\u00fd\u00fe\u00ff\u0100\u0101\u0102\u0103\u0104\u0105\u0106\u0107\u0108\u0109\u010a\u010b\u010c\u010d\u010e\" \"\u010f\u0110\u0111\u0112\u0113\u0114\u0115\u0116\u0117\u0118\u0119\u011a\u011b\u011c\u011d\u011e\u011f\u0120\u0121\u0122\u0123\u0124\u0125\u0126\u0127\u0128\u0129\u012a\" \"\u012b\u012c\u012d\u012e\u012f\u0130\u0132\u0133\u0134\u0135\u0136\u0137\u0138\u0139\u013a\u013b\u013c\u013d\u013e\u013f\u0140\u0141\u0142\u0143\u0144\u0145\u0146\u0147\" \"\u0148\u0149\u014a\u014b\u014c\u014d\u014e\u014f\u0150\u0151\u0152\u0153\u0154\u0155\u0156\u0157\u0158\u0159\u015a\u015b\u015c\u015d\u015e\u015f\u0160\u0161\u0162\u0163\" \"\u0164\u0165\u0166\u0167\u0168\u0169\u016a\u016b\u016c\u016d\u016e\u016f\u0170\u0171\u0172\u0173\u0174\u0175\u0176\u0177\u0178\u0179\u017a\u017b\u017c\u017d\u017e\u017f\" ) scenario = tp.create_scenario(scenario_config, name=special_characters) assert scenario.name == special_characters scenarios = tp.get_scenarios() assert len(scenarios) == 1 assert scenarios[0].name == special_characters "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import json import os import pathlib import shutil import pytest from src.taipy.core.exceptions.exceptions import InvalidExportPath from taipy.config.config import Config from .mocks import MockConverter, MockFSRepository, MockModel, MockObj, MockSQLRepository class TestRepositoriesStorage: @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_save_and_fetch_model(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) fetched_model = r._load(m.id) assert m == fetched_model @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_exists(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) assert r._exists(m.id) assert not r._exists(\"not-existed-model\") @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_get_all(self, mock_repo, params, init_sql_repo): objs = [] r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) r._save(m) _objs = r._load_all() assert len(_objs) == 5 for obj in _objs: assert isinstance(obj, MockObj) assert sorted(objs, key=lambda o: o.id) == sorted(_objs, key=lambda o: o.id) @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_delete_all(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") r._save(m) _models = r._load_all() assert len(_models) == 5 r._delete_all() _models = r._load_all() assert len(_models) == 0 @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_delete_many(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") r._save(m) _models = r._load_all() assert len(_models) == 5 r._delete_many([\"uuid-0\", \"uuid-1\"]) _models = r._load_all() assert len(_models) == 3 @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_search(self, mock_repo, 
params, init_sql_repo): r = mock_repo(**params) r._delete_all() m = MockObj(\"uuid\", \"foo\") r._save(m) m1 = r._search(\"name\", \"bar\") m2 = r._search(\"name\", \"foo\") assert m1 == [] assert m2 == [m] @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) @pytest.mark.parametrize(\"export_path\", [\"tmp\"]) def test_export(self, mock_repo, params, export_path, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) r._export(\"uuid\", export_path) assert pathlib.Path(os.path.join(export_path, \"mock_model/uuid.json\")).exists() with open(os.path.join(export_path, \"mock_model/uuid.json\"), \"r\") as exported_file: exported_data = json.load(exported_file) assert exported_data[\"id\"] == \"uuid\" assert exported_data[\"name\"] == \"foo\" # Export to same location again should work r._export(\"uuid\", export_path) assert pathlib.Path(os.path.join(export_path, \"mock_model/uuid.json\")).exists() if mock_repo == MockFSRepository: with pytest.raises(InvalidExportPath): r._export(\"uuid\", Config.core.storage_folder) shutil.rmtree(export_path, ignore_errors=True) "} {"text": "import dataclasses import pathlib from dataclasses import dataclass from typing import Any, Dict, Optional from sqlalchemy import Column, String, Table from sqlalchemy.dialects import sqlite from sqlalchemy.orm import declarative_base, registry from sqlalchemy.schema import CreateTable from src.taipy.core._repository._abstract_converter import _AbstractConverter from src.taipy.core._repository._filesystem_repository import _FileSystemRepository from src.taipy.core._repository._sql_repository import _SQLRepository from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config class Base: __allow_unmapped__ = True Base = declarative_base(cls=Base) # type: ignore mapper_registry = registry() @dataclass class MockObj: def __init__(self, id: str, name: str, version: Optional[str] = None) -> None: self.id = id self.name = name if version: self._version = version else: self._version = _VersionManager._get_latest_version() @dataclass class MockModel(Base): # type: ignore __table__ = Table( \"mock_model\", mapper_registry.metadata, Column(\"id\", String(200), primary_key=True), Column(\"name\", String(200)), Column(\"version\", String(200)), ) id: str name: str version: str def to_dict(self): return dataclasses.asdict(self) @staticmethod def from_dict(data: Dict[str, Any]): return MockModel(id=data[\"id\"], name=data[\"name\"], version=data[\"version\"]) def _to_entity(self): return MockObj(id=self.id, name=self.name, version=self.version) @classmethod def _from_entity(cls, entity: MockObj): return MockModel(id=entity.id, name=entity.name, version=entity._version) def to_list(self): return [self.id, self.name, self.version] class MockConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, entity): return MockModel(id=entity.id, name=entity.name, version=entity._version) @classmethod def _model_to_entity(cls, model): return MockObj(id=model.id, name=model.name, version=model.version) class MockFSRepository(_FileSystemRepository): def __init__(self, **kwargs): super().__init__(**kwargs) @property def _storage_folder(self) -> pathlib.Path: return pathlib.Path(Config.core.storage_folder) # type: ignore class MockSQLRepository(_SQLRepository): def __init__(self, 
**kwargs): super().__init__(**kwargs) self.db.execute(str(CreateTable(MockModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect()))) "} {"text": "import json from datetime import timedelta from src.taipy.core.common.mongo_default_document import MongoDefaultDocument from taipy.config.common.scope import Scope from taipy.config.config import Config def test_set_default_data_node_configuration(): data_node1 = Config.configure_data_node(id=\"input_data1\") assert data_node1.storage_type == \"pickle\" assert data_node1.scope == Scope.SCENARIO assert data_node1.validity_period is None Config.set_default_data_node_configuration(\"in_memory\", scope=Scope.GLOBAL) data_node2 = Config.configure_data_node(id=\"input_data2\") assert data_node2.storage_type == \"in_memory\" assert data_node2.scope == Scope.GLOBAL assert data_node2.validity_period is None Config.set_default_data_node_configuration(\"csv\") data_node3 = Config.configure_data_node(id=\"input_data3\") assert data_node3.storage_type == \"csv\" assert data_node3.scope == Scope.SCENARIO assert data_node3.validity_period is None Config.set_default_data_node_configuration(\"json\", validity_period=timedelta(1)) data_node4 = Config.configure_data_node(id=\"input_data4\") assert data_node4.storage_type == \"json\" assert data_node4.scope == Scope.SCENARIO assert data_node4.validity_period == timedelta(1) def test_set_default_data_node_configuration_replace_old_default_config(): Config.set_default_data_node_configuration( \"in_memory\", prop1=\"1\", prop2=\"2\", prop3=\"3\", ) dn1 = Config.configure_data_node(id=\"dn1\") assert len(dn1.properties) == 3 Config.set_default_data_node_configuration( \"csv\", prop4=\"4\", prop5=\"5\", prop6=\"6\", ) dn2 = Config.configure_data_node(id=\"dn2\") assert dn2.storage_type == \"csv\" assert len(dn2.properties) == 6 # encoding, exposed_type, and has_header too assert dn2.prop4 == \"4\" assert dn2.prop5 == \"5\" assert dn2.prop6 == \"6\" assert dn2.prop1 is None assert dn2.prop2 is None assert dn2.prop3 is None def test_config_storage_type_different_from_default_data_node(): Config.set_default_data_node_configuration( storage_type=\"pickle\", custom_property={\"foo\": \"bar\"}, scope=Scope.GLOBAL, ) # Config a datanode with specific \"storage_type\" different than \"pickle\" # should ignore the default datanode csv_dn = Config.configure_data_node(id=\"csv_dn\", storage_type=\"csv\") assert len(csv_dn.properties) == 3 # encoding, exposed_type, and has_header assert csv_dn.properties.get(\"custom_property\") is None assert csv_dn.scope == Scope.SCENARIO def test_set_default_csv_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"csv\", default_path=\"default.csv\", has_header=False, exposed_type=\"numpy\", scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"csv\" assert dn1.scope == Scope.GLOBAL assert dn1.default_path == \"default.csv\" assert dn1.has_header is False assert dn1.exposed_type == \"numpy\" assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node(id=\"dn2\", default_path=\"dn2.csv\") assert dn2.storage_type == \"csv\" assert dn2.default_path == \"dn2.csv\" assert dn2.has_header is False assert dn2.exposed_type == \"numpy\" assert dn2.scope == Scope.GLOBAL assert 
dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"csv\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"csv\", default_path=\"dn3.csv\", scope=Scope.SCENARIO, validity_period=timedelta(1), ) assert dn3.storage_type == \"csv\" assert dn3.default_path == \"dn3.csv\" assert dn3.has_header is False assert dn3.exposed_type == \"numpy\" assert dn3.scope == Scope.SCENARIO assert dn3.validity_period == timedelta(1) def test_set_default_json_data_node_configuration(): class MyCustomEncoder(json.JSONEncoder): ... class MyCustomDecoder(json.JSONDecoder): ... Config.set_default_data_node_configuration( storage_type=\"json\", default_path=\"default.json\", encoder=MyCustomEncoder, scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"json\" assert dn1.default_path == \"default.json\" assert dn1.encoder == MyCustomEncoder assert dn1.decoder is None assert dn1.scope == Scope.GLOBAL assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node(id=\"dn2\", default_path=\"dn2.json\") assert dn2.storage_type == \"json\" assert dn2.default_path == \"dn2.json\" assert dn2.encoder == MyCustomEncoder assert dn2.decoder is None assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"json\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"json\", default_path=\"dn3.json\", decoder=MyCustomDecoder, validity_period=timedelta(1), ) assert dn3.storage_type == \"json\" assert dn3.default_path == \"dn3.json\" assert dn3.encoder == MyCustomEncoder assert dn3.decoder == MyCustomDecoder assert dn3.scope == Scope.GLOBAL assert dn3.validity_period == timedelta(1) def test_set_default_parquet_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"parquet\", default_path=\"default.parquet\", compression=\"gzip\", exposed_type=\"numpy\", scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"parquet\" assert dn1.default_path == \"default.parquet\" assert dn1.engine == \"pyarrow\" assert dn1.compression == \"gzip\" assert dn1.read_kwargs is None assert dn1.write_kwargs is None assert dn1.exposed_type == \"numpy\" assert dn1.scope == Scope.GLOBAL assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node( id=\"dn2\", default_path=\"dn2.parquet\", engine=\"fastparquet\", ) assert dn2.storage_type == \"parquet\" assert dn2.default_path == \"dn2.parquet\" assert dn2.engine == \"fastparquet\" assert dn2.compression == \"gzip\" assert dn2.read_kwargs is None assert dn2.write_kwargs is None assert dn2.exposed_type == \"numpy\" assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"parquet\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"parquet\", default_path=\"dn3.parquet\", read_kwargs={\"filter\": 
\"foo\"}, scope=Scope.SCENARIO, validity_period=timedelta(1), ) assert dn3.storage_type == \"parquet\" assert dn3.default_path == \"dn3.parquet\" assert dn3.engine == \"pyarrow\" assert dn3.compression == \"gzip\" assert dn3.read_kwargs == {\"filter\": \"foo\"} assert dn3.write_kwargs is None assert dn3.exposed_type == \"numpy\" assert dn3.scope == Scope.SCENARIO assert dn3.validity_period == timedelta(1) def test_set_default_excel_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"excel\", default_path=\"default.xlsx\", has_header=False, exposed_type=\"numpy\", scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"excel\" assert dn1.scope == Scope.GLOBAL assert dn1.default_path == \"default.xlsx\" assert dn1.has_header is False assert dn1.sheet_name is None assert dn1.exposed_type == \"numpy\" assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node(id=\"dn2\", default_path=\"dn2.xlsx\", sheet_name=\"sheet_1\") assert dn2.storage_type == \"excel\" assert dn2.default_path == \"dn2.xlsx\" assert dn2.has_header is False assert dn2.sheet_name == \"sheet_1\" assert dn2.exposed_type == \"numpy\" assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"excel\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"excel\", default_path=\"dn3.xlsx\", scope=Scope.SCENARIO, validity_period=timedelta(1), ) assert dn3.storage_type == \"excel\" assert dn3.default_path == \"dn3.xlsx\" assert dn3.has_header is False assert dn3.sheet_name is None assert dn3.exposed_type == \"numpy\" assert dn3.scope == Scope.SCENARIO assert dn3.validity_period == timedelta(1) def test_set_default_pickle_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"pickle\", default_data=1, exposed_type=\"numpy\", scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"pickle\" assert dn1.scope == Scope.GLOBAL assert dn1.default_path is None assert dn1.default_data == 1 assert dn1.exposed_type == \"numpy\" assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node(id=\"dn2\", default_path=\"dn2.pkl\", default_data=2) assert dn2.storage_type == \"pickle\" assert dn2.default_path == \"dn2.pkl\" assert dn2.default_data == 2 assert dn2.exposed_type == \"numpy\" assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"pickle\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"pickle\", default_path=\"dn3.pkl\", scope=Scope.SCENARIO, validity_period=timedelta(1), ) assert dn3.storage_type == \"pickle\" assert dn3.default_path == \"dn3.pkl\" assert dn3.default_data == 1 assert dn3.exposed_type == \"numpy\" assert dn3.scope == Scope.SCENARIO assert dn3.validity_period == timedelta(1) def test_set_default_sql_table_data_node_configuration(): 
Config.set_default_data_node_configuration( storage_type=\"sql_table\", db_username=\"default_user\", db_password=\"default_pwd\", db_name=\"default_db_name\", db_engine=\"mssql\", table_name=\"default_table\", db_port=1010, db_host=\"default_host\", db_driver=\"default server\", db_extra_args={\"default\": \"default\"}, scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"sql_table\" assert dn1.db_username == \"default_user\" assert dn1.db_password == \"default_pwd\" assert dn1.db_name == \"default_db_name\" assert dn1.db_engine == \"mssql\" assert dn1.table_name == \"default_table\" assert dn1.db_port == 1010 assert dn1.db_host == \"default_host\" assert dn1.db_driver == \"default server\" assert dn1.db_extra_args == {\"default\": \"default\"} assert dn1.scope == Scope.GLOBAL assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node( id=\"dn2\", table_name=\"table_2\", db_port=2020, db_host=\"host_2\", ) assert dn2.storage_type == \"sql_table\" assert dn2.db_username == \"default_user\" assert dn2.db_password == \"default_pwd\" assert dn2.db_name == \"default_db_name\" assert dn2.db_engine == \"mssql\" assert dn2.table_name == \"table_2\" assert dn2.db_port == 2020 assert dn2.db_host == \"host_2\" assert dn2.db_driver == \"default server\" assert dn2.db_extra_args == {\"default\": \"default\"} assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"sql_table\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"sql_table\", db_username=\"user_3\", db_password=\"pwd_3\", db_name=\"db_3\", db_engine=\"postgresql\", table_name=\"table_3\", validity_period=timedelta(1), ) assert dn3.storage_type == \"sql_table\" assert dn3.db_username == \"user_3\" assert dn3.db_password == \"pwd_3\" assert dn3.db_name == \"db_3\" assert dn3.db_engine == \"postgresql\" assert dn3.table_name == \"table_3\" assert dn3.db_port == 1010 assert dn3.db_host == \"default_host\" assert dn3.db_driver == \"default server\" assert dn3.db_extra_args == {\"default\": \"default\"} assert dn3.scope == Scope.GLOBAL assert dn3.validity_period == timedelta(1) def test_set_default_sql_data_node_configuration(): def query_builder(): ... 
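# Defaults for "sql" data nodes: the connection settings, the read query, and the shared
# query_builder (used for both write_query_builder and append_query_builder) cascade to
# the data nodes configured below; per-node values such as dn3's read query take precedence.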
Config.set_default_data_node_configuration( storage_type=\"sql\", db_username=\"default_user\", db_password=\"default_pwd\", db_name=\"default_db_name\", db_engine=\"mssql\", read_query=\"SELECT * FROM default_table\", write_query_builder=query_builder, append_query_builder=query_builder, db_port=1010, db_host=\"default_host\", db_driver=\"default server\", db_extra_args={\"default\": \"default\"}, scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"sql\" assert dn1.db_username == \"default_user\" assert dn1.db_password == \"default_pwd\" assert dn1.db_name == \"default_db_name\" assert dn1.db_engine == \"mssql\" assert dn1.read_query == \"SELECT * FROM default_table\" assert dn1.write_query_builder == query_builder assert dn1.append_query_builder == query_builder assert dn1.db_port == 1010 assert dn1.db_host == \"default_host\" assert dn1.db_driver == \"default server\" assert dn1.db_extra_args == {\"default\": \"default\"} assert dn1.scope == Scope.GLOBAL assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node( id=\"dn2\", table_name=\"table_2\", db_port=2020, db_host=\"host_2\", read_query=\"SELECT * FROM table_2\" ) assert dn2.storage_type == \"sql\" assert dn2.db_username == \"default_user\" assert dn2.db_password == \"default_pwd\" assert dn2.db_name == \"default_db_name\" assert dn2.db_engine == \"mssql\" assert dn2.read_query == \"SELECT * FROM table_2\" assert dn2.write_query_builder == query_builder assert dn2.append_query_builder == query_builder assert dn2.db_port == 2020 assert dn2.db_host == \"host_2\" assert dn2.db_driver == \"default server\" assert dn2.db_extra_args == {\"default\": \"default\"} assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"sql\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"sql\", db_username=\"user_3\", db_password=\"pwd_3\", db_name=\"db_3\", db_engine=\"postgresql\", read_query=\"SELECT * FROM table_3\", write_query_builder=query_builder, validity_period=timedelta(1), ) assert dn3.storage_type == \"sql\" assert dn3.db_username == \"user_3\" assert dn3.db_password == \"pwd_3\" assert dn3.db_name == \"db_3\" assert dn3.db_engine == \"postgresql\" assert dn3.read_query == \"SELECT * FROM table_3\" assert dn3.write_query_builder == query_builder assert dn3.append_query_builder == query_builder assert dn3.db_port == 1010 assert dn3.db_host == \"default_host\" assert dn3.db_driver == \"default server\" assert dn3.db_extra_args == {\"default\": \"default\"} assert dn3.scope == Scope.GLOBAL assert dn3.validity_period == timedelta(1) def test_set_default_mongo_collection_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"mongo_collection\", db_name=\"default_db_name\", collection_name=\"default_collection\", db_port=1010, db_host=\"default_host\", db_driver=\"default server\", db_extra_args={\"default\": \"default\"}, scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"mongo_collection\" assert dn1.db_username == \"\" assert dn1.db_password 
== \"\" assert dn1.db_name == \"default_db_name\" assert dn1.collection_name == \"default_collection\" assert dn1.custom_document == MongoDefaultDocument assert dn1.db_host == \"default_host\" assert dn1.db_port == 1010 assert dn1.db_driver == \"default server\" assert dn1.db_extra_args == {\"default\": \"default\"} assert dn1.scope == Scope.GLOBAL assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node( id=\"dn2\", collection_name=\"collection_2\", db_port=2020, db_host=\"host_2\", ) assert dn2.storage_type == \"mongo_collection\" assert dn2.db_username == \"\" assert dn2.db_password == \"\" assert dn2.db_name == \"default_db_name\" assert dn2.collection_name == \"collection_2\" assert dn2.custom_document == MongoDefaultDocument assert dn2.db_host == \"host_2\" assert dn2.db_port == 2020 assert dn2.db_driver == \"default server\" assert dn2.db_extra_args == {\"default\": \"default\"} assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"mongo_collection\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"mongo_collection\", db_name=\"db_3\", collection_name=\"collection_3\", db_username=\"user_3\", db_password=\"pwd_3\", validity_period=timedelta(1), ) assert dn3.storage_type == \"mongo_collection\" assert dn3.db_username == \"user_3\" assert dn3.db_password == \"pwd_3\" assert dn3.db_name == \"db_3\" assert dn3.collection_name == \"collection_3\" assert dn3.custom_document == MongoDefaultDocument assert dn3.db_port == 1010 assert dn3.db_host == \"default_host\" assert dn3.db_driver == \"default server\" assert dn3.db_extra_args == {\"default\": \"default\"} assert dn3.scope == Scope.GLOBAL assert dn3.validity_period == timedelta(1) "} {"text": "import os from unittest import mock from src.taipy.core.config import DataNodeConfig from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def _configure_task_in_toml(): return NamedTemporaryFile( content=\"\"\" [TAIPY] [DATA_NODE.input] [DATA_NODE.output] [TASK.tasks1] function = \"builtins.print:function\" inputs = [ \"input:SECTION\",] outputs = [ \"output:SECTION\",] \"\"\" ) def _check_data_nodes_instance(dn_id, task_id): \"\"\"Check if the data node instance in the task config correctly points to the Config._applied_config, not the Config._python_config or the Config._file_config \"\"\" dn_config_applied_instance = Config.data_nodes[dn_id] for dn in Config.tasks[task_id].inputs: if dn.id == dn_id: dn_config_instance_via_task = dn for dn in Config.tasks[task_id].outputs: if dn.id == dn_id: dn_config_instance_via_task = dn dn_config_python_instance = None if Config._python_config._sections.get(\"DATA_NODE\", None): dn_config_python_instance = Config._python_config._sections[\"DATA_NODE\"][dn_id] dn_config_file_instance = None if Config._file_config._sections.get(\"DATA_NODE\", None): dn_config_file_instance = Config._file_config._sections[\"DATA_NODE\"][dn_id] if dn_config_python_instance: assert dn_config_python_instance.scope is None assert dn_config_python_instance is not dn_config_applied_instance assert dn_config_python_instance is not dn_config_instance_via_task if dn_config_file_instance: assert dn_config_file_instance.scope is None assert dn_config_file_instance is not dn_config_applied_instance 
assert dn_config_file_instance is not dn_config_instance_via_task assert dn_config_applied_instance.scope == DataNodeConfig._DEFAULT_SCOPE assert dn_config_instance_via_task is dn_config_applied_instance def test_data_node_instance_when_configure_task_in_python(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") Config.configure_task(\"tasks1\", print, input_config, output_config) _check_data_nodes_instance(\"input\", \"tasks1\") _check_data_nodes_instance(\"output\", \"tasks1\") def test_data_node_instance_when_configure_task_by_loading_toml(): toml_config = _configure_task_in_toml() Config.load(toml_config.filename) _check_data_nodes_instance(\"input\", \"tasks1\") _check_data_nodes_instance(\"output\", \"tasks1\") def test_data_node_instance_when_configure_task_by_overriding_toml(): toml_config = _configure_task_in_toml() Config.override(toml_config.filename) _check_data_nodes_instance(\"input\", \"tasks1\") _check_data_nodes_instance(\"output\", \"tasks1\") def test_task_config_creation(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") task_config = Config.configure_task(\"tasks1\", print, input_config, output_config) assert not task_config.skippable assert list(Config.tasks) == [\"default\", task_config.id] task2 = Config.configure_task(\"tasks2\", print, input_config, output_config, skippable=True) assert task2.skippable assert list(Config.tasks) == [\"default\", task_config.id, task2.id] def test_task_count(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") Config.configure_task(\"tasks1\", print, input_config, output_config) assert len(Config.tasks) == 2 Config.configure_task(\"tasks2\", print, input_config, output_config) assert len(Config.tasks) == 3 Config.configure_task(\"tasks3\", print, input_config, output_config) assert len(Config.tasks) == 4 def test_task_getitem(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") task_id = \"tasks1\" task_cfg = Config.configure_task(task_id, print, input_config, output_config) assert Config.tasks[task_id].id == task_cfg.id assert Config.tasks[task_id].properties == task_cfg.properties assert Config.tasks[task_id].function == task_cfg.function assert Config.tasks[task_id].input_configs == task_cfg.input_configs assert Config.tasks[task_id].output_configs == task_cfg.output_configs assert Config.tasks[task_id].skippable == task_cfg.skippable def test_task_creation_no_duplication(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") Config.configure_task(\"tasks1\", print, input_config, output_config) assert len(Config.tasks) == 2 Config.configure_task(\"tasks1\", print, input_config, output_config) assert len(Config.tasks) == 2 def test_task_config_with_env_variable_value(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") with mock.patch.dict(os.environ, {\"FOO\": \"plop\", \"BAR\": \"baz\"}): Config.configure_task(\"task_name\", print, input_config, output_config, prop=\"ENV[BAR]\") assert Config.tasks[\"task_name\"].prop == \"baz\" assert Config.tasks[\"task_name\"].properties[\"prop\"] == \"baz\" assert Config.tasks[\"task_name\"]._properties[\"prop\"] == \"ENV[BAR]\" def test_clean_config(): dn1 = Config.configure_data_node(\"dn1\") dn2 = Config.configure_data_node(\"dn2\") task1_config 
= Config.configure_task(\"id1\", print, dn1, dn2) task2_config = Config.configure_task(\"id2\", print, dn2, dn1) assert Config.tasks[\"id1\"] is task1_config assert Config.tasks[\"id2\"] is task2_config task1_config._clean() task2_config._clean() # Check if the instance before and after _clean() is the same assert Config.tasks[\"id1\"] is task1_config assert Config.tasks[\"id2\"] is task2_config assert task1_config.id == \"id1\" assert task2_config.id == \"id2\" assert task1_config.function is task1_config.function is None assert task1_config.inputs == task1_config.inputs == [] assert task1_config.input_configs == task1_config.input_configs == [] assert task1_config.outputs == task1_config.outputs == [] assert task1_config.output_configs == task1_config.output_configs == [] assert task1_config.skippable is task1_config.skippable is False assert task1_config.properties == task1_config.properties == {} def test_deprecated_cacheable_attribute_remains_compatible(): dn_1_id = \"dn_1_id\" dn_1_config = Config.configure_data_node( id=dn_1_id, storage_type=\"pickle\", cacheable=False, scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_1_id].id == dn_1_id assert Config.data_nodes[dn_1_id].storage_type == \"pickle\" assert Config.data_nodes[dn_1_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": False} assert not Config.data_nodes[dn_1_id].cacheable dn_1_config.cacheable = True assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_1_id].cacheable dn_2_id = \"dn_2_id\" dn_2_config = Config.configure_data_node( id=dn_2_id, storage_type=\"pickle\", cacheable=True, scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_2_id].id == dn_2_id assert Config.data_nodes[dn_2_id].storage_type == \"pickle\" assert Config.data_nodes[dn_2_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_2_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_2_id].cacheable dn_2_config.cacheable = False assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": False} assert not Config.data_nodes[dn_1_id].cacheable dn_3_id = \"dn_3_id\" dn_3_config = Config.configure_data_node( id=dn_3_id, storage_type=\"pickle\", scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_3_id].id == dn_3_id assert Config.data_nodes[dn_3_id].storage_type == \"pickle\" assert Config.data_nodes[dn_3_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_3_id].properties == {} assert not Config.data_nodes[dn_3_id].cacheable dn_3_config.cacheable = True assert Config.data_nodes[dn_3_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_3_id].cacheable "} {"text": "import pytest from taipy.config.config import Config def test_job_config(): assert Config.job_config.mode == \"development\" job_c = Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) assert job_c.mode == \"standalone\" assert job_c.max_nb_of_workers == 2 assert Config.job_config.mode == \"standalone\" assert Config.job_config.max_nb_of_workers == 2 Config.configure_job_executions(foo=\"bar\") assert Config.job_config.foo == \"bar\" def test_clean_config(): job_config = Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2, prop=\"foo\") assert Config.job_config is job_config job_config._clean() # Check if the instance before and after _clean() is the same assert Config.job_config is job_config assert job_config.mode == \"development\" assert job_config._config == {\"max_nb_of_workers\": 1} assert job_config.properties == {} "} 
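A minimal usage sketch of the two Config entry points exercised by the tests above (default data node configuration and job execution configuration). It is illustrative rather than part of the test suite; the "my_input" id is a hypothetical name, and only calls shown in the tests are used.

from taipy.config.common.scope import Scope
from taipy.config.config import Config

# Register project-wide data node defaults; later configure_data_node calls inherit them
# unless a property is overridden (cf. test_set_default_csv_data_node_configuration).
Config.set_default_data_node_configuration(storage_type="csv", has_header=False, scope=Scope.GLOBAL)
my_input = Config.configure_data_node(id="my_input")  # hypothetical id
assert my_input.storage_type == "csv"
assert my_input.has_header is False
assert my_input.scope == Scope.GLOBAL

# Switch job execution from the default "development" mode to standalone with two worker
# processes, mirroring the values asserted in test_job_config.
Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)
assert Config.job_config.mode == "standalone"
assert Config.job_config.max_nb_of_workers == 2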
{"text": "import datetime import json from src.taipy.core.config import CoreSection, DataNodeConfig, JobConfig, MigrationConfig, ScenarioConfig, TaskConfig from taipy.config import Config from taipy.config._serializer._json_serializer import _JsonSerializer from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from tests.core.utils.named_temporary_file import NamedTemporaryFile def multiply(a): return a * 2 def migrate_csv_path(dn): dn.path = \"foo.csv\" def compare_function(*data_node_results): comparison_result = {} current_result_index = 0 for current_result in data_node_results: comparison_result[current_result_index] = {} next_result_index = 0 for next_result in data_node_results: print(f\"comparing result {current_result_index} with result {next_result_index}\") comparison_result[current_result_index][next_result_index] = next_result - current_result next_result_index += 1 current_result_index += 1 return comparison_result class CustomClass: a = None b = None class CustomEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, datetime): result = {\"__type__\": \"Datetime\", \"__value__\": o.isoformat()} else: result = json.JSONEncoder.default(self, o) return result class CustomDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def object_hook(self, source): if source.get(\"__type__\") == \"Datetime\": return datetime.fromisoformat(source.get(\"__value__\")) else: return source def config_test_scenario(): test_csv_dn_cfg = Config.configure_csv_data_node( id=\"test_csv_dn\", path=\"./test.csv\", exposed_type=CustomClass, scope=Scope.GLOBAL, validity_period=datetime.timedelta(1), ) test_json_dn_cfg = Config.configure_json_data_node( id=\"test_json_dn\", default_path=\"./test.json\", encoder=CustomEncoder, decoder=CustomDecoder, ) test_pickle_dn_cfg = Config.configure_pickle_data_node( id=\"test_pickle_dn\", path=\"./test.p\", scope=Scope.SCENARIO, validity_period=datetime.timedelta(1), ) test_task_cfg = Config.configure_task( id=\"test_task\", input=test_csv_dn_cfg, function=multiply, output=test_json_dn_cfg ) test_scenario_cfg = Config.configure_scenario( id=\"test_scenario\", task_configs=[test_task_cfg], additional_data_node_configs=[test_pickle_dn_cfg], comparators={test_json_dn_cfg.id: compare_function}, frequency=Frequency.DAILY, ) test_scenario_cfg.add_sequences({\"sequence1\": [test_task_cfg]}) Config.add_migration_function(\"1.0\", test_csv_dn_cfg, migrate_csv_path) return test_scenario_cfg def test_read_write_toml_configuration_file(): expected_toml_config = f\"\"\" [TAIPY] [JOB] mode = \"development\" max_nb_of_workers = \"1:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{CoreSection._CURRENT_CORE_VERSION}\" [DATA_NODE.default] storage_type = \"pickle\" scope = \"SCENARIO:SCOPE\" [DATA_NODE.test_csv_dn] storage_type = \"csv\" scope = \"GLOBAL:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" path = \"./test.csv\" exposed_type = \"tests.core.config.test_config_serialization.CustomClass:class\" encoding = \"utf-8\" has_header = \"True:bool\" [DATA_NODE.test_json_dn] storage_type = \"json\" scope = \"SCENARIO:SCOPE\" default_path = \"./test.json\" encoder = \"tests.core.config.test_config_serialization.CustomEncoder:class\" decoder = 
\"tests.core.config.test_config_serialization.CustomDecoder:class\" encoding = \"utf-8\" [DATA_NODE.test_pickle_dn] storage_type = \"pickle\" scope = \"SCENARIO:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" path = \"./test.p\" [TASK.default] inputs = [] outputs = [] skippable = \"False:bool\" [TASK.test_task] function = \"tests.core.config.test_config_serialization.multiply:function\" inputs = [ \"test_csv_dn:SECTION\",] outputs = [ \"test_json_dn:SECTION\",] skippable = \"False:bool\" [SCENARIO.default] tasks = [] additional_data_nodes = [] [SCENARIO.test_scenario] tasks = [ \"test_task:SECTION\",] additional_data_nodes = [ \"test_pickle_dn:SECTION\",] frequency = \"DAILY:FREQUENCY\" [VERSION_MIGRATION.migration_fcts.\"1.0\"] test_csv_dn = \"tests.core.config.test_config_serialization.migrate_csv_path:function\" [SCENARIO.default.comparators] [SCENARIO.default.sequences] [SCENARIO.test_scenario.comparators] test_json_dn = [ \"tests.core.config.test_config_serialization.compare_function:function\",] [SCENARIO.test_scenario.sequences] sequence1 = [ \"test_task:SECTION\",] \"\"\".strip() config_test_scenario() tf = NamedTemporaryFile() Config.backup(tf.filename) actual_config = tf.read().strip() assert actual_config == expected_toml_config Config.load(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == expected_toml_config assert Config.unique_sections is not None assert len(Config.unique_sections) == 3 assert Config.unique_sections[JobConfig.name].mode == \"development\" assert Config.unique_sections[JobConfig.name].max_nb_of_workers == 1 assert Config.unique_sections[MigrationConfig.name].migration_fcts[\"1.0\"] == {\"test_csv_dn\": migrate_csv_path} assert Config.sections is not None assert len(Config.sections) == 3 assert Config.sections[DataNodeConfig.name] is not None assert len(Config.sections[DataNodeConfig.name]) == 4 assert Config.sections[DataNodeConfig.name][\"default\"] is not None assert Config.sections[DataNodeConfig.name][\"default\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"default\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].storage_type == \"csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].scope == Scope.GLOBAL assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].has_header is True assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].path == \"./test.csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].encoding == \"utf-8\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].exposed_type == CustomClass assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].storage_type == \"json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].default_path == \"./test.json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoding == \"utf-8\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoder == CustomEncoder assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].decoder == CustomDecoder assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].scope == Scope.SCENARIO assert 
Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].path == \"./test.p\" assert Config.sections[TaskConfig.name] is not None assert len(Config.sections[TaskConfig.name]) == 2 assert Config.sections[TaskConfig.name][\"default\"] is not None assert Config.sections[TaskConfig.name][\"default\"].inputs == [] assert Config.sections[TaskConfig.name][\"default\"].outputs == [] assert Config.sections[TaskConfig.name][\"default\"].function is None assert not Config.sections[TaskConfig.name][\"default\"].skippable assert [inp.id for inp in Config.sections[TaskConfig.name][\"test_task\"].inputs] == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id ] assert [outp.id for outp in Config.sections[TaskConfig.name][\"test_task\"].outputs] == [ Config.sections[DataNodeConfig.name][\"test_json_dn\"].id ] assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[ScenarioConfig.name] is not None assert len(Config.sections[ScenarioConfig.name]) == 2 assert Config.sections[ScenarioConfig.name][\"default\"] is not None assert Config.sections[ScenarioConfig.name][\"default\"].tasks == [] assert Config.sections[ScenarioConfig.name][\"default\"].additional_data_nodes == [] assert Config.sections[ScenarioConfig.name][\"default\"].data_nodes == [] assert len(Config.sections[ScenarioConfig.name][\"default\"].comparators) == 0 assert [task.id for task in Config.sections[ScenarioConfig.name][\"test_scenario\"].tasks] == [ Config.sections[TaskConfig.name][\"test_task\"].id ] assert [ additional_data_node.id for additional_data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].additional_data_nodes ] == [Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].id] assert sorted([data_node.id for data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].data_nodes]) == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id, Config.sections[DataNodeConfig.name][\"test_json_dn\"].id, Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].id, ] sequences = {} for sequence_name, sequence_tasks in Config.sections[ScenarioConfig.name][\"test_scenario\"].sequences.items(): sequences[sequence_name] = [task.id for task in sequence_tasks] assert sequences == {\"sequence1\": [Config.sections[TaskConfig.name][\"test_task\"].id]} assert dict(Config.sections[ScenarioConfig.name][\"test_scenario\"].comparators) == { \"test_json_dn\": [compare_function] } def test_read_write_json_configuration_file(): expected_json_config = ( \"\"\"{ \"TAIPY\": {}, \"JOB\": { \"mode\": \"development\", \"max_nb_of_workers\": \"1:int\" }, \"CORE\": { \"root_folder\": \"./taipy/\", \"storage_folder\": \".data/\", \"repository_type\": \"filesystem\", \"read_entity_retry\": \"0:int\", \"mode\": \"development\", \"version_number\": \"\", \"force\": \"False:bool\",\"\"\" + f\"\"\" \"core_version\": \"{CoreSection._CURRENT_CORE_VERSION}\" \"\"\" + \"\"\" }, \"VERSION_MIGRATION\": { \"migration_fcts\": { \"1.0\": { \"test_csv_dn\": \"tests.core.config.test_config_serialization.migrate_csv_path:function\" } } }, \"DATA_NODE\": { \"default\": { \"storage_type\": \"pickle\", \"scope\": \"SCENARIO:SCOPE\" }, \"test_csv_dn\": { \"storage_type\": \"csv\", \"scope\": \"GLOBAL:SCOPE\", \"validity_period\": \"1d0h0m0s:timedelta\", \"path\": \"./test.csv\", \"exposed_type\": 
\"tests.core.config.test_config_serialization.CustomClass:class\", \"encoding\": \"utf-8\", \"has_header\": \"True:bool\" }, \"test_json_dn\": { \"storage_type\": \"json\", \"scope\": \"SCENARIO:SCOPE\", \"default_path\": \"./test.json\", \"encoder\": \"tests.core.config.test_config_serialization.CustomEncoder:class\", \"decoder\": \"tests.core.config.test_config_serialization.CustomDecoder:class\", \"encoding\": \"utf-8\" }, \"test_pickle_dn\": { \"storage_type\": \"pickle\", \"scope\": \"SCENARIO:SCOPE\", \"validity_period\": \"1d0h0m0s:timedelta\", \"path\": \"./test.p\" } }, \"TASK\": { \"default\": { \"function\": null, \"inputs\": [], \"outputs\": [], \"skippable\": \"False:bool\" }, \"test_task\": { \"function\": \"tests.core.config.test_config_serialization.multiply:function\", \"inputs\": [ \"test_csv_dn:SECTION\" ], \"outputs\": [ \"test_json_dn:SECTION\" ], \"skippable\": \"False:bool\" } }, \"SCENARIO\": { \"default\": { \"comparators\": {}, \"tasks\": [], \"additional_data_nodes\": [], \"frequency\": null, \"sequences\": {} }, \"test_scenario\": { \"comparators\": { \"test_json_dn\": [ \"tests.core.config.test_config_serialization.compare_function:function\" ] }, \"tasks\": [ \"test_task:SECTION\" ], \"additional_data_nodes\": [ \"test_pickle_dn:SECTION\" ], \"frequency\": \"DAILY:FREQUENCY\", \"sequences\": { \"sequence1\": [ \"test_task:SECTION\" ] } } } } \"\"\".strip() ) Config._serializer = _JsonSerializer() config_test_scenario() tf = NamedTemporaryFile() Config.backup(tf.filename) actual_config = tf.read().strip() assert actual_config == expected_json_config Config.load(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == expected_json_config assert Config.unique_sections is not None assert len(Config.unique_sections) == 3 assert Config.unique_sections[JobConfig.name].mode == \"development\" assert Config.unique_sections[JobConfig.name].max_nb_of_workers == 1 assert Config.unique_sections[MigrationConfig.name].migration_fcts[\"1.0\"] == {\"test_csv_dn\": migrate_csv_path} assert Config.sections is not None assert len(Config.sections) == 3 assert Config.sections[DataNodeConfig.name] is not None assert len(Config.sections[DataNodeConfig.name]) == 4 assert Config.sections[DataNodeConfig.name][\"default\"] is not None assert Config.sections[DataNodeConfig.name][\"default\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"default\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].storage_type == \"csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].scope == Scope.GLOBAL assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].has_header is True assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].path == \"./test.csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].encoding == \"utf-8\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].exposed_type == CustomClass assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].storage_type == \"json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].default_path == \"./test.json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoding == \"utf-8\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoder == 
CustomEncoder assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].decoder == CustomDecoder assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].path == \"./test.p\" assert Config.sections[TaskConfig.name] is not None assert len(Config.sections[TaskConfig.name]) == 2 assert Config.sections[TaskConfig.name][\"default\"] is not None assert Config.sections[TaskConfig.name][\"default\"].inputs == [] assert Config.sections[TaskConfig.name][\"default\"].outputs == [] assert Config.sections[TaskConfig.name][\"default\"].function is None assert [inp.id for inp in Config.sections[TaskConfig.name][\"test_task\"].inputs] == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id ] assert [outp.id for outp in Config.sections[TaskConfig.name][\"test_task\"].outputs] == [ Config.sections[DataNodeConfig.name][\"test_json_dn\"].id ] assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[ScenarioConfig.name] is not None assert len(Config.sections[ScenarioConfig.name]) == 2 assert Config.sections[ScenarioConfig.name][\"default\"] is not None assert Config.sections[ScenarioConfig.name][\"default\"].tasks == [] assert Config.sections[ScenarioConfig.name][\"default\"].additional_data_nodes == [] assert Config.sections[ScenarioConfig.name][\"default\"].data_nodes == [] assert len(Config.sections[ScenarioConfig.name][\"default\"].comparators) == 0 assert [task.id for task in Config.sections[ScenarioConfig.name][\"test_scenario\"].tasks] == [ Config.sections[TaskConfig.name][\"test_task\"].id ] assert [ additional_data_node.id for additional_data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].additional_data_nodes ] == [Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].id] assert sorted([data_node.id for data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].data_nodes]) == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id, Config.sections[DataNodeConfig.name][\"test_json_dn\"].id, Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].id, ] sequences = {} for sequence_name, sequence_tasks in Config.sections[ScenarioConfig.name][\"test_scenario\"].sequences.items(): sequences[sequence_name] = [task.id for task in sequence_tasks] assert sequences == {\"sequence1\": [Config.sections[TaskConfig.name][\"test_task\"].id]} assert dict(Config.sections[ScenarioConfig.name][\"test_scenario\"].comparators) == { \"test_json_dn\": [compare_function] } def test_read_write_toml_configuration_file_migrate_sequence_in_scenario(): old_toml_config = \"\"\" [TAIPY] [JOB] mode = \"development\" max_nb_of_workers = \"1:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" mode = \"development\" version_number = \"\" force = \"False:bool\" [DATA_NODE.default] storage_type = \"pickle\" scope = \"SCENARIO:SCOPE\" [DATA_NODE.test_csv_dn] storage_type = \"csv\" scope = \"GLOBAL:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" path = \"./test.csv\" exposed_type = \"tests.core.config.test_config_serialization.CustomClass:class\" has_header = \"True:bool\" [DATA_NODE.test_json_dn] storage_type = \"json\" scope = \"SCENARIO:SCOPE\" default_path = \"./test.json\" encoder = 
\"tests.core.config.test_config_serialization.CustomEncoder:class\" decoder = \"tests.core.config.test_config_serialization.CustomDecoder:class\" [TASK.default] inputs = [] outputs = [] skippable = \"False:bool\" [TASK.test_task] function = \"tests.core.config.test_config_serialization.multiply:function\" inputs = [ \"test_csv_dn:SECTION\",] outputs = [ \"test_json_dn:SECTION\",] skippable = \"False:bool\" [SCENARIO.default] [SCENARIO.test_scenario] tasks = [ \"test_task:SECTION\",] sequences.test_sequence = [ \"test_task:SECTION\",] frequency = \"DAILY:FREQUENCY\" [VERSION_MIGRATION.migration_fcts.\"1.0\"] test_csv_dn = \"tests.core.config.test_config_serialization.migrate_csv_path:function\" [SCENARIO.default.comparators] [SCENARIO.test_scenario.comparators] test_json_dn = [ \"tests.core.config.test_config_serialization.compare_function:function\",] \"\"\".strip() config_test_scenario() tf = NamedTemporaryFile() with open(tf.filename, \"w\") as fd: fd.writelines(old_toml_config) Config.restore(tf.filename) assert Config.unique_sections is not None assert len(Config.unique_sections) == 3 assert Config.unique_sections[CoreSection.name].root_folder == \"./taipy/\" assert Config.unique_sections[CoreSection.name].storage_folder == \".data/\" assert Config.unique_sections[CoreSection.name].repository_type == \"filesystem\" assert Config.unique_sections[CoreSection.name].repository_properties == {} assert Config.unique_sections[CoreSection.name].mode == \"development\" assert Config.unique_sections[CoreSection.name].version_number == \"\" assert Config.unique_sections[CoreSection.name].force is False assert Config.unique_sections[JobConfig.name].mode == \"development\" assert Config.unique_sections[JobConfig.name].max_nb_of_workers == 1 assert Config.unique_sections[MigrationConfig.name].migration_fcts[\"1.0\"] == {\"test_csv_dn\": migrate_csv_path} assert Config.sections is not None assert len(Config.sections) == 3 assert Config.sections[DataNodeConfig.name] is not None assert len(Config.sections[DataNodeConfig.name]) == 3 assert Config.sections[DataNodeConfig.name][\"default\"] is not None assert Config.sections[DataNodeConfig.name][\"default\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"default\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].storage_type == \"csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].scope == Scope.GLOBAL assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].has_header is True assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].path == \"./test.csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].exposed_type == CustomClass assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].storage_type == \"json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].default_path == \"./test.json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoder == CustomEncoder assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].decoder == CustomDecoder assert Config.sections[TaskConfig.name] is not None assert len(Config.sections[TaskConfig.name]) == 2 assert Config.sections[TaskConfig.name][\"default\"] is not None assert Config.sections[TaskConfig.name][\"default\"].inputs == [] assert Config.sections[TaskConfig.name][\"default\"].outputs == [] 
assert Config.sections[TaskConfig.name][\"default\"].function is None assert not Config.sections[TaskConfig.name][\"default\"].skippable assert [inp.id for inp in Config.sections[TaskConfig.name][\"test_task\"].inputs] == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id ] assert [outp.id for outp in Config.sections[TaskConfig.name][\"test_task\"].outputs] == [ Config.sections[DataNodeConfig.name][\"test_json_dn\"].id ] assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[ScenarioConfig.name] is not None assert len(Config.sections[ScenarioConfig.name]) == 2 assert Config.sections[ScenarioConfig.name][\"default\"] is not None assert Config.sections[ScenarioConfig.name][\"default\"].tasks == [] assert Config.sections[ScenarioConfig.name][\"default\"].additional_data_nodes == [] assert Config.sections[ScenarioConfig.name][\"default\"].data_nodes == [] assert len(Config.sections[ScenarioConfig.name][\"default\"].comparators) == 0 assert [task.id for task in Config.sections[ScenarioConfig.name][\"test_scenario\"].tasks] == [ Config.sections[TaskConfig.name][\"test_task\"].id ] assert [ additional_data_node.id for additional_data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].additional_data_nodes ] == [] assert sorted([data_node.id for data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].data_nodes]) == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id, Config.sections[DataNodeConfig.name][\"test_json_dn\"].id, ] assert Config.sections[ScenarioConfig.name][\"test_scenario\"].sequences == { \"test_sequence\": [Config.sections[TaskConfig.name][\"test_task\"]] } assert dict(Config.sections[ScenarioConfig.name][\"test_scenario\"].comparators) == { \"test_json_dn\": [compare_function] } def test_read_write_json_configuration_file_migrate_sequence_in_scenario(): old_json_config = \"\"\" { \"TAIPY\": {}, \"JOB\": { \"mode\": \"development\", \"max_nb_of_workers\": \"1:int\" }, \"CORE\": { \"root_folder\": \"./taipy/\", \"storage_folder\": \".data/\", \"repository_type\": \"filesystem\", \"read_entity_retry\": \"0:int\", \"mode\": \"development\", \"version_number\": \"\", \"force\": \"False:bool\" }, \"VERSION_MIGRATION\": { \"migration_fcts\": { \"1.0\": { \"test_csv_dn\": \"tests.core.config.test_config_serialization.migrate_csv_path:function\" } } }, \"DATA_NODE\": { \"default\": { \"storage_type\": \"pickle\", \"scope\": \"SCENARIO:SCOPE\" }, \"test_csv_dn\": { \"storage_type\": \"csv\", \"scope\": \"GLOBAL:SCOPE\", \"validity_period\": \"1d0h0m0s:timedelta\", \"path\": \"./test.csv\", \"exposed_type\": \"tests.core.config.test_config_serialization.CustomClass:class\", \"has_header\": \"True:bool\" }, \"test_json_dn\": { \"storage_type\": \"json\", \"scope\": \"SCENARIO:SCOPE\", \"default_path\": \"./test.json\", \"encoder\": \"tests.core.config.test_config_serialization.CustomEncoder:class\", \"decoder\": \"tests.core.config.test_config_serialization.CustomDecoder:class\" } }, \"TASK\": { \"default\": { \"function\": null, \"inputs\": [], \"outputs\": [], \"skippable\": \"False:bool\" }, \"test_task\": { \"function\": \"tests.core.config.test_config_serialization.multiply:function\", \"inputs\": [ \"test_csv_dn:SECTION\" ], \"outputs\": [ \"test_json_dn:SECTION\" ], \"skippable\": \"False:bool\" } }, \"SCENARIO\": { \"default\": { \"comparators\": {}, \"sequences\": {}, \"frequency\": null }, \"test_scenario\": { 
\"comparators\": { \"test_json_dn\": [ \"tests.core.config.test_config_serialization.compare_function:function\" ] }, \"tasks\": [ \"test_task:SECTION\" ], \"sequences\": { \"test_sequence\": [ \"test_task:SECTION\" ] }, \"frequency\": \"DAILY:FREQUENCY\" } } } \"\"\".strip() Config._serializer = _JsonSerializer() config_test_scenario() tf = NamedTemporaryFile() with open(tf.filename, \"w\") as fd: fd.writelines(old_json_config) Config.restore(tf.filename) assert Config.unique_sections is not None assert len(Config.unique_sections) == 3 assert Config.unique_sections[CoreSection.name].root_folder == \"./taipy/\" assert Config.unique_sections[CoreSection.name].storage_folder == \".data/\" assert Config.unique_sections[CoreSection.name].repository_type == \"filesystem\" assert Config.unique_sections[CoreSection.name].repository_properties == {} assert Config.unique_sections[CoreSection.name].mode == \"development\" assert Config.unique_sections[CoreSection.name].version_number == \"\" assert Config.unique_sections[CoreSection.name].force is False assert Config.unique_sections[JobConfig.name].mode == \"development\" assert Config.unique_sections[JobConfig.name].max_nb_of_workers == 1 assert Config.unique_sections[MigrationConfig.name].migration_fcts[\"1.0\"] == {\"test_csv_dn\": migrate_csv_path} assert Config.sections is not None assert len(Config.sections) == 3 assert Config.sections[DataNodeConfig.name] is not None assert len(Config.sections[DataNodeConfig.name]) == 3 assert Config.sections[DataNodeConfig.name][\"default\"] is not None assert Config.sections[DataNodeConfig.name][\"default\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"default\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].storage_type == \"csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].scope == Scope.GLOBAL assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].has_header is True assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].path == \"./test.csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].exposed_type == CustomClass assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].storage_type == \"json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].default_path == \"./test.json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoder == CustomEncoder assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].decoder == CustomDecoder assert Config.sections[TaskConfig.name] is not None assert len(Config.sections[TaskConfig.name]) == 2 assert Config.sections[TaskConfig.name][\"default\"] is not None assert Config.sections[TaskConfig.name][\"default\"].inputs == [] assert Config.sections[TaskConfig.name][\"default\"].outputs == [] assert Config.sections[TaskConfig.name][\"default\"].function is None assert [inp.id for inp in Config.sections[TaskConfig.name][\"test_task\"].inputs] == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id ] assert [outp.id for outp in Config.sections[TaskConfig.name][\"test_task\"].outputs] == [ Config.sections[DataNodeConfig.name][\"test_json_dn\"].id ] assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[ScenarioConfig.name] is not None assert len(Config.sections[ScenarioConfig.name]) == 2 assert 
Config.sections[ScenarioConfig.name][\"default\"] is not None assert Config.sections[ScenarioConfig.name][\"default\"].tasks == [] assert Config.sections[ScenarioConfig.name][\"default\"].additional_data_nodes == [] assert Config.sections[ScenarioConfig.name][\"default\"].data_nodes == [] assert len(Config.sections[ScenarioConfig.name][\"default\"].comparators) == 0 assert [task.id for task in Config.sections[ScenarioConfig.name][\"test_scenario\"].tasks] == [ Config.sections[TaskConfig.name][\"test_task\"].id ] assert [ additional_data_node.id for additional_data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].additional_data_nodes ] == [] assert sorted([data_node.id for data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].data_nodes]) == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id, Config.sections[DataNodeConfig.name][\"test_json_dn\"].id, ] assert Config.sections[ScenarioConfig.name][\"test_scenario\"].sequences == { \"test_sequence\": [Config.sections[TaskConfig.name][\"test_task\"]] } assert dict(Config.sections[ScenarioConfig.name][\"test_scenario\"].comparators) == { \"test_json_dn\": [compare_function] } "} {"text": "from taipy.config.config import Config def migrate_pickle_path(dn): dn.path = \"s1.pkl\" def migrate_skippable(task): task.skippable = True def test_migration_config(): assert Config.migration_functions.migration_fcts == {} data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes1, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == {\"1.0\": {\"data_nodes1\": migrate_pickle_path}} assert migration_cfg.properties == {} data_nodes2 = Config.configure_data_node(\"data_nodes2\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes2, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == { \"1.0\": {\"data_nodes1\": migrate_pickle_path, \"data_nodes2\": migrate_pickle_path} } def test_clean_config(): assert Config.migration_functions.migration_fcts == {} data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes1, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == {\"1.0\": {\"data_nodes1\": migrate_pickle_path}} assert migration_cfg.properties == {} migration_cfg._clean() assert migration_cfg.migration_fcts == {} assert migration_cfg._properties == {} "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from src.taipy.core.config import CoreSection from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.config.job_config import JobConfig from src.taipy.core.config.migration_config import MigrationConfig from src.taipy.core.config.scenario_config import ScenarioConfig from src.taipy.core.config.task_config import TaskConfig from taipy.config._config import _Config from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.global_app.global_app_config import GlobalAppConfig def _test_default_job_config(job_config: JobConfig): assert job_config is not None assert job_config.mode == JobConfig._DEFAULT_MODE def _test_default_core_section(core_section: CoreSection): assert core_section is not None assert core_section.mode == CoreSection._DEFAULT_MODE assert core_section.version_number == \"\" assert not core_section.force assert core_section.root_folder == \"./taipy/\" assert core_section.storage_folder == \".data/\" assert core_section.repository_type == \"filesystem\" assert core_section.repository_properties == {} assert len(core_section.properties) == 0 def _test_default_data_node_config(dn_config: DataNodeConfig): assert dn_config is not None assert dn_config.id is not None assert dn_config.storage_type == \"pickle\" assert dn_config.scope == Scope.SCENARIO assert dn_config.validity_period is None assert len(dn_config.properties) == 0 # type: ignore def _test_default_task_config(task_config: TaskConfig): assert task_config is not None assert task_config.id is not None assert task_config.input_configs == [] assert task_config.output_configs == [] assert task_config.function is None assert not task_config.skippable assert len(task_config.properties) == 0 # type: ignore def _test_default_scenario_config(scenario_config: ScenarioConfig): assert scenario_config is not None assert scenario_config.id is not None assert scenario_config.tasks == [] assert scenario_config.task_configs == [] assert scenario_config.additional_data_nodes == [] assert scenario_config.additional_data_node_configs == [] assert scenario_config.data_nodes == [] assert scenario_config.data_node_configs == [] assert scenario_config.sequences == {} assert len(scenario_config.properties) == 0 # type: ignore def _test_default_version_migration_config(version_migration_config: MigrationConfig): assert version_migration_config is not None assert version_migration_config.migration_fcts == {} assert len(version_migration_config.properties) == 0 # type: ignore def _test_default_global_app_config(global_config: GlobalAppConfig): assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_default_configuration(): default_config = Config._default_config assert default_config._global_config is not None _test_default_global_app_config(default_config._global_config) _test_default_global_app_config(Config.global_config) _test_default_global_app_config(GlobalAppConfig().default_config()) assert default_config._unique_sections is not None assert len(default_config._unique_sections) == 3 assert 
len(default_config._sections) == 3 _test_default_job_config(default_config._unique_sections[JobConfig.name]) _test_default_job_config(Config.job_config) _test_default_job_config(JobConfig().default_config()) _test_default_version_migration_config(default_config._unique_sections[MigrationConfig.name]) _test_default_version_migration_config(Config.migration_functions) _test_default_version_migration_config(MigrationConfig.default_config()) _test_default_core_section(default_config._unique_sections[CoreSection.name]) _test_default_core_section(Config.core) _test_default_core_section(CoreSection().default_config()) _test_default_data_node_config(default_config._sections[DataNodeConfig.name][_Config.DEFAULT_KEY]) _test_default_data_node_config(Config.data_nodes[_Config.DEFAULT_KEY]) _test_default_data_node_config(DataNodeConfig.default_config()) assert len(default_config._sections[DataNodeConfig.name]) == 1 assert len(Config.data_nodes) == 1 _test_default_task_config(default_config._sections[TaskConfig.name][_Config.DEFAULT_KEY]) _test_default_task_config(Config.tasks[_Config.DEFAULT_KEY]) _test_default_task_config(TaskConfig.default_config()) assert len(default_config._sections[TaskConfig.name]) == 1 assert len(Config.tasks) == 1 _test_default_scenario_config(default_config._sections[ScenarioConfig.name][_Config.DEFAULT_KEY]) Config.scenarios[_Config.DEFAULT_KEY] _test_default_scenario_config(Config.scenarios[_Config.DEFAULT_KEY]) _test_default_scenario_config(ScenarioConfig.default_config()) assert len(default_config._sections[ScenarioConfig.name]) == 1 assert len(Config.scenarios) == 1 "} {"text": "import os from unittest import mock from taipy.config.common.frequency import Frequency from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def my_func(): pass def _configure_scenario_in_toml(): return NamedTemporaryFile( content=\"\"\" [TAIPY] [TASK.task1] inputs = [] outputs = [] [TASK.task2] inputs = [] outputs = [] [SCENARIO.scenarios1] tasks = [ \"task1:SECTION\", \"task2:SECTION\"] \"\"\" ) def _check_tasks_instance(task_id, scenario_id): \"\"\"Check if the task instance in the task config correctly points to the Config._applied_config, not the Config._python_config or the Config._file_config \"\"\" task_config_applied_instance = Config.tasks[task_id] task_config_instance_via_scenario = None for task in Config.scenarios[scenario_id].tasks: if task.id == task_id: task_config_instance_via_scenario = task task_config_python_instance = None if Config._python_config._sections.get(\"TASK\", None): task_config_python_instance = Config._python_config._sections[\"TASK\"][task_id] task_config_file_instance = None if Config._file_config._sections.get(\"TASK\", None): task_config_file_instance = Config._file_config._sections[\"TASK\"][task_id] assert task_config_python_instance is not task_config_applied_instance assert task_config_python_instance is not task_config_instance_via_scenario assert task_config_file_instance is not task_config_applied_instance assert task_config_file_instance is not task_config_instance_via_scenario assert task_config_instance_via_scenario is task_config_applied_instance def test_task_instance_when_configure_scenario_in_python(): task1_config = Config.configure_task(\"task1\", []) task2_config = Config.configure_task(\"task2\", print) Config.configure_scenario(\"scenarios1\", [task1_config, task2_config]) _check_tasks_instance(\"task1\", \"scenarios1\") _check_tasks_instance(\"task2\", \"scenarios1\") def 
test_task_instance_when_configure_scenario_by_loading_toml(): toml_config = _configure_scenario_in_toml() Config.load(toml_config.filename) _check_tasks_instance(\"task1\", \"scenarios1\") _check_tasks_instance(\"task2\", \"scenarios1\") def test_task_instance_when_configure_scenario_by_overriding_toml(): toml_config = _configure_scenario_in_toml() Config.override(toml_config.filename) _check_tasks_instance(\"task1\", \"scenarios1\") _check_tasks_instance(\"task2\", \"scenarios1\") def test_scenario_creation(): dn_config_1 = Config.configure_data_node(\"dn1\") dn_config_2 = Config.configure_data_node(\"dn2\") dn_config_3 = Config.configure_data_node(\"dn3\") dn_config_4 = Config.configure_data_node(\"dn4\") task_config_1 = Config.configure_task(\"task1\", sum, [dn_config_1, dn_config_2], dn_config_3) task_config_2 = Config.configure_task(\"task2\", print, dn_config_3) scenario = Config.configure_scenario( \"scenarios1\", [task_config_1, task_config_2], [dn_config_4], comparators={\"dn_cfg\": [my_func]}, sequences={\"sequence\": []}, ) assert list(Config.scenarios) == [\"default\", scenario.id] scenario2 = Config.configure_scenario(\"scenarios2\", [task_config_1], frequency=Frequency.MONTHLY) assert list(Config.scenarios) == [\"default\", scenario.id, scenario2.id] def test_scenario_count(): task_config_1 = Config.configure_task(\"task1\", my_func) task_config_2 = Config.configure_task(\"task2\", print) Config.configure_scenario(\"scenarios1\", [task_config_1, task_config_2]) assert len(Config.scenarios) == 2 Config.configure_scenario(\"scenarios2\", [task_config_1]) assert len(Config.scenarios) == 3 Config.configure_scenario(\"scenarios3\", [task_config_2]) assert len(Config.scenarios) == 4 def test_scenario_getitem(): dn_config_1 = Config.configure_data_node(\"dn1\") dn_config_2 = Config.configure_data_node(\"dn2\") dn_config_3 = Config.configure_data_node(\"dn3\") dn_config_4 = Config.configure_data_node(\"dn4\") task_config_1 = Config.configure_task(\"task1\", sum, [dn_config_1, dn_config_2], dn_config_3) task_config_2 = Config.configure_task(\"task2\", print, dn_config_3) scenario_id = \"scenarios1\" scenario = Config.configure_scenario(scenario_id, [task_config_1, task_config_2], [dn_config_4]) assert Config.scenarios[scenario_id].id == scenario.id assert Config.scenarios[scenario_id].task_configs == scenario.task_configs assert Config.scenarios[scenario_id].tasks == scenario.tasks assert Config.scenarios[scenario_id].task_configs == scenario.tasks assert Config.scenarios[scenario_id].additional_data_node_configs == scenario.additional_data_node_configs assert Config.scenarios[scenario_id].additional_data_nodes == scenario.additional_data_nodes assert Config.scenarios[scenario_id].additional_data_node_configs == scenario.additional_data_nodes assert Config.scenarios[scenario_id].data_node_configs == scenario.data_node_configs assert Config.scenarios[scenario_id].data_nodes == scenario.data_nodes assert Config.scenarios[scenario_id].data_node_configs == scenario.data_nodes assert scenario.tasks == [task_config_1, task_config_2] assert scenario.additional_data_node_configs == [dn_config_4] assert set(scenario.data_nodes) == set([dn_config_4, dn_config_1, dn_config_2, dn_config_3]) assert Config.scenarios[scenario_id].properties == scenario.properties def test_scenario_creation_no_duplication(): task_config_1 = Config.configure_task(\"task1\", my_func) task_config_2 = Config.configure_task(\"task2\", print) dn_config = Config.configure_data_node(\"dn\") 
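# --- Illustrative sketch, not part of the original test file: a minimal scenario
# configuration combining tasks, an additional data node, a comparator and a sequence,
# built only from Config APIs exercised in the surrounding tests. Every config id
# (\"sales_dn\", \"report_dn\", \"notes_dn\", \"monthly\") and the compare_reports helper
# are invented for the example. ---
from taipy.config.common.frequency import Frequency
from taipy.config.config import Config

def compare_reports(*reports):
    # Toy comparator: a real one would compare the content of the data nodes.
    return reports

sales_dn = Config.configure_data_node(\"sales_dn\", \"pickle\")
report_dn = Config.configure_data_node(\"report_dn\", \"pickle\")
build_report = Config.configure_task(\"build_report\", print, [sales_dn], [report_dn])
monthly = Config.configure_scenario(
    \"monthly\",
    task_configs=[build_report],
    additional_data_node_configs=[Config.configure_data_node(\"notes_dn\", \"pickle\")],
    comparators={\"report_dn\": [compare_reports]},
    sequences={\"reporting\": [build_report]},
    frequency=Frequency.MONTHLY,
)
# --- end of illustrative sketch ---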
Config.configure_scenario(\"scenarios1\", [task_config_1, task_config_2], [dn_config]) assert len(Config.scenarios) == 2 Config.configure_scenario(\"scenarios1\", [task_config_1, task_config_2], [dn_config]) assert len(Config.scenarios) == 2 def test_scenario_get_set_and_remove_comparators(): task_config_1 = Config.configure_task(\"task1\", my_func) task_config_2 = Config.configure_task(\"task2\", print) dn_config_1 = \"dn_config_1\" scenario_config_1 = Config.configure_scenario( \"scenarios1\", [task_config_1, task_config_2], comparators={dn_config_1: my_func} ) assert scenario_config_1.comparators is not None assert scenario_config_1.comparators[dn_config_1] == [my_func] assert len(scenario_config_1.comparators.keys()) == 1 dn_config_2 = \"dn_config_2\" scenario_config_1.add_comparator(dn_config_2, my_func) assert len(scenario_config_1.comparators.keys()) == 2 scenario_config_1.delete_comparator(dn_config_1) assert len(scenario_config_1.comparators.keys()) == 1 scenario_config_1.delete_comparator(dn_config_2) assert len(scenario_config_1.comparators.keys()) == 0 scenario_config_2 = Config.configure_scenario(\"scenarios2\", [task_config_1, task_config_2]) assert scenario_config_2.comparators is not None scenario_config_2.add_comparator(dn_config_1, my_func) assert len(scenario_config_2.comparators.keys()) == 1 scenario_config_2.delete_comparator(\"dn_config_3\") def test_scenario_config_with_env_variable_value(): task_config_1 = Config.configure_task(\"task1\", my_func) task_config_2 = Config.configure_task(\"task2\", print) with mock.patch.dict(os.environ, {\"FOO\": \"bar\"}): Config.configure_scenario(\"scenario_name\", [task_config_1, task_config_2], prop=\"ENV[FOO]\") assert Config.scenarios[\"scenario_name\"].prop == \"bar\" assert Config.scenarios[\"scenario_name\"].properties[\"prop\"] == \"bar\" assert Config.scenarios[\"scenario_name\"]._properties[\"prop\"] == \"ENV[FOO]\" def test_clean_config(): task1_config = Config.configure_task(\"task1\", print, [], []) task2_config = Config.configure_task(\"task2\", print, [], []) scenario1_config = Config.configure_scenario( \"id1\", [task1_config, task2_config], [], Frequency.YEARLY, {\"foo\": \"bar\"}, prop=\"foo\", sequences={\"sequence_1\": []}, ) scenario2_config = Config.configure_scenario( \"id2\", [task2_config, task1_config], [], Frequency.MONTHLY, {\"foz\": \"baz\"}, prop=\"bar\", sequences={\"sequence_2\": []}, ) assert Config.scenarios[\"id1\"] is scenario1_config assert Config.scenarios[\"id2\"] is scenario2_config scenario1_config._clean() scenario2_config._clean() # Check if the instance before and after _clean() is the same assert Config.scenarios[\"id1\"] is scenario1_config assert Config.scenarios[\"id2\"] is scenario2_config assert scenario1_config.id == \"id1\" assert scenario2_config.id == \"id2\" assert scenario1_config.tasks == scenario1_config.task_configs == [] assert scenario1_config.additional_data_nodes == scenario1_config.additional_data_node_configs == [] assert scenario1_config.data_nodes == scenario1_config.data_node_configs == [] assert scenario1_config.sequences == scenario1_config.sequences == {} assert scenario1_config.frequency is scenario1_config.frequency is None assert scenario1_config.comparators == scenario1_config.comparators == {} assert scenario1_config.properties == scenario1_config.properties == {} assert scenario2_config.tasks == scenario2_config.task_configs == [] assert scenario2_config.additional_data_nodes == scenario2_config.additional_data_node_configs == [] assert 
scenario2_config.data_nodes == scenario2_config.data_node_configs == [] assert scenario2_config.sequences == scenario1_config.sequences == {} assert scenario2_config.frequency is scenario2_config.frequency is None assert scenario2_config.comparators == scenario2_config.comparators == {} assert scenario2_config.properties == scenario2_config.properties == {} def test_add_sequence(): task1_config = Config.configure_task(\"task1\", print, [], []) task2_config = Config.configure_task(\"task2\", print, [], []) task3_config = Config.configure_task(\"task3\", print, [], []) task4_config = Config.configure_task(\"task4\", print, [], []) scenario_config = Config.configure_scenario( \"id\", [task1_config, task2_config, task3_config, task4_config], [], Frequency.YEARLY, prop=\"foo\" ) assert Config.scenarios[\"id\"] is scenario_config assert scenario_config.id == \"id\" assert ( scenario_config.tasks == scenario_config.task_configs == [task1_config, task2_config, task3_config, task4_config] ) assert scenario_config.additional_data_nodes == scenario_config.additional_data_node_configs == [] assert scenario_config.data_nodes == scenario_config.data_node_configs == [] assert scenario_config.frequency is scenario_config.frequency == Frequency.YEARLY assert scenario_config.comparators == scenario_config.comparators == {} assert scenario_config.properties == {\"prop\": \"foo\"} scenario_config.add_sequences( { \"sequence1\": [task1_config], \"sequence2\": [task2_config, task3_config], \"sequence3\": [task1_config, task2_config, task4_config], } ) assert len(scenario_config.sequences) == 3 assert scenario_config.sequences[\"sequence1\"] == [task1_config] assert scenario_config.sequences[\"sequence2\"] == [task2_config, task3_config] assert scenario_config.sequences[\"sequence3\"] == [task1_config, task2_config, task4_config] scenario_config.remove_sequences(\"sequence1\") assert len(scenario_config.sequences) == 2 scenario_config.remove_sequences([\"sequence2\", \"sequence3\"]) assert len(scenario_config.sequences) == 0 "} {"text": "from unittest.mock import patch import pytest from src.taipy.core._init_version import _read_version from src.taipy.core.config.core_section import CoreSection from src.taipy.core.exceptions import ConfigCoreVersionMismatched from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile _MOCK_CORE_VERSION = \"3.1.1\" @pytest.fixture(scope=\"function\", autouse=True) def mock_core_version(): with patch(\"src.taipy.core.config.core_section._read_version\") as mock_read_version: mock_read_version.return_value = _MOCK_CORE_VERSION CoreSection._CURRENT_CORE_VERSION = _MOCK_CORE_VERSION Config.unique_sections[CoreSection.name] = CoreSection.default_config() Config._default_config._unique_sections[CoreSection.name] = CoreSection.default_config() yield @pytest.fixture(scope=\"session\", autouse=True) def reset_core_version(): yield CoreSection._CURRENT_CORE_VERSION = _read_version() class TestCoreVersionInCoreSectionConfig: major, minor, patch = _MOCK_CORE_VERSION.split(\".\") current_version = f\"{major}.{minor}.{patch}\" current_dev_version = f\"{major}.{minor}.{patch}.dev0\" compatible_future_version = f\"{major}.{minor}.{int(patch) + 1}\" compatible_future_dev_version = f\"{major}.{minor}.{int(patch) + 1}.dev0\" core_version_is_compatible = [ # Current version and dev version should be compatible (f\"{major}.{minor}.{patch}\", True), (f\"{major}.{minor}.{patch}.dev0\", True), # Future versions with same major and minor should be 
compatible (f\"{major}.{minor}.{int(patch) + 1}\", True), (f\"{major}.{minor}.{int(patch) + 1}.dev0\", True), # Past versions with same major and minor should be compatible (f\"{major}.{minor}.{int(patch) - 1}\", True), (f\"{major}.{minor}.{int(patch) - 1}.dev0\", True), # Future versions with different minor number should be incompatible (f\"{major}.{int(minor) + 1}.{patch}\", False), (f\"{major}.{int(minor) + 1}.{patch}.dev0\", False), # Past versions with different minor number should be incompatible (f\"{major}.{int(minor) - 1}.{patch}\", False), (f\"{major}.{int(minor) - 1}.{patch}.dev0\", False), ] @pytest.mark.parametrize(\"core_version, is_compatible\", core_version_is_compatible) def test_load_configuration_file(self, core_version, is_compatible): file_config = NamedTemporaryFile( f\"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{core_version}\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) if is_compatible: Config.load(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION else: with pytest.raises(ConfigCoreVersionMismatched): Config.load(file_config.filename) @pytest.mark.parametrize(\"core_version,is_compatible\", core_version_is_compatible) def test_override_configuration_file(self, core_version, is_compatible): file_config = NamedTemporaryFile( f\"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{core_version}\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) if is_compatible: Config.override(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION else: with pytest.raises(ConfigCoreVersionMismatched): Config.override(file_config.filename) def test_load_configuration_file_without_core_section(self): file_config = NamedTemporaryFile( \"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) Config.load(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION "} {"text": "import os from datetime import timedelta from unittest import mock from src.taipy.core.config import DataNodeConfig, ScenarioConfig, TaskConfig from src.taipy.core.config.core_section import CoreSection from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def test_write_configuration_file(): expected_config = f\"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{CoreSection._CURRENT_CORE_VERSION}\" [VERSION_MIGRATION.migration_fcts] [DATA_NODE.default] storage_type = \"in_memory\" scope = 
\"SCENARIO:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" custom = \"default_custom_prop\" [DATA_NODE.dn1] storage_type = \"pickle\" scope = \"SCENARIO:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" custom = \"custom property\" default_data = \"dn1\" [DATA_NODE.dn2] storage_type = \"ENV[FOO]\" scope = \"SCENARIO:SCOPE\" validity_period = \"2d0h0m0s:timedelta\" foo = \"bar\" default_data = \"dn2\" baz = \"ENV[QUX]\" quux = \"ENV[QUUZ]:bool\" corge = [ \"grault\", \"ENV[GARPLY]\", \"ENV[WALDO]:int\", \"3.0:float\",] [DATA_NODE.dn3] storage_type = \"ENV[FOO]\" scope = \"SCENARIO:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" foo = \"bar\" default_data = \"dn3\" quux = \"ENV[QUUZ]:bool\" [TASK.default] inputs = [] outputs = [] skippable = \"False:bool\" [TASK.t1] function = \"builtins.print:function\" inputs = [ \"dn1:SECTION\",] outputs = [ \"dn2:SECTION\",] skippable = \"False:bool\" description = \"t1 description\" [SCENARIO.default] tasks = [] additional_data_nodes = [] frequency = \"QUARTERLY:FREQUENCY\" owner = \"Michel Platini\" [SCENARIO.s1] tasks = [ \"t1:SECTION\",] additional_data_nodes = [ \"dn3:SECTION\",] frequency = \"QUARTERLY:FREQUENCY\" owner = \"Raymond Kopa\" [SCENARIO.default.comparators] [SCENARIO.default.sequences] [SCENARIO.s1.comparators] [SCENARIO.s1.sequences] sequence = [ \"t1:SECTION\",] \"\"\".strip() tf = NamedTemporaryFile() with mock.patch.dict( os.environ, {\"FOO\": \"in_memory\", \"QUX\": \"qux\", \"QUUZ\": \"true\", \"GARPLY\": \"garply\", \"WALDO\": \"17\"} ): Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) Config.set_default_data_node_configuration( storage_type=\"in_memory\", custom=\"default_custom_prop\", validity_period=timedelta(1), ) dn1_cfg_v2 = Config.configure_data_node( \"dn1\", storage_type=\"pickle\", scope=Scope.SCENARIO, default_data=\"dn1\", custom=\"custom property\" ) dn2_cfg_v2 = Config.configure_data_node( \"dn2\", storage_type=\"ENV[FOO]\", validity_period=timedelta(2), foo=\"bar\", default_data=\"dn2\", baz=\"ENV[QUX]\", quux=\"ENV[QUUZ]:bool\", corge=(\"grault\", \"ENV[GARPLY]\", \"ENV[WALDO]:int\", 3.0), ) dn3_cfg_v2 = Config.configure_data_node( \"dn3\", storage_type=\"ENV[FOO]\", foo=\"bar\", default_data=\"dn3\", quux=\"ENV[QUUZ]:bool\", ) assert dn2_cfg_v2.scope == Scope.SCENARIO t1_cfg_v2 = Config.configure_task(\"t1\", print, dn1_cfg_v2, dn2_cfg_v2, description=\"t1 description\") Config.set_default_scenario_configuration([], [], Frequency.QUARTERLY, owner=\"Michel Platini\") Config.configure_scenario( \"s1\", task_configs=[t1_cfg_v2], additional_data_node_configs=[dn3_cfg_v2], frequency=Frequency.QUARTERLY, owner=\"Raymond Kopa\", sequences={\"sequence\": [t1_cfg_v2]}, ) Config.backup(tf.filename) actual_config = tf.read().strip() # problem here assert actual_config == expected_config Config.override(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == expected_config def test_read_configuration_file(): file_config = NamedTemporaryFile( \"\"\" [DATA_NODE.default] has_header = true [DATA_NODE.my_datanode] path = \"/data/csv\" validity_period = \"1d0h0m0s:timedelta\" [DATA_NODE.my_datanode2] path = \"/data2/csv\" [DATA_NODE.my_datanode3] path = \"/data3/csv\" source = \"local\" [TASK.my_task] inputs = [\"my_datanode:SECTION\"] outputs = [\"my_datanode2:SECTION\"] description = \"task description\" [SCENARIO.my_scenario] tasks = [ \"my_task:SECTION\"] additional_data_nodes = [\"my_datanode3:SECTION\"] owner = \"John Doe\" 
[SCENARIO.my_scenario.sequences] sequence = [ \"my_task:SECTION\",] \"\"\" ) Config.configure_task(\"my_task\", print) Config.override(file_config.filename) assert len(Config.data_nodes) == 4 assert type(Config.data_nodes[\"my_datanode\"]) == DataNodeConfig assert type(Config.data_nodes[\"my_datanode2\"]) == DataNodeConfig assert type(Config.data_nodes[\"my_datanode3\"]) == DataNodeConfig assert Config.data_nodes[\"my_datanode\"].path == \"/data/csv\" assert Config.data_nodes[\"my_datanode2\"].path == \"/data2/csv\" assert Config.data_nodes[\"my_datanode3\"].path == \"/data3/csv\" assert Config.data_nodes[\"my_datanode\"].id == \"my_datanode\" assert Config.data_nodes[\"my_datanode2\"].id == \"my_datanode2\" assert Config.data_nodes[\"my_datanode3\"].id == \"my_datanode3\" assert Config.data_nodes[\"my_datanode\"].validity_period == timedelta(1) assert Config.data_nodes[\"my_datanode3\"].source == \"local\" assert len(Config.tasks) == 2 assert type(Config.tasks[\"my_task\"]) == TaskConfig assert Config.tasks[\"my_task\"].id == \"my_task\" assert Config.tasks[\"my_task\"].description == \"task description\" assert Config.tasks[\"my_task\"].function == print assert len(Config.tasks[\"my_task\"].inputs) == 1 assert type(Config.tasks[\"my_task\"].inputs[0]) == DataNodeConfig assert Config.tasks[\"my_task\"].inputs[0].path == \"/data/csv\" assert Config.tasks[\"my_task\"].inputs[0].id == \"my_datanode\" assert len(Config.tasks[\"my_task\"].outputs) == 1 assert type(Config.tasks[\"my_task\"].outputs[0]) == DataNodeConfig assert Config.tasks[\"my_task\"].outputs[0].path == \"/data2/csv\" assert Config.tasks[\"my_task\"].outputs[0].id == \"my_datanode2\" assert len(Config.scenarios) == 2 assert type(Config.scenarios[\"my_scenario\"]) == ScenarioConfig assert Config.scenarios[\"my_scenario\"].id == \"my_scenario\" assert Config.scenarios[\"my_scenario\"].owner == \"John Doe\" assert len(Config.scenarios[\"my_scenario\"].tasks) == 1 assert type(Config.scenarios[\"my_scenario\"].tasks[0]) == TaskConfig assert len(Config.scenarios[\"my_scenario\"].additional_data_nodes) == 1 assert type(Config.scenarios[\"my_scenario\"].additional_data_nodes[0]) == DataNodeConfig assert Config.scenarios[\"my_scenario\"].tasks[0].id == \"my_task\" assert Config.scenarios[\"my_scenario\"].tasks[0].description == \"task description\" assert Config.scenarios[\"my_scenario\"].additional_data_nodes[0].id == \"my_datanode3\" assert Config.scenarios[\"my_scenario\"].additional_data_nodes[0].source == \"local\" assert [task.id for task in Config.scenarios[\"my_scenario\"].sequences[\"sequence\"]] == [ Config.scenarios[\"my_scenario\"].tasks[0].id ] "} {"text": "from datetime import timedelta from taipy.config import Config from taipy.config.common.scope import Scope class TestConfig: def test_configure_csv_data_node(self): a, b, c, d, e, f = \"foo\", \"path\", True, \"numpy\", Scope.SCENARIO, timedelta(1) Config.configure_csv_data_node(a, b, c, d, e, f) assert len(Config.data_nodes) == 2 def test_configure_excel_data_node(self): a, b, c, d, e, f, g = \"foo\", \"path\", True, \"Sheet1\", \"numpy\", Scope.SCENARIO, timedelta(1) Config.configure_excel_data_node(a, b, c, d, e, f, g) assert len(Config.data_nodes) == 2 def test_configure_generic_data_node(self): a, b, c, d, e, f, g, h = \"foo\", print, print, tuple([]), tuple([]), Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_generic_data_node(a, b, c, d, e, f, g, property=h) assert len(Config.data_nodes) == 2 def test_configure_in_memory_data_node(self): a, b, c, d, e = 
\"foo\", 0, Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_in_memory_data_node(a, b, c, d, property=e) assert len(Config.data_nodes) == 2 def test_configure_pickle_data_node(self): a, b, c, d, e = \"foo\", 0, Scope.SCENARIO, timedelta(1), \"path\" Config.configure_pickle_data_node(a, b, c, d, path=e) assert len(Config.data_nodes) == 2 def test_configure_json_data_node(self): a, dp, ec, dc, sc, f, p = \"foo\", \"path\", \"ec\", \"dc\", Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_json_data_node(a, dp, ec, dc, sc, f, path=p) assert len(Config.data_nodes) == 2 def test_configure_sql_table_data_node(self): a, b, c, d, e, f, g, h, i, extra_args, exposed_type, scope, vp, k = ( \"foo\", \"user\", \"pwd\", \"db\", \"engine\", \"table_name\", \"port\", \"host\", \"driver\", {\"foo\": \"bar\"}, \"exposed_type\", Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_sql_table_data_node(a, b, c, d, e, f, g, h, i, extra_args, exposed_type, scope, vp, property=k) assert len(Config.data_nodes) == 2 def test_configure_sql_data_node(self): a, b, c, d, e, f, g, h, i, j, k, extra_args, exposed_type, scope, vp, k = ( \"foo\", \"user\", \"pwd\", \"db\", \"engine\", \"read_query\", \"write_query_builder\", \"append_query_builder\", \"port\", \"host\", \"driver\", {\"foo\": \"bar\"}, \"exposed_type\", Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_sql_data_node(a, b, c, d, e, f, g, h, i, j, k, extra_args, exposed_type, scope, vp, property=k) assert len(Config.data_nodes) == 2 def test_configure_mongo_data_node(self): a, b, c, d, e, f, g, h, extra_args, scope, vp, k = ( \"foo\", \"db_name\", \"collection_name\", None, \"user\", \"pwd\", \"host\", \"port\", {\"foo\": \"bar\"}, Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_mongo_collection_data_node(a, b, c, d, e, f, g, h, extra_args, scope, vp, property=k) assert len(Config.data_nodes) == 2 "} {"text": "import os from unittest import mock import pytest from taipy.config.config import Config from taipy.config.exceptions.exceptions import InconsistentEnvVariableError, MissingEnvVariableError from tests.core.utils.named_temporary_file import NamedTemporaryFile def test_override_default_configuration_with_code_configuration(): assert not Config.core.root_folder == \"foo\" assert len(Config.data_nodes) == 1 assert len(Config.tasks) == 1 assert len(Config.scenarios) == 1 Config.configure_job_executions(max_nb_of_workers=-1) Config.configure_core(root_folder=\"foo\") foo_config = Config.configure_data_node(\"foo\", \"in_memory\") xyz_config = Config.configure_data_node(\"xyz\") bar_config = Config.configure_task(\"bar\", print, [foo_config], []) qux_config = Config.configure_scenario(\"qux\", [bar_config], [xyz_config]) assert Config.job_config.max_nb_of_workers == -1 assert Config.core.root_folder == \"foo\" assert len(Config.data_nodes) == 3 assert \"default\" in Config.data_nodes assert foo_config.id in Config.data_nodes assert xyz_config.id in Config.data_nodes assert Config.data_nodes[foo_config.id].storage_type == \"in_memory\" assert Config.data_nodes[xyz_config.id].storage_type == \"pickle\" assert len(Config.tasks) == 2 assert \"default\" in Config.tasks assert bar_config.id in Config.tasks assert len(Config.tasks[bar_config.id].input_configs) == 1 assert Config.tasks[bar_config.id].input_configs[0].id == foo_config.id assert len(Config.tasks[bar_config.id].output_configs) == 0 assert Config.tasks[bar_config.id].function == print assert len(Config.scenarios) == 2 assert \"default\" in Config.scenarios assert 
qux_config.id in Config.scenarios assert len(Config.scenarios[qux_config.id].tasks) == 1 assert Config.scenarios[qux_config.id].tasks[0].id == bar_config.id assert len(Config.scenarios[qux_config.id].additional_data_nodes) == 1 assert Config.scenarios[qux_config.id].additional_data_nodes[0].id == xyz_config.id def test_override_default_config_with_code_config_including_env_variable_values(): Config.configure_core() assert Config.core.repository_type == \"filesystem\" Config.configure_core(repository_type=\"othertype\") assert Config.core.repository_type == \"othertype\" with mock.patch.dict(os.environ, {\"REPOSITORY_TYPE\": \"foo\"}): Config.configure_core(repository_type=\"ENV[REPOSITORY_TYPE]\") assert Config.core.repository_type == \"foo\" def test_override_default_configuration_with_file_configuration(): tf = NamedTemporaryFile( \"\"\" [TAIPY] [JOB] max_nb_of_workers = -1 [DATA_NODE.foo] [TASK.bar] [SCENARIO.qux] \"\"\" ) assert Config.job_config.max_nb_of_workers == 1 assert len(Config.data_nodes) == 1 assert len(Config.tasks) == 1 assert len(Config.scenarios) == 1 Config.override(tf.filename) assert Config.job_config.max_nb_of_workers == -1 assert len(Config.data_nodes) == 2 assert \"default\" in Config.data_nodes assert \"foo\" in Config.data_nodes assert len(Config.tasks) == 2 assert \"default\" in Config.tasks assert \"bar\" in Config.tasks assert \"default\" in Config.scenarios assert len(Config.scenarios) == 2 assert \"qux\" in Config.scenarios def test_override_default_config_with_file_config_including_env_variable_values(): tf = NamedTemporaryFile( \"\"\" [JOB] max_nb_of_workers = \"ENV[FOO]:int\" start_executor = \"ENV[BAR]\" \"\"\" ) assert Config.job_config.max_nb_of_workers == 1 assert not Config.job_config.start_executor with mock.patch.dict(os.environ, {\"FOO\": \"6\", \"BAR\": \"TRUe\"}): Config.override(tf.filename) assert Config.job_config.max_nb_of_workers == 6 assert Config.job_config.start_executor with mock.patch.dict(os.environ, {\"FOO\": \"foo\", \"BAR\": \"true\"}): with pytest.raises(InconsistentEnvVariableError): Config.override(tf.filename) with mock.patch.dict(os.environ, {\"FOO\": \"5\"}): with pytest.raises(MissingEnvVariableError): Config.override(tf.filename) def test_code_configuration_do_not_override_file_configuration(): config_from_filename = NamedTemporaryFile( \"\"\" [JOB] max_nb_of_workers = 2 \"\"\" ) Config.override(config_from_filename.filename) Config.configure_job_executions(max_nb_of_workers=21) assert Config.job_config.max_nb_of_workers == 2 # From file config def test_code_configuration_do_not_override_file_configuration_including_env_variable_values(): config_from_filename = NamedTemporaryFile( \"\"\" [JOB] max_nb_of_workers = 2 \"\"\" ) Config.override(config_from_filename.filename) with mock.patch.dict(os.environ, {\"FOO\": \"21\"}): Config.configure_job_executions(max_nb_of_workers=\"ENV[FOO]\") assert Config.job_config.max_nb_of_workers == 2 # From file config def test_file_configuration_override_code_configuration(): config_from_filename = NamedTemporaryFile( \"\"\" [JOB] max_nb_of_workers = 2 \"\"\" ) Config.configure_job_executions(max_nb_of_workers=21) Config.override(config_from_filename.filename) assert Config.job_config.max_nb_of_workers == 2 # From file config def test_file_configuration_override_code_configuration_including_env_variable_values(): config_from_filename = NamedTemporaryFile( \"\"\" [JOB] max_nb_of_workers = \"ENV[FOO]:int\" \"\"\" ) Config.configure_job_executions(max_nb_of_workers=21) with 
mock.patch.dict(os.environ, {\"FOO\": \"2\"}): Config.override(config_from_filename.filename) assert Config.job_config.max_nb_of_workers == 2 # From file config def test_override_default_configuration_with_multiple_configurations(): file_config = NamedTemporaryFile( \"\"\" [DATA_NODE.default] has_header = true [DATA_NODE.my_datanode] path = \"/data/csv\" [JOB] max_nb_of_workers = 10 [TAIPY] \"\"\" ) # Default config is applied assert Config.job_config.max_nb_of_workers == 1 # Code config is applied Config.configure_job_executions(max_nb_of_workers=-1) assert Config.job_config.max_nb_of_workers == -1 # File config is applied Config.override(file_config.filename) assert Config.job_config.max_nb_of_workers == 10 assert Config.data_nodes[\"my_datanode\"].has_header assert Config.data_nodes[\"my_datanode\"].path == \"/data/csv\" assert Config.data_nodes[\"my_datanode\"].not_defined is None def test_override_default_configuration_with_multiple_configurations_including_environment_variable_values(): file_config = NamedTemporaryFile( \"\"\" [DATA_NODE.default] has_header = true [DATA_NODE.my_datanode] path = \"ENV[FOO]\" [JOB] max_nb_of_workers = 10 [TAIPY] \"\"\" ) with mock.patch.dict(os.environ, {\"FOO\": \"/data/csv\", \"BAR\": \"/baz/data/csv\"}): # Default config is applied assert Config.job_config.max_nb_of_workers == 1 # Code config is applied Config.configure_job_executions(max_nb_of_workers=-1) Config.configure_data_node(\"my_datanode\", path=\"ENV[BAR]\") assert Config.job_config.max_nb_of_workers == -1 assert Config.data_nodes[\"my_datanode\"].path == \"/baz/data/csv\" # File config is applied Config.override(file_config.filename) assert Config.job_config.max_nb_of_workers == 10 assert Config.data_nodes[\"my_datanode\"].has_header assert Config.data_nodes[\"my_datanode\"].path == \"/data/csv\" assert Config.data_nodes[\"my_datanode\"].not_defined is None "} {"text": "from unittest.mock import patch from src.taipy.core import Core from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from taipy.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def test_core_section(): with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() assert not Config.core.force core.stop() with patch(\"sys.argv\", [\"prog\"]): Config.configure_core(mode=\"experiment\", version_number=\"test_num\", force=True) core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"test_num\" assert Config.core.force core.stop() toml_config = NamedTemporaryFile( content=\"\"\" [TAIPY] [CORE] mode = \"production\" version_number = \"test_num_2\" force = \"true:bool\" \"\"\" ) Config.load(toml_config.filename) with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() assert Config.core.mode == \"production\" assert Config.core.version_number == \"test_num_2\" assert Config.core.force core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"test_num_3\", \"--no-taipy-force\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"test_num_3\" assert not Config.core.force core.stop() def test_clean_config(): core_config = Config.configure_core(mode=\"experiment\", version_number=\"test_num\", force=True) assert Config.core is core_config core_config._clean() # Check if the instance before and after 
_clean() is the same assert Config.core is core_config assert core_config.mode == \"development\" assert core_config.version_number == \"\" assert core_config.force is False assert core_config.properties == {} "} {"text": "import datetime import os from unittest import mock import pytest from src.taipy.core import MongoDefaultDocument from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config import DataNodeConfig from src.taipy.core.config.job_config import JobConfig from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked def test_data_node_config_default_parameter(): csv_dn_cfg = Config.configure_data_node(\"data_node_1\", \"csv\") assert csv_dn_cfg.scope == Scope.SCENARIO assert csv_dn_cfg.has_header is True assert csv_dn_cfg.exposed_type == \"pandas\" assert csv_dn_cfg.validity_period is None json_dn_cfg = Config.configure_data_node(\"data_node_2\", \"json\") assert json_dn_cfg.scope == Scope.SCENARIO assert json_dn_cfg.validity_period is None parquet_dn_cfg = Config.configure_data_node(\"data_node_3\", \"parquet\") assert parquet_dn_cfg.scope == Scope.SCENARIO assert parquet_dn_cfg.engine == \"pyarrow\" assert parquet_dn_cfg.compression == \"snappy\" assert parquet_dn_cfg.exposed_type == \"pandas\" assert parquet_dn_cfg.validity_period is None excel_dn_cfg = Config.configure_data_node(\"data_node_4\", \"excel\") assert excel_dn_cfg.scope == Scope.SCENARIO assert excel_dn_cfg.has_header is True assert excel_dn_cfg.exposed_type == \"pandas\" assert excel_dn_cfg.validity_period is None generic_dn_cfg = Config.configure_data_node(\"data_node_5\", \"generic\") assert generic_dn_cfg.scope == Scope.SCENARIO assert generic_dn_cfg.validity_period is None in_memory_dn_cfg = Config.configure_data_node(\"data_node_6\", \"in_memory\") assert in_memory_dn_cfg.scope == Scope.SCENARIO assert in_memory_dn_cfg.validity_period is None pickle_dn_cfg = Config.configure_data_node(\"data_node_7\", \"pickle\") assert pickle_dn_cfg.scope == Scope.SCENARIO assert pickle_dn_cfg.validity_period is None sql_table_dn_cfg = Config.configure_data_node( \"data_node_8\", \"sql_table\", db_name=\"test\", db_engine=\"mssql\", table_name=\"test\" ) assert sql_table_dn_cfg.scope == Scope.SCENARIO assert sql_table_dn_cfg.db_host == \"localhost\" assert sql_table_dn_cfg.db_port == 1433 assert sql_table_dn_cfg.db_driver == \"\" assert sql_table_dn_cfg.sqlite_file_extension == \".db\" assert sql_table_dn_cfg.exposed_type == \"pandas\" assert sql_table_dn_cfg.validity_period is None sql_dn_cfg = Config.configure_data_node( \"data_node_9\", \"sql\", db_name=\"test\", db_engine=\"mssql\", read_query=\"test\", write_query_builder=print ) assert sql_dn_cfg.scope == Scope.SCENARIO assert sql_dn_cfg.db_host == \"localhost\" assert sql_dn_cfg.db_port == 1433 assert sql_dn_cfg.db_driver == \"\" assert sql_dn_cfg.sqlite_file_extension == \".db\" assert sql_dn_cfg.exposed_type == \"pandas\" assert sql_dn_cfg.validity_period is None mongo_dn_cfg = Config.configure_data_node( \"data_node_10\", \"mongo_collection\", db_name=\"test\", collection_name=\"test\" ) assert mongo_dn_cfg.scope == Scope.SCENARIO assert mongo_dn_cfg.db_host == \"localhost\" assert mongo_dn_cfg.db_port == 27017 assert mongo_dn_cfg.custom_document == MongoDefaultDocument assert mongo_dn_cfg.db_username == \"\" assert mongo_dn_cfg.db_password == \"\" assert mongo_dn_cfg.db_driver == \"\" assert 
mongo_dn_cfg.validity_period is None def test_data_node_config_check(caplog): data_node_config = Config.configure_data_node(\"data_nodes1\", \"pickle\") assert list(Config.data_nodes) == [DataNodeConfig._DEFAULT_KEY, data_node_config.id] data_node2_config = Config.configure_data_node(\"data_nodes2\", \"pickle\") assert list(Config.data_nodes) == [DataNodeConfig._DEFAULT_KEY, data_node_config.id, data_node2_config.id] data_node3_config = Config.configure_data_node(\"data_nodes3\", \"csv\", has_header=True, default_path=\"\") assert list(Config.data_nodes) == [ \"default\", data_node_config.id, data_node2_config.id, data_node3_config.id, ] with pytest.raises(SystemExit): Config.configure_data_node(\"data_nodes\", storage_type=\"bar\") Config.check() expected_error_message = ( \"`storage_type` field of DataNodeConfig `data_nodes` must be either csv, sql_table,\" \" sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory. Current\" ' value of property `storage_type` is \"bar\".' ) assert expected_error_message in caplog.text with pytest.raises(SystemExit): Config.configure_data_node(\"data_nodes\", scope=\"bar\") Config.check() expected_error_message = ( \"`scope` field of DataNodeConfig `data_nodes` must be populated with a Scope value.\" ' Current value of property `scope` is \"bar\".' ) assert expected_error_message in caplog.text with pytest.raises(TypeError): Config.configure_data_node(\"data_nodes\", storage_type=\"sql\") with pytest.raises(SystemExit): Config.configure_data_node(\"data_nodes\", storage_type=\"generic\") Config.check() expected_error_message = ( \"`storage_type` field of DataNodeConfig `data_nodes` must be either csv, sql_table,\" \" sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory.\" ' Current value of property `storage_type` is \"bar\".' 
) assert expected_error_message in caplog.text def test_configure_data_node_from_another_configuration(): d1_cfg = Config.configure_sql_table_data_node( \"d1\", db_username=\"foo\", db_password=\"bar\", db_name=\"db\", db_engine=\"mssql\", db_port=8080, db_host=\"somewhere\", table_name=\"foo\", scope=Scope.GLOBAL, foo=\"bar\", ) d2_cfg = Config.configure_data_node_from( source_configuration=d1_cfg, id=\"d2\", table_name=\"table_2\", ) assert d2_cfg.id == \"d2\" assert d2_cfg.storage_type == \"sql_table\" assert d2_cfg.scope == Scope.GLOBAL assert d2_cfg.validity_period is None assert d2_cfg.db_username == \"foo\" assert d2_cfg.db_password == \"bar\" assert d2_cfg.db_name == \"db\" assert d2_cfg.db_engine == \"mssql\" assert d2_cfg.db_port == 8080 assert d2_cfg.db_host == \"somewhere\" assert d2_cfg.table_name == \"table_2\" assert d2_cfg.foo == \"bar\" d3_cfg = Config.configure_data_node_from( source_configuration=d1_cfg, id=\"d3\", scope=Scope.SCENARIO, validity_period=datetime.timedelta(days=1), table_name=\"table_3\", foo=\"baz\", ) assert d3_cfg.id == \"d3\" assert d3_cfg.storage_type == \"sql_table\" assert d3_cfg.scope == Scope.SCENARIO assert d3_cfg.validity_period == datetime.timedelta(days=1) assert d3_cfg.db_username == \"foo\" assert d3_cfg.db_password == \"bar\" assert d3_cfg.db_name == \"db\" assert d3_cfg.db_engine == \"mssql\" assert d3_cfg.db_port == 8080 assert d3_cfg.db_host == \"somewhere\" assert d3_cfg.table_name == \"table_3\" assert d3_cfg.foo == \"baz\" def test_data_node_count(): Config.configure_data_node(\"data_nodes1\", \"pickle\") assert len(Config.data_nodes) == 2 Config.configure_data_node(\"data_nodes2\", \"pickle\") assert len(Config.data_nodes) == 3 Config.configure_data_node(\"data_nodes3\", \"pickle\") assert len(Config.data_nodes) == 4 def test_data_node_getitem(): data_node_id = \"data_nodes1\" data_node_config = Config.configure_data_node(data_node_id, \"pickle\", default_path=\"foo.p\") assert Config.data_nodes[data_node_id].id == data_node_config.id assert Config.data_nodes[data_node_id].default_path == \"foo.p\" assert Config.data_nodes[data_node_id].storage_type == data_node_config.storage_type assert Config.data_nodes[data_node_id].scope == data_node_config.scope assert Config.data_nodes[data_node_id].properties == data_node_config.properties assert Config.data_nodes[data_node_id].cacheable == data_node_config.cacheable def test_data_node_creation_no_duplication(): Config.configure_data_node(\"data_nodes1\", \"pickle\") assert len(Config.data_nodes) == 2 Config.configure_data_node(\"data_nodes1\", \"pickle\") assert len(Config.data_nodes) == 2 def test_date_node_create_with_datetime(): data_node_config = Config.configure_data_node( id=\"datetime_data\", my_property=datetime.datetime(1991, 1, 1), foo=\"hello\", test=1, test_dict={\"type\": \"Datetime\", 2: \"daw\"}, ) assert data_node_config.foo == \"hello\" assert data_node_config.my_property == datetime.datetime(1991, 1, 1) assert data_node_config.test == 1 assert data_node_config.test_dict.get(\"type\") == \"Datetime\" def test_data_node_with_env_variable_value(): with mock.patch.dict(os.environ, {\"FOO\": \"pickle\", \"BAR\": \"baz\"}): Config.configure_data_node(\"data_node\", storage_type=\"ENV[FOO]\", prop=\"ENV[BAR]\") assert Config.data_nodes[\"data_node\"].prop == \"baz\" assert Config.data_nodes[\"data_node\"].properties[\"prop\"] == \"baz\" assert Config.data_nodes[\"data_node\"]._properties[\"prop\"] == \"ENV[BAR]\" assert Config.data_nodes[\"data_node\"].storage_type == \"pickle\" 
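# --- Illustrative sketch, not part of the original test: the same ENV[...] templating
# resolved against a mocked environment. The variable names STORAGE and DATA_PATH and
# the config id \"env_dn\" are invented for the example. ---
import os
from unittest import mock

from taipy.config.config import Config

with mock.patch.dict(os.environ, {\"STORAGE\": \"csv\", \"DATA_PATH\": \"/tmp/data.csv\"}):
    env_dn = Config.configure_data_node(\"env_dn\", storage_type=\"ENV[STORAGE]\", path=\"ENV[DATA_PATH]\")
    # Public attributes expose the resolved values; the raw ENV[...] templates remain
    # in the private _properties / _storage_type fields, as the surrounding asserts
    # show for the original \"data_node\" config.
    assert Config.data_nodes[\"env_dn\"].storage_type == \"csv\"
    assert Config.data_nodes[\"env_dn\"].path == \"/tmp/data.csv\"
# --- end of illustrative sketch ---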
assert Config.data_nodes[\"data_node\"]._storage_type == \"ENV[FOO]\" def test_data_node_with_env_variable_in_write_fct_args(): def read_fct(): ... def write_fct(): ... with mock.patch.dict(os.environ, {\"FOO\": \"bar\", \"BAZ\": \"qux\"}): Config.configure_data_node( \"data_node\", storage_type=\"generic\", read_fct=read_fct, write_fct=write_fct, write_fct_args=[\"ENV[FOO]\", \"my_param\", \"ENV[BAZ]\"], ) assert Config.data_nodes[\"data_node\"].write_fct_args == [\"bar\", \"my_param\", \"qux\"] def test_data_node_with_env_variable_in_read_fct_args(): def read_fct(): ... def write_fct(): ... with mock.patch.dict(os.environ, {\"FOO\": \"bar\", \"BAZ\": \"qux\"}): Config.configure_data_node( \"data_node\", storage_type=\"generic\", read_fct=read_fct, write_fct=write_fct, read_fct_args=[\"ENV[FOO]\", \"my_param\", \"ENV[BAZ]\"], ) assert Config.data_nodes[\"data_node\"].read_fct_args == [\"bar\", \"my_param\", \"qux\"] def test_block_datanode_config_update_in_development_mode(): data_node_id = \"data_node_id\" Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) data_node_config = Config.configure_data_node( id=data_node_id, storage_type=\"pickle\", default_path=\"foo.p\", scope=Scope.SCENARIO, ) assert Config.data_nodes[data_node_id].id == data_node_id assert Config.data_nodes[data_node_id].default_path == \"foo.p\" assert Config.data_nodes[data_node_id].storage_type == \"pickle\" assert Config.data_nodes[data_node_id].scope == Scope.SCENARIO assert Config.data_nodes[data_node_id].properties == {\"default_path\": \"foo.p\"} _OrchestratorFactory._build_dispatcher() with pytest.raises(ConfigurationUpdateBlocked): data_node_config.storage_type = \"foo\" with pytest.raises(ConfigurationUpdateBlocked): data_node_config.scope = Scope.SCENARIO with pytest.raises(ConfigurationUpdateBlocked): data_node_config.cacheable = True with pytest.raises(ConfigurationUpdateBlocked): data_node_config.properties = {\"foo\": \"bar\"} assert Config.data_nodes[data_node_id].id == data_node_id assert Config.data_nodes[data_node_id].default_path == \"foo.p\" assert Config.data_nodes[data_node_id].storage_type == \"pickle\" assert Config.data_nodes[data_node_id].scope == Scope.SCENARIO assert Config.data_nodes[data_node_id].properties == {\"default_path\": \"foo.p\"} def test_block_datanode_config_update_in_standalone_mode(): data_node_id = \"data_node_id\" Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE) data_node_config = Config.configure_data_node( id=data_node_id, storage_type=\"pickle\", default_path=\"foo.p\", scope=Scope.SCENARIO, ) assert Config.data_nodes[data_node_id].id == data_node_id assert Config.data_nodes[data_node_id].default_path == \"foo.p\" assert Config.data_nodes[data_node_id].storage_type == \"pickle\" assert Config.data_nodes[data_node_id].scope == Scope.SCENARIO assert Config.data_nodes[data_node_id].properties == {\"default_path\": \"foo.p\"} _OrchestratorFactory._build_dispatcher() with pytest.raises(ConfigurationUpdateBlocked): data_node_config.storage_type = \"foo\" with pytest.raises(ConfigurationUpdateBlocked): data_node_config.scope = Scope.SCENARIO with pytest.raises(ConfigurationUpdateBlocked): data_node_config.cacheable = True with pytest.raises(ConfigurationUpdateBlocked): data_node_config.properties = {\"foo\": \"bar\"} assert Config.data_nodes[data_node_id].id == data_node_id assert Config.data_nodes[data_node_id].default_path == \"foo.p\" assert Config.data_nodes[data_node_id].storage_type == \"pickle\" assert Config.data_nodes[data_node_id].scope == 
Scope.SCENARIO assert Config.data_nodes[data_node_id].properties == {\"default_path\": \"foo.p\"} def test_clean_config(): dn1_config = Config.configure_data_node( id=\"id1\", storage_type=\"csv\", default_path=\"foo.p\", scope=Scope.GLOBAL, validity_period=datetime.timedelta(2), ) dn2_config = Config.configure_data_node( id=\"id2\", storage_type=\"json\", default_path=\"bar.json\", scope=Scope.GLOBAL, validity_period=datetime.timedelta(2), ) assert Config.data_nodes[\"id1\"] is dn1_config assert Config.data_nodes[\"id2\"] is dn2_config dn1_config._clean() dn2_config._clean() # Check if the instance before and after _clean() is the same assert Config.data_nodes[\"id1\"] is dn1_config assert Config.data_nodes[\"id2\"] is dn2_config # Check if the value is similar to the default_config, but with difference instances assert dn1_config.id == \"id1\" assert dn2_config.id == \"id2\" assert dn1_config.storage_type == dn2_config.storage_type == \"pickle\" assert dn1_config.scope == dn2_config.scope == Scope.SCENARIO assert dn1_config.validity_period is dn2_config.validity_period is None assert dn1_config.default_path is dn2_config.default_path is None assert dn1_config.properties == dn2_config.properties == {} def test_deprecated_cacheable_attribute_remains_compatible(): dn_1_id = \"dn_1_id\" dn_1_config = Config.configure_data_node( id=dn_1_id, storage_type=\"pickle\", cacheable=False, scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_1_id].id == dn_1_id assert Config.data_nodes[dn_1_id].storage_type == \"pickle\" assert Config.data_nodes[dn_1_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": False} assert not Config.data_nodes[dn_1_id].cacheable dn_1_config.cacheable = True assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_1_id].cacheable dn_2_id = \"dn_2_id\" dn_2_config = Config.configure_data_node( id=dn_2_id, storage_type=\"pickle\", cacheable=True, scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_2_id].id == dn_2_id assert Config.data_nodes[dn_2_id].storage_type == \"pickle\" assert Config.data_nodes[dn_2_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_2_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_2_id].cacheable dn_2_config.cacheable = False assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": False} assert not Config.data_nodes[dn_1_id].cacheable dn_3_id = \"dn_3_id\" dn_3_config = Config.configure_data_node( id=dn_3_id, storage_type=\"pickle\", scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_3_id].id == dn_3_id assert Config.data_nodes[dn_3_id].storage_type == \"pickle\" assert Config.data_nodes[dn_3_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_3_id].properties == {} assert not Config.data_nodes[dn_3_id].cacheable dn_3_config.cacheable = True assert Config.data_nodes[dn_3_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_3_id].cacheable "} {"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config import MigrationConfig from taipy.config.config import Config def mock_func(): pass def test_check_if_entity_property_key_used_is_predefined(caplog): with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() assert caplog.text == \"\" core.stop() caplog.clear() Config.unique_sections[MigrationConfig.name]._properties[\"_entity_owner\"] = None with patch(\"sys.argv\", 
[\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() assert ( \"Properties of MigrationConfig `VERSION_MIGRATION` cannot have `_entity_owner` as its property.\" in caplog.text ) caplog.clear() Config.unique_sections[MigrationConfig.name]._properties[\"_entity_owner\"] = \"entity_owner\" with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"Properties of MigrationConfig `VERSION_MIGRATION` cannot have `_entity_owner` as its property.\" ' Current value of property `_entity_owner` is \"entity_owner\".' ) assert expected_error_message in caplog.text def test_check_valid_version(caplog): data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") Config.add_migration_function(\"2.0\", data_nodes1, mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() assert \"The target version for a migration function must be a production version.\" in caplog.text caplog.clear() Config.unblock_update() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() assert caplog.text == \"\" core.stop() def test_check_callable_function(caplog): data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") Config.add_migration_function(\"1.0\", data_nodes1, 1) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"The migration function of config `data_nodes1` from version 1.0 must be populated with\" \" Callable value. Current value of property `migration_fcts` is 1.\" ) assert expected_error_message in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.0\", data_nodes1, \"bar\") with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"The migration function of config `data_nodes1` from version 1.0 must be populated with\" ' Callable value. Current value of property `migration_fcts` is \"bar\".' ) assert expected_error_message in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.0\", data_nodes1, mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() def test_check_migration_from_productions_to_productions_exist(caplog): _VersionManager._set_production_version(\"1.0\", True) _VersionManager._set_production_version(\"1.1\", True) _VersionManager._set_production_version(\"1.2\", True) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() assert 'There is no migration function from production version \"1.0\" to version \"1.1\".' in caplog.text assert 'There is no migration function from production version \"1.1\" to version \"1.2\".' in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.2\", \"data_nodes1\", mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() assert 'There is no migration function from production version \"1.0\" to version \"1.1\".' 
in caplog.text "} {"text": "import pytest from taipy.config.checker.issue_collector import IssueCollector from taipy.config.config import Config class TestConfigIdChecker: def test_check_standalone_mode(self, caplog): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") Config.configure_scenario(id=\"bar\", task_configs=[], additional_data_node_configs=[]) Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"bar\", task_configs=[]) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"`bar` is used as the config_id of multiple configurations ['DATA_NODE', 'SCENARIO']\" ' Current value of property `config_id` is \"bar\".' ) assert expected_error_message in caplog.text Config.configure_task(id=\"bar\", function=print) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"`bar` is used as the config_id of multiple configurations ['DATA_NODE', 'TASK', 'SCENARIO']\" ' Current value of property `config_id` is \"bar\".' ) assert expected_error_message in caplog.text Config.configure_task(id=\"foo\", function=print) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 2 expected_error_message = ( \"`foo` is used as the config_id of multiple configurations ['DATA_NODE', 'TASK']\" ' Current value of property `config_id` is \"foo\".' ) assert expected_error_message in caplog.text "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pytest from src.taipy.core.config.job_config import JobConfig from taipy.config.checker.issue_collector import IssueCollector from taipy.config.config import Config class TestJobConfigChecker: def test_check_standalone_mode(self, caplog): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE, max_nb_of_workers=2) Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"DataNode `foo`: In-memory storage type can ONLY be used in development mode. Current\" ' value of property `storage_type` is \"in_memory\".' 
) assert expected_error_message in caplog.text "} {"text": "from copy import copy import pytest from src.taipy.core.config import TaskConfig from src.taipy.core.config.data_node_config import DataNodeConfig from taipy.config.checker.issue_collector import IssueCollector from taipy.config.config import Config class TestTaskConfigChecker: def test_check_config_id(self, caplog): config = Config._applied_config Config._compile_configs() Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 assert len(Config._collector.warnings) == 0 config._sections[TaskConfig.name][\"new\"] = copy(config._sections[TaskConfig.name][\"default\"]) config._sections[TaskConfig.name][\"new\"].id = None with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 2 assert \"config_id of TaskConfig `None` is empty\" in caplog.text assert \"function field of TaskConfig `new` is empty\" in caplog.text assert len(Config._collector.warnings) == 2 assert \"inputs field of TaskConfig `new` is empty.\" in caplog.text assert \"outputs field of TaskConfig `new` is empty.\" in caplog.text caplog.clear() config._sections[TaskConfig.name][\"new\"].id = \"new\" with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 assert len(Config._collector.warnings) == 2 def test_check_if_entity_property_key_used_is_predefined(self, caplog): Config._collector = IssueCollector() config = Config._applied_config Config._compile_configs() Config.check() assert len(Config._collector.errors) == 0 config._sections[TaskConfig.name][\"new\"] = copy(config._sections[TaskConfig.name][\"default\"]) config._sections[TaskConfig.name][\"new\"]._properties[\"_entity_owner\"] = None with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 2 assert \"function field of TaskConfig `new` is empty\" in caplog.text assert \"Properties of TaskConfig `default` cannot have `_entity_owner` as its property.\" in caplog.text caplog.clear() config._sections[TaskConfig.name][\"new\"] = copy(config._sections[TaskConfig.name][\"default\"]) config._sections[TaskConfig.name][\"new\"]._properties[\"_entity_owner\"] = \"entity_owner\" with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 2 assert \"function field of TaskConfig `new` is empty\" in caplog.text expected_error_message = ( \"Properties of TaskConfig `default` cannot have `_entity_owner` as its property.\" ' Current value of property `_entity_owner` is \"entity_owner\".' 
) assert expected_error_message in caplog.text def test_check_inputs(self, caplog): config = Config._applied_config Config._compile_configs() Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 assert len(Config._collector.warnings) == 0 config._sections[TaskConfig.name][\"new\"] = config._sections[TaskConfig.name][\"default\"] config._sections[TaskConfig.name][\"new\"].id, config._sections[TaskConfig.name][\"new\"].function = \"new\", print Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 assert len(Config._collector.warnings) == 2 assert \"inputs field of TaskConfig `new` is empty.\" in caplog.text assert \"outputs field of TaskConfig `new` is empty.\" in caplog.text config._sections[TaskConfig.name][\"new\"]._inputs = \"bar\" with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"inputs field of TaskConfig `new` must be populated with a list of DataNodeConfig\" \" objects. Current value of property `inputs` is ['b', 'a', 'r'].\" ) assert expected_error_message in caplog.text assert len(Config._collector.warnings) == 1 config._sections[TaskConfig.name][\"new\"]._inputs = [\"bar\"] with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"inputs field of TaskConfig `new` must be populated with a list of DataNodeConfig\" \" objects. Current value of property `inputs` is ['bar'].\" ) assert expected_error_message in caplog.text assert len(Config._collector.warnings) == 1 config._sections[TaskConfig.name][\"new\"]._inputs = [DataNodeConfig(\"bar\")] Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 assert len(Config._collector.warnings) == 1 config._sections[TaskConfig.name][\"new\"]._inputs = [\"bar\", DataNodeConfig(\"bar\")] with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"inputs field of TaskConfig `new` must be populated with a list of\" \" DataNodeConfig objects. 
Current value of property `inputs` is\" \" ['bar', 0: registration_queue.get() # Test UPDATE Event scenario.is_primary = False assert registration_queue.qsize() == 1 tp.set_primary(scenario) assert registration_queue.qsize() == 2 tp.subscribe_scenario(print, None, scenario=scenario) assert registration_queue.qsize() == 3 tp.unsubscribe_scenario(print, None, scenario=scenario) assert registration_queue.qsize() == 4 tp.tag(scenario, \"testing\") assert registration_queue.qsize() == 5 tp.untag(scenario, \"testing\") assert registration_queue.qsize() == 6 scenario.properties[\"flag\"] = \"production\" assert registration_queue.qsize() == 7 scenario.properties.update({\"description\": \"a scenario\", \"test_mult\": True}) assert registration_queue.qsize() == 9 scenario.properties.pop(\"test_mult\") assert registration_queue.qsize() == 10 scenario.name = \"my_scenario\" assert registration_queue.qsize() == 11 cycle.name = \"new cycle name\" assert registration_queue.qsize() == 12 cycle.properties[\"valid\"] = True assert registration_queue.qsize() == 13 cycle.properties.update({\"re_run_periodically\": True}) assert registration_queue.qsize() == 14 cycle.properties.pop(\"re_run_periodically\") assert registration_queue.qsize() == 15 sequence.properties[\"name\"] = \"weather_forecast\" assert registration_queue.qsize() == 16 tp.subscribe_sequence(print, None, sequence) assert registration_queue.qsize() == 17 tp.unsubscribe_sequence(print, None, sequence) assert registration_queue.qsize() == 18 task.skippable = True assert registration_queue.qsize() == 19 task.properties[\"number_of_run\"] = 2 assert registration_queue.qsize() == 20 task.properties.update({\"debug\": True}) assert registration_queue.qsize() == 21 task.properties.pop(\"debug\") assert registration_queue.qsize() == 22 dn.editor_id = \"new editor id\" assert registration_queue.qsize() == 23 dn.properties[\"sorted\"] = True assert registration_queue.qsize() == 24 dn.properties.update({\"only_fetch_first_100\": True}) assert registration_queue.qsize() == 25 dn.properties.pop(\"only_fetch_first_100\") assert registration_queue.qsize() == 26 published_events = [] while registration_queue.qsize() != 0: published_events.append(registration_queue.get()) expected_event_types = [ EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.CYCLE, EventEntityType.CYCLE, EventEntityType.CYCLE, EventEntityType.CYCLE, EventEntityType.SEQUENCE, EventEntityType.SEQUENCE, EventEntityType.SEQUENCE, EventEntityType.TASK, EventEntityType.TASK, EventEntityType.TASK, EventEntityType.TASK, EventEntityType.DATA_NODE, EventEntityType.DATA_NODE, EventEntityType.DATA_NODE, EventEntityType.DATA_NODE, ] expected_attribute_names = [ \"is_primary\", \"is_primary\", \"subscribers\", \"subscribers\", \"tags\", \"tags\", \"properties\", \"properties\", \"properties\", \"properties\", \"properties\", \"name\", \"properties\", \"properties\", \"properties\", \"properties\", \"subscribers\", \"subscribers\", \"skippable\", \"properties\", \"properties\", \"properties\", \"editor_id\", \"properties\", \"properties\", \"properties\", ] expected_event_entity_id = [ scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, cycle.id, cycle.id, cycle.id, 
cycle.id, sequence.id, sequence.id, sequence.id, task.id, task.id, task.id, task.id, dn.id, dn.id, dn.id, dn.id, ] expected_event_operation_type = [EventOperation.UPDATE] * len(expected_event_types) assert all( [ event.entity_type == expected_event_types[i] and event.entity_id == expected_event_entity_id[i] and event.operation == expected_event_operation_type[i] and event.attribute_name == expected_attribute_names[i] for i, event in enumerate(published_events) ] ) def test_publish_update_event_in_context_manager(): _, registration_queue = Notifier.register() dn_config = Config.configure_data_node(\"dn_config\") task_config = Config.configure_task(\"task_config\", print, [dn_config]) scenario_config = Config.configure_scenario( \"scenario_config\", [task_config], frequency=Frequency.DAILY, flag=\"test\" ) scenario_config.add_sequences({\"sequence_config\": [task_config]}) scenario = tp.create_scenario(scenario_config) cycle = scenario.cycle task = scenario.tasks[task_config.id] dn = scenario.data_nodes[dn_config.id] sequence = scenario.sequences[\"sequence_config\"] scenario.properties.update({\"description\": \"a scenario\"}) assert registration_queue.qsize() == 6 while registration_queue.qsize() > 0: registration_queue.get() # Test UPDATE Event in Context Manager assert registration_queue.qsize() == 0 # If multiple entities is in context, the last to enter will be the first to exit # So the published event will have the order starting with scenario first and ending with dn with dn as d, task as t, sequence as s, cycle as c, scenario as sc: sc.is_primary = True assert registration_queue.qsize() == 0 tp.set_primary(sc) assert registration_queue.qsize() == 0 sc.properties[\"flag\"] = \"production\" assert registration_queue.qsize() == 0 sc.properties.update({\"description\": \"a scenario\"}) assert registration_queue.qsize() == 0 sc.properties.pop(\"description\") assert registration_queue.qsize() == 0 sc.name = \"my_scenario\" assert registration_queue.qsize() == 0 c.name = \"another new cycle name\" assert registration_queue.qsize() == 0 c.properties[\"valid\"] = True assert registration_queue.qsize() == 0 c.properties.update({\"re_run_periodically\": True}) assert registration_queue.qsize() == 0 s.properties[\"name\"] = \"weather_forecast\" assert registration_queue.qsize() == 0 t.skippable = True assert registration_queue.qsize() == 0 t.properties[\"number_of_run\"] = 2 assert registration_queue.qsize() == 0 t.properties.update({\"debug\": True}) assert registration_queue.qsize() == 0 d.editor_id = \"another new editor id\" assert registration_queue.qsize() == 0 d.properties[\"sorted\"] = True assert registration_queue.qsize() == 0 d.properties.update({\"only_fetch_first_100\": True}) assert registration_queue.qsize() == 0 published_events = [] assert registration_queue.qsize() == 16 while registration_queue.qsize() != 0: published_events.append(registration_queue.get()) expected_event_types = [ EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.CYCLE, EventEntityType.CYCLE, EventEntityType.CYCLE, EventEntityType.SEQUENCE, EventEntityType.TASK, EventEntityType.TASK, EventEntityType.TASK, EventEntityType.DATA_NODE, EventEntityType.DATA_NODE, EventEntityType.DATA_NODE, ] expected_attribute_names = [ \"is_primary\", \"is_primary\", \"properties\", \"properties\", \"properties\", \"properties\", \"name\", \"properties\", \"properties\", \"properties\", \"skippable\", 
\"properties\", \"properties\", \"editor_id\", \"properties\", \"properties\", ] expected_event_entity_id = [ scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, cycle.id, cycle.id, cycle.id, sequence.id, task.id, task.id, task.id, dn.id, dn.id, dn.id, ] assert all( [ event.entity_type == expected_event_types[i] and event.entity_id == expected_event_entity_id[i] and event.operation == EventOperation.UPDATE and event.attribute_name == expected_attribute_names[i] for i, event in enumerate(published_events) ] ) def test_publish_submission_event(): _, registration_queue = Notifier.register() dn_config = Config.configure_data_node(\"dn_config\") task_config = Config.configure_task(\"task_config\", print, [dn_config]) scenario_config = Config.configure_scenario( \"scenario_config\", [task_config], frequency=Frequency.DAILY, flag=\"test\" ) scenario_config.add_sequences({\"sequence_config\": [task_config]}) scenario = tp.create_scenario(scenario_config) assert registration_queue.qsize() == 5 while registration_queue.qsize() > 0: registration_queue.get() # Test SUBMISSION Event job = scenario.submit()[0] assert registration_queue.qsize() == 6 published_events = [] while registration_queue.qsize() != 0: published_events.append(registration_queue.get()) expected_operations = [ EventOperation.CREATION, EventOperation.CREATION, EventOperation.UPDATE, EventOperation.UPDATE, EventOperation.UPDATE, EventOperation.SUBMISSION, ] expected_attribute_names = [None, None, \"jobs\", \"status\", \"submission_status\", None] expected_event_types = [ EventEntityType.SUBMISSION, EventEntityType.JOB, EventEntityType.SUBMISSION, EventEntityType.JOB, EventEntityType.SUBMISSION, EventEntityType.SCENARIO, ] expected_event_entity_id = [job.submit_id, job.id, job.submit_id, job.id, job.submit_id, scenario.id] assert all( [ event.entity_type == expected_event_types[i] and event.entity_id == expected_event_entity_id[i] and event.operation == expected_operations[i] and event.attribute_name == expected_attribute_names[i] for i, event in enumerate(published_events) ] ) def test_publish_deletion_event(): _, registration_queue = Notifier.register() dn_config = Config.configure_data_node(\"dn_config\") task_config = Config.configure_task(\"task_config\", print, [dn_config]) scenario_config = Config.configure_scenario( \"scenario_config\", [task_config], frequency=Frequency.DAILY, flag=\"test\" ) scenario_config.add_sequences({\"sequence_config\": [task_config]}) scenario = tp.create_scenario(scenario_config) cycle = scenario.cycle task = scenario.tasks[task_config.id] dn = scenario.data_nodes[dn_config.id] sequence = scenario.sequences[\"sequence_config\"] job = scenario.submit()[0] assert registration_queue.qsize() == 11 while registration_queue.qsize() > 0: registration_queue.get() # Test DELETION Event tp.delete(scenario.id) assert registration_queue.qsize() == 7 published_events = [] while registration_queue.qsize() != 0: published_events.append(registration_queue.get()) expected_event_types = [ EventEntityType.CYCLE, EventEntityType.SEQUENCE, EventEntityType.SCENARIO, EventEntityType.TASK, EventEntityType.JOB, EventEntityType.DATA_NODE, EventEntityType.SUBMISSION, ] expected_event_entity_id = [cycle.id, sequence.id, scenario.id, task.id, job.id, dn.id, job.submit_id] expected_event_operation_type = [EventOperation.DELETION] * len(expected_event_types) assert all( [ event.entity_type == expected_event_types[i] and event.entity_id == expected_event_entity_id[i] and event.operation == 
expected_event_operation_type[i] and event.attribute_name is None for i, event in enumerate(published_events) ] ) scenario = tp.create_scenario(scenario_config) cycle = scenario.cycle assert registration_queue.qsize() == 5 # only to clear the queue while registration_queue.qsize() != 0: registration_queue.get() tp.clean_all_entities_by_version() assert registration_queue.qsize() == 5 published_events = [] while registration_queue.qsize() != 0: published_events.append(registration_queue.get()) expected_event_types = [ EventEntityType.JOB, EventEntityType.CYCLE, EventEntityType.SCENARIO, EventEntityType.TASK, EventEntityType.DATA_NODE, ] expected_event_entity_id = [None, cycle.id, scenario.id, None, None] assert all( [ event.entity_type == expected_event_types[i] and event.entity_id == expected_event_entity_id[i] and event.operation == EventOperation.DELETION and event.attribute_name is None for i, event in enumerate(published_events) ] ) "} {"text": "import pytest from src.taipy.core.exceptions.exceptions import InvalidEventOperation from src.taipy.core.notification._topic import _Topic from src.taipy.core.notification.event import EventEntityType, EventOperation def test_general_topic_creation(): topic_1 = _Topic(None, None, None, None) assert topic_1.entity_type is None assert topic_1.entity_id is None assert topic_1.operation is None assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.SCENARIO, \"scenario_id\") assert topic_2.entity_type == EventEntityType.SCENARIO assert topic_2.entity_id == \"scenario_id\" assert topic_2.operation is None assert topic_2.attribute_name is None topic_3 = _Topic(None, None, EventOperation.CREATION) assert topic_3.entity_type is None assert topic_3.entity_id is None assert topic_3.operation == EventOperation.CREATION assert topic_3.attribute_name is None topic_4 = _Topic(None, None, EventOperation.UPDATE, \"properties\") assert topic_4.entity_type is None assert topic_4.entity_id is None assert topic_4.operation == EventOperation.UPDATE assert topic_4.attribute_name == \"properties\" topic_5 = _Topic(entity_type=EventEntityType.JOB, operation=EventOperation.DELETION) assert topic_5.entity_type == EventEntityType.JOB assert topic_5.entity_id is None assert topic_5.operation == EventOperation.DELETION assert topic_5.attribute_name is None topic_6 = _Topic(entity_type=EventEntityType.SEQUENCE) assert topic_6.entity_type == EventEntityType.SEQUENCE assert topic_6.entity_id is None assert topic_6.operation is None assert topic_6.attribute_name is None def test_topic_creation_cycle(): topic_1 = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.CYCLE assert topic_1.entity_id == \"cycle_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.UPDATE, \"frequency\") assert topic_2.entity_type == EventEntityType.CYCLE assert topic_2.entity_id == \"cycle_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"frequency\" topic_3 = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.CYCLE assert topic_3.entity_id == \"cycle_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.CREATION) assert topic_4.entity_type == EventEntityType.CYCLE assert topic_4.entity_id == \"cycle_id\" 
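# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file). The _Topic fields
# exercised in these assertions are the same fields a registration is matched
# on, so a consumer can narrow what it receives by passing them to
# Notifier.register(). This is a minimal, hedged example that reuses only the
# register() arguments already shown elsewhere in these tests; the helper name
# below is made up for illustration.
def _example_register_for_cycle_creations():
    from src.taipy.core.notification.event import EventEntityType, EventOperation
    from src.taipy.core.notification.notifier import Notifier

    # Register for CYCLE creation events only; events that do not match this
    # (entity_type, operation) pair should not land in the returned queue,
    # mirroring the filtered consumers used later in this section.
    registration_id, queue = Notifier.register(
        entity_type=EventEntityType.CYCLE, operation=EventOperation.CREATION
    )
    return registration_id, queue
# ---------------------------------------------------------------------------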
assert topic_4.operation == EventOperation.CREATION assert topic_4.attribute_name is None # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.CREATION, \"frequency\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.DELETION, \"frequency\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.CYCLE, \"cycle_id\", attribute_name=\"frequency\") with pytest.raises(InvalidEventOperation): _ = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.SUBMISSION) with pytest.raises(InvalidEventOperation): _ = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.SUBMISSION, \"frequency\") def test_topic_creation_scenario(): topic_1 = _Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.SCENARIO assert topic_1.entity_id == \"scenario_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.UPDATE, \"is_primary\") assert topic_2.entity_type == EventEntityType.SCENARIO assert topic_2.entity_id == \"scenario_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"is_primary\" topic_3 = _Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.SCENARIO assert topic_3.entity_id == \"scenario_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.SUBMISSION) assert topic_4.entity_type == EventEntityType.SCENARIO assert topic_4.entity_id == \"scenario_id\" assert topic_4.operation == EventOperation.SUBMISSION assert topic_4.attribute_name is None topic_5 = _Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.UPDATE, \"properties\") assert topic_5.entity_type == EventEntityType.SCENARIO assert topic_5.entity_id == \"scenario_id\" assert topic_5.operation == EventOperation.UPDATE assert topic_5.attribute_name == \"properties\" # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.CREATION, \"is_primary\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.DELETION, \"is_primary\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.SUBMISSION, \"is_primary\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SCENARIO, \"scenario_id\", attribute_name=\"is_primary\") def test_topic_creation_sequence(): topic_1 = _Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.SEQUENCE assert topic_1.entity_id == \"sequence_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.UPDATE, \"subscribers\") assert topic_2.entity_type == EventEntityType.SEQUENCE assert topic_2.entity_id == \"sequence_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"subscribers\" topic_3 = _Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.SEQUENCE assert topic_3.entity_id == 
\"sequence_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.SUBMISSION) assert topic_4.entity_type == EventEntityType.SEQUENCE assert topic_4.entity_id == \"sequence_id\" assert topic_4.operation == EventOperation.SUBMISSION assert topic_4.attribute_name is None topic_5 = _Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.DELETION) assert topic_5.entity_type == EventEntityType.SEQUENCE assert topic_5.entity_id == \"sequence_id\" assert topic_5.operation == EventOperation.DELETION assert topic_5.attribute_name is None # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.CREATION, \"subscribers\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.DELETION, \"subscribers\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.SUBMISSION, \"subscribers\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SEQUENCE, \"sequence_id\", attribute_name=\"subscribers\") def test_topic_creation_task(): topic_1 = _Topic(EventEntityType.TASK, \"task_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.TASK assert topic_1.entity_id == \"task_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.TASK, \"task_id\", EventOperation.UPDATE, \"function\") assert topic_2.entity_type == EventEntityType.TASK assert topic_2.entity_id == \"task_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"function\" topic_3 = _Topic(EventEntityType.TASK, \"task_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.TASK assert topic_3.entity_id == \"task_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(EventEntityType.TASK, \"task_id\", EventOperation.SUBMISSION) assert topic_4.entity_type == EventEntityType.TASK assert topic_4.entity_id == \"task_id\" assert topic_4.operation == EventOperation.SUBMISSION assert topic_4.attribute_name is None topic_5 = _Topic(EventEntityType.TASK, \"task_id\", EventOperation.SUBMISSION) assert topic_5.entity_type == EventEntityType.TASK assert topic_5.entity_id == \"task_id\" assert topic_5.operation == EventOperation.SUBMISSION assert topic_5.attribute_name is None # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.TASK, \"task_id\", EventOperation.CREATION, \"function\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.TASK, \"task_id\", EventOperation.DELETION, \"function\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.TASK, \"task_id\", EventOperation.SUBMISSION, \"function\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.TASK, \"task_id\", attribute_name=\"function\") def test_topic_creation_datanode(): topic_1 = _Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.DATA_NODE assert topic_1.entity_id == \"dn_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.UPDATE, \"properties\") assert topic_2.entity_type == 
EventEntityType.DATA_NODE assert topic_2.entity_id == \"dn_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"properties\" topic_3 = _Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.DATA_NODE assert topic_3.entity_id == \"dn_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(None, \"dn_id\", EventOperation.UPDATE, \"scope\") assert topic_4.entity_type is None assert topic_4.entity_id == \"dn_id\" assert topic_4.operation == EventOperation.UPDATE assert topic_4.attribute_name == \"scope\" # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.CREATION, \"properties\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.DELETION, \"properties\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.DATA_NODE, \"dn_id\", attribute_name=\"properties\") with pytest.raises(InvalidEventOperation): _ = _Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.SUBMISSION) # with pytest.raises(InvalidEventOperation): # _ = Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.SUBMISSION, \"properties\") def test_topic_creation_job(): topic_1 = _Topic(EventEntityType.JOB, \"job_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.JOB assert topic_1.entity_id == \"job_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.JOB, \"job_id\", EventOperation.UPDATE, \"force\") assert topic_2.entity_type == EventEntityType.JOB assert topic_2.entity_id == \"job_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"force\" topic_3 = _Topic(EventEntityType.JOB, \"job_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.JOB assert topic_3.entity_id == \"job_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(EventEntityType.JOB, \"job_id\", EventOperation.CREATION) assert topic_4.entity_type == EventEntityType.JOB assert topic_4.entity_id == \"job_id\" assert topic_4.operation == EventOperation.CREATION assert topic_4.attribute_name is None # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.JOB, \"job_id\", EventOperation.CREATION, \"force\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.JOB, \"job_id\", EventOperation.DELETION, \"force\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.JOB, \"job_id\", attribute_name=\"force\") with pytest.raises(InvalidEventOperation): _ = _Topic(EventEntityType.JOB, \"job_id\", EventOperation.SUBMISSION) # with pytest.raises(InvalidEventOperation): # _ = Topic(EventEntityType.JOB, \"job_id\", EventOperation.SUBMISSION, \"force\") def test_topic_equal(): assert _Topic() == _Topic() assert _Topic(EventEntityType.SCENARIO) == _Topic(EventEntityType.SCENARIO) assert _Topic(entity_id=\"sequence_id\") == _Topic(entity_id=\"sequence_id\") assert _Topic(operation=EventOperation.SUBMISSION) == _Topic(operation=EventOperation.SUBMISSION) assert _Topic(EventEntityType.JOB, \"JOB_id\", EventOperation.UPDATE, \"status\") == _Topic( EventEntityType.JOB, \"JOB_id\", EventOperation.UPDATE, \"status\" ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the 
\"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pytest from src.taipy.core.exceptions.exceptions import InvalidEventAttributeName, InvalidEventOperation from src.taipy.core.notification.event import Event, EventEntityType, EventOperation from taipy.config.common.frequency import Frequency def test_event_creation_cycle(): event_1 = Event( entity_type=EventEntityType.CYCLE, operation=EventOperation.CREATION, entity_id=\"cycle_id\", ) assert event_1.creation_date is not None assert event_1.entity_type == EventEntityType.CYCLE assert event_1.entity_id == \"cycle_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( entity_type=EventEntityType.CYCLE, operation=EventOperation.UPDATE, entity_id=\"cycle_id\", attribute_name=\"frequency\", attribute_value=Frequency.DAILY, ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.CYCLE assert event_2.entity_id == \"cycle_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"frequency\" event_3 = Event(entity_type=EventEntityType.CYCLE, entity_id=\"cycle_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == EventEntityType.CYCLE assert event_3.entity_id == \"cycle_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.CYCLE, operation=EventOperation.CREATION, entity_id=\"cycle_id\", attribute_name=\"frequency\", ) with pytest.raises(InvalidEventAttributeName): _ = Event(EventEntityType.CYCLE, EventOperation.DELETION, entity_id=\"cycle_id\", attribute_name=\"frequency\") with pytest.raises(InvalidEventOperation): _ = Event( entity_type=EventEntityType.CYCLE, operation=EventOperation.SUBMISSION, entity_id=\"cycle_id\", ) with pytest.raises(InvalidEventOperation): _ = Event( entity_type=EventEntityType.CYCLE, operation=EventOperation.SUBMISSION, entity_id=\"cycle_id\", attribute_name=\"frequency\", ) def test_event_creation_scenario(): event_1 = Event(entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.CREATION) assert event_1.creation_date is not None assert event_1.entity_type == EventEntityType.SCENARIO assert event_1.entity_id == \"scenario_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.UPDATE, attribute_name=\"is_primary\", attribute_value=True, ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.SCENARIO assert event_2.entity_id == \"scenario_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"is_primary\" assert event_2.attribute_value is True event_3 = Event(entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == 
EventEntityType.SCENARIO assert event_3.entity_id == \"scenario_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None event_4 = Event(entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.SUBMISSION) assert event_4.creation_date is not None assert event_4.entity_type == EventEntityType.SCENARIO assert event_4.entity_id == \"scenario_id\" assert event_4.operation == EventOperation.SUBMISSION assert event_4.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.CREATION, attribute_name=\"is_primary\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.DELETION, attribute_name=\"is_primary\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.SUBMISSION, attribute_name=\"is_primary\", ) def test_event_creation_sequence(): event_1 = Event(entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.CREATION) assert event_1.creation_date is not None assert event_1.entity_type == EventEntityType.SEQUENCE assert event_1.entity_id == \"sequence_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.UPDATE, attribute_name=\"subscribers\", attribute_value=object(), ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.SEQUENCE assert event_2.entity_id == \"sequence_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"subscribers\" event_3 = Event(entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == EventEntityType.SEQUENCE assert event_3.entity_id == \"sequence_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None event_4 = Event(entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.SUBMISSION) assert event_4.creation_date is not None assert event_4.entity_type == EventEntityType.SEQUENCE assert event_4.entity_id == \"sequence_id\" assert event_4.operation == EventOperation.SUBMISSION assert event_4.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.CREATION, attribute_name=\"subscribers\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.DELETION, attribute_name=\"subscribers\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.SUBMISSION, attribute_name=\"subscribers\", ) def test_event_creation_task(): event_1 = Event(entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.CREATION) assert event_1.creation_date is not None assert event_1.entity_type == EventEntityType.TASK assert event_1.entity_id == \"task_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( 
entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.UPDATE, attribute_name=\"function\", ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.TASK assert event_2.entity_id == \"task_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"function\" event_3 = Event(entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == EventEntityType.TASK assert event_3.entity_id == \"task_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None event_4 = Event(entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.SUBMISSION) assert event_4.creation_date is not None assert event_4.entity_type == EventEntityType.TASK assert event_4.entity_id == \"task_id\" assert event_4.operation == EventOperation.SUBMISSION assert event_4.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.CREATION, attribute_name=\"function\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.DELETION, attribute_name=\"function\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.SUBMISSION, attribute_name=\"function\", ) def test_event_creation_datanode(): event_1 = Event(entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.CREATION) assert event_1.creation_date is not None assert event_1.entity_type == EventEntityType.DATA_NODE assert event_1.entity_id == \"dn_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.UPDATE, attribute_name=\"properties\", ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.DATA_NODE assert event_2.entity_id == \"dn_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"properties\" event_3 = Event(entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == EventEntityType.DATA_NODE assert event_3.entity_id == \"dn_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.CREATION, attribute_name=\"properties\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.DELETION, attribute_name=\"properties\", ) with pytest.raises(InvalidEventOperation): _ = Event(entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.SUBMISSION) with pytest.raises(InvalidEventOperation): _ = Event( entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.SUBMISSION, attribute_name=\"properties\", ) def test_event_creation_job(): event_1 = Event(entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.CREATION) assert event_1.creation_date is not None assert event_1.entity_type 
== EventEntityType.JOB assert event_1.entity_id == \"job_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.UPDATE, attribute_name=\"force\" ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.JOB assert event_2.entity_id == \"job_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"force\" event_3 = Event(entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == EventEntityType.JOB assert event_3.entity_id == \"job_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.CREATION, attribute_name=\"force\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.DELETION, attribute_name=\"force\", ) with pytest.raises(InvalidEventOperation): _ = Event(entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.SUBMISSION) with pytest.raises(InvalidEventOperation): _ = Event( entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.SUBMISSION, attribute_name=\"force\", ) "} {"text": "from dataclasses import dataclass, field from math import exp from queue import SimpleQueue from colorama import init from src.taipy.core import taipy as tp from src.taipy.core.config import scenario_config from src.taipy.core.job.status import Status from src.taipy.core.notification.core_event_consumer import CoreEventConsumerBase from src.taipy.core.notification.event import Event, EventEntityType, EventOperation from src.taipy.core.notification.notifier import Notifier from taipy.config import Config, Frequency from tests.core.utils import assert_true_after_time class Snapshot: \"\"\" A captured snapshot of the recording core events consumer. \"\"\" def __init__(self): self.collected_events = [] self.entity_type_collected = {} self.operation_collected = {} self.attr_name_collected = {} def capture_event(self, event): self.collected_events.append(event) self.entity_type_collected[event.entity_type] = self.entity_type_collected.get(event.entity_type, 0) + 1 self.operation_collected[event.operation] = self.operation_collected.get(event.operation, 0) + 1 if event.attribute_name: self.attr_name_collected[event.attribute_name] = self.attr_name_collected.get(event.attribute_name, 0) + 1 class RecordingConsumer(CoreEventConsumerBase): \"\"\" A straightforward and no-thread core events consumer that allows to capture snapshots of received events. \"\"\" def __init__(self, registration_id: str, queue: SimpleQueue): super().__init__(registration_id, queue) def capture(self) -> Snapshot: \"\"\" Capture a snapshot of events received between the previous snapshot (or from the start of this consumer). 
\"\"\" snapshot = Snapshot() while not self.queue.empty(): event = self.queue.get() snapshot.capture_event(event) return snapshot def process_event(self, event: Event): # Nothing todo pass def start(self): # Nothing to do here pass def stop(self): # Nothing to do here either pass def identity(x): return x def test_events_published_for_scenario_creation(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) register_id_0, register_queue_0 = Notifier.register() all_evts = RecordingConsumer(register_id_0, register_queue_0) all_evts.start() # Create a scenario only trigger 6 creation events (for cycle, data node(x2), task, sequence and scenario) tp.create_scenario(sc_config) snapshot = all_evts.capture() assert len(snapshot.collected_events) == 6 assert snapshot.entity_type_collected.get(EventEntityType.CYCLE, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.DATA_NODE, 0) == 2 assert snapshot.entity_type_collected.get(EventEntityType.TASK, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.SEQUENCE, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.SCENARIO, 0) == 1 assert snapshot.operation_collected.get(EventOperation.CREATION, 0) == 6 all_evts.stop() def test_no_event_published_for_getting_scenario(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) scenario = tp.create_scenario(sc_config) register_id_0, register_queue_0 = Notifier.register() all_evts = RecordingConsumer(register_id_0, register_queue_0) all_evts.start() # Get all scenarios does not trigger any event tp.get_scenarios() snapshot = all_evts.capture() assert len(snapshot.collected_events) == 0 # Get one scenario does not trigger any event tp.get(scenario.id) snapshot = all_evts.capture() assert len(snapshot.collected_events) == 0 all_evts.stop() def test_events_published_for_writing_dn(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) scenario = tp.create_scenario(sc_config) register_id_0, register_queue_0 = Notifier.register() all_evts = RecordingConsumer(register_id_0, register_queue_0) all_evts.start() # Write input manually trigger 4 data node update events # for last_edit_date, editor_id, editor_expiration_date and edit_in_progress scenario.the_input.write(\"test\") snapshot = all_evts.capture() assert len(snapshot.collected_events) == 4 assert snapshot.entity_type_collected.get(EventEntityType.CYCLE, 0) == 0 assert snapshot.entity_type_collected.get(EventEntityType.DATA_NODE, 0) == 4 assert snapshot.entity_type_collected.get(EventEntityType.TASK, 0) == 0 assert snapshot.entity_type_collected.get(EventEntityType.SEQUENCE, 0) == 0 
assert snapshot.entity_type_collected.get(EventEntityType.SCENARIO, 0) == 0 assert snapshot.operation_collected.get(EventOperation.CREATION, 0) == 0 assert snapshot.operation_collected.get(EventOperation.UPDATE, 0) == 4 all_evts.stop() def test_events_published_for_scenario_submission(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) scenario = tp.create_scenario(sc_config) scenario.the_input.write(\"test\") register_id_0, register_queue_0 = Notifier.register() all_evts = RecordingConsumer(register_id_0, register_queue_0) all_evts.start() # Submit a scenario triggers: # 1 scenario submission event # 7 dn update events (for last_edit_date, editor_id(x2), editor_expiration_date(x2) and edit_in_progress(x2)) # 1 job creation event # 3 job update events (for status: PENDING, RUNNING and COMPLETED) # 1 submission creation event # 1 submission update event for jobs # 3 submission update events (for status: PENDING, RUNNING and COMPLETED) scenario.submit() snapshot = all_evts.capture() assert len(snapshot.collected_events) == 17 assert snapshot.entity_type_collected.get(EventEntityType.CYCLE, 0) == 0 assert snapshot.entity_type_collected.get(EventEntityType.DATA_NODE, 0) == 7 assert snapshot.entity_type_collected.get(EventEntityType.TASK, 0) == 0 assert snapshot.entity_type_collected.get(EventEntityType.SEQUENCE, 0) == 0 assert snapshot.entity_type_collected.get(EventEntityType.SCENARIO, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.JOB, 0) == 4 assert snapshot.entity_type_collected.get(EventEntityType.SUBMISSION, 0) == 5 assert snapshot.operation_collected.get(EventOperation.CREATION, 0) == 2 assert snapshot.operation_collected.get(EventOperation.UPDATE, 0) == 14 assert snapshot.operation_collected.get(EventOperation.SUBMISSION, 0) == 1 assert snapshot.attr_name_collected[\"last_edit_date\"] == 1 assert snapshot.attr_name_collected[\"editor_id\"] == 2 assert snapshot.attr_name_collected[\"editor_expiration_date\"] == 2 assert snapshot.attr_name_collected[\"edit_in_progress\"] == 2 assert snapshot.attr_name_collected[\"status\"] == 3 assert snapshot.attr_name_collected[\"jobs\"] == 1 assert snapshot.attr_name_collected[\"submission_status\"] == 3 all_evts.stop() def test_events_published_for_scenario_deletion(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) scenario = tp.create_scenario(sc_config) scenario.the_input.write(\"test\") scenario.submit() register_id_0, register_queue_0 = Notifier.register() all_evts = RecordingConsumer(register_id_0, register_queue_0) all_evts.start() # Delete a scenario trigger 8 deletion events # 1 scenario deletion event # 1 cycle deletion event # 2 dn deletion events (for input and output) # 1 task deletion event # 1 sequence deletion event # 1 job deletion event # 1 submission deletion event tp.delete(scenario.id) snapshot = all_evts.capture() assert len(snapshot.collected_events) == 8 assert 
snapshot.entity_type_collected.get(EventEntityType.CYCLE, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.DATA_NODE, 0) == 2 assert snapshot.entity_type_collected.get(EventEntityType.TASK, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.SEQUENCE, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.SCENARIO, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.SUBMISSION, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.JOB, 0) == 1 assert snapshot.operation_collected.get(EventOperation.UPDATE, 0) == 0 assert snapshot.operation_collected.get(EventOperation.SUBMISSION, 0) == 0 assert snapshot.operation_collected.get(EventOperation.DELETION, 0) == 8 all_evts.stop() def test_job_events(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) register_id, register_queue = Notifier.register(entity_type=EventEntityType.JOB) consumer = RecordingConsumer(register_id, register_queue) consumer.start() # Create scenario scenario = tp.create_scenario(sc_config) snapshot = consumer.capture() assert len(snapshot.collected_events) == 0 # Submit scenario scenario.submit() snapshot = consumer.capture() # 2 events expected: one for creation, another for status update assert len(snapshot.collected_events) == 2 assert snapshot.collected_events[0].operation == EventOperation.CREATION assert snapshot.collected_events[0].entity_type == EventEntityType.JOB assert snapshot.collected_events[0].metadata.get(\"task_config_id\") == task_config.id assert snapshot.collected_events[1].operation == EventOperation.UPDATE assert snapshot.collected_events[1].entity_type == EventEntityType.JOB assert snapshot.collected_events[1].metadata.get(\"task_config_id\") == task_config.id assert snapshot.collected_events[1].attribute_name == \"status\" assert snapshot.collected_events[1].attribute_value == Status.BLOCKED job = tp.get_jobs()[0] tp.cancel_job(job) snapshot = consumer.capture() assert len(snapshot.collected_events) == 1 event = snapshot.collected_events[0] assert event.metadata.get(\"task_config_id\") == task_config.id assert event.attribute_name == \"status\" assert event.attribute_value == Status.CANCELED consumer.stop() def test_scenario_events(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) register_id, register_queue = Notifier.register(entity_type=EventEntityType.SCENARIO) consumer = RecordingConsumer(register_id, register_queue) consumer.start() scenario = tp.create_scenario(sc_config) snapshot = consumer.capture() assert len(snapshot.collected_events) == 1 assert snapshot.collected_events[0].operation == EventOperation.CREATION assert snapshot.collected_events[0].entity_type == EventEntityType.SCENARIO assert snapshot.collected_events[0].metadata.get(\"config_id\") == scenario.config_id scenario.submit() snapshot = consumer.capture() assert len(snapshot.collected_events) == 1 assert 
snapshot.collected_events[0].operation == EventOperation.SUBMISSION assert snapshot.collected_events[0].entity_type == EventEntityType.SCENARIO assert snapshot.collected_events[0].metadata.get(\"config_id\") == scenario.config_id # Delete scenario tp.delete(scenario.id) snapshot = consumer.capture() assert len(snapshot.collected_events) == 1 assert snapshot.collected_events[0].operation == EventOperation.DELETION assert snapshot.collected_events[0].entity_type == EventEntityType.SCENARIO consumer.stop() def test_data_node_events(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) register_id, register_queue = Notifier.register(entity_type=EventEntityType.DATA_NODE) consumer = RecordingConsumer(register_id, register_queue) consumer.start() scenario = tp.create_scenario(sc_config) snapshot = consumer.capture() # We expect two creation events since we have two data nodes: assert len(snapshot.collected_events) == 2 assert snapshot.collected_events[0].operation == EventOperation.CREATION assert snapshot.collected_events[0].entity_type == EventEntityType.DATA_NODE assert snapshot.collected_events[0].metadata.get(\"config_id\") in [output_config.id, input_config.id] assert snapshot.collected_events[1].operation == EventOperation.CREATION assert snapshot.collected_events[1].entity_type == EventEntityType.DATA_NODE assert snapshot.collected_events[1].metadata.get(\"config_id\") in [output_config.id, input_config.id] # Delete scenario tp.delete(scenario.id) snapshot = consumer.capture() assert len(snapshot.collected_events) == 2 assert snapshot.collected_events[0].operation == EventOperation.DELETION assert snapshot.collected_events[0].entity_type == EventEntityType.DATA_NODE assert snapshot.collected_events[1].operation == EventOperation.DELETION assert snapshot.collected_events[1].entity_type == EventEntityType.DATA_NODE consumer.stop() "} {"text": "from queue import SimpleQueue from src.taipy.core import taipy as tp from src.taipy.core.notification.core_event_consumer import CoreEventConsumerBase from src.taipy.core.notification.event import Event, EventEntityType, EventOperation from src.taipy.core.notification.notifier import Notifier from taipy.config import Config, Frequency from tests.core.utils import assert_true_after_time class AllCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.event_collected = 0 self.event_entity_type_collected: dict = {} self.event_operation_collected: dict = {} super().__init__(registration_id, queue) def process_event(self, event: Event): self.event_collected += 1 self.event_entity_type_collected[event.entity_type] = ( self.event_entity_type_collected.get(event.entity_type, 0) + 1 ) self.event_operation_collected[event.operation] = self.event_operation_collected.get(event.operation, 0) + 1 class ScenarioCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.scenario_event_collected = 0 self.event_operation_collected: dict = {} super().__init__(registration_id, queue) def process_event(self, event: Event): self.scenario_event_collected += 1 self.event_operation_collected[event.operation] = 
self.event_operation_collected.get(event.operation, 0) + 1 class TaskCreationCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.task_event_collected = 0 self.creation_event_operation_collected = 0 super().__init__(registration_id, queue) def process_event(self, event: Event): self.task_event_collected += 1 self.creation_event_operation_collected += 1 def test_core_event_consumer(): register_id_0, register_queue_0 = Notifier.register() all_evt_csumer_0 = AllCoreEventConsumerProcessor(register_id_0, register_queue_0) register_id_1, register_queue_1 = Notifier.register(entity_type=EventEntityType.SCENARIO) sc_evt_csumer_1 = ScenarioCoreEventConsumerProcessor(register_id_1, register_queue_1) register_id_2, register_queue_2 = Notifier.register( entity_type=EventEntityType.TASK, operation=EventOperation.CREATION ) task_creation_evt_csumer_2 = TaskCreationCoreEventConsumerProcessor(register_id_2, register_queue_2) all_evt_csumer_0.start() sc_evt_csumer_1.start() task_creation_evt_csumer_2.start() dn_config = Config.configure_data_node(\"dn_config\") task_config = Config.configure_task(\"task_config\", print, [dn_config]) scenario_config = Config.configure_scenario( \"scenario_config\", [task_config], frequency=Frequency.DAILY, sequences={\"seq\": [task_config]} ) # Create a scenario trigger 5 creation events scenario = tp.create_scenario(scenario_config) assert_true_after_time(lambda: all_evt_csumer_0.event_collected == 5, time=10) assert_true_after_time(lambda: len(all_evt_csumer_0.event_entity_type_collected) == 5, time=10) assert_true_after_time(lambda: all_evt_csumer_0.event_operation_collected[EventOperation.CREATION] == 5, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.scenario_event_collected == 1, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.event_operation_collected[EventOperation.CREATION] == 1, time=10) assert_true_after_time(lambda: len(sc_evt_csumer_1.event_operation_collected) == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.task_event_collected == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.creation_event_operation_collected == 1, time=10) # Delete a scenario trigger 5 update events tp.delete(scenario.id) assert_true_after_time(lambda: all_evt_csumer_0.event_collected == 10, time=10) assert_true_after_time(lambda: len(all_evt_csumer_0.event_entity_type_collected) == 5, time=10) assert_true_after_time(lambda: all_evt_csumer_0.event_operation_collected[EventOperation.DELETION] == 5, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.scenario_event_collected == 2, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.event_operation_collected[EventOperation.DELETION] == 1, time=10) assert_true_after_time(lambda: len(sc_evt_csumer_1.event_operation_collected) == 2, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.task_event_collected == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.creation_event_operation_collected == 1, time=10) all_evt_csumer_0.stop() sc_evt_csumer_1.stop() task_creation_evt_csumer_2.stop() "} {"text": " from queue import SimpleQueue from src.taipy.core.notification import EventEntityType, EventOperation from src.taipy.core.notification._registration import _Registration from src.taipy.core.notification._topic import _Topic def test_create_registration(): registration_0 = _Registration() assert isinstance(registration_0.registration_id, str) assert 
registration_0.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_0.queue, SimpleQueue) assert registration_0.queue.qsize() == 0 assert isinstance(registration_0.topic, _Topic) assert registration_0.topic.entity_type is None assert registration_0.topic.entity_id is None assert registration_0.topic.operation is None assert registration_0.topic.attribute_name is None registration_1 = _Registration( entity_type=EventEntityType.SCENARIO, entity_id=\"SCENARIO_scenario_id\", operation=EventOperation.CREATION ) assert isinstance(registration_1.registration_id, str) assert registration_1.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_1.queue, SimpleQueue) assert registration_1.queue.qsize() == 0 assert isinstance(registration_1.topic, _Topic) assert registration_1.topic.entity_type == EventEntityType.SCENARIO assert registration_1.topic.entity_id == \"SCENARIO_scenario_id\" assert registration_1.topic.operation == EventOperation.CREATION assert registration_1.topic.attribute_name is None registration_2 = _Registration( entity_type=EventEntityType.SEQUENCE, entity_id=\"SEQUENCE_scenario_id\", operation=EventOperation.UPDATE, attribute_name=\"tasks\", ) assert isinstance(registration_2.registration_id, str) assert registration_2.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_2.queue, SimpleQueue) assert registration_2.queue.qsize() == 0 assert isinstance(registration_2.topic, _Topic) assert registration_2.topic.entity_type == EventEntityType.SEQUENCE assert registration_2.topic.entity_id == \"SEQUENCE_scenario_id\" assert registration_2.topic.operation == EventOperation.UPDATE assert registration_2.topic.attribute_name == \"tasks\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
import datetime from datetime import timedelta from src.taipy.core import CycleId from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.cycle.cycle import Cycle from taipy.config.common.frequency import Frequency def test_create_cycle_entity(current_datetime): cycle_1 = Cycle( Frequency.DAILY, {\"key\": \"value\"}, creation_date=current_datetime, start_date=current_datetime, end_date=current_datetime, name=\"foo\", ) assert cycle_1.id is not None assert cycle_1.name == \"foo\" assert cycle_1.properties == {\"key\": \"value\"} assert cycle_1.creation_date == current_datetime assert cycle_1.start_date == current_datetime assert cycle_1.end_date == current_datetime assert cycle_1.key == \"value\" assert cycle_1.frequency == Frequency.DAILY cycle_2 = Cycle(Frequency.YEARLY, {}, current_datetime, current_datetime, current_datetime) assert cycle_2.name == current_datetime.strftime(\"%Y\") assert cycle_2.frequency == Frequency.YEARLY cycle_3 = Cycle(Frequency.MONTHLY, {}, current_datetime, current_datetime, current_datetime) assert cycle_3.name == current_datetime.strftime(\"%B %Y\") assert cycle_3.frequency == Frequency.MONTHLY cycle_4 = Cycle(Frequency.WEEKLY, {}, current_datetime, current_datetime, current_datetime) assert cycle_4.name == current_datetime.strftime(\"Week %W %Y, from %d. %B\") assert cycle_4.frequency == Frequency.WEEKLY cycle_5 = Cycle(Frequency.DAILY, {}, current_datetime, current_datetime, current_datetime) assert cycle_5.name == current_datetime.strftime(\"%A, %d. %B %Y\") assert cycle_5.frequency == Frequency.DAILY def test_cycle_name(current_datetime): start_date = datetime.datetime(2023, 1, 2) cycle = Cycle(Frequency.DAILY, {}, current_datetime, start_date, start_date, \"name\", CycleId(\"id\")) assert cycle.name == \"name\" cycle = Cycle(Frequency.DAILY, {}, current_datetime, start_date, start_date, None, CycleId(\"id\")) assert cycle.name == \"Monday, 02. January 2023\" cycle = Cycle(Frequency.WEEKLY, {}, current_datetime, start_date, start_date, None, CycleId(\"id\")) assert cycle.name == \"Week 01 2023, from 02. 
January\" cycle = Cycle(Frequency.MONTHLY, {}, current_datetime, start_date, start_date, None, CycleId(\"id\")) assert cycle.name == \"January 2023\" cycle = Cycle(Frequency.QUARTERLY, {}, current_datetime, start_date, start_date, None, CycleId(\"id\")) assert cycle.name == \"2023 Q1\" cycle = Cycle(Frequency.YEARLY, {}, current_datetime, start_date, start_date, None, CycleId(\"id\")) assert cycle.name == \"2023\" def test_cycle_label(current_datetime): cycle = Cycle( Frequency.DAILY, {\"key\": \"value\"}, creation_date=current_datetime, start_date=current_datetime, end_date=current_datetime, ) assert cycle.get_label() == cycle.name assert cycle.get_simple_label() == cycle.name cycle._properties[\"label\"] = \"label\" assert cycle.get_label() == \"label\" assert cycle.get_simple_label() == \"label\" def test_add_property_to_scenario(current_datetime): cycle = Cycle( Frequency.WEEKLY, {\"key\": \"value\"}, current_datetime, current_datetime, current_datetime, name=\"foo\", ) assert cycle.properties == {\"key\": \"value\"} assert cycle.key == \"value\" cycle.properties[\"new_key\"] = \"new_value\" assert cycle.properties == {\"key\": \"value\", \"new_key\": \"new_value\"} assert cycle.key == \"value\" assert cycle.new_key == \"new_value\" def test_auto_set_and_reload(current_datetime): cycle_1 = Cycle( Frequency.WEEKLY, {\"key\": \"value\"}, current_datetime, current_datetime, current_datetime, name=\"foo\", ) _CycleManager._set(cycle_1) cycle_2 = _CycleManager._get(cycle_1) # auto set & reload on frequency attribute assert cycle_1.frequency == Frequency.WEEKLY cycle_1.frequency = Frequency.YEARLY assert cycle_1.frequency == Frequency.YEARLY assert cycle_2.frequency == Frequency.YEARLY cycle_2.frequency = Frequency.MONTHLY assert cycle_1.frequency == Frequency.MONTHLY assert cycle_2.frequency == Frequency.MONTHLY new_datetime_1 = current_datetime + timedelta(1) new_datetime_2 = current_datetime + timedelta(2) # auto set & reload on creation_date attribute assert cycle_1.creation_date == current_datetime assert cycle_2.creation_date == current_datetime cycle_1.creation_date = new_datetime_1 assert cycle_1.creation_date == new_datetime_1 assert cycle_2.creation_date == new_datetime_1 cycle_2.creation_date = new_datetime_2 assert cycle_1.creation_date == new_datetime_2 assert cycle_2.creation_date == new_datetime_2 # auto set & reload on start_date attribute assert cycle_1.start_date == current_datetime assert cycle_2.start_date == current_datetime cycle_1.start_date = new_datetime_1 assert cycle_1.start_date == new_datetime_1 assert cycle_2.start_date == new_datetime_1 cycle_2.start_date = new_datetime_2 assert cycle_1.start_date == new_datetime_2 assert cycle_2.start_date == new_datetime_2 # auto set & reload on end_date attribute assert cycle_1.end_date == current_datetime assert cycle_2.end_date == current_datetime cycle_1.end_date = new_datetime_1 assert cycle_1.end_date == new_datetime_1 assert cycle_2.end_date == new_datetime_1 cycle_2.end_date = new_datetime_2 assert cycle_1.end_date == new_datetime_2 assert cycle_2.end_date == new_datetime_2 # auto set & reload on names attribute assert cycle_1.name == \"foo\" assert cycle_2.name == \"foo\" cycle_1.name = \"fed\" assert cycle_1.name == \"fed\" assert cycle_2.name == \"fed\" cycle_2.name = \"def\" assert cycle_1.name == \"def\" assert cycle_2.name == \"def\" # auto set & reload on properties attribute assert cycle_1.properties == {\"key\": \"value\"} assert cycle_2.properties == {\"key\": \"value\"} cycle_1._properties[\"qux\"] = 4 
assert cycle_1.properties[\"qux\"] == 4 assert cycle_2.properties[\"qux\"] == 4 assert cycle_1.properties == {\"key\": \"value\", \"qux\": 4} assert cycle_2.properties == {\"key\": \"value\", \"qux\": 4} cycle_2._properties[\"qux\"] = 5 assert cycle_1.properties[\"qux\"] == 5 assert cycle_2.properties[\"qux\"] == 5 cycle_1.properties[\"temp_key_1\"] = \"temp_value_1\" cycle_1.properties[\"temp_key_2\"] = \"temp_value_2\" assert cycle_1.properties == { \"qux\": 5, \"key\": \"value\", \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } assert cycle_2.properties == { \"qux\": 5, \"key\": \"value\", \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } cycle_1.properties.pop(\"temp_key_1\") assert \"temp_key_1\" not in cycle_1.properties.keys() assert \"temp_key_1\" not in cycle_1.properties.keys() assert cycle_1.properties == { \"key\": \"value\", \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } assert cycle_2.properties == { \"key\": \"value\", \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } cycle_2.properties.pop(\"temp_key_2\") assert cycle_1.properties == {\"key\": \"value\", \"qux\": 5} assert cycle_2.properties == {\"key\": \"value\", \"qux\": 5} assert \"temp_key_2\" not in cycle_1.properties.keys() assert \"temp_key_2\" not in cycle_2.properties.keys() cycle_1.properties[\"temp_key_3\"] = 0 assert cycle_1.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 0} assert cycle_2.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 0} cycle_1.properties.update({\"temp_key_3\": 1}) assert cycle_1.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 1} assert cycle_2.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 1} cycle_1.properties.update(dict()) assert cycle_1.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 1} assert cycle_2.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 1} cycle_1.properties.pop(\"key\") cycle_1.properties[\"temp_key_4\"] = 0 cycle_1.properties[\"temp_key_5\"] = 0 new_datetime_3 = new_datetime_1 + timedelta(5) with cycle_1 as cycle: assert cycle.frequency == Frequency.MONTHLY assert cycle.creation_date == new_datetime_2 assert cycle.start_date == new_datetime_2 assert cycle.end_date == new_datetime_2 assert cycle.name == \"def\" assert cycle._is_in_context assert cycle.properties[\"qux\"] == 5 assert cycle.properties[\"temp_key_3\"] == 1 assert cycle.properties[\"temp_key_4\"] == 0 assert cycle.properties[\"temp_key_5\"] == 0 cycle.frequency = Frequency.YEARLY cycle.creation_date = new_datetime_3 cycle.start_date = new_datetime_3 cycle.end_date = new_datetime_3 cycle.name = \"abc\" assert cycle.name == \"def\" assert cycle._name == \"abc\" cycle.properties[\"qux\"] = 9 cycle.properties.pop(\"temp_key_3\") cycle.properties.pop(\"temp_key_4\") cycle.properties.update({\"temp_key_4\": 1}) cycle.properties.update({\"temp_key_5\": 2}) cycle.properties.pop(\"temp_key_5\") cycle.properties.update(dict()) assert cycle.frequency == Frequency.MONTHLY assert cycle.creation_date == new_datetime_2 assert cycle.start_date == new_datetime_2 assert cycle.end_date == new_datetime_2 assert cycle._is_in_context assert cycle.properties[\"qux\"] == 5 assert cycle.name == \"def\" assert cycle.properties[\"temp_key_3\"] == 1 assert cycle.properties[\"temp_key_4\"] == 0 assert cycle.properties[\"temp_key_5\"] == 0 assert cycle_1.frequency == Frequency.YEARLY assert cycle_1.creation_date == new_datetime_3 assert cycle_1.start_date == new_datetime_3 assert cycle_1.end_date == 
new_datetime_3 assert cycle_1.name == \"abc\" assert cycle_1.properties[\"qux\"] == 9 assert \"temp_key_3\" not in cycle_1.properties.keys() assert cycle_1.properties[\"temp_key_4\"] == 1 assert \"temp_key_5\" not in cycle_1.properties.keys() "} {"text": "import os import pytest from src.taipy.core.cycle._cycle_fs_repository import _CycleFSRepository from src.taipy.core.cycle._cycle_sql_repository import _CycleSQLRepository from src.taipy.core.cycle.cycle import Cycle, CycleId from src.taipy.core.exceptions import ModelNotFound class TestCycleRepositories: @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_save_and_load(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) obj = repository._load(cycle.id) assert isinstance(obj, Cycle) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_exists(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) assert repository._exists(cycle.id) assert not repository._exists(\"not-existed-cycle\") @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_load_all(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_load_all_with_filters(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") cycle._name = f\"cycle-{i}\" repository._save(cycle) objs = repository._load_all(filters=[{\"id\": \"cycle-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_CycleSQLRepository]) def test_delete(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) repository._delete(cycle.id) with pytest.raises(ModelNotFound): repository._load(cycle.id) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_delete_all(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_delete_many(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_search(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") cycle.name = f\"cycle-{i}\" repository._save(cycle) assert len(repository._load_all()) == 10 objs = repository._search(\"name\", \"cycle-2\") assert len(objs) == 1 assert isinstance(objs[0], Cycle) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_export(self, tmpdir, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) repository._export(cycle.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _CycleFSRepository else os.path.join(tmpdir.strpath, \"cycle\") assert os.path.exists(os.path.join(dir_path, f\"{cycle.id}.json\")) "} {"text": "from datetime import datetime from 
src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.cycle.cycle import Cycle from src.taipy.core.cycle.cycle_id import CycleId from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config def test_save_and_get_cycle_entity(tmpdir, cycle, current_datetime): _CycleManager._repository.base_path = tmpdir assert len(_CycleManager._get_all()) == 0 _CycleManager._set(cycle) assert _CycleManager._exists(cycle.id) cycle_1 = _CycleManager._get(cycle.id) assert cycle_1.id == cycle.id assert cycle_1.name == cycle.name assert cycle_1.properties == cycle.properties assert cycle_1.creation_date == cycle.creation_date assert cycle_1.start_date == cycle.start_date assert cycle_1.end_date == cycle.end_date assert cycle_1.frequency == cycle.frequency assert len(_CycleManager._get_all()) == 1 assert _CycleManager._get(cycle.id) == cycle assert _CycleManager._get(cycle.id).name == cycle.name assert isinstance(_CycleManager._get(cycle.id).creation_date, datetime) assert _CycleManager._get(cycle.id).creation_date == cycle.creation_date assert _CycleManager._get(cycle.id).frequency == Frequency.DAILY cycle_2_id = CycleId(\"cycle_2\") assert _CycleManager._get(cycle_2_id) is None assert not _CycleManager._exists(cycle_2_id) cycle_3 = Cycle( Frequency.MONTHLY, {}, creation_date=current_datetime, start_date=current_datetime, end_date=current_datetime, name=\"bar\", id=cycle_1.id, ) _CycleManager._set(cycle_3) cycle_3 = _CycleManager._get(cycle_1.id) assert _CycleManager._exists(cycle_1.id) assert len(_CycleManager._get_all()) == 1 assert cycle_3.id == cycle_1.id assert cycle_3.name == cycle_3.name assert cycle_3.properties == cycle_3.properties assert cycle_3.creation_date == current_datetime assert cycle_3.start_date == current_datetime assert cycle_3.end_date == current_datetime assert cycle_3.frequency == cycle_3.frequency def test_create_and_delete_cycle_entity(tmpdir): _CycleManager._repository.base_path = tmpdir assert len(_CycleManager._get_all()) == 0 cycle_1 = _CycleManager._create(Frequency.DAILY, name=\"foo\", key=\"value\") assert cycle_1.id is not None assert cycle_1.name == \"foo\" assert cycle_1.properties == {\"key\": \"value\"} assert cycle_1.creation_date is not None assert cycle_1.start_date is not None assert cycle_1.end_date is not None assert cycle_1.start_date < cycle_1.creation_date < cycle_1.end_date assert cycle_1.key == \"value\" assert cycle_1.frequency == Frequency.DAILY cycle_1_id = cycle_1.id assert _CycleManager._exists(cycle_1.id) assert len(_CycleManager._get_all()) == 1 assert _CycleManager._get(cycle_1_id) == cycle_1 assert _CycleManager._get(cycle_1_id).name == \"foo\" assert isinstance(_CycleManager._get(cycle_1_id).creation_date, datetime) assert _CycleManager._get(cycle_1_id).frequency == Frequency.DAILY cycle_2_id = CycleId(\"cycle_2\") assert _CycleManager._get(cycle_2_id) is None assert not _CycleManager._exists(cycle_2_id) cycle_3 = _CycleManager._create(Frequency.MONTHLY, \"bar\") assert cycle_3.id is not None assert 
cycle_3.name == \"bar\" assert isinstance(cycle_3.creation_date, datetime) assert cycle_3.frequency == Frequency.MONTHLY cycle_3_id = cycle_3.id assert _CycleManager._exists(cycle_3_id) assert len(_CycleManager._get_all()) == 2 assert _CycleManager._get(cycle_3_id).name == \"bar\" cycle_4 = _CycleManager._create(Frequency.YEARLY, \"baz\") cycle_4_id = cycle_4.id assert _CycleManager._exists(cycle_4_id) assert len(_CycleManager._get_all()) == 3 _CycleManager._delete(cycle_4_id) assert len(_CycleManager._get_all()) == 2 assert not _CycleManager._exists(cycle_4_id) assert _CycleManager._get(cycle_4_id) is None _CycleManager._delete_all() assert len(_CycleManager._get_all()) == 0 assert not any(_CycleManager._exists(cycle_id) for cycle_id in [cycle_1_id, cycle_3_id, cycle_4_id]) def test_get_cycle_start_date_and_end_date(): creation_date_1 = datetime.fromisoformat(\"2021-11-11T11:11:01.000001\") daily_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.DAILY, creation_date=creation_date_1) weekly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.WEEKLY, creation_date=creation_date_1) monthly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.MONTHLY, creation_date=creation_date_1) yearly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.YEARLY, creation_date=creation_date_1) assert daily_start_date_1 == datetime.fromisoformat(\"2021-11-11T00:00:00.000000\") assert weekly_start_date_1 == datetime.fromisoformat(\"2021-11-08T00:00:00.000000\") assert monthly_start_date_1 == datetime.fromisoformat(\"2021-11-01T00:00:00.000000\") assert yearly_start_date_1 == datetime.fromisoformat(\"2021-01-01T00:00:00.000000\") daily_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.DAILY, start_date=daily_start_date_1) weekly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.WEEKLY, start_date=weekly_start_date_1) monthly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.MONTHLY, start_date=monthly_start_date_1) yearly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.YEARLY, start_date=yearly_start_date_1) assert daily_end_date_1 == datetime.fromisoformat(\"2021-11-11T23:59:59.999999\") assert weekly_end_date_1 == datetime.fromisoformat(\"2021-11-14T23:59:59.999999\") assert monthly_end_date_1 == datetime.fromisoformat(\"2021-11-30T23:59:59.999999\") assert yearly_end_date_1 == datetime.fromisoformat(\"2021-12-31T23:59:59.999999\") creation_date_2 = datetime.now() daily_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.DAILY, creation_date=creation_date_2) daily_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.DAILY, daily_start_date_2) assert daily_start_date_2.date() == creation_date_2.date() assert daily_end_date_2.date() == creation_date_2.date() assert daily_start_date_2 < creation_date_2 < daily_end_date_2 weekly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.WEEKLY, creation_date=creation_date_2) weekly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.WEEKLY, weekly_start_date_2) assert weekly_start_date_2 < creation_date_2 < weekly_end_date_2 monthly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.MONTHLY, creation_date=creation_date_2) monthly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.MONTHLY, monthly_start_date_2) assert monthly_start_date_2.month == creation_date_2.month and monthly_start_date_2.day == 1 assert monthly_end_date_2.month == creation_date_2.month assert monthly_start_date_2 < creation_date_2 < 
monthly_end_date_2 yearly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.YEARLY, creation_date=creation_date_2) yearly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.YEARLY, yearly_start_date_2) assert yearly_start_date_2.year == creation_date_2.year assert yearly_start_date_2 == datetime(creation_date_2.year, 1, 1) assert yearly_end_date_2.year == creation_date_2.year assert yearly_end_date_2.date() == datetime(creation_date_2.year, 12, 31).date() assert yearly_start_date_2 < creation_date_2 < yearly_end_date_2 def test_hard_delete_shared_entities(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_config_1 = Config.configure_data_node(\"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_config_2 = Config.configure_data_node(\"my_input_2\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_config_3 = Config.configure_data_node(\"my_input_3\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_config_4 = Config.configure_data_node(\"my_input_4\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_config_1, dn_config_2) task_config_2 = Config.configure_task(\"task_config_2\", print, dn_config_2, dn_config_3) task_config_3 = Config.configure_task(\"task_config_3\", print, dn_config_3, dn_config_4) # scope = global creation_date = datetime.now() scenario_config_1 = Config.configure_scenario( \"scenario_config_1\", [task_config_1, task_config_2, task_config_3], creation_date=creation_date, frequency=Frequency.DAILY, ) scenario_config_1.add_sequences( { \"sequence_1\": [task_config_1, task_config_2], \"sequence_2\": [task_config_1, task_config_2], \"sequence_3\": [task_config_3], } ) scenario_config_2 = Config.configure_scenario( \"scenario_config_2\", [task_config_2, task_config_3] ) # No Frequency so cycle attached to scenarios scenario_config_2.add_sequences({\"sequence_3\": [task_config_3]}) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config_1) scenario_2 = _ScenarioManager._create(scenario_config_1) scenario_3 = _ScenarioManager._create(scenario_config_2) scenario_1.submit() scenario_2.submit() scenario_3.submit() assert len(_ScenarioManager._get_all()) == 3 assert len(_SequenceManager._get_all()) == 7 assert len(_TaskManager._get_all()) == 7 assert len(_DataManager._get_all()) == 8 assert len(_JobManager._get_all()) == 8 assert len(_CycleManager._get_all()) == 1 _CycleManager._hard_delete(scenario_1.cycle.id) assert len(_CycleManager._get_all()) == 0 assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 2 assert len(_JobManager._get_all()) == 2 assert len(_DataManager._get_all()) == 3 def test_get_primary(tmpdir, cycle, current_datetime): _CycleManager._repository.base_path = tmpdir assert len(_CycleManager._get_all()) == 0 _CycleManager._set(cycle) cycle_1 = _CycleManager._get(cycle.id) cycle_2 = Cycle(Frequency.MONTHLY, {}, current_datetime, current_datetime, current_datetime, name=\"foo\") _CycleManager._set(cycle_2) cycle_2 = _CycleManager._get(cycle_2.id) cycles = _CycleManager._get_all() assert len(_CycleManager._get_all()) == 2 assert ( len(_CycleManager._get_cycles_by_frequency_and_start_date(cycle_1.frequency, cycle_1.start_date, cycles)) == 1 ) assert ( len(_CycleManager._get_cycles_by_frequency_and_start_date(cycle_2.frequency, cycle_2.start_date, cycles)) == 1 ) assert ( len( 
_CycleManager._get_cycles_by_frequency_and_start_date( Frequency.WEEKLY, datetime(2000, 1, 1, 1, 0, 0, 0), cycles ) ) == 0 ) assert ( len( _CycleManager._get_cycles_by_frequency_and_overlapping_date( cycle_1.frequency, cycle_1.creation_date, cycles ) ) == 1 ) assert ( _CycleManager._get_cycles_by_frequency_and_overlapping_date(cycle_1.frequency, cycle_1.creation_date, cycles)[0] == cycle_1 ) assert ( len( _CycleManager._get_cycles_by_frequency_and_overlapping_date( Frequency.WEEKLY, datetime(2000, 1, 1, 1, 0, 0, 0), cycles ) ) == 0 ) "} {"text": "from datetime import datetime from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory from src.taipy.core.cycle.cycle import Cycle from src.taipy.core.cycle.cycle_id import CycleId from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config def test_save_and_get_cycle_entity(init_sql_repo, cycle, current_datetime): _CycleManager._repository = _CycleManagerFactory._build_repository() _CycleManager._delete_all() assert len(_CycleManager._get_all()) == 0 _CycleManager._set(cycle) assert _CycleManager._exists(cycle.id) cycle_1 = _CycleManager._get(cycle.id) assert cycle_1.id == cycle.id assert cycle_1.name == cycle.name assert cycle_1.properties == cycle.properties assert cycle_1.creation_date == cycle.creation_date assert cycle_1.start_date == cycle.start_date assert cycle_1.end_date == cycle.end_date assert cycle_1.frequency == cycle.frequency assert len(_CycleManager._get_all()) == 1 assert _CycleManager._get(cycle.id) == cycle assert _CycleManager._get(cycle.id).name == cycle.name assert isinstance(_CycleManager._get(cycle.id).creation_date, datetime) assert _CycleManager._get(cycle.id).creation_date == cycle.creation_date assert _CycleManager._get(cycle.id).frequency == Frequency.DAILY cycle_2_id = CycleId(\"cycle_2\") assert _CycleManager._get(cycle_2_id) is None assert not _CycleManager._exists(cycle_2_id) cycle_3 = Cycle( Frequency.MONTHLY, {}, creation_date=current_datetime, start_date=current_datetime, end_date=current_datetime, name=\"bar\", id=cycle_1.id, ) _CycleManager._set(cycle_3) cycle_3 = _CycleManager._get(cycle_1.id) assert _CycleManager._exists(cycle_1.id) assert len(_CycleManager._get_all()) == 1 assert cycle_3.id == cycle_1.id assert cycle_3.name == cycle_3.name assert cycle_3.properties == cycle_3.properties assert cycle_3.creation_date == current_datetime assert cycle_3.start_date == current_datetime assert cycle_3.end_date == current_datetime assert cycle_3.frequency == cycle_3.frequency def test_create_and_delete_cycle_entity(init_sql_repo): _CycleManager._repository = _CycleManagerFactory._build_repository() _CycleManager._delete_all() assert len(_CycleManager._get_all()) == 0 cycle_1 = _CycleManager._create(Frequency.DAILY, name=\"foo\", key=\"value\", display_name=\"foo\") assert cycle_1.id is not None assert cycle_1.name == \"foo\" 
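# Note: keyword arguments that are not part of the _create signature (here "key" and "display_name") are stored in the cycle's properties dictionary, which the next assert checks.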
assert cycle_1.properties == {\"key\": \"value\", \"display_name\": \"foo\"} assert cycle_1.creation_date is not None assert cycle_1.start_date is not None assert cycle_1.end_date is not None assert cycle_1.start_date < cycle_1.creation_date < cycle_1.end_date assert cycle_1.key == \"value\" assert cycle_1.frequency == Frequency.DAILY cycle_1_id = cycle_1.id assert _CycleManager._exists(cycle_1.id) assert len(_CycleManager._get_all()) == 1 assert _CycleManager._get(cycle_1_id) == cycle_1 assert _CycleManager._get(cycle_1_id).name == \"foo\" assert isinstance(_CycleManager._get(cycle_1_id).creation_date, datetime) assert _CycleManager._get(cycle_1_id).frequency == Frequency.DAILY cycle_2_id = CycleId(\"cycle_2\") assert _CycleManager._get(cycle_2_id) is None assert not _CycleManager._exists(cycle_2_id) cycle_3 = _CycleManager._create(Frequency.MONTHLY, \"bar\") assert cycle_3.id is not None assert cycle_3.name == \"bar\" assert isinstance(cycle_3.creation_date, datetime) assert cycle_3.frequency == Frequency.MONTHLY cycle_3_id = cycle_3.id assert _CycleManager._exists(cycle_3_id) assert len(_CycleManager._get_all()) == 2 assert _CycleManager._get(cycle_3_id).name == \"bar\" cycle_4 = _CycleManager._create(Frequency.YEARLY, \"baz\") cycle_4_id = cycle_4.id assert _CycleManager._exists(cycle_4_id) assert len(_CycleManager._get_all()) == 3 _CycleManager._delete(cycle_4_id) assert len(_CycleManager._get_all()) == 2 assert not _CycleManager._exists(cycle_4_id) assert _CycleManager._get(cycle_4_id) is None _CycleManager._delete_all() assert len(_CycleManager._get_all()) == 0 assert not any(_CycleManager._exists(cycle_id) for cycle_id in [cycle_1_id, cycle_3_id, cycle_4_id]) def test_get_cycle_start_date_and_end_date(init_sql_repo): _CycleManager._repository = _CycleManagerFactory._build_repository() _CycleManager._delete_all() creation_date_1 = datetime.fromisoformat(\"2021-11-11T11:11:01.000001\") daily_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.DAILY, creation_date=creation_date_1) weekly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.WEEKLY, creation_date=creation_date_1) monthly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.MONTHLY, creation_date=creation_date_1) yearly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.YEARLY, creation_date=creation_date_1) assert daily_start_date_1 == datetime.fromisoformat(\"2021-11-11T00:00:00.000000\") assert weekly_start_date_1 == datetime.fromisoformat(\"2021-11-08T00:00:00.000000\") assert monthly_start_date_1 == datetime.fromisoformat(\"2021-11-01T00:00:00.000000\") assert yearly_start_date_1 == datetime.fromisoformat(\"2021-01-01T00:00:00.000000\") daily_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.DAILY, start_date=daily_start_date_1) weekly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.WEEKLY, start_date=weekly_start_date_1) monthly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.MONTHLY, start_date=monthly_start_date_1) yearly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.YEARLY, start_date=yearly_start_date_1) assert daily_end_date_1 == datetime.fromisoformat(\"2021-11-11T23:59:59.999999\") assert weekly_end_date_1 == datetime.fromisoformat(\"2021-11-14T23:59:59.999999\") assert monthly_end_date_1 == datetime.fromisoformat(\"2021-11-30T23:59:59.999999\") assert yearly_end_date_1 == datetime.fromisoformat(\"2021-12-31T23:59:59.999999\") creation_date_2 = datetime.now() daily_start_date_2 = 
_CycleManager._get_start_date_of_cycle(Frequency.DAILY, creation_date=creation_date_2) daily_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.DAILY, daily_start_date_2) assert daily_start_date_2.date() == creation_date_2.date() assert daily_end_date_2.date() == creation_date_2.date() assert daily_start_date_2 < creation_date_2 < daily_end_date_2 weekly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.WEEKLY, creation_date=creation_date_2) weekly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.WEEKLY, weekly_start_date_2) assert weekly_start_date_2 < creation_date_2 < weekly_end_date_2 monthly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.MONTHLY, creation_date=creation_date_2) monthly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.MONTHLY, monthly_start_date_2) assert monthly_start_date_2.month == creation_date_2.month and monthly_start_date_2.day == 1 assert monthly_end_date_2.month == creation_date_2.month assert monthly_start_date_2 < creation_date_2 < monthly_end_date_2 yearly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.YEARLY, creation_date=creation_date_2) yearly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.YEARLY, yearly_start_date_2) assert yearly_start_date_2.year == creation_date_2.year assert yearly_start_date_2 == datetime(creation_date_2.year, 1, 1) assert yearly_end_date_2.year == creation_date_2.year assert yearly_end_date_2.date() == datetime(creation_date_2.year, 12, 31).date() assert yearly_start_date_2 < creation_date_2 < yearly_end_date_2 def test_hard_delete_shared_entities(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _ScenarioManager._repository = _ScenarioManagerFactory._build_repository() dn_config_1 = Config.configure_data_node(\"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_config_2 = Config.configure_data_node(\"my_input_2\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_config_3 = Config.configure_data_node(\"my_input_3\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_config_4 = Config.configure_data_node(\"my_input_4\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_config_1, dn_config_2) task_config_2 = Config.configure_task(\"task_config_2\", print, dn_config_2, dn_config_3) task_config_3 = Config.configure_task(\"task_config_3\", print, dn_config_3, dn_config_4) # scope = global creation_date = datetime.now() scenario_config_1 = Config.configure_scenario( \"scenario_config_1\", [task_config_1, task_config_2, task_config_3], creation_date=creation_date, frequency=Frequency.DAILY, ) scenario_config_1.add_sequences( { \"sequence_1\": [task_config_1, task_config_2], \"sequence_2\": [task_config_1, task_config_2], \"sequence_3\": [task_config_3], } ) scenario_config_2 = Config.configure_scenario( \"scenario_config_2\", [task_config_2, task_config_3] ) # No Frequency so cycle attached to scenarios scenario_config_2.add_sequences({\"sequence_3\": [task_config_3]}) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config_1) scenario_2 = _ScenarioManager._create(scenario_config_1) scenario_3 = _ScenarioManager._create(scenario_config_2) scenario_1.submit() scenario_2.submit() scenario_3.submit() assert len(_ScenarioManager._get_all()) == 3 assert len(_SequenceManager._get_all()) == 7 assert len(_TaskManager._get_all()) == 7 assert 
len(_DataManager._get_all()) == 8 assert len(_JobManager._get_all()) == 8 assert len(_CycleManager._get_all()) == 1 _CycleManager._hard_delete(scenario_1.cycle.id) assert len(_CycleManager._get_all()) == 0 assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 2 assert len(_JobManager._get_all()) == 2 assert len(_DataManager._get_all()) == 3 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import dataclasses import pathlib from dataclasses import dataclass from typing import Any, Dict, Iterable, List, Optional, Union from src.taipy.core._manager._manager import _Manager from src.taipy.core._repository._abstract_converter import _AbstractConverter from src.taipy.core._repository._abstract_repository import _AbstractRepository from src.taipy.core._repository._filesystem_repository import _FileSystemRepository from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config @dataclass class MockModel: id: str name: str version: str def to_dict(self): return dataclasses.asdict(self) @staticmethod def from_dict(data: Dict[str, Any]): return MockModel(id=data[\"id\"], name=data[\"name\"], version=data[\"version\"]) @dataclass class MockEntity: def __init__(self, id: str, name: str, version: str = None) -> None: self.id = id self.name = name if version: self._version = version else: self._version = _VersionManager._get_latest_version() class MockConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, entity: MockEntity) -> MockModel: return MockModel(id=entity.id, name=entity.name, version=entity._version) @classmethod def _model_to_entity(cls, model: MockModel) -> MockEntity: return MockEntity(id=model.id, name=model.name, version=model.version) class MockRepository(_AbstractRepository): # type: ignore def __init__(self, **kwargs): self.repo = _FileSystemRepository(**kwargs, converter=MockConverter) def _to_model(self, obj: MockEntity): return MockModel(obj.id, obj.name, obj._version) def _from_model(self, model: MockModel): return MockEntity(model.id, model.name, model.version) def _load(self, entity_id: str) -> MockEntity: return self.repo._load(entity_id) def _load_all(self, filters: Optional[List[Dict]] = None) -> List[MockEntity]: return self.repo._load_all(filters) def _save(self, entity: MockEntity): return self.repo._save(entity) def _exists(self, entity_id: str) -> bool: return self.repo._exists(entity_id) def _delete(self, entity_id: str): return self.repo._delete(entity_id) def _delete_all(self): return self.repo._delete_all() def _delete_many(self, ids: Iterable[str]): return self.repo._delete_many(ids) def _delete_by(self, attribute: str, value: str): return self.repo._delete_by(attribute, value) def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None) -> List[MockEntity]: return self.repo._search(attribute, value, filters) def _export(self, entity_id: str, folder_path: Union[str, pathlib.Path]): return 
self.repo._export(self, entity_id, folder_path) @property def _storage_folder(self) -> pathlib.Path: return pathlib.Path(Config.core.storage_folder) # type: ignore class MockManager(_Manager[MockEntity]): _ENTITY_NAME = MockEntity.__name__ _repository = MockRepository(model_type=MockModel, dir_name=\"foo\") class TestManager: def test_save_and_fetch_model(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) fetched_model = MockManager._get(m.id) assert m == fetched_model def test_exists(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) assert MockManager._exists(m.id) def test_get(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) assert MockManager._get(m.id) == m def test_get_all(self): MockManager._delete_all() objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) _objs = MockManager._get_all() assert len(_objs) == 5 def test_delete(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) MockManager._delete(m.id) assert MockManager._get(m.id) is None def test_delete_all(self): objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) MockManager._delete_all() assert MockManager._get_all() == [] def test_delete_many(self): objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) MockManager._delete_many([\"uuid-0\", \"uuid-1\"]) assert len(MockManager._get_all()) == 3 def test_is_editable(self): m = MockEntity(\"uuid\", \"Foo\") MockManager._set(m) assert MockManager._is_editable(m) def test_is_readable(self): m = MockEntity(\"uuid\", \"Foo\") MockManager._set(m) assert MockManager._is_readable(m) "} {"text": " class NotifyMock: \"\"\" A shared class for testing notification on jobStatus of sequence level and scenario level \"entity\" can be understood as either \"scenario\" or \"sequence\". \"\"\" def __init__(self, entity): self.scenario = entity self.nb_called = 0 self.__name__ = \"NotifyMock\" def __call__(self, entity, job): assert entity == self.scenario if self.nb_called == 0: assert job.is_pending() if self.nb_called == 1: assert job.is_running() if self.nb_called == 2: assert job.is_finished() self.nb_called += 1 def assert_called_3_times(self): assert self.nb_called == 3 def assert_not_called(self): assert self.nb_called == 0 def reset(self): self.nb_called = 0 "} {"text": " def assert_true_after_time(assertion, msg=None, time=120): from datetime import datetime from time import sleep loops = 0 start = datetime.now() while (datetime.now() - start).seconds < time: sleep(1) # Limit CPU usage try: if assertion(): return except BaseException as e: print(\"Raise : \", e) loops += 1 continue if msg: print(msg) assert assertion() "} {"text": "import os import tempfile class NamedTemporaryFile: def __init__(self, content=None): with tempfile.NamedTemporaryFile(\"w\", delete=False) as fd: if content: fd.write(content) self.filename = fd.name def read(self): with open(self.filename, \"r\") as fp: return fp.read() def __del__(self): os.unlink(self.filename) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. from typing import List from src.taipy.core import DataNode, Sequence, SequenceId, Task, TaskId from src.taipy.core._entity._dag import _DAG from taipy.config.common.scope import Scope def assert_x(x: int, *nodes): for node in nodes: assert node.x == x def assert_y(y: List[int], *nodes): for node in nodes: assert node.y in y y.remove(node.y) def assert_x_y(x: int, y: List[int], *nodes): assert_x(x, *nodes) for node in nodes: assert node.y in y y.remove(node.y) def assert_edge_exists(src, dest, dag: _DAG): list_of_tuples = [(edge.src.entity.id, edge.dest.entity.id) for edge in dag.edges] assert (src, dest) in list_of_tuples class TestDAG: def test_get_dag_1(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) dag = sequence._get_dag() # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert dag.length == 7 assert dag.width == 2 assert dag._grid_length == 7 assert dag._grid_width == 3 assert len(dag.nodes) == 11 assert_x_y(0, [0, 2], dag.nodes[\"s1\"], dag.nodes[\"s2\"]) assert_x_y(1, [1], dag.nodes[\"t1\"]) assert_x_y(2, [0, 2], dag.nodes[\"s3\"], dag.nodes[\"s4\"]) assert_x_y(3, [0, 2], dag.nodes[\"t2\"], dag.nodes[\"t4\"]) assert_x_y(4, [0, 2], dag.nodes[\"s5\"], dag.nodes[\"s7\"]) assert_x_y(5, [1], dag.nodes[\"t3\"]) assert_x_y(6, [1], dag.nodes[\"s6\"]) assert len(dag.edges) == 11 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"s2\", \"t1\", dag) assert_edge_exists(\"t1\", \"s3\", dag) assert_edge_exists(\"t1\", \"s4\", dag) assert_edge_exists(\"s3\", \"t2\", dag) assert_edge_exists(\"t2\", \"s5\", dag) assert_edge_exists(\"s5\", \"t3\", dag) assert_edge_exists(\"s4\", \"t3\", dag) assert_edge_exists(\"t3\", \"s6\", dag) assert_edge_exists(\"s4\", \"t4\", dag) assert_edge_exists(\"t4\", \"s7\", dag) def test_get_dag_2(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # 6 | t2 _____ # 5 | \\ # 4 | 
s5 _________________ t3 _______ s6 # 3 | s1 __ _ s4 _____/ # 2 | \\ _ t1 ____/ \\_ t4 _______ s7 # 1 | / # 0 | s2 -- # |________________________________________________ # 0 1 2 3 4 dag = sequence._get_dag() assert dag.length == 5 assert dag.width == 3 assert dag._grid_length == 5 assert dag._grid_width == 7 assert len(dag.nodes) == 10 assert_x_y(0, [0, 3, 6], dag.nodes[\"s1\"], dag.nodes[\"s2\"], dag.nodes[\"t2\"]) assert_x_y(1, [2, 4], dag.nodes[\"t1\"], dag.nodes[\"s5\"]) assert_x_y(2, [3], dag.nodes[\"s4\"]) assert_x_y(3, [2, 4], dag.nodes[\"t3\"], dag.nodes[\"t4\"]) assert_x_y(4, [2, 4], dag.nodes[\"s6\"], dag.nodes[\"s7\"]) assert len(dag.edges) == 9 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"s2\", \"t1\", dag) assert_edge_exists(\"t1\", \"s4\", dag) assert_edge_exists(\"t2\", \"s5\", dag) assert_edge_exists(\"s5\", \"t3\", dag) assert_edge_exists(\"s4\", \"t3\", dag) assert_edge_exists(\"t3\", \"s6\", dag) assert_edge_exists(\"s4\", \"t4\", dag) assert_edge_exists(\"t4\", \"s7\", dag) def test_get_dag_3(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"quuz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"corge\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"hugh\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2, data_node_3], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t2\")) task_3 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t3\")) task_4 = Task(\"garply\", {}, print, output=[data_node_6], id=TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_7], None, TaskId(\"t5\")) sequence = Sequence({}, [task_5, task_3, task_4, task_1, task_2], SequenceId(\"p1\")) # 12 | s7 __ # 11 | \\ # 10 | \\ # 9 | t4 _ \\_ t5 # 8 | \\ ____ t3 ___ # 7 | \\ / \\ # 6 | s3 _ \\__ s6 _ s4 _/ \\___ s5 # 5 | \\ / \\ # 4 | \\ / \\____ t2 # 3 | s2 ___\\__ t1 __/ # 2 | / # 1 | / # 0 | s1 _/ # |________________________________________________ # 0 1 2 3 4 dag = sequence._get_dag() assert dag.length == 5 assert dag.width == 5 assert dag._grid_length == 5 assert dag._grid_width == 13 assert len(dag.nodes) == 12 assert_x_y( 0, [0, 3, 6, 9, 12], dag.nodes[\"s1\"], dag.nodes[\"s2\"], dag.nodes[\"s3\"], dag.nodes[\"s7\"], dag.nodes[\"t4\"] ) assert_x_y(1, [3, 6, 9], dag.nodes[\"t1\"], dag.nodes[\"t5\"], dag.nodes[\"s6\"]) assert_x_y(2, [6], dag.nodes[\"s4\"]) assert_x_y(3, [4, 8], dag.nodes[\"t2\"], dag.nodes[\"t3\"]) assert_x_y(4, [6], dag.nodes[\"s5\"]) assert len(dag.edges) == 9 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"s2\", \"t1\", dag) assert_edge_exists(\"s3\", \"t1\", dag) assert_edge_exists(\"t1\", \"s4\", dag) assert_edge_exists(\"s4\", \"t2\", dag) assert_edge_exists(\"s4\", \"t3\", dag) assert_edge_exists(\"t3\", \"s5\", dag) assert_edge_exists(\"t4\", \"s6\", dag) assert_edge_exists(\"s7\", \"t5\", dag) def test_get_dag_4(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"quuz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"corge\", Scope.SCENARIO, \"s6\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2, 
data_node_3], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t2\")) task_3 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t3\")) task_4 = Task(\"garply\", {}, print, output=[data_node_6], id=TaskId(\"t4\")) sequence = Sequence({}, [task_3, task_4, task_1, task_2], SequenceId(\"p1\")) # 6 | t4 __ # 5 | \\ # 4 | s3 _ \\__ s6 ______ t3 ___ # 3 | \\ ___ s4 _/ \\___ s5 # 2 | s2 __\\__ t1 __/ \\______ t2 # 1 | / # 0 | s1 _/ # |________________________________________________ # 0 1 2 3 4 dag = sequence._get_dag() assert dag.length == 5 assert dag.width == 4 assert dag._grid_length == 5 assert dag._grid_width == 7 assert len(dag.nodes) == 10 assert_x_y(0, [0, 2, 4, 6], dag.nodes[\"s1\"], dag.nodes[\"s2\"], dag.nodes[\"s3\"], dag.nodes[\"t4\"]) assert_x_y(1, [2, 4], dag.nodes[\"t1\"], dag.nodes[\"s6\"]) assert_x_y(2, [3], dag.nodes[\"s4\"]) assert_x_y(3, [2, 4], dag.nodes[\"t2\"], dag.nodes[\"t3\"]) assert_x_y(4, [3], dag.nodes[\"s5\"]) assert len(dag.edges) == 8 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"s2\", \"t1\", dag) assert_edge_exists(\"s3\", \"t1\", dag) assert_edge_exists(\"t1\", \"s4\", dag) assert_edge_exists(\"s4\", \"t2\", dag) assert_edge_exists(\"s4\", \"t3\", dag) assert_edge_exists(\"t3\", \"s5\", dag) assert_edge_exists(\"t4\", \"s6\", dag) def test_get_dag_5(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") task_1 = Task(\"baz\", {}, print, [data_node_1], [data_node_2], TaskId(\"t1\")) sequence = Sequence({}, [task_1], SequenceId(\"p1\")) # 1 | # 0 | s1 __ t1 __ s2 # |_________________ # 0 1 2 dag = sequence._get_dag() assert dag.length == 3 assert dag.width == 1 assert dag._grid_length == 3 assert dag._grid_width == 1 assert len(dag.nodes) == 3 assert_x_y(0, [0], dag.nodes[\"s1\"]) assert_x_y(1, [0], dag.nodes[\"t1\"]) assert_x_y(2, [0], dag.nodes[\"s2\"]) assert len(dag.edges) == 2 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"t1\", \"s2\", dag) def test_get_dag_6(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") task_1 = Task(\"quux\", {}, print, [data_node_1, data_node_2], [data_node_3], TaskId(\"t1\")) task_2 = Task(\"quuz\", {}, print, [data_node_2], [data_node_4], TaskId(\"t2\")) sequence = Sequence({}, [task_1, task_2], SequenceId(\"p1\")) # 2 | # | # 1 | s1 ___ t1 __ s3 # | / # 0 | s2 /__ t2 __ s4 # |_________________ # 0 1 2 dag = sequence._get_dag() assert dag.length == 3 assert dag.width == 2 assert dag._grid_length == 3 assert dag._grid_width == 2 assert len(dag.nodes) == 6 assert_x_y(0, [0, 1], dag.nodes[\"s1\"], dag.nodes[\"s2\"]) assert_x_y(1, [0, 1], dag.nodes[\"t1\"], dag.nodes[\"t2\"]) assert_x_y(2, [0, 1], dag.nodes[\"s3\"], dag.nodes[\"s4\"]) assert len(dag.edges) == 5 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"s2\", \"t1\", dag) assert_edge_exists(\"t1\", \"s3\", dag) assert_edge_exists(\"s2\", \"t2\", dag) assert_edge_exists(\"t2\", \"s4\", dag) "} {"text": "from unittest import mock import pytest from src.taipy.core import taipy from src.taipy.core._entity._labeled import _Labeled from taipy.config import Config, Frequency, Scope class MockOwner: label = \"owner_label\" def get_label(self): return self.label def test_get_label(): labeled_entity = _Labeled() with 
pytest.raises(NotImplementedError): labeled_entity.get_label() with pytest.raises(NotImplementedError): labeled_entity.get_simple_label() with pytest.raises(AttributeError): labeled_entity._get_label() with pytest.raises(AttributeError): labeled_entity._get_simple_label() labeled_entity.id = \"id\" assert labeled_entity._get_label() == \"id\" assert labeled_entity._get_simple_label() == \"id\" labeled_entity.config_id = \"the config id\" assert labeled_entity._get_label() == \"the config id\" assert labeled_entity._get_simple_label() == \"the config id\" labeled_entity._properties = {\"name\": \"a name\"} assert labeled_entity._get_label() == \"a name\" assert labeled_entity._get_simple_label() == \"a name\" labeled_entity.owner_id = \"owner_id\" with mock.patch(\"src.taipy.core.get\") as get_mck: get_mck.return_value = MockOwner() assert labeled_entity._get_label() == \"owner_label > a name\" assert labeled_entity._get_simple_label() == \"a name\" labeled_entity._properties[\"label\"] = \"a wonderful label\" assert labeled_entity._get_label() == \"a wonderful label\" assert labeled_entity._get_simple_label() == \"a wonderful label\" def mult(n1, n2): return n1 * n2 def test_get_label_complex_case(): dn1_cfg = Config.configure_data_node(\"dn1\", scope=Scope.GLOBAL) dn2_cfg = Config.configure_data_node(\"dn2\", scope=Scope.CYCLE) dn3_cfg = Config.configure_data_node(\"dn3\", scope=Scope.CYCLE) dn4_cfg = Config.configure_data_node(\"dn4\", scope=Scope.SCENARIO) dn5_cfg = Config.configure_data_node(\"dn5\", scope=Scope.SCENARIO) tA_cfg = Config.configure_task(\"t_A_C\", mult, [dn1_cfg, dn2_cfg], dn3_cfg) tB_cfg = Config.configure_task(\"t_B_S\", mult, [dn3_cfg, dn4_cfg], dn5_cfg) scenario_cfg = Config.configure_scenario(\"scenario_cfg\", [tA_cfg, tB_cfg], [], Frequency.DAILY) scenario_cfg.add_sequences( { \"sequence_C\": [tA_cfg], \"sequence_S\": [tA_cfg, tB_cfg], } ) scenario = taipy.create_scenario(scenario_cfg, name=\"My Name\") cycle = scenario.cycle cycle.name = \"Today\" sequence_C = scenario.sequence_C sequence_S = scenario.sequence_S tA = scenario.t_A_C tB = scenario.t_B_S dn1 = scenario.dn1 dn2 = scenario.dn2 dn3 = scenario.dn3 dn4 = scenario.dn4 dn5 = scenario.dn5 assert cycle.get_label() == scenario.cycle.name assert cycle.get_simple_label() == scenario.cycle.name assert scenario.get_label() == \"Today > My Name\" assert scenario.get_simple_label() == \"My Name\" assert sequence_C.get_label() == \"Today > My Name > sequence_C\" assert sequence_C.get_simple_label() == \"sequence_C\" assert sequence_S.get_label() == \"Today > My Name > sequence_S\" assert sequence_S.get_simple_label() == \"sequence_S\" assert tA.get_label() == \"Today > t_A_C\" assert tA.get_simple_label() == \"t_A_C\" assert tB.get_label() == \"Today > My Name > t_B_S\" assert tB.get_simple_label() == \"t_B_S\" assert dn1.get_label() == \"dn1\" assert dn1.get_simple_label() == \"dn1\" assert dn2.get_label() == \"Today > dn2\" assert dn2.get_simple_label() == \"dn2\" assert dn3.get_label() == \"Today > dn3\" assert dn3.get_simple_label() == \"dn3\" assert dn4.get_label() == \"Today > My Name > dn4\" assert dn4.get_simple_label() == \"dn4\" assert dn5.get_label() == \"Today > My Name > dn5\" assert dn5.get_simple_label() == \"dn5\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from src.taipy.core._entity._entity_ids import _EntityIds class TestEntityIds: def test_add_two_entity_ids(self): entity_ids_1 = _EntityIds() entity_ids_2 = _EntityIds() entity_ids_1_address = id(entity_ids_1) entity_ids_1.data_node_ids.update([\"data_node_id_1\", \"data_node_id_2\"]) entity_ids_1.task_ids.update([\"task_id_1\", \"task_id_2\"]) entity_ids_1.job_ids.update([\"job_id_1\", \"job_id_2\"]) entity_ids_1.sequence_ids.update([\"sequence_id_1\", \"sequence_id_2\"]) entity_ids_1.scenario_ids.update([\"scenario_id_1\", \"scenario_id_2\"]) entity_ids_1.cycle_ids.update([\"cycle_id_1\", \"cycle_id_2\"]) entity_ids_2.data_node_ids.update([\"data_node_id_2\", \"data_node_id_3\"]) entity_ids_2.task_ids.update([\"task_id_2\", \"task_id_3\"]) entity_ids_2.job_ids.update([\"job_id_2\", \"job_id_3\"]) entity_ids_2.sequence_ids.update([\"sequence_id_2\", \"sequence_id_3\"]) entity_ids_2.scenario_ids.update([\"scenario_id_2\", \"scenario_id_3\"]) entity_ids_2.cycle_ids.update([\"cycle_id_2\", \"cycle_id_3\"]) entity_ids_1 += entity_ids_2 # += operator should not change the address of entity_ids_1 assert id(entity_ids_1) == entity_ids_1_address assert entity_ids_1.data_node_ids == {\"data_node_id_1\", \"data_node_id_2\", \"data_node_id_3\"} assert entity_ids_1.task_ids == {\"task_id_1\", \"task_id_2\", \"task_id_3\"} assert entity_ids_1.job_ids == {\"job_id_1\", \"job_id_2\", \"job_id_3\"} assert entity_ids_1.sequence_ids == {\"sequence_id_1\", \"sequence_id_2\", \"sequence_id_3\"} assert entity_ids_1.scenario_ids == {\"scenario_id_1\", \"scenario_id_2\", \"scenario_id_3\"} assert entity_ids_1.cycle_ids == {\"cycle_id_1\", \"cycle_id_2\", \"cycle_id_3\"} "} {"text": "import filecmp import os import shutil import sys from unittest.mock import patch import mongomock import pytest from src.taipy.core._entity._migrate_cli import _MigrateCLI @pytest.fixture(scope=\"function\", autouse=True) def clean_data_folder(): if os.path.exists(\"tests/core/_entity/.data\"): shutil.rmtree(\"tests/core/_entity/.data\") yield def test_migrate_fs_default(caplog): _MigrateCLI.create_parser() # Test migrate with default .data folder with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", \"--skip-backup\"]): _MigrateCLI.parse_arguments() assert \"Starting entity migration from '.data' folder\" in caplog.text def test_migrate_fs_specified_folder(caplog): _MigrateCLI.create_parser() # Copy data_sample to .data folder for testing data_sample_path = \"tests/core/_entity/data_sample\" data_path = \"tests/core/_entity/.data\" shutil.copytree(data_sample_path, data_path) # Run with --skip-backup to only test the migration with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path, \"--skip-backup\"]): _MigrateCLI.parse_arguments() assert f\"Starting entity migration from '{data_path}' folder\" in caplog.text # Compare migrated .data folder with data_sample_migrated dircmp_result = filecmp.dircmp(data_path, \"tests/core/_entity/data_sample_migrated\") assert not 
dircmp_result.diff_files and not dircmp_result.left_only and not dircmp_result.right_only for subdir in dircmp_result.subdirs.values(): assert not subdir.diff_files and not subdir.left_only and not subdir.right_only def test_migrate_fs_backup_and_remove(caplog): _MigrateCLI.create_parser() # Copy data_sample to .data folder for testing data_sample_path = \"tests/core/_entity/data_sample\" data_path = \"tests/core/_entity/.data\" backup_path = \"tests/core/_entity/.data_backup\" shutil.copytree(data_sample_path, data_path) # Remove backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path, \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup folder '{backup_path}' does not exist.\" in caplog.text assert not os.path.exists(backup_path) # Run without --skip-backup to create the backup folder with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path]): _MigrateCLI.parse_arguments() assert f\"Backed up entities from '{data_path}' to '{backup_path}' folder before migration.\" in caplog.text assert os.path.exists(backup_path) # Remove backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path, \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert f\"Removed backup entities from the backup folder '{backup_path}'.\" in caplog.text assert not os.path.exists(backup_path) def test_migrate_fs_backup_and_restore(caplog): _MigrateCLI.create_parser() # Copy data_sample to .data folder for testing data_sample_path = \"tests/core/_entity/data_sample\" data_path = \"tests/core/_entity/.data\" backup_path = \"tests/core/_entity/.data_backup\" shutil.copytree(data_sample_path, data_path) # Restore backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path, \"--restore\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup folder '{backup_path}' does not exist.\" in caplog.text assert not os.path.exists(backup_path) # Run without --skip-backup to create the backup folder with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path]): _MigrateCLI.parse_arguments() assert os.path.exists(backup_path) # restore the backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path, \"--restore\"]): _MigrateCLI.parse_arguments() assert f\"Restored entities from the backup folder '{backup_path}' to '{data_path}'.\" in caplog.text assert not os.path.exists(backup_path) # Compare migrated .data folder with data_sample to ensure restoreing the backup worked dircmp_result = filecmp.dircmp(data_path, \"tests/core/_entity/data_sample\") assert not dircmp_result.diff_files and not dircmp_result.left_only and not dircmp_result.right_only for subdir in dircmp_result.subdirs.values(): assert not subdir.diff_files and not subdir.left_only and not subdir.right_only def test_migrate_fs_non_existing_folder(caplog): _MigrateCLI.create_parser() # Test migrate with a non-existing folder with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", 
\"non-existing-folder\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert \"Folder 'non-existing-folder' does not exist.\" in caplog.text @patch(\"src.taipy.core._entity._migrate_cli._migrate_sql_entities\") def test_migrate_sql_specified_path(_migrate_sql_entities_mock, tmp_sqlite): _MigrateCLI.create_parser() # Test the _migrate_sql_entities is called once with the correct path with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite, \"--skip-backup\"]): _MigrateCLI.parse_arguments() assert _migrate_sql_entities_mock.assert_called_once_with(path=tmp_sqlite) def test_migrate_sql_backup_and_remove(caplog, tmp_sqlite): _MigrateCLI.create_parser() # Create the .sqlite file to test with open(tmp_sqlite, \"w\") as f: f.write(\"\") file_name, file_extension = tmp_sqlite.rsplit(\".\", 1) backup_sqlite = f\"{file_name}_backup.{file_extension}\" # Remove backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite, \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup database '{backup_sqlite}' does not exist.\" in caplog.text assert not os.path.exists(backup_sqlite) # Run without --skip-backup to create the backup database with pytest.raises(Exception): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite]): _MigrateCLI.parse_arguments() assert os.path.exists(backup_sqlite) # Remove backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite, \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert f\"Removed backup entities from the backup database '{backup_sqlite}'.\" in caplog.text assert not os.path.exists(backup_sqlite) @pytest.mark.skipif(sys.platform == \"win32\", reason=\"Does not run on windows due to PermissionError: [WinError 32]\") def test_migrate_sql_backup_and_restore(caplog, tmp_sqlite): _MigrateCLI.create_parser() # Create the .sqlite file to test with open(tmp_sqlite, \"w\") as f: f.write(\"\") file_name, file_extension = tmp_sqlite.rsplit(\".\", 1) backup_sqlite = f\"{file_name}_backup.{file_extension}\" # Restore backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite, \"--restore\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup database '{backup_sqlite}' does not exist.\" in caplog.text assert not os.path.exists(backup_sqlite) # Run without --skip-backup to create the backup database with pytest.raises(Exception): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite]): _MigrateCLI.parse_arguments() assert os.path.exists(backup_sqlite) # Restore the backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite, \"--restore\"]): _MigrateCLI.parse_arguments() assert f\"Restored entities from the backup database '{backup_sqlite}' to '{tmp_sqlite}'.\" in caplog.text assert not os.path.exists(backup_sqlite) def test_migrate_sql_non_existing_path(caplog): _MigrateCLI.create_parser() # Test migrate without providing a path with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\"]): 
_MigrateCLI.parse_arguments() assert err.value.code == 1 assert \"Missing the required sqlite path.\" in caplog.text caplog.clear() # Test migrate with a non-existing-path.sqlite file with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", \"non-existing-path.sqlite\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert \"File 'non-existing-path.sqlite' does not exist.\" in caplog.text @patch(\"src.taipy.core._entity._migrate_cli._migrate_mongo_entities\") def test_call_to_migrate_mongo(_migrate_mongo_entities_mock): _MigrateCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\"]): _MigrateCLI.parse_arguments() assert _migrate_mongo_entities_mock.assert_called_once_with() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\", \"host\", \"port\", \"user\", \"password\"]): _MigrateCLI.parse_arguments() assert _migrate_mongo_entities_mock.assert_called_once_with(\"host\", \"port\", \"user\", \"password\") @mongomock.patch(servers=((\"localhost\", 27017),)) def test_migrate_mongo_backup_and_remove(caplog): _MigrateCLI.create_parser() mongo_backup_path = \".mongo_backup\" # Remove backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\", \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup folder '{mongo_backup_path}' does not exist.\" in caplog.text assert not os.path.exists(mongo_backup_path) # Run without --skip-backup to create the backup database with pytest.raises(Exception): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\"]): _MigrateCLI.parse_arguments() assert os.path.exists(mongo_backup_path) # Remove backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\", \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert f\"Removed backup entities from the backup folder '{mongo_backup_path}'.\" in caplog.text assert not os.path.exists(mongo_backup_path) @mongomock.patch(servers=((\"localhost\", 27017),)) def test_migrate_mongo_backup_and_restore(caplog): _MigrateCLI.create_parser() mongo_backup_path = \".mongo_backup\" # Restore backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\", \"--restore\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup folder '{mongo_backup_path}' does not exist.\" in caplog.text assert not os.path.exists(mongo_backup_path) # Run without --skip-backup to create the backup database with pytest.raises(Exception): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\"]): _MigrateCLI.parse_arguments() assert os.path.exists(mongo_backup_path) # Restore the backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\", \"--restore\"]): _MigrateCLI.parse_arguments() assert f\"Restored entities from the backup folder '{mongo_backup_path}'.\" in caplog.text assert not os.path.exists(mongo_backup_path) def test_not_provide_valid_repository_type(caplog): _MigrateCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\"]): 
_MigrateCLI.parse_arguments() assert \"the following arguments are required: --repository-type\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\"]): _MigrateCLI.parse_arguments() assert \"argument --repository-type: expected at least one argument\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"invalid-repository-type\"]): _MigrateCLI.parse_arguments() assert \"Unknown repository type invalid-repository-type\" in caplog.text "} {"text": "import pytest from src.taipy.core.common._utils import _retry_read_entity from taipy.config import Config def test_retry_decorator(mocker): func = mocker.Mock(side_effect=Exception()) @_retry_read_entity((Exception,)) def decorated_func(): func() with pytest.raises(Exception): decorated_func() # Called once in the normal flow and no retry # The Config.core.read_entity_retry is set to 0 at conftest.py assert Config.core.read_entity_retry == 0 assert func.call_count == 1 func.reset_mock() Config.core.read_entity_retry = 3 with pytest.raises(Exception): decorated_func() # Called once in the normal flow and 3 more times on the retry flow assert func.call_count == 4 def test_retry_decorator_exception_not_in_list(mocker): func = mocker.Mock(side_effect=KeyError()) Config.core.read_entity_retry = 3 @_retry_read_entity((Exception,)) def decorated_func(): func() with pytest.raises(KeyError): decorated_func() # Called only on the first time and not trigger retry because KeyError is not on the exceptions list assert func.called == 1 "} {"text": "from src.taipy.core.common.warn_if_inputs_not_ready import _warn_if_inputs_not_ready from src.taipy.core.data._data_manager_factory import _DataManagerFactory from taipy.config import Config def test_warn_inputs_all_not_ready(caplog): one = Config.configure_data_node(\"one\") two = Config.configure_data_node(\"two\") three = Config.configure_data_node(\"three\") data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}).values() _warn_if_inputs_not_ready(data_nodes) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in data_nodes ] assert all([expected_output in stdout for expected_output in expected_outputs]) def test_warn_inputs_all_ready(caplog): one = Config.configure_data_node(\"one\", default_data=1) two = Config.configure_data_node(\"two\", default_data=2) three = Config.configure_data_node(\"three\", default_data=3) data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}).values() _warn_if_inputs_not_ready(data_nodes) stdout = caplog.text not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in data_nodes ] assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_warn_inputs_one_ready(caplog): one = Config.configure_data_node(\"one\", default_data=1) two = Config.configure_data_node(\"two\") three = Config.configure_data_node(\"three\") data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}) _warn_if_inputs_not_ready(data_nodes.values()) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. 
Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [data_nodes[two], data_nodes[three]] ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [data_nodes[one]] ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_submit_task_with_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") excel_dn_cfg = Config.configure_excel_data_node(\"wrong_excel_file_path\", default_path=\"wrong_path.xlsx\") json_dn_cfg = Config.configure_json_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_path=\"wrong_path.pickle\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") input_dn_cfgs = [csv_dn_cfg, excel_dn_cfg, json_dn_cfg, pickle_dn_cfg, parquet_dn_cfg] dn_manager = _DataManagerFactory._build_manager() dns = [dn_manager._bulk_get_or_create([input_dn_cfg])[input_dn_cfg] for input_dn_cfg in input_dn_cfgs] _warn_if_inputs_not_ready(dns) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in dns ] assert all([expected_output in stdout for expected_output in expected_outputs]) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
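# --- Illustrative sketch (an assumption for clarity, not the actual taipy implementation) ---
# The _warn_if_inputs_not_ready tests above expect one warning per input data node that has
# never been written, containing the node id and its path. A minimal stand-in behaving that
# way could look like the function below; the `is_ready_for_reading` flag, the `path`
# attribute, and the use of the standard `logging` module are assumptions made only for this
# sketch.
import logging

_sketch_logger = logging.getLogger(__name__)

def _sketch_warn_if_inputs_not_ready(data_nodes):
    # Emit one warning per data node that is not yet readable, mirroring the message shape
    # asserted in the tests above.
    for dn in data_nodes:
        if not getattr(dn, "is_ready_for_reading", False):
            _sketch_logger.warning(
                f"{dn.id} cannot be read because it has never been written. "
                f"Hint: The data node may refer to a wrong path : {getattr(dn, 'path', None)} "
            )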
"} {"text": "from datetime import datetime, timedelta from typing import Callable, Iterable, Optional from unittest.mock import ANY, patch import pytest from src.taipy.core import Job from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.common import _utils from src.taipy.core.common._utils import _Subscriber from src.taipy.core.config.job_config import JobConfig from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import ( DeletingPrimaryScenario, DifferentScenarioConfigs, InsufficientScenarioToCompare, NonExistingComparator, NonExistingScenario, NonExistingScenarioConfig, NonExistingTask, SequenceTaskConfigDoesNotExistInSameScenarioConfig, UnauthorizedTagError, ) from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.scenario.scenario_id import ScenarioId from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils import assert_true_after_time from tests.core.utils.NotifyMock import NotifyMock def test_set_and_get_scenario(cycle): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() scenario_id_1 = ScenarioId(\"scenario_id_1\") scenario_1 = Scenario(\"scenario_name_1\", [], {}, [], scenario_id_1) input_dn_2 = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_dn_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO) additional_dn_2 = InMemoryDataNode(\"zyx\", Scope.SCENARIO) task_name_2 = \"task_2\" task_2 = Task(task_name_2, {}, print, [input_dn_2], [output_dn_2], TaskId(\"task_id_2\")) scenario_id_2 = ScenarioId(\"scenario_id_2\") scenario_2 = Scenario( \"scenario_name_2\", [task_2], {}, [additional_dn_2], scenario_id_2, datetime.now(), True, cycle, sequences={\"sequence_2\": {\"tasks\": [task_2]}}, ) additional_dn_3 = InMemoryDataNode(\"baz\", Scope.SCENARIO) task_name_3 = \"task_3\" task_3 = Task(task_name_3, {}, print, id=TaskId(\"task_id_3\")) scenario_3_with_same_id = Scenario( \"scenario_name_3\", [task_3], {}, [additional_dn_3], scenario_id_1, datetime.now(), False, cycle, sequences={\"sequence_3\": {}}, ) # No existing scenario assert len(_ScenarioManager._get_all()) == 0 assert _ScenarioManager._get(scenario_id_1) is None assert _ScenarioManager._get(scenario_1) is None assert _ScenarioManager._get(scenario_id_2) is None assert _ScenarioManager._get(scenario_2) is None # Save one scenario. 
We expect to have only one scenario stored _ScenarioManager._set(scenario_1) assert len(_ScenarioManager._get_all()) == 1 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2) is None assert _ScenarioManager._get(scenario_2) is None # Save a second scenario. Now, we expect to have a total of two scenarios stored _TaskManager._set(task_2) _CycleManager._set(cycle) _ScenarioManager._set(scenario_2) _DataManager._set(additional_dn_2) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id assert _ScenarioManager._get(scenario_id_2).cycle == cycle assert _ScenarioManager._get(scenario_2).cycle == cycle assert _CycleManager._get(cycle.id).id == cycle.id # We save the first scenario again. 
We expect nothing to change _ScenarioManager._set(scenario_1) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id assert _CycleManager._get(cycle.id).id == cycle.id # We save a third scenario with same id as the first one. 
# We expect the first scenario to be updated _DataManager._set(additional_dn_3) _TaskManager._set(task_3) _TaskManager._set(scenario_2.tasks[task_name_2]) _ScenarioManager._set(scenario_3_with_same_id) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_3_with_same_id.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 1 assert _ScenarioManager._get(scenario_id_1).cycle == cycle assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_3_with_same_id.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 1 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 1 assert len(_ScenarioManager._get(scenario_1).sequences) == 1 assert _ScenarioManager._get(scenario_1).cycle == cycle assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id def test_raise_sequence_task_configs_not_in_scenario_config(): data_node = Config.configure_pickle_data_node(\"temp\") task_config_1 = Config.configure_task(\"task_1\", print, output=[data_node]) task_config_2 = Config.configure_task(\"task_2\", print, input=[data_node]) scenario_config_1 = Config.configure_scenario(\"scenario_1\") scenario_config_1.add_sequences({\"sequence_0\": []}) _ScenarioManager._create(scenario_config_1) scenario_config_1.add_sequences({\"sequence_1\": [task_config_1]}) with pytest.raises(SequenceTaskConfigDoesNotExistInSameScenarioConfig) as err: _ScenarioManager._create(scenario_config_1) assert err.value.args == ([task_config_1.id], \"sequence_1\", scenario_config_1.id) scenario_config_1._tasks = [task_config_1] _ScenarioManager._create(scenario_config_1) scenario_config_1.add_sequences({\"sequence_2\": [task_config_1]}) _ScenarioManager._create(scenario_config_1) scenario_config_1.add_sequences({\"sequence_3\": [task_config_1, task_config_2]}) with pytest.raises(SequenceTaskConfigDoesNotExistInSameScenarioConfig) as err: _ScenarioManager._create(scenario_config_1) assert err.value.args == ([task_config_2.id], \"sequence_3\", scenario_config_1.id) scenario_config_1._tasks = [task_config_1, task_config_2] _ScenarioManager._create(scenario_config_1) def test_get_all_on_multiple_versions_environment(): # Create 5 scenarios with 2 versions each # Only version 1.0 has the scenario with config_id = \"config_id_1\" # Only version 2.0 has the scenario with config_id = 
\"config_id_6\" for version in range(1, 3): for i in range(5): _ScenarioManager._set( Scenario(f\"config_id_{i+version}\", [], {}, [], ScenarioId(f\"id{i}_v{version}\"), version=f\"{version}.0\") ) _VersionManager._set_experiment_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"2.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_create_scenario_does_not_modify_config(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) creation_date_1 = datetime.now() name_1 = \"name_1\" scenario_config = Config.configure_scenario(\"sc\", None, None, Frequency.DAILY) assert scenario_config.properties.get(\"name\") is None assert len(scenario_config.properties) == 0 _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) assert len(scenario_config.properties) == 0 assert len(scenario.properties) == 1 assert scenario.properties.get(\"name\") == name_1 assert scenario.name == name_1 scenario.properties[\"foo\"] = \"bar\" _ScenarioManager._set(scenario) assert len(scenario_config.properties) == 0 assert len(scenario.properties) == 2 assert scenario.properties.get(\"foo\") == \"bar\" assert scenario.properties.get(\"name\") == name_1 assert scenario.name == name_1 scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1) assert scenario_2.name is None def test_create_and_delete_scenario(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) creation_date_1 = datetime.now() creation_date_2 = creation_date_1 + timedelta(minutes=10) name_1 = \"name_1\" _ScenarioManager._delete_all() assert len(_ScenarioManager._get_all()) == 0 scenario_config = Config.configure_scenario(\"sc\", None, None, Frequency.DAILY) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) assert scenario_1.config_id == \"sc\" assert scenario_1.sequences == {} assert scenario_1.tasks == {} assert scenario_1.additional_data_nodes == {} assert scenario_1.data_nodes == {} assert scenario_1.cycle.frequency == Frequency.DAILY assert scenario_1.is_primary assert scenario_1.cycle.creation_date == creation_date_1 assert scenario_1.cycle.start_date.date() == creation_date_1.date() assert scenario_1.cycle.end_date.date() == creation_date_1.date() assert scenario_1.creation_date == creation_date_1 assert 
scenario_1.name == name_1 assert scenario_1.properties[\"name\"] == name_1 assert scenario_1.tags == set() cycle_id_1 = scenario_1.cycle.id assert _CycleManager._get(cycle_id_1).id == cycle_id_1 _ScenarioManager._delete(scenario_1.id) assert _ScenarioManager._get(scenario_1.id) is None assert _CycleManager._get(cycle_id_1) is None # Recreate scenario_1 scenario_1 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date_2) assert scenario_2.config_id == \"sc\" assert scenario_2.sequences == {} assert scenario_2.tasks == {} assert scenario_2.additional_data_nodes == {} assert scenario_2.data_nodes == {} assert scenario_2.cycle.frequency == Frequency.DAILY assert not scenario_2.is_primary assert scenario_2.cycle.creation_date == creation_date_1 assert scenario_2.cycle.start_date.date() == creation_date_2.date() assert scenario_2.cycle.end_date.date() == creation_date_2.date() assert scenario_2.properties.get(\"name\") is None assert scenario_2.tags == set() assert scenario_1 != scenario_2 assert scenario_1.cycle == scenario_2.cycle assert len(_ScenarioManager._get_all()) == 2 with pytest.raises(DeletingPrimaryScenario): _ScenarioManager._delete( scenario_1.id, ) _ScenarioManager._delete( scenario_2.id, ) assert len(_ScenarioManager._get_all()) == 1 _ScenarioManager._delete(scenario_1.id) assert len(_ScenarioManager._get_all()) == 0 def test_is_deletable(): assert len(_ScenarioManager._get_all()) == 0 scenario_config = Config.configure_scenario(\"sc\", None, None, Frequency.DAILY) creation_date = datetime.now() scenario_1_primary = _ScenarioManager._create(scenario_config, creation_date=creation_date, name=\"1\") scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date, name=\"2\") assert len(_ScenarioManager._get_all()) == 2 assert scenario_1_primary.is_primary assert not _ScenarioManager._is_deletable(scenario_1_primary) assert not _ScenarioManager._is_deletable(scenario_1_primary.id) assert not scenario_2.is_primary assert _ScenarioManager._is_deletable(scenario_2) assert _ScenarioManager._is_deletable(scenario_2.id) _ScenarioManager._hard_delete(scenario_2.id) del scenario_2 assert len(_ScenarioManager._get_all()) == 1 assert scenario_1_primary.is_primary assert _ScenarioManager._is_deletable(scenario_1_primary) assert _ScenarioManager._is_deletable(scenario_1_primary.id) def test_assign_scenario_as_parent_of_task_and_additional_data_nodes(): dn_config_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.GLOBAL) dn_config_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.GLOBAL) dn_config_3 = Config.configure_data_node(\"dn_3\", \"in_memory\", scope=Scope.SCENARIO) additional_dn_config_1 = Config.configure_data_node(\"additional_dn_1\", \"in_memory\", scope=Scope.GLOBAL) additional_dn_config_2 = Config.configure_data_node(\"additional_dn_2\", \"in_memory\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"task_1\", print, [dn_config_1], [dn_config_2]) task_config_2 = Config.configure_task(\"task_2\", print, [dn_config_2], [dn_config_3]) task_config_3 = Config.configure_task(\"task_3\", print, [dn_config_2], [dn_config_3]) scenario_config_1 = Config.configure_scenario( \"scenario_1\", [task_config_1, task_config_2], [additional_dn_config_1, additional_dn_config_2] ) scenario_config_1.add_sequences({\"sequence_1\": [task_config_1, task_config_2]}) scenario_config_2 = Config.configure_scenario( \"scenario_2\", 
[task_config_1, task_config_2, task_config_3], [additional_dn_config_1, additional_dn_config_2] ) scenario_config_2.add_sequences( {\"sequence_1\": [task_config_1, task_config_2], \"sequence_2\": [task_config_1, task_config_3]} ) scenario_1 = _ScenarioManager._create(scenario_config_1) sequence_1_s1 = scenario_1.sequences[\"sequence_1\"] assert all([sequence.parent_ids == {scenario_1.id} for sequence in scenario_1.sequences.values()]) tasks = scenario_1.tasks.values() assert all([task.parent_ids == {scenario_1.id, sequence_1_s1.id} for task in tasks]) data_nodes = {} for task in tasks: data_nodes.update(task.data_nodes) assert data_nodes[\"dn_1\"].parent_ids == {scenario_1.tasks[\"task_1\"].id} assert data_nodes[\"dn_2\"].parent_ids == {scenario_1.tasks[\"task_1\"].id, scenario_1.tasks[\"task_2\"].id} assert data_nodes[\"dn_3\"].parent_ids == {scenario_1.tasks[\"task_2\"].id} additional_data_nodes = scenario_1.additional_data_nodes assert additional_data_nodes[\"additional_dn_1\"].parent_ids == {scenario_1.id} assert additional_data_nodes[\"additional_dn_2\"].parent_ids == {scenario_1.id} scenario_2 = _ScenarioManager._create(scenario_config_2) sequence_1_s2 = scenario_2.sequences[\"sequence_1\"] sequence_2_s2 = scenario_2.sequences[\"sequence_2\"] assert all([sequence.parent_ids == {scenario_2.id} for sequence in scenario_2.sequences.values()]) assert scenario_1.tasks[\"task_1\"] == scenario_2.tasks[\"task_1\"] assert scenario_1.tasks[\"task_1\"].parent_ids == { scenario_1.id, sequence_1_s1.id, scenario_2.id, sequence_1_s2.id, sequence_2_s2.id, } assert scenario_1.tasks[\"task_2\"].parent_ids == {scenario_1.id, sequence_1_s1.id} assert scenario_2.tasks[\"task_2\"].parent_ids == {scenario_2.id, sequence_1_s2.id} assert scenario_2.tasks[\"task_3\"].parent_ids == {scenario_2.id, sequence_2_s2.id} additional_data_nodes = scenario_2.additional_data_nodes assert additional_data_nodes[\"additional_dn_1\"].parent_ids == {scenario_1.id, scenario_2.id} assert additional_data_nodes[\"additional_dn_2\"].parent_ids == {scenario_2.id} _ScenarioManager._hard_delete(scenario_1.id) _ScenarioManager._hard_delete(scenario_2.id) _TaskManager._delete_all() _DataManager._delete_all() dn_config_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.GLOBAL) dn_config_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.GLOBAL) dn_config_3 = Config.configure_data_node(\"dn_3\", \"in_memory\", scope=Scope.GLOBAL) additional_dn_config_1 = Config.configure_data_node(\"additional_dn_1\", \"in_memory\", scope=Scope.GLOBAL) additional_dn_config_2 = Config.configure_data_node(\"additional_dn_2\", \"in_memory\", scope=Scope.GLOBAL) task_config_1 = Config.configure_task(\"task_1\", print, [dn_config_1], [dn_config_2]) task_config_2 = Config.configure_task(\"task_2\", print, [dn_config_2], [dn_config_3]) task_config_3 = Config.configure_task(\"task_3\", print, [dn_config_2], [dn_config_3]) scenario_config_1 = Config.configure_scenario( \"scenario_1\", [task_config_1, task_config_2], [additional_dn_config_1, additional_dn_config_2] ) scenario_config_1.add_sequences({\"sequence_1\": [task_config_1, task_config_2]}) scenario_config_2 = Config.configure_scenario( \"scenario_2\", [task_config_1, task_config_2, task_config_3], [additional_dn_config_1, additional_dn_config_2] ) scenario_config_2.add_sequences( {\"sequence_1\": [task_config_1, task_config_2], \"sequence_2\": [task_config_1, task_config_3]} ) scenario_1 = _ScenarioManager._create(scenario_config_1) sequence_1_s1 = 
scenario_1.sequences[\"sequence_1\"] assert scenario_1.sequences[\"sequence_1\"].parent_ids == {scenario_1.id} tasks = scenario_1.tasks.values() assert all([task.parent_ids == {scenario_1.id, sequence_1_s1.id} for task in tasks]) data_nodes = {} for task in tasks: data_nodes.update(task.data_nodes) assert data_nodes[\"dn_1\"].parent_ids == {scenario_1.tasks[\"task_1\"].id} assert data_nodes[\"dn_2\"].parent_ids == {scenario_1.tasks[\"task_1\"].id, scenario_1.tasks[\"task_2\"].id} assert data_nodes[\"dn_3\"].parent_ids == {scenario_1.tasks[\"task_2\"].id} additional_data_nodes = scenario_1.additional_data_nodes assert additional_data_nodes[\"additional_dn_1\"].parent_ids == {scenario_1.id} assert additional_data_nodes[\"additional_dn_2\"].parent_ids == {scenario_1.id} scenario_2 = _ScenarioManager._create(scenario_config_2) sequence_1_s2 = scenario_2.sequences[\"sequence_1\"] sequence_2_s2 = scenario_2.sequences[\"sequence_2\"] assert scenario_1.sequences[\"sequence_1\"].parent_ids == {scenario_1.id} assert scenario_2.sequences[\"sequence_1\"].parent_ids == {scenario_2.id} assert scenario_2.sequences[\"sequence_2\"].parent_ids == {scenario_2.id} tasks = {**scenario_1.tasks, **scenario_2.tasks} assert tasks[\"task_1\"].parent_ids == { scenario_1.id, scenario_2.id, sequence_1_s1.id, sequence_1_s2.id, sequence_2_s2.id, } assert tasks[\"task_2\"].parent_ids == {scenario_1.id, scenario_2.id, sequence_1_s1.id, sequence_1_s2.id} assert tasks[\"task_3\"].parent_ids == {scenario_2.id, sequence_2_s2.id} additional_data_nodes = scenario_2.additional_data_nodes assert additional_data_nodes[\"additional_dn_1\"].parent_ids == {scenario_1.id, scenario_2.id} assert additional_data_nodes[\"additional_dn_2\"].parent_ids == {scenario_1.id, scenario_2.id} def mult_by_2(nb: int): return nb * 2 def mult_by_3(nb: int): return nb * 3 def mult_by_4(nb: int): return nb * 4 def test_scenario_manager_only_creates_data_node_once(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) # dn_1 ---> mult_by_2 ---> dn_2 ---> mult_by_3 ---> dn_6 # dn_1 ---> mult_by_4 ---> dn_4 dn_config_1 = Config.configure_data_node(\"foo\", \"in_memory\", Scope.GLOBAL, default_data=1) dn_config_2 = Config.configure_data_node(\"bar\", \"in_memory\", Scope.CYCLE, default_data=0) dn_config_6 = Config.configure_data_node(\"baz\", \"in_memory\", Scope.CYCLE, default_data=0) dn_config_4 = Config.configure_data_node(\"qux\", \"in_memory\", Scope.SCENARIO, default_data=0) task_mult_by_2_config = Config.configure_task(\"mult_by_2\", mult_by_2, [dn_config_1], dn_config_2) task_mult_by_3_config = Config.configure_task(\"mult_by_3\", mult_by_3, [dn_config_2], dn_config_6) task_mult_by_4_config = Config.configure_task(\"mult_by_4\", mult_by_4, [dn_config_1], dn_config_4) scenario_config = Config.configure_scenario( \"awesome_scenario\", [task_mult_by_2_config, task_mult_by_3_config, task_mult_by_4_config], None, Frequency.DAILY ) scenario_config.add_sequences( {\"by_6\": [task_mult_by_2_config, task_mult_by_3_config], \"by_4\": [task_mult_by_4_config]} ) _OrchestratorFactory._build_dispatcher() assert len(_DataManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 0 assert len(_SequenceManager._get_all()) == 0 assert len(_ScenarioManager._get_all()) == 0 assert len(_CycleManager._get_all()) == 0 scenario_1 = _ScenarioManager._create(scenario_config) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 3 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 1 assert 
scenario_1.foo.read() == 1 assert scenario_1.bar.read() == 0 assert scenario_1.baz.read() == 0 assert scenario_1.qux.read() == 0 assert scenario_1.by_6._get_sorted_tasks()[0][0].config_id == task_mult_by_2_config.id assert scenario_1.by_6._get_sorted_tasks()[1][0].config_id == task_mult_by_3_config.id assert scenario_1.by_4._get_sorted_tasks()[0][0].config_id == task_mult_by_4_config.id assert scenario_1.tasks.keys() == {task_mult_by_2_config.id, task_mult_by_3_config.id, task_mult_by_4_config.id} scenario_1_sorted_tasks = scenario_1._get_sorted_tasks() expected = [{task_mult_by_2_config.id, task_mult_by_4_config.id}, {task_mult_by_3_config.id}] for i, list_tasks_by_level in enumerate(scenario_1_sorted_tasks): assert set([t.config_id for t in list_tasks_by_level]) == expected[i] assert scenario_1.cycle.frequency == Frequency.DAILY _ScenarioManager._create(scenario_config) assert len(_DataManager._get_all()) == 5 assert len(_TaskManager._get_all()) == 4 assert len(_SequenceManager._get_all()) == 4 assert len(_ScenarioManager._get_all()) == 2 def test_notification_subscribe(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) scenario_config = Config.configure_scenario( \"awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], ) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) notify_1 = NotifyMock(scenario) notify_2 = NotifyMock(scenario) mocker.patch.object(_utils, \"_load_fct\", side_effect=[notify_1, notify_2]) # test subscribing notification _ScenarioManager._subscribe(callback=notify_1, scenario=scenario) _ScenarioManager._submit(scenario) notify_1.assert_called_3_times() notify_1.reset() # test unsubscribing notification # test notis subscribe only on new jobs # _ScenarioManager._get(scenario) _ScenarioManager._unsubscribe(callback=notify_1, scenario=scenario) _ScenarioManager._subscribe(callback=notify_2, scenario=scenario) _ScenarioManager._submit(scenario) notify_1.assert_not_called() notify_2.assert_called_3_times() class Notify: def __call__(self, *args, **kwargs): self.args = args def assert_called_with(self, args): assert args in self.args def test_notification_subscribe_multiple_params(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) scenario_config = Config.configure_scenario( \"awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], ) notify = mocker.Mock() _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._subscribe(callback=notify, params=[\"foobar\", 123, 1.2], scenario=scenario) mocker.patch.object(_ScenarioManager, \"_get\", return_value=scenario) _ScenarioManager._submit(scenario) notify.assert_called_with(\"foobar\", 123, 1.2, scenario, ANY) def notify_multi_param(param, *args): assert len(param) == 3 def notify1(*args, **kwargs): ... def notify2(*args, **kwargs): ... 
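# --- Illustrative sketch (an assumption, not the taipy implementation) ---
# The subscription tests in this module rely on two behaviors: (1) a callback is stored
# together with its bound params and later invoked with those params first, followed by the
# scenario and the job, and (2) unsubscribing without params removes the first matching
# callback, while unsubscribing with params removes only the exact (callback, params) pair
# and raises ValueError when nothing matches. A minimal stand-in of that contract, with
# hypothetical names, is sketched below.
from dataclasses import dataclass, field
from typing import Any, Callable, List, Optional

@dataclass
class _SketchSubscriber:
    callback: Callable
    params: List[Any] = field(default_factory=list)

class _SketchSubscribable:
    def __init__(self) -> None:
        self.subscribers: List[_SketchSubscriber] = []

    def subscribe(self, callback: Callable, params: Optional[List[Any]] = None) -> None:
        # Keep the callback and its params together, as _Subscriber does in the tests above.
        self.subscribers.append(_SketchSubscriber(callback, params or []))

    def unsubscribe(self, callback: Callable, params: Optional[List[Any]] = None) -> None:
        # Without params, drop the first subscriber using this callback; with params, require
        # an exact (callback, params) match; otherwise raise, as the tests expect.
        for i, sub in enumerate(self.subscribers):
            if sub.callback == callback and (params is None or sub.params == params):
                del self.subscribers[i]
                return
        raise ValueError("The callback is not subscribed with the given params.")

    def notify(self, scenario: Any, job: Any) -> None:
        # Invoke each callback with its bound params first, then the scenario and the job.
        for sub in self.subscribers:
            sub.callback(*sub.params, scenario, job)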
def test_notification_unsubscribe(mocker):
    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)

    mocker.patch("src.taipy.core._entity._reload._Reloader._reload", side_effect=lambda m, o: o)

    scenario_config = Config.configure_scenario(
        "awesome_scenario",
        [
            Config.configure_task(
                "mult_by_2",
                mult_by_2,
                [Config.configure_data_node("foo", "in_memory", Scope.SCENARIO, default_data=1)],
                Config.configure_data_node("bar", "in_memory", Scope.SCENARIO, default_data=0),
            )
        ],
    )

    _OrchestratorFactory._build_dispatcher()

    scenario = _ScenarioManager._create(scenario_config)

    notify_1 = notify1
    notify_2 = notify2

    # test subscribing notification
    _ScenarioManager._subscribe(callback=notify_1, scenario=scenario)
    _ScenarioManager._unsubscribe(callback=notify_1, scenario=scenario)
    _ScenarioManager._subscribe(callback=notify_2, scenario=scenario)
    _ScenarioManager._submit(scenario.id)

    with pytest.raises(ValueError):
        _ScenarioManager._unsubscribe(callback=notify_1, scenario=scenario)
    _ScenarioManager._unsubscribe(callback=notify_2, scenario=scenario)


def test_notification_unsubscribe_multi_param():
    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)

    scenario_config = Config.configure_scenario(
        "awesome_scenario",
        [
            Config.configure_task(
                "mult_by_2",
                mult_by_2,
                [Config.configure_data_node("foo", "in_memory", Scope.SCENARIO, default_data=1)],
                Config.configure_data_node("bar", "in_memory", Scope.SCENARIO, default_data=0),
            )
        ],
    )

    _OrchestratorFactory._build_dispatcher()

    scenario = _ScenarioManager._create(scenario_config)

    # test subscribing notification
    _ScenarioManager._subscribe(callback=notify_multi_param, params=["foobar", 123, 0], scenario=scenario)
    _ScenarioManager._subscribe(callback=notify_multi_param, params=["foobar", 123, 1], scenario=scenario)
    _ScenarioManager._subscribe(callback=notify_multi_param, params=["foobar", 123, 2], scenario=scenario)
    assert len(scenario.subscribers) == 3

    # If no params are passed, remove the first occurrence of the subscriber when there is more than one copy
    scenario.unsubscribe(notify_multi_param)
    assert len(scenario.subscribers) == 2
    assert _Subscriber(notify_multi_param, ["foobar", 123, 0]) not in scenario.subscribers

    # If params are passed, find the corresponding pair of callback and params to remove
    scenario.unsubscribe(notify_multi_param, ["foobar", 123, 2])
    assert len(scenario.subscribers) == 1
    assert _Subscriber(notify_multi_param, ["foobar", 123, 2]) not in scenario.subscribers

    # If params are passed but are not in the list of subscribers, a ValueError is raised
    with pytest.raises(ValueError):
        scenario.unsubscribe(notify_multi_param, ["foobar", 123, 10000])


def test_scenario_notification_subscribe_all():
    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)

    scenario_config = Config.configure_scenario(
        "awesome_scenario",
        [
            Config.configure_task(
                "mult_by_2",
                mult_by_2,
                [Config.configure_data_node("foo", "in_memory", Scope.SCENARIO, default_data=1)],
                Config.configure_data_node("bar", "in_memory", Scope.SCENARIO, default_data=0),
            )
        ],
    )
    other_scenario_config = Config.configure_scenario(
        "other_scenario",
        [
            Config.configure_task(
                "other_mult_by_2_2",
                mult_by_2,
                [Config.configure_data_node("other_foo", "in_memory", Scope.SCENARIO, default_data=1)],
                Config.configure_data_node("other_bar", "in_memory", Scope.SCENARIO, default_data=0),
            )
        ],
    )

    _OrchestratorFactory._build_dispatcher()

    scenario = _ScenarioManager._create(scenario_config)
    other_scenario = _ScenarioManager._create(other_scenario_config)

    notify_1 = NotifyMock(scenario)

    _ScenarioManager._subscribe(notify_1)

    assert len(_ScenarioManager._get(scenario.id).subscribers) == 1
    assert len(_ScenarioManager._get(other_scenario.id).subscribers) == 1


def test_is_promotable_to_primary_scenario():
    assert len(_ScenarioManager._get_all()) == 0

    scenario_config = Config.configure_scenario("sc", set(), set(), Frequency.DAILY)
    creation_date = datetime.now()
    scenario_1 = _ScenarioManager._create(scenario_config, creation_date=creation_date, name="1")  # primary scenario
    scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date, name="2")

    assert len(_ScenarioManager._get_all()) == 2
    assert scenario_1.is_primary
    assert not _ScenarioManager._is_promotable_to_primary(scenario_1)
    assert not _ScenarioManager._is_promotable_to_primary(scenario_1.id)
    assert not scenario_2.is_primary
    assert _ScenarioManager._is_promotable_to_primary(scenario_2)
    assert _ScenarioManager._is_promotable_to_primary(scenario_2.id)

    _ScenarioManager._set_primary(scenario_2)

    assert len(_ScenarioManager._get_all()) == 2
    assert not scenario_1.is_primary
    assert _ScenarioManager._is_promotable_to_primary(scenario_1)
    assert _ScenarioManager._is_promotable_to_primary(scenario_1.id)
    assert scenario_2.is_primary
    assert not _ScenarioManager._is_promotable_to_primary(scenario_2)
    assert not _ScenarioManager._is_promotable_to_primary(scenario_2.id)


def test_get_set_primary_scenario():
    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
    _OrchestratorFactory._build_dispatcher()

    cycle_1 = _CycleManager._create(Frequency.DAILY, name="foo")

    scenario_1 = Scenario("sc_1", [], {}, ScenarioId("sc_1"), is_primary=False, cycle=cycle_1)
    scenario_2 = Scenario("sc_2", [], {}, ScenarioId("sc_2"), is_primary=False, cycle=cycle_1)

    _ScenarioManager._delete_all()
    _CycleManager._delete_all()

    assert len(_ScenarioManager._get_all()) == 0
    assert len(_CycleManager._get_all()) == 0

    _CycleManager._set(cycle_1)

    _ScenarioManager._set(scenario_1)
    _ScenarioManager._set(scenario_2)

    assert len(_ScenarioManager._get_primary_scenarios()) == 0
    assert len(_ScenarioManager._get_all_by_cycle(cycle_1)) == 2

    _ScenarioManager._set_primary(scenario_1)

    assert len(_ScenarioManager._get_primary_scenarios()) == 1
    assert len(_ScenarioManager._get_all_by_cycle(cycle_1)) == 2
    assert _ScenarioManager._get_primary(cycle_1) == scenario_1

    _ScenarioManager._set_primary(scenario_2)

    assert len(_ScenarioManager._get_primary_scenarios()) == 1
    assert len(_ScenarioManager._get_all_by_cycle(cycle_1)) == 2
    assert _ScenarioManager._get_primary(cycle_1) == scenario_2


def test_hard_delete_one_single_scenario_with_scenario_data_nodes():
    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)

    dn_input_config = Config.configure_data_node("my_input", "in_memory", scope=Scope.SCENARIO, default_data="testing")
    dn_output_config = Config.configure_data_node("my_output", "in_memory", scope=Scope.SCENARIO)
    task_config = Config.configure_task("task_config", print, dn_input_config, dn_output_config)
    scenario_config = Config.configure_scenario("scenario_config", [task_config])
    scenario_config.add_sequences({"sequence_config": [task_config]})

    _OrchestratorFactory._build_dispatcher()

    scenario = _ScenarioManager._create(scenario_config)
    _ScenarioManager._submit(scenario.id)

    assert len(_ScenarioManager._get_all()) == 1
    assert len(_SequenceManager._get_all()) == 1
    assert len(_TaskManager._get_all()) == 1
    assert len(_DataManager._get_all()) == 2
assert len(_JobManager._get_all()) == 1 _ScenarioManager._hard_delete(scenario.id) assert len(_ScenarioManager._get_all()) == 0 assert len(_SequenceManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 0 assert len(_DataManager._get_all()) == 0 assert len(_JobManager._get_all()) == 0 def test_hard_delete_one_scenario_among_two_with_scenario_data_nodes(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) scenario_config.add_sequences({\"sequence_config\": [task_config]}) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config) scenario_2 = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario_1.id) _ScenarioManager._submit(scenario_2.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 2 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 2 _ScenarioManager._hard_delete(scenario_1.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 assert _ScenarioManager._get(scenario_2.id) is not None def test_hard_delete_one_scenario_among_two_with_cycle_data_nodes(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.CYCLE) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) scenario_config.add_sequences({\"sequence_config\": [task_config]}) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config) scenario_2 = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario_1.id) _ScenarioManager._submit(scenario_2.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 2 _ScenarioManager._hard_delete(scenario_1.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 2 assert _ScenarioManager._get(scenario_2.id) is not None def test_hard_delete_shared_entities(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_config_1 = Config.configure_data_node(\"my_input_1\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_config_2 = Config.configure_data_node(\"my_input_2\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_config_3 = Config.configure_data_node(\"my_input_3\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") dn_config_4 = Config.configure_data_node(\"my_input_4\", \"in_memory\", scope=Scope.GLOBAL, 
default_data=\"testing\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_config_1, dn_config_2) task_config_2 = Config.configure_task(\"task_config_2\", print, dn_config_2, dn_config_3) task_config_3 = Config.configure_task(\"task_config_3\", print, dn_config_3, dn_config_4) # scope = global task_config_4 = Config.configure_task(\"task_config_4\", print, dn_config_1) # scope = cycle scenario_config_1 = Config.configure_scenario( \"scenario_config_1\", [task_config_1, task_config_2, task_config_3, task_config_4], frequency=Frequency.WEEKLY, ) scenario_config_1.add_sequences( { \"sequence_config_1\": [task_config_1, task_config_2], \"sequence_config_2\": [task_config_1, task_config_2], \"sequence_config_3\": [task_config_3], \"sequence_config_4\": [task_config_4], } ) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config_1) scenario_2 = _ScenarioManager._create(scenario_config_1) scenario_1.submit() scenario_2.submit() assert len(_CycleManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 8 assert len(_TaskManager._get_all()) == 6 assert len(_DataManager._get_all()) == 5 assert len(_JobManager._get_all()) == 8 _ScenarioManager._hard_delete(scenario_2.id) assert len(_CycleManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 4 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 6 def test_is_submittable(): assert len(_ScenarioManager._get_all()) == 0 dn_config = Config.configure_in_memory_data_node(\"dn\", 10) task_config = Config.configure_task(\"task\", print, [dn_config]) scenario_config = Config.configure_scenario(\"sc\", set([task_config]), set(), Frequency.DAILY) scenario = _ScenarioManager._create(scenario_config) assert len(_ScenarioManager._get_all()) == 1 assert _ScenarioManager._is_submittable(scenario) assert _ScenarioManager._is_submittable(scenario.id) assert not _ScenarioManager._is_submittable(\"Scenario_temp\") scenario.dn.edit_in_progress = True assert not _ScenarioManager._is_submittable(scenario) assert not _ScenarioManager._is_submittable(scenario.id) scenario.dn.edit_in_progress = False assert _ScenarioManager._is_submittable(scenario) assert _ScenarioManager._is_submittable(scenario.id) def test_submit(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = InMemoryDataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = InMemoryDataNode(\"fum\", Scope.SCENARIO, \"s8\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"thud\", {}, print, [data_node_6], [data_node_8], TaskId(\"t5\")) scenario = Scenario( \"scenario_name\", [task_5, task_4, task_2, 
task_1, task_3], {}, [], ScenarioId(\"sce_id\"), ) class MockOrchestrator(_Orchestrator): submit_calls = [] @classmethod def _lock_dn_output_and_create_job( cls, task: Task, submit_id: str, submit_entity_id: str, callbacks: Optional[Iterable[Callable]] = None, force: bool = False, ) -> Job: cls.submit_calls.append(task.id) return super()._lock_dn_output_and_create_job(task, submit_id, submit_entity_id, callbacks, force) with patch(\"src.taipy.core.task._task_manager._TaskManager._orchestrator\", new=MockOrchestrator): with pytest.raises(NonExistingScenario): _ScenarioManager._submit(scenario.id) with pytest.raises(NonExistingScenario): _ScenarioManager._submit(scenario) # scenario and sequence do exist, but tasks does not exist. # We expect an exception to be raised _ScenarioManager._set(scenario) with pytest.raises(NonExistingTask): _ScenarioManager._submit(scenario.id) with pytest.raises(NonExistingTask): _ScenarioManager._submit(scenario) # scenario, sequence, and tasks do exist. # We expect all the tasks to be submitted once, # and respecting specific constraints on the order _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _TaskManager._set(task_4) _TaskManager._set(task_5) _ScenarioManager._submit(scenario.id) submit_calls = _TaskManager._orchestrator().submit_calls assert len(submit_calls) == 5 assert set(submit_calls) == {task_1.id, task_2.id, task_4.id, task_3.id, task_5.id} assert submit_calls.index(task_2.id) < submit_calls.index(task_3.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_3.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_2.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_4.id) _ScenarioManager._submit(scenario) submit_calls = _TaskManager._orchestrator().submit_calls assert len(submit_calls) == 10 assert set(submit_calls) == {task_1.id, task_2.id, task_4.id, task_3.id, task_5.id} assert submit_calls.index(task_2.id) < submit_calls.index(task_3.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_3.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_2.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_4.id) def my_print(a, b): print(a + b) def test_submit_task_with_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_path=\"wrong_path.pickle\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") json_dn_cfg = Config.configure_parquet_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_2_cfg = Config.configure_task(\"task2\", my_print, [csv_dn_cfg, parquet_dn_cfg], json_dn_cfg) scenario_cfg = Config.configure_scenario(\"scenario\", [task_cfg, task_2_cfg]) sc_manager = _ScenarioManagerFactory._build_manager() scenario = sc_manager._create(scenario_cfg) sc_manager._submit(scenario) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in scenario.get_inputs() ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. 
Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in scenario.data_nodes.values() if input_dn not in scenario.get_inputs() ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_submit_task_with_one_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_data=\"value\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") json_dn_cfg = Config.configure_parquet_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_2_cfg = Config.configure_task(\"task2\", my_print, [csv_dn_cfg, parquet_dn_cfg], json_dn_cfg) scenario_cfg = Config.configure_scenario(\"scenario\", [task_cfg, task_2_cfg]) sce_manager = _ScenarioManagerFactory._build_manager() scenario = sce_manager._create(scenario_cfg) sce_manager._submit(scenario) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in scenario.get_inputs() if input_dn.config_id == \"wrong_csv_file_path\" ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in scenario.data_nodes.values() if input_dn.config_id != \"wrong_csv_file_path\" ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def subtraction(n1, n2): return n1 - n2 def addition(n1, n2): return n1 + n2 def test_scenarios_comparison_development_mode(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) scenario_config = Config.configure_scenario( \"Awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], comparators={\"bar\": [subtraction], \"foo\": [subtraction, addition]}, ) _OrchestratorFactory._build_dispatcher() assert scenario_config.comparators is not None scenario_1 = _ScenarioManager._create(scenario_config) scenario_2 = _ScenarioManager._create(scenario_config) with pytest.raises(InsufficientScenarioToCompare): _ScenarioManager._compare(scenario_1, data_node_config_id=\"bar\") scenario_3 = Scenario(\"awesome_scenario_config\", [], {}) with pytest.raises(DifferentScenarioConfigs): _ScenarioManager._compare(scenario_1, scenario_3, data_node_config_id=\"bar\") _ScenarioManager._submit(scenario_1.id) _ScenarioManager._submit(scenario_2.id) bar_comparison = _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"bar\")[\"bar\"] assert bar_comparison[\"subtraction\"] == 0 foo_comparison = _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"foo\")[\"foo\"] assert len(foo_comparison.keys()) == 2 assert foo_comparison[\"addition\"] == 2 assert foo_comparison[\"subtraction\"] == 0 assert len(_ScenarioManager._compare(scenario_1, scenario_2).keys()) == 2 with 
pytest.raises(NonExistingScenarioConfig): _ScenarioManager._compare(scenario_3, scenario_3) with pytest.raises(NonExistingComparator): _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"abc\") def test_scenarios_comparison_standalone_mode(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE) scenario_config = Config.configure_scenario( \"Awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], comparators={\"bar\": [subtraction], \"foo\": [subtraction, addition]}, ) _OrchestratorFactory._build_dispatcher() assert scenario_config.comparators is not None scenario_1 = _ScenarioManager._create(scenario_config) scenario_2 = _ScenarioManager._create(scenario_config) with pytest.raises(InsufficientScenarioToCompare): _ScenarioManager._compare(scenario_1, data_node_config_id=\"bar\") scenario_3 = Scenario(\"awesome_scenario_config\", [], {}) with pytest.raises(DifferentScenarioConfigs): _ScenarioManager._compare(scenario_1, scenario_3, data_node_config_id=\"bar\") _ScenarioManager._submit(scenario_1.id) _ScenarioManager._submit(scenario_2.id) bar_comparison = _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"bar\")[\"bar\"] assert_true_after_time(lambda: bar_comparison[\"subtraction\"] == 0) foo_comparison = _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"foo\")[\"foo\"] assert_true_after_time(lambda: len(foo_comparison.keys()) == 2) assert_true_after_time(lambda: foo_comparison[\"addition\"] == 2) assert_true_after_time(lambda: foo_comparison[\"subtraction\"] == 0) assert_true_after_time(lambda: len(_ScenarioManager._compare(scenario_1, scenario_2).keys()) == 2) with pytest.raises(NonExistingScenarioConfig): _ScenarioManager._compare(scenario_3, scenario_3) with pytest.raises(NonExistingComparator): _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"abc\") def test_tags(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() cycle_1 = _CycleManager._create(Frequency.DAILY, name=\"today\", creation_date=datetime.now()) cycle_2 = _CycleManager._create( Frequency.DAILY, name=\"tomorrow\", creation_date=datetime.now() + timedelta(days=1), ) cycle_3 = _CycleManager._create( Frequency.DAILY, name=\"yesterday\", creation_date=datetime.now() + timedelta(days=-1), ) scenario_no_tag = Scenario(\"scenario_no_tag\", [], {}, [], ScenarioId(\"scenario_no_tag\"), cycle=cycle_1) scenario_1_tag = Scenario( \"scenario_1_tag\", [], {}, [], ScenarioId(\"scenario_1_tag\"), cycle=cycle_1, tags={\"fst\"}, ) scenario_2_tags = Scenario( \"scenario_2_tags\", [], {}, [], ScenarioId(\"scenario_2_tags\"), cycle=cycle_2, tags={\"fst\", \"scd\"}, ) # Test has_tag assert len(scenario_no_tag.tags) == 0 assert not scenario_no_tag.has_tag(\"fst\") assert not scenario_no_tag.has_tag(\"scd\") assert len(scenario_1_tag.tags) == 1 assert scenario_1_tag.has_tag(\"fst\") assert not scenario_1_tag.has_tag(\"scd\") assert len(scenario_2_tags.tags) == 2 assert scenario_2_tags.has_tag(\"fst\") assert scenario_2_tags.has_tag(\"scd\") # test get and set serialize/deserialize tags _CycleManager._set(cycle_1) _CycleManager._set(cycle_2) _CycleManager._set(cycle_3) _ScenarioManager._set(scenario_no_tag) _ScenarioManager._set(scenario_1_tag) _ScenarioManager._set(scenario_2_tags) assert 
len(_ScenarioManager._get(ScenarioId(\"scenario_no_tag\")).tags) == 0 assert not _ScenarioManager._get(ScenarioId(\"scenario_no_tag\")).has_tag(\"fst\") assert not _ScenarioManager._get(ScenarioId(\"scenario_no_tag\")).has_tag(\"scd\") assert len(_ScenarioManager._get(ScenarioId(\"scenario_1_tag\")).tags) == 1 assert \"fst\" in _ScenarioManager._get(ScenarioId(\"scenario_1_tag\")).tags assert \"scd\" not in _ScenarioManager._get(ScenarioId(\"scenario_1_tag\")).tags assert len(_ScenarioManager._get(ScenarioId(\"scenario_2_tags\")).tags) == 2 assert \"fst\" in _ScenarioManager._get(ScenarioId(\"scenario_2_tags\")).tags assert \"scd\" in _ScenarioManager._get(ScenarioId(\"scenario_2_tags\")).tags # Test tag & untag _ScenarioManager._tag(scenario_no_tag, \"thd\") # add new tag _ScenarioManager._untag(scenario_1_tag, \"NOT_EXISTING_TAG\") # remove not existing tag does nothing _ScenarioManager._untag(scenario_1_tag, \"fst\") # remove `fst` tag assert len(scenario_no_tag.tags) == 1 assert not scenario_no_tag.has_tag(\"fst\") assert not scenario_no_tag.has_tag(\"scd\") assert scenario_no_tag.has_tag(\"thd\") assert len(scenario_1_tag.tags) == 0 assert not scenario_1_tag.has_tag(\"fst\") assert not scenario_1_tag.has_tag(\"scd\") assert not scenario_1_tag.has_tag(\"thd\") assert len(scenario_2_tags.tags) == 2 assert scenario_2_tags.has_tag(\"fst\") assert scenario_2_tags.has_tag(\"scd\") assert not scenario_2_tags.has_tag(\"thd\") _ScenarioManager._untag(scenario_no_tag, \"thd\") _ScenarioManager._set(scenario_no_tag) _ScenarioManager._tag(scenario_1_tag, \"fst\") _ScenarioManager._set(scenario_1_tag) # test getters assert not _ScenarioManager._get_by_tag(cycle_3, \"fst\") assert not _ScenarioManager._get_by_tag(cycle_3, \"scd\") assert not _ScenarioManager._get_by_tag(cycle_3, \"thd\") assert _ScenarioManager._get_by_tag(cycle_2, \"fst\") == scenario_2_tags assert _ScenarioManager._get_by_tag(cycle_2, \"scd\") == scenario_2_tags assert not _ScenarioManager._get_by_tag(cycle_2, \"thd\") assert _ScenarioManager._get_by_tag(cycle_1, \"fst\") == scenario_1_tag assert not _ScenarioManager._get_by_tag(cycle_1, \"scd\") assert not _ScenarioManager._get_by_tag(cycle_1, \"thd\") assert len(_ScenarioManager._get_all_by_tag(\"NOT_EXISTING\")) == 0 assert scenario_1_tag in _ScenarioManager._get_all_by_tag(\"fst\") assert scenario_2_tags in _ScenarioManager._get_all_by_tag(\"fst\") assert _ScenarioManager._get_all_by_tag(\"scd\") == [scenario_2_tags] assert len(_ScenarioManager._get_all_by_tag(\"thd\")) == 0 # test tag cycle mgt _ScenarioManager._tag( scenario_no_tag, \"fst\" ) # tag sc_no_tag should untag sc_1_tag with same cycle but not sc_2_tags assert not _ScenarioManager._get_by_tag(cycle_3, \"fst\") assert not _ScenarioManager._get_by_tag(cycle_3, \"scd\") assert not _ScenarioManager._get_by_tag(cycle_3, \"thd\") assert _ScenarioManager._get_by_tag(cycle_2, \"fst\") == scenario_2_tags assert _ScenarioManager._get_by_tag(cycle_2, \"scd\") == scenario_2_tags assert not _ScenarioManager._get_by_tag(cycle_2, \"thd\") assert _ScenarioManager._get_by_tag(cycle_1, \"fst\") == scenario_no_tag assert not _ScenarioManager._get_by_tag(cycle_1, \"scd\") assert not _ScenarioManager._get_by_tag(cycle_1, \"thd\") assert len(_ScenarioManager._get_all_by_tag(\"NOT_EXISTING\")) == 0 assert len(_ScenarioManager._get_all_by_tag(\"fst\")) == 2 assert scenario_2_tags in _ScenarioManager._get_all_by_tag(\"fst\") assert scenario_no_tag in _ScenarioManager._get_all_by_tag(\"fst\") assert 
_ScenarioManager._get_all_by_tag(\"scd\") == [scenario_2_tags] assert len(_ScenarioManager._get_all_by_tag(\"thd\")) == 0 def test_authorized_tags(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) scenario = Scenario(\"scenario_1\", [], {\"authorized_tags\": [\"foo\", \"bar\"]}, [], ScenarioId(\"scenario_1\")) scenario_2_cfg = Config.configure_scenario(\"scenario_2\", [], [], Frequency.DAILY, authorized_tags=[\"foo\", \"bar\"]) _OrchestratorFactory._build_dispatcher() scenario_2 = _ScenarioManager._create(scenario_2_cfg) _ScenarioManager._set(scenario) assert len(scenario.tags) == 0 assert len(scenario_2.tags) == 0 with pytest.raises(UnauthorizedTagError): _ScenarioManager._tag(scenario, \"baz\") _ScenarioManager._tag(scenario_2, \"baz\") assert len(scenario.tags) == 0 assert len(scenario_2.tags) == 0 _ScenarioManager._tag(scenario, \"foo\") _ScenarioManager._tag(scenario_2, \"foo\") assert len(scenario.tags) == 1 assert len(scenario_2.tags) == 1 _ScenarioManager._tag(scenario, \"bar\") _ScenarioManager._tag(scenario_2, \"bar\") assert len(scenario.tags) == 2 assert len(scenario_2.tags) == 2 _ScenarioManager._tag(scenario, \"foo\") _ScenarioManager._tag(scenario_2, \"foo\") assert len(scenario.tags) == 2 assert len(scenario_2.tags) == 2 _ScenarioManager._untag(scenario, \"foo\") _ScenarioManager._untag(scenario_2, \"foo\") assert len(scenario.tags) == 1 assert len(scenario_2.tags) == 1 def test_get_scenarios_by_config_id(): scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) scenario_config_3 = Config.configure_scenario(\"s3\", sequence_configs=[]) s_1_1 = _ScenarioManager._create(scenario_config_1) s_1_2 = _ScenarioManager._create(scenario_config_1) s_1_3 = _ScenarioManager._create(scenario_config_1) assert len(_ScenarioManager._get_all()) == 3 s_2_1 = _ScenarioManager._create(scenario_config_2) s_2_2 = _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_all()) == 5 s_3_1 = _ScenarioManager._create(scenario_config_3) assert len(_ScenarioManager._get_all()) == 6 s1_scenarios = _ScenarioManager._get_by_config_id(scenario_config_1.id) assert len(s1_scenarios) == 3 assert sorted([s_1_1.id, s_1_2.id, s_1_3.id]) == sorted([scenario.id for scenario in s1_scenarios]) s2_scenarios = _ScenarioManager._get_by_config_id(scenario_config_2.id) assert len(s2_scenarios) == 2 assert sorted([s_2_1.id, s_2_2.id]) == sorted([scenario.id for scenario in s2_scenarios]) s3_scenarios = _ScenarioManager._get_by_config_id(scenario_config_3.id) assert len(s3_scenarios) == 1 assert sorted([s_3_1.id]) == sorted([scenario.id for scenario in s3_scenarios]) def test_get_scenarios_by_config_id_in_multiple_versions_environment(): scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) _VersionManager._set_experiment_version(\"1.0\") _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_2) _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_by_config_id(scenario_config_1.id)) == 3 assert len(_ScenarioManager._get_by_config_id(scenario_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) 
_ScenarioManager._create(scenario_config_2) _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_by_config_id(scenario_config_1.id)) == 3 assert len(_ScenarioManager._get_by_config_id(scenario_config_2.id)) == 2 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import pytest from src.taipy.core.exceptions import ModelNotFound from src.taipy.core.scenario._scenario_fs_repository import _ScenarioFSRepository from src.taipy.core.scenario._scenario_sql_repository import _ScenarioSQLRepository from src.taipy.core.scenario.scenario import Scenario, ScenarioId class TestScenarioFSRepository: @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_save_and_load(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) obj = repository._load(scenario.id) assert isinstance(obj, Scenario) @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_exists(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) assert repository._exists(scenario.id) assert not repository._exists(\"not-existed-scenario\") @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_load_all(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_load_all_with_filters(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) objs = repository._load_all(filters=[{\"id\": \"scenario-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) repository._delete(scenario.id) with pytest.raises(ModelNotFound): repository._load(scenario.id) @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_all(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_many(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_by(self, scenario, repo, init_sql_repo): repository = 
repo() # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") scenario._version = f\"{(i+1) // 5}.0\" repository._save(scenario) objs = repository._load_all() assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_search(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) assert len(repository._load_all()) == 10 objs = repository._search(\"id\", \"scenario-2\") assert len(objs) == 1 assert isinstance(objs[0], Scenario) objs = repository._search(\"id\", \"scenario-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], Scenario) assert repository._search(\"id\", \"scenario-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_export(self, tmpdir, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) repository._export(scenario.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _ScenarioFSRepository else os.path.join(tmpdir.strpath, \"scenario\") assert os.path.exists(os.path.join(dir_path, f\"{scenario.id}.json\")) "} {"text": "from datetime import datetime, timedelta import pytest from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config.job_config import JobConfig from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import DeletingPrimaryScenario from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.scenario.scenario_id import ScenarioId from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.sequence._sequence_manager_factory import _SequenceManagerFactory from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.conftest import init_managers def test_set_and_get_scenario(cycle, init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() _OrchestratorFactory._build_dispatcher() scenario_id_1 = ScenarioId(\"scenario_id_1\") scenario_1 = Scenario(\"scenario_name_1\", [], {}, [], scenario_id_1) input_dn_2 = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_dn_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO) additional_dn_2 = InMemoryDataNode(\"zyx\", Scope.SCENARIO) task_name_2 = \"task_2\" task_2 = 
Task(task_name_2, {}, print, [input_dn_2], [output_dn_2], TaskId(\"task_id_2\")) scenario_id_2 = ScenarioId(\"scenario_id_2\") scenario_2 = Scenario( \"scenario_name_2\", [task_2], {}, [additional_dn_2], scenario_id_2, datetime.now(), True, cycle, sequences={\"sequence_2\": {\"tasks\": [task_2]}}, ) additional_dn_3 = InMemoryDataNode(\"baz\", Scope.SCENARIO) task_name_3 = \"task_3\" task_3 = Task(task_name_3, {}, print, id=TaskId(\"task_id_3\")) scenario_3_with_same_id = Scenario( \"scenario_name_3\", [task_3], {}, [additional_dn_3], scenario_id_1, datetime.now(), False, cycle, sequences={\"sequence_3\": {}}, ) # No existing scenario assert len(_ScenarioManager._get_all()) == 0 assert _ScenarioManager._get(scenario_id_1) is None assert _ScenarioManager._get(scenario_1) is None assert _ScenarioManager._get(scenario_id_2) is None assert _ScenarioManager._get(scenario_2) is None # Save one scenario. We expect to have only one scenario stored _ScenarioManager._set(scenario_1) assert len(_ScenarioManager._get_all()) == 1 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2) is None assert _ScenarioManager._get(scenario_2) is None # Save a second scenario. 
Now, we expect to have a total of two scenarios stored _TaskManager._set(task_2) _CycleManager._set(cycle) _ScenarioManager._set(scenario_2) _DataManager._set(additional_dn_2) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id assert _ScenarioManager._get(scenario_id_2).cycle == cycle assert _ScenarioManager._get(scenario_2).cycle == cycle assert _CycleManager._get(cycle.id).id == cycle.id # We save the first scenario again. 
We expect nothing to change _ScenarioManager._set(scenario_1) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id assert _CycleManager._get(cycle.id).id == cycle.id # We save a third scenario with same id as the first one. 
# We expect the first scenario to be updated _DataManager._set(additional_dn_3) _TaskManager._set(task_3) _TaskManager._set(scenario_2.tasks[task_name_2]) _ScenarioManager._set(scenario_3_with_same_id) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_3_with_same_id.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 1 assert _ScenarioManager._get(scenario_id_1).cycle == cycle assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_3_with_same_id.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 1 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 1 assert len(_ScenarioManager._get(scenario_1).sequences) == 1 assert _ScenarioManager._get(scenario_1).cycle == cycle assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id def test_get_all_on_multiple_versions_environment(init_sql_repo): init_managers() # Create 5 scenarios with 2 versions each # Only version 1.0 has the scenario with config_id = \"config_id_1\" # Only version 2.0 has the scenario with config_id = \"config_id_6\" for version in range(1, 3): for i in range(5): _ScenarioManager._set( Scenario(f\"config_id_{i+version}\", [], {}, ScenarioId(f\"id{i}_v{version}\"), version=f\"{version}.0\") ) _VersionManager._set_experiment_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"2.0\") assert len(_ScenarioManager._get_all()) == 5 assert 
len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_create_scenario_does_not_modify_config(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() creation_date_1 = datetime.now() name_1 = \"name_1\" scenario_config = Config.configure_scenario(\"sc\", None, None, Frequency.DAILY) _OrchestratorFactory._build_dispatcher() assert scenario_config.properties.get(\"name\") is None assert len(scenario_config.properties) == 0 scenario = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) assert len(scenario_config.properties) == 0 assert len(scenario.properties) == 1 assert scenario.properties.get(\"name\") == name_1 assert scenario.name == name_1 scenario.properties[\"foo\"] = \"bar\" _ScenarioManager._set(scenario) assert len(scenario_config.properties) == 0 assert len(scenario.properties) == 2 assert scenario.properties.get(\"foo\") == \"bar\" assert scenario.properties.get(\"name\") == name_1 assert scenario.name == name_1 scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1) assert scenario_2.name is None def test_create_and_delete_scenario(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() creation_date_1 = datetime.now() creation_date_2 = creation_date_1 + timedelta(minutes=10) name_1 = \"name_1\" _ScenarioManager._delete_all() assert len(_ScenarioManager._get_all()) == 0 scenario_config = Config.configure_scenario(\"sc\", None, None, Frequency.DAILY) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) assert scenario_1.config_id == \"sc\" assert scenario_1.sequences == {} assert scenario_1.tasks == {} assert scenario_1.additional_data_nodes == {} assert scenario_1.data_nodes == {} assert scenario_1.cycle.frequency == Frequency.DAILY assert scenario_1.is_primary assert scenario_1.cycle.creation_date == creation_date_1 assert scenario_1.cycle.start_date.date() == creation_date_1.date() assert scenario_1.cycle.end_date.date() == creation_date_1.date() assert scenario_1.creation_date == creation_date_1 assert scenario_1.name == name_1 assert scenario_1.properties[\"name\"] == name_1 assert scenario_1.tags == set() cycle_id_1 = scenario_1.cycle.id assert _CycleManager._get(cycle_id_1).id == cycle_id_1 _ScenarioManager._delete(scenario_1.id) assert _ScenarioManager._get(scenario_1.id) is None assert _CycleManager._get(cycle_id_1) is None # Recreate scenario_1 scenario_1 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date_2) assert scenario_2.config_id == \"sc\" assert scenario_2.sequences == {} assert scenario_2.tasks == {} assert scenario_2.additional_data_nodes == {} assert scenario_2.data_nodes == {} assert scenario_2.cycle.frequency == Frequency.DAILY assert not scenario_2.is_primary assert scenario_2.cycle.creation_date == creation_date_1 assert scenario_2.cycle.start_date.date() == creation_date_2.date() assert scenario_2.cycle.end_date.date() == creation_date_2.date() assert scenario_2.properties.get(\"name\") is None assert scenario_2.tags == set() assert scenario_1 != scenario_2 assert scenario_1.cycle == scenario_2.cycle assert len(_ScenarioManager._get_all()) 
== 2 with pytest.raises(DeletingPrimaryScenario): _ScenarioManager._delete( scenario_1.id, ) _ScenarioManager._delete( scenario_2.id, ) assert len(_ScenarioManager._get_all()) == 1 _ScenarioManager._delete(scenario_1.id) assert len(_ScenarioManager._get_all()) == 0 def mult_by_2(nb: int): return nb * 2 def mult_by_3(nb: int): return nb * 3 def mult_by_4(nb: int): return nb * 4 def test_scenario_manager_only_creates_data_node_once(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() # dn_1 ---> mult_by_2 ---> dn_2 ---> mult_by_3 ---> dn_6 # dn_1 ---> mult_by_4 ---> dn_4 dn_config_1 = Config.configure_data_node(\"foo\", \"in_memory\", Scope.GLOBAL, default_data=1) dn_config_2 = Config.configure_data_node(\"bar\", \"in_memory\", Scope.CYCLE, default_data=0) dn_config_6 = Config.configure_data_node(\"baz\", \"in_memory\", Scope.CYCLE, default_data=0) dn_config_4 = Config.configure_data_node(\"qux\", \"in_memory\", Scope.SCENARIO, default_data=0) task_mult_by_2_config = Config.configure_task(\"mult_by_2\", mult_by_2, [dn_config_1], dn_config_2) task_mult_by_3_config = Config.configure_task(\"mult_by_3\", mult_by_3, [dn_config_2], dn_config_6) task_mult_by_4_config = Config.configure_task(\"mult_by_4\", mult_by_4, [dn_config_1], dn_config_4) scenario_config = Config.configure_scenario( \"awesome_scenario\", [task_mult_by_2_config, task_mult_by_3_config, task_mult_by_4_config], None, Frequency.DAILY ) scenario_config.add_sequences( {\"by_6\": [task_mult_by_2_config, task_mult_by_3_config], \"by_4\": [task_mult_by_4_config]} ) _OrchestratorFactory._build_dispatcher() assert len(_DataManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 0 assert len(_SequenceManager._get_all()) == 0 assert len(_ScenarioManager._get_all()) == 0 assert len(_CycleManager._get_all()) == 0 scenario_1 = _ScenarioManager._create(scenario_config) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 3 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 1 assert scenario_1.foo.read() == 1 assert scenario_1.bar.read() == 0 assert scenario_1.baz.read() == 0 assert scenario_1.qux.read() == 0 assert scenario_1.by_6._get_sorted_tasks()[0][0].config_id == task_mult_by_2_config.id assert scenario_1.by_6._get_sorted_tasks()[1][0].config_id == task_mult_by_3_config.id assert scenario_1.by_4._get_sorted_tasks()[0][0].config_id == task_mult_by_4_config.id assert scenario_1.tasks.keys() == {task_mult_by_2_config.id, task_mult_by_3_config.id, task_mult_by_4_config.id} scenario_1_sorted_tasks = scenario_1._get_sorted_tasks() expected = [{task_mult_by_2_config.id, task_mult_by_4_config.id}, {task_mult_by_3_config.id}] for i, list_tasks_by_level in enumerate(scenario_1_sorted_tasks): assert set([t.config_id for t in list_tasks_by_level]) == expected[i] assert scenario_1.cycle.frequency == Frequency.DAILY _ScenarioManager._create(scenario_config) assert len(_DataManager._get_all()) == 5 assert len(_TaskManager._get_all()) == 4 assert len(_SequenceManager._get_all()) == 4 assert len(_ScenarioManager._get_all()) == 2 def test_get_scenarios_by_config_id(init_sql_repo): init_managers() scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) scenario_config_3 = Config.configure_scenario(\"s3\", sequence_configs=[]) s_1_1 = _ScenarioManager._create(scenario_config_1) s_1_2 = _ScenarioManager._create(scenario_config_1) s_1_3 = 
_ScenarioManager._create(scenario_config_1) assert len(_ScenarioManager._get_all()) == 3 s_2_1 = _ScenarioManager._create(scenario_config_2) s_2_2 = _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_all()) == 5 s_3_1 = _ScenarioManager._create(scenario_config_3) assert len(_ScenarioManager._get_all()) == 6 s1_scenarios = _ScenarioManager._get_by_config_id(scenario_config_1.id) assert len(s1_scenarios) == 3 assert sorted([s_1_1.id, s_1_2.id, s_1_3.id]) == sorted([scenario.id for scenario in s1_scenarios]) s2_scenarios = _ScenarioManager._get_by_config_id(scenario_config_2.id) assert len(s2_scenarios) == 2 assert sorted([s_2_1.id, s_2_2.id]) == sorted([scenario.id for scenario in s2_scenarios]) s3_scenarios = _ScenarioManager._get_by_config_id(scenario_config_3.id) assert len(s3_scenarios) == 1 assert sorted([s_3_1.id]) == sorted([scenario.id for scenario in s3_scenarios]) def test_get_scenarios_by_config_id_in_multiple_versions_environment(init_sql_repo): init_managers() scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) _VersionManager._set_experiment_version(\"1.0\") _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_2) _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_by_config_id(scenario_config_1.id)) == 3 assert len(_ScenarioManager._get_by_config_id(scenario_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_2) _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_by_config_id(scenario_config_1.id)) == 3 assert len(_ScenarioManager._get_by_config_id(scenario_config_2.id)) == 2 "} {"text": "from datetime import datetime, timedelta from unittest import mock import pytest from src.taipy.core.common._utils import _Subscriber from src.taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory from src.taipy.core.cycle.cycle import Cycle, CycleId from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.in_memory import DataNode, InMemoryDataNode from src.taipy.core.data.pickle import PickleDataNode from src.taipy.core.exceptions.exceptions import SequenceTaskDoesNotExistInScenario from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.scenario.scenario_id import ScenarioId from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task, TaskId from taipy.config import Frequency from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId def test_create_primary_scenario(cycle): scenario = Scenario(\"foo\", set(), {\"key\": \"value\"}, is_primary=True, cycle=cycle) assert scenario.id is not None assert scenario.config_id == \"foo\" assert scenario.tasks == {} assert scenario.additional_data_nodes == {} assert scenario.data_nodes == {} assert scenario.sequences == {} assert scenario.properties == {\"key\": \"value\"} assert scenario.key == 
\"value\" assert scenario.creation_date is not None assert scenario.is_primary assert scenario.cycle == cycle assert scenario.tags == set() assert scenario.get_simple_label() == scenario.config_id with mock.patch(\"src.taipy.core.get\") as get_mck: class MockOwner: label = \"owner_label\" def get_label(self): return self.label get_mck.return_value = MockOwner() assert scenario.get_label() == \"owner_label > \" + scenario.config_id def test_create_scenario_at_time(current_datetime): scenario = Scenario(\"bar\", set(), {}, set(), ScenarioId(\"baz\"), creation_date=current_datetime) assert scenario.id == \"baz\" assert scenario.config_id == \"bar\" assert scenario.tasks == {} assert scenario.additional_data_nodes == {} assert scenario.data_nodes == {} assert scenario.sequences == {} assert scenario.properties == {} assert scenario.creation_date == current_datetime assert not scenario.is_primary assert scenario.cycle is None assert scenario.tags == set() assert scenario.get_simple_label() == scenario.config_id assert scenario.get_label() == scenario.config_id def test_create_scenario_with_task_and_additional_dn_and_sequence(): dn_1 = PickleDataNode(\"xyz\", Scope.SCENARIO) dn_2 = PickleDataNode(\"abc\", Scope.SCENARIO) task = Task(\"qux\", {}, print, [dn_1]) scenario = Scenario(\"quux\", set([task]), {}, set([dn_2]), sequences={\"acb\": {\"tasks\": [task]}}) sequence = scenario.sequences[\"acb\"] assert scenario.id is not None assert scenario.config_id == \"quux\" assert len(scenario.tasks) == 1 assert len(scenario.additional_data_nodes) == 1 assert len(scenario.data_nodes) == 2 assert len(scenario.sequences) == 1 assert scenario.qux == task assert scenario.xyz == dn_1 assert scenario.abc == dn_2 assert scenario.acb == sequence assert scenario.properties == {} assert scenario.tags == set() def test_create_scenario_invalid_config_id(): with pytest.raises(InvalidConfigurationId): Scenario(\"foo bar\", [], {}) def test_create_scenario_and_add_sequences(): input_1 = PickleDataNode(\"input_1\", Scope.SCENARIO) output_1 = PickleDataNode(\"output_1\", Scope.SCENARIO) output_2 = PickleDataNode(\"output_2\", Scope.SCENARIO) additional_dn_1 = PickleDataNode(\"additional_1\", Scope.SCENARIO) additional_dn_2 = PickleDataNode(\"additional_2\", Scope.SCENARIO) task_1 = Task(\"task_1\", {}, print, [input_1], [output_1], TaskId(\"task_id_1\")) task_2 = Task(\"task_2\", {}, print, [output_1], [output_2], TaskId(\"task_id_2\")) data_manager = _DataManagerFactory._build_manager() task_manager = _TaskManagerFactory._build_manager() data_manager._set(input_1) data_manager._set(output_1) data_manager._set(output_2) data_manager._set(additional_dn_1) data_manager._set(additional_dn_2) task_manager._set(task_1) task_manager._set(task_2) scenario = Scenario(\"scenario\", set([task_1]), {}) scenario.sequences = {\"sequence_1\": {\"tasks\": [task_1]}, \"sequence_2\": {\"tasks\": []}} assert scenario.id is not None assert scenario.config_id == \"scenario\" assert len(scenario.tasks) == 1 assert scenario.tasks.keys() == {task_1.config_id} assert len(scenario.additional_data_nodes) == 0 assert scenario.additional_data_nodes == {} assert len(scenario.data_nodes) == 2 assert scenario.data_nodes == { input_1.config_id: input_1, output_1.config_id: output_1, } assert len(scenario.sequences) == 2 assert scenario.sequence_1 == scenario.sequences[\"sequence_1\"] assert scenario.sequence_2 == scenario.sequences[\"sequence_2\"] assert scenario.sequences == {\"sequence_1\": scenario.sequence_1, \"sequence_2\": scenario.sequence_2} 
def test_create_scenario_overlapping_sequences(): input_1 = PickleDataNode(\"input_1\", Scope.SCENARIO) output_1 = PickleDataNode(\"output_1\", Scope.SCENARIO) output_2 = PickleDataNode(\"output_2\", Scope.SCENARIO) additional_dn_1 = PickleDataNode(\"additional_1\", Scope.SCENARIO) additional_dn_2 = PickleDataNode(\"additional_2\", Scope.SCENARIO) task_1 = Task(\"task_1\", {}, print, [input_1], [output_1], TaskId(\"task_id_1\")) task_2 = Task(\"task_2\", {}, print, [output_1], [output_2], TaskId(\"task_id_2\")) data_manager = _DataManagerFactory._build_manager() task_manager = _TaskManagerFactory._build_manager() data_manager._set(input_1) data_manager._set(output_1) data_manager._set(output_2) data_manager._set(additional_dn_1) data_manager._set(additional_dn_2) task_manager._set(task_1) task_manager._set(task_2) scenario = Scenario(\"scenario\", set([task_1, task_2]), {}) scenario.add_sequence(\"sequence_1\", [task_1]) scenario.add_sequence(\"sequence_2\", [task_1, task_2]) assert scenario.id is not None assert scenario.config_id == \"scenario\" assert len(scenario.tasks) == 2 assert scenario.tasks.keys() == {task_1.config_id, task_2.config_id} assert len(scenario.additional_data_nodes) == 0 assert scenario.additional_data_nodes == {} assert len(scenario.data_nodes) == 3 assert scenario.data_nodes == { input_1.config_id: input_1, output_1.config_id: output_1, output_2.config_id: output_2, } sequence_1 = scenario.sequences[\"sequence_1\"] sequence_2 = scenario.sequences[\"sequence_2\"] assert scenario.sequences == {\"sequence_1\": sequence_1, \"sequence_2\": sequence_2} scenario.remove_sequences([\"sequence_2\"]) assert scenario.sequences == {\"sequence_1\": sequence_1} scenario.remove_sequences([\"sequence_1\"]) assert scenario.sequences == {} def test_create_scenario_one_additional_dn(): input_1 = PickleDataNode(\"input_1\", Scope.SCENARIO) input_2 = PickleDataNode(\"input_2\", Scope.SCENARIO) output_1 = PickleDataNode(\"output_1\", Scope.SCENARIO) output_2 = PickleDataNode(\"output_2\", Scope.SCENARIO) additional_dn_1 = PickleDataNode(\"additional_1\", Scope.SCENARIO) additional_dn_2 = PickleDataNode(\"additional_2\", Scope.SCENARIO) task_1 = Task(\"task_1\", {}, print, [input_1], [output_1], TaskId(\"task_id_1\")) task_2 = Task(\"task_2\", {}, print, [input_2], [output_2], TaskId(\"task_id_2\")) data_manager = _DataManagerFactory._build_manager() task_manager = _TaskManagerFactory._build_manager() data_manager._set(input_1) data_manager._set(output_1) data_manager._set(input_2) data_manager._set(output_2) data_manager._set(additional_dn_1) data_manager._set(additional_dn_2) task_manager._set(task_1) task_manager._set(task_2) scenario = Scenario(\"scenario\", set(), {}, set([additional_dn_1])) assert scenario.id is not None assert scenario.config_id == \"scenario\" assert len(scenario.tasks) == 0 assert len(scenario.additional_data_nodes) == 1 assert len(scenario.data_nodes) == 1 assert scenario.tasks == {} assert scenario.additional_data_nodes == {additional_dn_1.config_id: additional_dn_1} assert scenario.data_nodes == {additional_dn_1.config_id: additional_dn_1} def test_create_scenario_wth_additional_dns(): input_1 = PickleDataNode(\"input_1\", Scope.SCENARIO) input_2 = PickleDataNode(\"input_2\", Scope.SCENARIO) output_1 = PickleDataNode(\"output_1\", Scope.SCENARIO) output_2 = PickleDataNode(\"output_2\", Scope.SCENARIO) additional_dn_1 = PickleDataNode(\"additional_1\", Scope.SCENARIO) additional_dn_2 = PickleDataNode(\"additional_2\", Scope.SCENARIO) task_1 = Task(\"task_1\", 
{}, print, [input_1], [output_1], TaskId(\"task_id_1\")) task_2 = Task(\"task_2\", {}, print, [input_2], [output_2], TaskId(\"task_id_2\")) data_manager = _DataManagerFactory._build_manager() task_manager = _TaskManagerFactory._build_manager() data_manager._set(input_1) data_manager._set(output_1) data_manager._set(input_2) data_manager._set(output_2) data_manager._set(additional_dn_1) data_manager._set(additional_dn_2) task_manager._set(task_1) task_manager._set(task_2) scenario = Scenario(\"scenario\", set(), {}, set([additional_dn_1, additional_dn_2])) assert scenario.id is not None assert scenario.config_id == \"scenario\" assert len(scenario.tasks) == 0 assert len(scenario.additional_data_nodes) == 2 assert len(scenario.data_nodes) == 2 assert scenario.tasks == {} assert scenario.additional_data_nodes == { additional_dn_1.config_id: additional_dn_1, additional_dn_2.config_id: additional_dn_2, } assert scenario.data_nodes == { additional_dn_1.config_id: additional_dn_1, additional_dn_2.config_id: additional_dn_2, } scenario_1 = Scenario(\"scenario_1\", set([task_1]), {}, set([additional_dn_1])) assert scenario_1.id is not None assert scenario_1.config_id == \"scenario_1\" assert len(scenario_1.tasks) == 1 assert len(scenario_1.additional_data_nodes) == 1 assert len(scenario_1.data_nodes) == 3 assert scenario_1.tasks.keys() == {task_1.config_id} assert scenario_1.additional_data_nodes == { additional_dn_1.config_id: additional_dn_1, } assert scenario_1.data_nodes == { input_1.config_id: input_1, output_1.config_id: output_1, additional_dn_1.config_id: additional_dn_1, } scenario_2 = Scenario(\"scenario_2\", set([task_1, task_2]), {}, set([additional_dn_1, additional_dn_2])) assert scenario_2.id is not None assert scenario_2.config_id == \"scenario_2\" assert len(scenario_2.tasks) == 2 assert len(scenario_2.additional_data_nodes) == 2 assert len(scenario_2.data_nodes) == 6 assert scenario_2.tasks.keys() == {task_1.config_id, task_2.config_id} assert scenario_2.additional_data_nodes == { additional_dn_1.config_id: additional_dn_1, additional_dn_2.config_id: additional_dn_2, } assert {dn_config_id: dn.id for dn_config_id, dn in scenario_2.data_nodes.items()} == { input_1.config_id: input_1.id, output_1.config_id: output_1.id, input_2.config_id: input_2.id, output_2.config_id: output_2.id, additional_dn_1.config_id: additional_dn_1.id, additional_dn_2.config_id: additional_dn_2.id, } def test_raise_sequence_tasks_not_in_scenario(data_node): task_1 = Task(\"task_1\", {}, print, output=[data_node]) task_2 = Task(\"task_2\", {}, print, input=[data_node]) with pytest.raises(SequenceTaskDoesNotExistInScenario) as err: Scenario(\"scenario\", [], {}, sequences={\"sequence\": {\"tasks\": [task_1]}}, scenario_id=\"SCENARIO_scenario\") assert err.value.args == ([task_1.id], \"sequence\", \"SCENARIO_scenario\") with pytest.raises(SequenceTaskDoesNotExistInScenario) as err: Scenario( \"scenario\", [task_1], {}, sequences={\"sequence\": {\"tasks\": [task_1, task_2]}}, scenario_id=\"SCENARIO_scenario\", ) assert err.value.args == ([task_2.id], \"sequence\", \"SCENARIO_scenario\") Scenario(\"scenario\", [task_1], {}, sequences={\"sequence\": {\"tasks\": [task_1]}}) Scenario( \"scenario\", [task_1, task_2], {}, sequences={\"sequence_1\": {\"tasks\": [task_1]}, \"sequence_2\": {\"tasks\": [task_1, task_2]}}, ) def test_raise_tasks_not_in_scenario_with_add_sequence_api(data_node): task_1 = Task(\"task_1\", {}, print, output=[data_node]) task_2 = Task(\"task_2\", {}, print, input=[data_node]) scenario = 
Scenario(\"scenario\", [task_1], {}) scenario_manager = _ScenarioManagerFactory._build_manager() task_manager = _TaskManagerFactory._build_manager() scenario_manager._set(scenario) task_manager._set(task_1) task_manager._set(task_2) scenario.add_sequences({\"sequence_1\": {}}) with pytest.raises(SequenceTaskDoesNotExistInScenario) as err: scenario.add_sequence(\"sequence_2\", [task_2]) assert err.value.args == ([task_2.id], \"sequence_2\", scenario.id) scenario.add_sequence(\"sequence_3\", [task_1]) with pytest.raises(SequenceTaskDoesNotExistInScenario) as err: scenario.add_sequences({\"sequence_4\": [task_2]}) assert err.value.args == ([task_2.id], \"sequence_4\", scenario.id) with pytest.raises(SequenceTaskDoesNotExistInScenario) as err: scenario.add_sequences({\"sequence_5\": [task_1, task_2]}) assert err.value.args == ([task_2.id], \"sequence_5\", scenario.id) scenario.tasks = [task_1, task_2] scenario.add_sequence(\"sequence_6\", [task_1, task_2]) def test_add_property_to_scenario(): scenario = Scenario(\"foo\", [], {\"key\": \"value\"}) assert scenario.properties == {\"key\": \"value\"} assert scenario.key == \"value\" scenario.properties[\"new_key\"] = \"new_value\" assert scenario.properties == {\"key\": \"value\", \"new_key\": \"new_value\"} assert scenario.key == \"value\" assert scenario.new_key == \"new_value\" def test_add_cycle_to_scenario(cycle): scenario = Scenario(\"foo\", [], {}) assert scenario.cycle is None _CycleManagerFactory._build_manager()._set(cycle) scenario.cycle = cycle assert scenario.cycle == cycle def test_add_and_remove_subscriber(): scenario = Scenario(\"foo\", [], {}) scenario._add_subscriber(print) assert len(scenario.subscribers) == 1 scenario._remove_subscriber(print) assert len(scenario.subscribers) == 0 def test_add_and_remove_tag(): scenario = Scenario(\"foo\", [], {}) assert len(scenario.tags) == 0 scenario._add_tag(\"tag\") assert len(scenario.tags) == 1 scenario._remove_tag(\"tag\") assert len(scenario.tags) == 0 def test_auto_set_and_reload(cycle, current_datetime, task, data_node): scenario_1 = Scenario( \"foo\", set(), {\"name\": \"bar\"}, set(), creation_date=current_datetime, is_primary=True, cycle=None, ) additional_dn = InMemoryDataNode(\"additional_dn\", Scope.SCENARIO) example_date = datetime.fromisoformat(\"2021-11-11T11:11:01.000001\") tmp_cycle = Cycle( Frequency.WEEKLY, {}, creation_date=example_date, start_date=example_date, end_date=example_date, name=\"cc\", id=CycleId(\"tmp_cc_id\"), ) sequence_1_name = \"sequence_1\" sequence_1 = Sequence({}, [], SequenceId(f\"SEQUENCE_{sequence_1_name}_{scenario_1.id}\")) tmp_sequence_name = \"tmp_sequence\" tmp_sequence = Sequence( {}, [], SequenceId(f\"SEQUENCE_{tmp_sequence_name}_{scenario_1.id}\"), ) _TaskManagerFactory._build_manager()._set(task) _DataManagerFactory._build_manager()._set(data_node) _DataManagerFactory._build_manager()._set(additional_dn) _CycleManagerFactory._build_manager()._set(cycle) scenario_manager = _ScenarioManagerFactory._build_manager() cycle_manager = _CycleManagerFactory._build_manager() cycle_manager._set(cycle) cycle_manager._set(tmp_cycle) scenario_manager._set(scenario_1) scenario_2 = scenario_manager._get(scenario_1) assert scenario_1.config_id == \"foo\" assert scenario_2.config_id == \"foo\" # auto set & reload on name attribute assert scenario_1.name == \"bar\" assert scenario_2.name == \"bar\" scenario_1.name = \"zab\" assert scenario_1.name == \"zab\" assert scenario_2.name == \"zab\" scenario_2.name = \"baz\" assert scenario_1.name == \"baz\" assert 
scenario_2.name == \"baz\" # auto set & reload on sequences attribute assert len(scenario_1.sequences) == 0 assert len(scenario_2.sequences) == 0 scenario_1.sequences = {tmp_sequence_name: {}} assert len(scenario_1.sequences) == 1 assert scenario_1.sequences[tmp_sequence_name] == tmp_sequence assert len(scenario_2.sequences) == 1 assert scenario_2.sequences[tmp_sequence_name] == tmp_sequence scenario_2.add_sequences({sequence_1_name: []}) assert len(scenario_1.sequences) == 2 assert scenario_1.sequences == {sequence_1_name: sequence_1, tmp_sequence_name: tmp_sequence} assert len(scenario_2.sequences) == 2 assert scenario_2.sequences == {sequence_1_name: sequence_1, tmp_sequence_name: tmp_sequence} scenario_2.remove_sequences([tmp_sequence_name]) assert len(scenario_1.sequences) == 1 assert scenario_1.sequences == {sequence_1_name: sequence_1} assert len(scenario_2.sequences) == 1 assert scenario_2.sequences == {sequence_1_name: sequence_1} assert len(scenario_1.tasks) == 0 assert len(scenario_1.data_nodes) == 0 scenario_1.tasks = {task} assert len(scenario_1.tasks) == 1 assert scenario_1.tasks[task.config_id] == task assert len(scenario_1.data_nodes) == 2 assert len(scenario_2.tasks) == 1 assert scenario_2.tasks[task.config_id] == task assert len(scenario_2.data_nodes) == 2 assert len(scenario_1.additional_data_nodes) == 0 scenario_1.additional_data_nodes = {additional_dn} assert len(scenario_1.additional_data_nodes) == 1 assert scenario_1.additional_data_nodes[additional_dn.config_id] == additional_dn assert len(scenario_1.data_nodes) == 3 assert len(scenario_2.additional_data_nodes) == 1 assert scenario_2.additional_data_nodes[additional_dn.config_id] == additional_dn assert len(scenario_2.data_nodes) == 3 new_datetime = current_datetime + timedelta(1) new_datetime_1 = current_datetime + timedelta(2) # auto set & reload on name attribute assert scenario_1.creation_date == current_datetime assert scenario_2.creation_date == current_datetime scenario_1.creation_date = new_datetime_1 assert scenario_1.creation_date == new_datetime_1 assert scenario_2.creation_date == new_datetime_1 scenario_2.creation_date = new_datetime assert scenario_1.creation_date == new_datetime assert scenario_2.creation_date == new_datetime # auto set & reload on cycle attribute assert scenario_1.cycle is None assert scenario_2.cycle is None scenario_1.cycle = tmp_cycle assert scenario_1.cycle == tmp_cycle assert scenario_2.cycle == tmp_cycle scenario_2.cycle = cycle assert scenario_1.cycle == cycle assert scenario_2.cycle == cycle # auto set & reload on is_primary attribute assert scenario_1.is_primary assert scenario_2.is_primary scenario_1.is_primary = False assert not scenario_1.is_primary assert not scenario_2.is_primary scenario_2.is_primary = True assert scenario_1.is_primary assert scenario_2.is_primary # auto set & reload on subscribers attribute assert len(scenario_1.subscribers) == 0 assert len(scenario_2.subscribers) == 0 scenario_1.subscribers.append(_Subscriber(print, [])) assert len(scenario_1.subscribers) == 1 assert len(scenario_2.subscribers) == 1 scenario_2.subscribers.append(_Subscriber(print, [])) assert len(scenario_1.subscribers) == 2 assert len(scenario_2.subscribers) == 2 scenario_1.subscribers.clear() assert len(scenario_1.subscribers) == 0 assert len(scenario_2.subscribers) == 0 scenario_1.subscribers.extend([_Subscriber(print, []), _Subscriber(map, [])]) assert len(scenario_1.subscribers) == 2 assert len(scenario_2.subscribers) == 2 scenario_1.subscribers.remove(_Subscriber(print, [])) 
assert len(scenario_1.subscribers) == 1 assert len(scenario_2.subscribers) == 1 scenario_1.subscribers + print + len assert len(scenario_1.subscribers) == 3 assert len(scenario_2.subscribers) == 3 scenario_1.subscribers = [] assert len(scenario_1.subscribers) == 0 assert len(scenario_2.subscribers) == 0 assert len(scenario_1.tags) == 0 scenario_1.tags = {\"hi\"} assert len(scenario_1.tags) == 1 assert len(scenario_2.tags) == 1 # auto set & reload on properties attribute assert scenario_1.properties == {\"name\": \"baz\"} assert scenario_2.properties == {\"name\": \"baz\"} scenario_1._properties[\"qux\"] = 4 assert scenario_1.properties[\"qux\"] == 4 assert scenario_2.properties[\"qux\"] == 4 assert scenario_1.properties == {\"name\": \"baz\", \"qux\": 4} assert scenario_2.properties == {\"name\": \"baz\", \"qux\": 4} scenario_2._properties[\"qux\"] = 5 assert scenario_1.properties[\"qux\"] == 5 assert scenario_2.properties[\"qux\"] == 5 scenario_1.properties[\"temp_key_1\"] = \"temp_value_1\" scenario_1.properties[\"temp_key_2\"] = \"temp_value_2\" assert scenario_1.properties == { \"name\": \"baz\", \"qux\": 5, \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } assert scenario_2.properties == { \"name\": \"baz\", \"qux\": 5, \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } scenario_1.properties.pop(\"temp_key_1\") assert \"temp_key_1\" not in scenario_1.properties.keys() assert \"temp_key_1\" not in scenario_1.properties.keys() assert scenario_1.properties == { \"name\": \"baz\", \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } assert scenario_2.properties == { \"name\": \"baz\", \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } scenario_2.properties.pop(\"temp_key_2\") assert scenario_1.properties == {\"name\": \"baz\", \"qux\": 5} assert scenario_2.properties == {\"name\": \"baz\", \"qux\": 5} assert \"temp_key_2\" not in scenario_1.properties.keys() assert \"temp_key_2\" not in scenario_2.properties.keys() scenario_1.properties[\"temp_key_3\"] = 0 assert scenario_1.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 0} assert scenario_2.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 0} scenario_1.properties.update({\"temp_key_3\": 1}) assert scenario_1.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 1} assert scenario_2.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 1} scenario_1.properties.update(dict()) assert scenario_1.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 1} assert scenario_2.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 1} scenario_1.properties[\"temp_key_4\"] = 0 scenario_1.properties[\"temp_key_5\"] = 0 with scenario_1 as scenario: assert scenario.config_id == \"foo\" assert len(scenario.tasks) == 1 assert len(scenario.sequences) == 1 assert scenario.sequences[\"sequence_1\"] == sequence_1 assert scenario.tasks[task.config_id] == task assert len(scenario.additional_data_nodes) == 1 assert scenario.additional_data_nodes[additional_dn.config_id] == additional_dn assert scenario.creation_date == new_datetime assert scenario.cycle == cycle assert scenario.is_primary assert len(scenario.subscribers) == 0 assert len(scenario.tags) == 1 assert scenario._is_in_context assert scenario.name == \"baz\" assert scenario.properties[\"qux\"] == 5 assert scenario.properties[\"temp_key_3\"] == 1 assert scenario.properties[\"temp_key_4\"] == 0 assert scenario.properties[\"temp_key_5\"] == 0 new_datetime_2 = new_datetime + timedelta(5) scenario.config_id = \"foo\" 
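# Everything written inside this `with scenario_1 as scenario:` block (the config_id
# assignment above and the writes that follow) is buffered while in context: the asserts
# further down still observe the pre-context values, and the changes are only flushed to
# scenario_1 once the block exits.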
scenario.tasks = set() scenario.additional_data_nodes = set() scenario.remove_sequences([sequence_1_name]) scenario.creation_date = new_datetime_2 scenario.cycle = None scenario.is_primary = False scenario.subscribers = [print] scenario.tags = None scenario.name = \"qux\" scenario.properties[\"qux\"] = 9 scenario.properties.pop(\"temp_key_3\") scenario.properties.pop(\"temp_key_4\") scenario.properties.update({\"temp_key_4\": 1}) scenario.properties.update({\"temp_key_5\": 2}) scenario.properties.pop(\"temp_key_5\") scenario.properties.update(dict()) assert scenario.config_id == \"foo\" assert len(scenario.sequences) == 1 assert scenario.sequences[sequence_1_name] == sequence_1 assert len(scenario.tasks) == 1 assert scenario.tasks[task.config_id] == task assert len(scenario.additional_data_nodes) == 1 assert scenario.additional_data_nodes[additional_dn.config_id] == additional_dn assert scenario.creation_date == new_datetime assert scenario.cycle == cycle assert scenario.is_primary assert len(scenario.subscribers) == 0 assert len(scenario.tags) == 1 assert scenario._is_in_context assert scenario.name == \"baz\" assert scenario.properties[\"qux\"] == 5 assert scenario.properties[\"temp_key_3\"] == 1 assert scenario.properties[\"temp_key_4\"] == 0 assert scenario.properties[\"temp_key_5\"] == 0 assert scenario_1.config_id == \"foo\" assert len(scenario_1.sequences) == 0 assert len(scenario_1.tasks) == 0 assert len(scenario_1.additional_data_nodes) == 0 assert scenario_1.tasks == {} assert scenario_1.additional_data_nodes == {} assert scenario_1.creation_date == new_datetime_2 assert scenario_1.cycle is None assert not scenario_1.is_primary assert len(scenario_1.subscribers) == 1 assert len(scenario_1.tags) == 0 assert not scenario_1._is_in_context assert scenario_1.properties[\"qux\"] == 9 assert \"temp_key_3\" not in scenario_1.properties.keys() assert scenario_1.properties[\"temp_key_4\"] == 1 assert \"temp_key_5\" not in scenario_1.properties.keys() def test_is_deletable(): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_deletable\") as mock_submit: scenario = Scenario(\"foo\", [], {}) scenario.is_deletable() mock_submit.assert_called_once_with(scenario) def test_submit_scenario(): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._submit\") as mock_submit: scenario = Scenario(\"foo\", [], {}) scenario.submit(force=False) mock_submit.assert_called_once_with(scenario, None, False, False, None) def test_subscribe_scenario(): with mock.patch(\"src.taipy.core.subscribe_scenario\") as mock_subscribe: scenario = Scenario(\"foo\", [], {}) scenario.subscribe(None) mock_subscribe.assert_called_once_with(None, None, scenario) def test_unsubscribe_scenario(): with mock.patch(\"src.taipy.core.unsubscribe_scenario\") as mock_unsubscribe: scenario = Scenario(\"foo\", [], {}) scenario.unsubscribe(None) mock_unsubscribe.assert_called_once_with(None, None, scenario) def test_add_tag_scenario(): with mock.patch(\"src.taipy.core.tag\") as mock_add_tag: scenario = Scenario(\"foo\", [], {}) scenario.add_tag(\"tag\") mock_add_tag.assert_called_once_with(scenario, \"tag\") def test_remove_tag_scenario(): with mock.patch(\"src.taipy.core.untag\") as mock_remove_tag: scenario = Scenario(\"foo\", [], {}) scenario.remove_tag(\"tag\") mock_remove_tag.assert_called_once_with(scenario, \"tag\") def test_get_inputs_outputs_intermediate_data_nodes(): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") 
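# get_inputs/get_outputs/get_intermediate classify the data nodes of the task graphs drawn
# in the ASCII diagrams below: nodes that are only consumed are inputs, nodes that are only
# produced are outputs, and nodes that are both produced and consumed are intermediate.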
data_node_3 = DataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4}, {}, set(), ScenarioId(\"s1\")) # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert scenario.get_inputs() == {data_node_1, data_node_2} assert scenario.get_outputs() == {data_node_6, data_node_7} assert scenario.get_intermediate() == {data_node_3, data_node_4, data_node_5} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4}, {}, set(), ScenarioId(\"s1\")) # s1 --- t2 ---> s5 ------ # | | # |---> t1 ---| -----> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert scenario.get_inputs() == {data_node_1, data_node_2} assert scenario.get_outputs() == {data_node_6, data_node_7} assert scenario.get_intermediate() == {data_node_4, data_node_5} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = DataNode(\"d8\", Scope.SCENARIO, \"s8\") data_node_9 = DataNode(\"d9\", Scope.SCENARIO, \"s9\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"t5\", {}, print, [data_node_8], [data_node_9], TaskId(\"t5\")) task_6 = Task(\"t6\", {}, print, [data_node_7, data_node_9], id=TaskId(\"t6\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4, task_5, task_6}, {}, set(), ScenarioId(\"s1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 ---> t6 # | # s8 -------> t5 -------> s9 ------------------ assert scenario.get_inputs() == {data_node_1, data_node_2, data_node_6, data_node_8} assert scenario.get_outputs() == set() assert scenario.get_intermediate() 
== {data_node_5, data_node_4, data_node_7, data_node_9} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = DataNode(\"hugh\", Scope.SCENARIO, \"s8\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4, data_node_6], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_8], None, TaskId(\"t5\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4, task_5}, {}, set(), ScenarioId(\"sc1\")) # s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 # t2 ---> s5 | # s8 ---> t5 s6 --| assert scenario.get_inputs() == {data_node_1, data_node_2, data_node_8, data_node_6} assert scenario.get_outputs() == {data_node_5, data_node_7} assert scenario.get_intermediate() == {data_node_4} def test_is_ready_to_run(): data_node_1 = PickleDataNode(\"foo\", Scope.SCENARIO, \"s1\", properties={\"default_data\": 1}) data_node_2 = PickleDataNode(\"bar\", Scope.SCENARIO, \"s2\", properties={\"default_data\": 2}) data_node_4 = PickleDataNode(\"qux\", Scope.SCENARIO, \"s4\", properties={\"default_data\": 4}) data_node_5 = PickleDataNode(\"quux\", Scope.SCENARIO, \"s5\", properties={\"default_data\": 5}) data_node_6 = PickleDataNode(\"quuz\", Scope.SCENARIO, \"s6\", properties={\"default_data\": 6}) data_node_7 = PickleDataNode(\"corge\", Scope.SCENARIO, \"s7\", properties={\"default_data\": 7}) data_node_8 = PickleDataNode(\"d8\", Scope.SCENARIO, \"s8\", properties={\"default_data\": 8}) data_node_9 = PickleDataNode(\"d9\", Scope.SCENARIO, \"s9\", properties={\"default_data\": 9}) task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"t5\", {}, print, [data_node_8], [data_node_9], TaskId(\"t5\")) task_6 = Task(\"t6\", {}, print, [data_node_7, data_node_9], id=TaskId(\"t6\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4, task_5, task_6}, {}, set(), ScenarioId(\"s1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 ---> t6 # | # s8 -------> t5 -------> s9 ------------------ assert scenario.get_inputs() == {data_node_1, data_node_2, data_node_6, data_node_8} data_manager = _DataManagerFactory._build_manager() data_manager._delete_all() for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7, data_node_8, data_node_9]: data_manager._set(dn) assert scenario.is_ready_to_run() data_node_1.edit_in_progress = True assert not scenario.is_ready_to_run() data_node_2.edit_in_progress = True assert not scenario.is_ready_to_run() data_node_6.edit_in_progress = True data_node_8.edit_in_progress = True assert not scenario.is_ready_to_run() data_node_1.edit_in_progress = False data_node_2.edit_in_progress = False 
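# The test only flips edit_in_progress on the scenario's input data nodes (s1, s2, s6, s8);
# clearing the remaining flags below makes is_ready_to_run() return True again.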
data_node_6.edit_in_progress = False data_node_8.edit_in_progress = False assert scenario.is_ready_to_run() def test_data_nodes_being_edited(): data_node_1 = PickleDataNode(\"foo\", Scope.SCENARIO, \"s1\", properties={\"default_data\": 1}) data_node_2 = PickleDataNode(\"bar\", Scope.SCENARIO, \"s2\", properties={\"default_data\": 2}) data_node_4 = PickleDataNode(\"qux\", Scope.SCENARIO, \"s4\", properties={\"default_data\": 4}) data_node_5 = PickleDataNode(\"quux\", Scope.SCENARIO, \"s5\", properties={\"default_data\": 5}) data_node_6 = PickleDataNode(\"quuz\", Scope.SCENARIO, \"s6\", properties={\"default_data\": 6}) data_node_7 = PickleDataNode(\"corge\", Scope.SCENARIO, \"s7\", properties={\"default_data\": 7}) data_node_8 = PickleDataNode(\"d8\", Scope.SCENARIO, \"s8\", properties={\"default_data\": 8}) data_node_9 = PickleDataNode(\"d9\", Scope.SCENARIO, \"s9\", properties={\"default_data\": 9}) task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"t5\", {}, print, [data_node_8], [data_node_9], TaskId(\"t5\")) task_6 = Task(\"t6\", {}, print, [data_node_7, data_node_9], id=TaskId(\"t6\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4, task_5, task_6}, {}, set(), ScenarioId(\"s1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 ---> t6 # | # s8 -------> t5 -------> s9 ------------------ data_manager = _DataManagerFactory._build_manager() for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7, data_node_8, data_node_9]: data_manager._set(dn) assert len(scenario.data_nodes_being_edited()) == 0 assert scenario.data_nodes_being_edited() == set() data_node_1.edit_in_progress = True assert len(scenario.data_nodes_being_edited()) == 1 assert scenario.data_nodes_being_edited() == {data_node_1} data_node_2.edit_in_progress = True data_node_6.edit_in_progress = True data_node_8.edit_in_progress = True assert len(scenario.data_nodes_being_edited()) == 4 assert scenario.data_nodes_being_edited() == {data_node_1, data_node_2, data_node_6, data_node_8} data_node_4.edit_in_progress = True data_node_5.edit_in_progress = True data_node_9.edit_in_progress = True assert len(scenario.data_nodes_being_edited()) == 7 assert scenario.data_nodes_being_edited() == { data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_8, data_node_9, } data_node_1.edit_in_progress = False data_node_2.edit_in_progress = False data_node_6.edit_in_progress = False data_node_8.edit_in_progress = False assert len(scenario.data_nodes_being_edited()) == 3 assert scenario.data_nodes_being_edited() == {data_node_4, data_node_5, data_node_9} data_node_4.edit_in_progress = False data_node_5.edit_in_progress = False data_node_7.edit_in_progress = True assert len(scenario.data_nodes_being_edited()) == 2 assert scenario.data_nodes_being_edited() == {data_node_7, data_node_9} data_node_7.edit_in_progress = False data_node_9.edit_in_progress = False assert len(scenario.data_nodes_being_edited()) == 0 assert scenario.data_nodes_being_edited() == set() def test_get_tasks(): task_1 = Task(\"grault\", {}, print, id=TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, 
print, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, id=TaskId(\"t4\")) scenario_1 = Scenario(\"scenario_1\", {task_1, task_2, task_3, task_4}, {}, set(), ScenarioId(\"s1\")) assert scenario_1.tasks == {\"grault\": task_1, \"garply\": task_2, \"waldo\": task_3, \"fred\": task_4} task_5 = Task(\"wallo\", {}, print, id=TaskId(\"t5\")) scenario_2 = Scenario(\"scenario_2\", {task_1, task_2, task_3, task_4, task_5}, {}, set(), ScenarioId(\"s2\")) assert scenario_2.tasks == {\"grault\": task_1, \"garply\": task_2, \"waldo\": task_3, \"fred\": task_4, \"wallo\": task_5} def test_get_set_of_tasks(): task_1 = Task(\"grault\", {}, print, id=TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, id=TaskId(\"t4\")) scenario_1 = Scenario(\"scenario_1\", {task_1, task_2, task_3, task_4}, {}, set(), ScenarioId(\"s1\")) assert scenario_1._get_set_of_tasks() == {task_1, task_2, task_3, task_4} task_5 = Task(\"wallo\", {}, print, id=TaskId(\"t5\")) scenario_2 = Scenario(\"scenario_2\", {task_1, task_2, task_3, task_4, task_5}, {}, set(), ScenarioId(\"s2\")) assert scenario_2._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} def test_get_sorted_tasks(): def _assert_equal(tasks_a, tasks_b) -> bool: if len(tasks_a) != len(tasks_b): return False for i in range(len(tasks_a)): task_a, task_b = tasks_a[i], tasks_b[i] if isinstance(task_a, list) and isinstance(task_b, list): if not _assert_equal(task_a, task_b): return False elif isinstance(task_a, list) or isinstance(task_b, list): return False else: index_task_b = tasks_b.index(task_a) if any([isinstance(task_b, list) for task_b in tasks_b[i : index_task_b + 1]]): return False return True # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = InMemoryDataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_1 = Scenario(\"scenario_1\", {task_1, task_2, task_3, task_4}, {}, [], ScenarioId(\"s1\")) assert scenario_1.get_inputs() == {data_node_1, data_node_2} assert scenario_1._get_set_of_tasks() == {task_1, task_2, task_3, task_4} _assert_equal(scenario_1._get_sorted_tasks(), [[task_1], [task_2, task_4], [task_3]]) # s1 --- t2 ---> s5 # | | # |---> t1 ---| -----> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = 
Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_2 = Scenario(\"scenario_2\", {task_1, task_2, task_3, task_4}, {}, [], ScenarioId(\"s2\")) assert scenario_2.get_inputs() == {data_node_1, data_node_2} assert scenario_2._get_set_of_tasks() == {task_1, task_2, task_3, task_4} _assert_equal(scenario_2._get_sorted_tasks(), [[task_1, task_2], [task_3, task_4]]) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_3 = Scenario(\"quest\", [task_4, task_2, task_1, task_3], {}, [], scenario_id=ScenarioId(\"s3\")) assert scenario_3.get_inputs() == {data_node_1, data_node_2, data_node_6} assert scenario_3._get_set_of_tasks() == {task_1, task_2, task_3, task_4} assert _assert_equal(scenario_3._get_sorted_tasks(), [[task_2, task_1], [task_4, task_3]]) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 ---> t6 # | # s8 -------> t5 -------> s9 ------------------ data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = InMemoryDataNode(\"d8\", Scope.SCENARIO, \"s8\") data_node_9 = InMemoryDataNode(\"d9\", Scope.SCENARIO, \"s9\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"t5\", {}, print, [data_node_8], [data_node_9], TaskId(\"t5\")) task_6 = Task(\"t6\", {}, print, [data_node_7, data_node_9], id=TaskId(\"t6\")) scenario_4 = Scenario(\"scenario_3\", [task_1, task_2, task_3, task_4, task_5, task_6], {}, [], ScenarioId(\"s4\")) assert scenario_4.get_inputs() == {data_node_1, data_node_2, data_node_6, data_node_8} assert scenario_4._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5, task_6} _assert_equal(scenario_4._get_sorted_tasks(), [[task_1, task_2, task_5], [task_3, task_4], [task_6]]) # s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 # t2 ---> s5 | # s8 ---> t5 s6 --| data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = 
InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = InMemoryDataNode(\"hugh\", Scope.SCENARIO, \"s8\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4, data_node_6], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_8], None, TaskId(\"t5\")) scenario_5 = Scenario(\"scenario_4\", [task_1, task_2, task_3, task_4, task_5], {}, [], ScenarioId(\"s5\")) assert scenario_5.get_inputs() == {data_node_1, data_node_2, data_node_8, data_node_6} assert scenario_5._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} _assert_equal(scenario_5._get_sorted_tasks(), [[task_1, task_2, task_5], [task_3, task_4]]) # p1 s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s5 # p2 t2 ---> s4 ---> t3 # p3 s6 ---> t5 data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, output=[data_node_4], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_6], None, TaskId(\"t5\")) scenario_6 = Scenario(\"quest\", [task_1, task_2, task_3, task_4, task_5], {}, [], ScenarioId(\"s6\")) assert scenario_6.get_inputs() == {data_node_1, data_node_2, data_node_6} assert scenario_6._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} _assert_equal(scenario_6._get_sorted_tasks(), [[task_5, task_2, task_1], [task_4, task_3]]) # p1 s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s5 # p2 t2 ---> s4 ---> t3 # p3 s6 ---> t5 ---> s4 ---> t4 ---> s5 data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, output=[data_node_4], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_6], [data_node_4], None, TaskId(\"t5\")) scenario_7 = Scenario(\"quest\", [task_4, task_1, task_2, task_3, task_5], {}, [], scenario_id=ScenarioId(\"s7\")) assert scenario_7.get_inputs() == {data_node_1, data_node_2, data_node_6} assert scenario_7._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} _assert_equal(scenario_7._get_sorted_tasks(), [[task_5, task_2, task_1], [task_4, task_3]]) # p1 s1 --- # | # 
|---> t1 ---| -----> t3 # | | | # s2 --- ---> s3 ---> t4 ---> s4 # p2 t2 ---> s3 ---> t3 # p3 s5 ---> t5 ---> s3 ---> t4 ---> s4 # p4 s3 ---> t4 ---> s4 data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"qux\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"quux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quuz\", Scope.SCENARIO, \"s5\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_3], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, output=[data_node_3], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_3], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_3], [data_node_4], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_5], [data_node_3], TaskId(\"t5\")) scenario_8 = Scenario(\"quest\", [task_1, task_2, task_3, task_4, task_5], {}, [], scenario_id=ScenarioId(\"s8\")) assert scenario_8.get_inputs() == {data_node_1, data_node_2, data_node_5} assert scenario_8._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} _assert_equal(scenario_8._get_sorted_tasks(), [[task_5, task_2, task_1], [task_3, task_4]]) def test_add_and_remove_sequences(): data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s3\") data_node_4 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s5\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_3], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_3], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_3], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_3], [data_node_4], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_5], [data_node_3], TaskId(\"t5\")) scenario_1 = Scenario(\"quest\", [task_1, task_2, task_3, task_4, task_5], {}, [], scenario_id=ScenarioId(\"s1\")) sequence_1 = Sequence({\"name\": \"sequence_1\"}, [task_1], SequenceId(f\"SEQUENCE_sequence_1_{scenario_1.id}\")) sequence_2 = Sequence({\"name\": \"sequence_2\"}, [task_1, task_2], SequenceId(f\"SEQUENCE_sequence_2_{scenario_1.id}\")) sequence_3 = Sequence( {\"name\": \"sequence_3\"}, [task_1, task_5, task_3], SequenceId(f\"SEQUENCE_sequence_3_{scenario_1.id}\") ) task_manager = _TaskManagerFactory._build_manager() data_manager = _DataManagerFactory._build_manager() scenario_manager = _ScenarioManagerFactory._build_manager() for dn in [data_node_1, data_node_2, data_node_3, data_node_4, data_node_5]: data_manager._set(dn) for t in [task_1, task_2, task_3, task_4, task_5]: task_manager._set(t) scenario_manager._set(scenario_1) assert scenario_1.get_inputs() == {data_node_1, data_node_2, data_node_5} assert scenario_1._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} assert len(scenario_1.sequences) == 0 scenario_1.sequences = {\"sequence_1\": {\"tasks\": [task_1]}} assert scenario_1.sequences == {\"sequence_1\": sequence_1} scenario_1.add_sequences({\"sequence_2\": [task_1, task_2]}) assert scenario_1.sequences == {\"sequence_1\": sequence_1, \"sequence_2\": sequence_2} scenario_1.remove_sequences([\"sequence_1\"]) assert scenario_1.sequences == {\"sequence_2\": sequence_2} scenario_1.add_sequences({\"sequence_1\": [task_1], \"sequence_3\": [task_1, task_5, task_3]}) assert scenario_1.sequences == { \"sequence_2\": 
sequence_2, \"sequence_1\": sequence_1, \"sequence_3\": sequence_3, } scenario_1.remove_sequences([\"sequence_2\", \"sequence_3\"]) assert scenario_1.sequences == {\"sequence_1\": sequence_1} def test_check_consistency(): data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s3\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = InMemoryDataNode(\"d8\", Scope.SCENARIO, \"s8\") data_node_9 = InMemoryDataNode(\"d9\", Scope.SCENARIO, \"s9\") scenario_0 = Scenario(\"scenario_0\", [], {}) assert scenario_0._is_consistent() task_1 = Task(\"foo\", {}, print, [data_node_1], [data_node_2], TaskId(\"t1\")) scenario_1 = Scenario(\"scenario_1\", [task_1], {}) assert scenario_1._is_consistent() # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_2 = Scenario(\"scenario_2\", {task_1, task_2, task_3, task_4}, {}, [], ScenarioId(\"s1\")) assert scenario_2._is_consistent() # s1 --- t2 ---> s5 # | | # |---> t1 ---| -----> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_3 = Scenario(\"scenario_3\", {task_1, task_2, task_3, task_4}, {}, [], ScenarioId(\"s2\")) assert scenario_3._is_consistent() # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_4 = Scenario(\"scenario_4\", [task_4, task_2, task_1, task_3], {}, [], scenario_id=ScenarioId(\"s3\")) assert scenario_4._is_consistent() # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 ---> t6 # | # s8 -------> t5 -------> s9 ------------------ task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"t5\", {}, print, [data_node_8], [data_node_9], TaskId(\"t5\")) task_6 = Task(\"t6\", {}, print, [data_node_7, data_node_9], id=TaskId(\"t6\")) scenario_5 = 
Scenario(\"scenario_5\", [task_1, task_2, task_3, task_4, task_5, task_6], {}, [], ScenarioId(\"s4\")) assert scenario_5._is_consistent() # s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 # t2 ---> s5 | # s8 ---> t5 s6 --| task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4, data_node_6], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_8], None, TaskId(\"t5\")) scenario_6 = Scenario(\"scenario_6\", [task_1, task_2, task_3, task_4, task_5], {}, [], ScenarioId(\"s5\")) assert scenario_6._is_consistent() # p1 s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s5 # p2 t2 ---> s4 ---> t3 # p3 s6 ---> t5 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_4], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_6], None, TaskId(\"t5\")) scenario_7 = Scenario(\"scenario_7\", [task_1, task_2, task_3, task_4, task_5], {}, [], ScenarioId(\"s6\")) assert scenario_7._is_consistent() # p1 s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s5 # p2 t2 ---> s4 ---> t3 # p3 s6 ---> t5 ---> s4 ---> t4 ---> s5 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_4], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_6], [data_node_4], None, TaskId(\"t5\")) scenario_8 = Scenario(\"scenario_8\", [task_4, task_1, task_2, task_3, task_5], {}, [], scenario_id=ScenarioId(\"s7\")) assert scenario_8._is_consistent() # p1 s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s3 ---> t4 ---> s4 # p2 t2 ---> s3 ---> t3 # p3 s5 ---> t5 ---> s3 ---> t4 ---> s4 # p4 s3 ---> t4 ---> s4 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_3], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_3], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_3], [data_node_4], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_5], [data_node_3], TaskId(\"t5\")) scenario_9 = Scenario(\"scenario_9\", [task_1, task_2, task_3, task_4, task_5], {}, [], scenario_id=ScenarioId(\"s8\")) assert scenario_9._is_consistent() "} {"text": "from src.taipy.core._version._version import _Version from taipy.config.config import Config def test_create_version(): v = _Version(\"foo\", config=Config.configure_data_node(\"dn\")) assert v.id == \"foo\" assert v.config is not None "} {"text": "import multiprocessing from unittest.mock import patch from src.taipy.core import Core, taipy from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from taipy.config.config import Config from tests.core.utils import assert_true_after_time from ...conftest import init_config m = 
multiprocessing.Manager() def twice(a): return a * 2 def triple(a): return a * 3 def migrate_pickle_path(dn): dn.path = \"bar.pkl\" return dn def migrate_skippable_task(task): task.skippable = True return task def migrate_foo_scenario(scenario): scenario.properties[\"foo\"] = \"bar\" return scenario def test_migrate_datanode(): scenario_v1 = submit_v1() init_config() Config.add_migration_function(\"2.0\", \"d1\", migrate_pickle_path) submit_v2() v1 = taipy.get(scenario_v1.id) assert v1.d1.version == \"2.0\" assert v1.d1.path == \"bar.pkl\" def test_migrate_datanode_in_standalone_mode(): scenario_v1 = submit_v1() init_config() Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) Config.add_migration_function(\"2.0\", \"d1\", migrate_pickle_path) scenario_cfg_v2 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg_v2) jobs = _ScenarioManager._submit(scenario_v2) v1 = taipy.get(scenario_v1.id) assert v1.d1.version == \"2.0\" assert v1.d1.path == \"bar.pkl\" assert_true_after_time(jobs[0].is_completed) core.stop() def test_migrate_task(): scenario_v1 = submit_v1() init_config() Config.add_migration_function(\"2.0\", \"my_task\", migrate_skippable_task) submit_v2() v1 = taipy.get(scenario_v1.id) assert v1.my_task.version == \"2.0\" assert v1.my_task.skippable is True def test_migrate_task_in_standalone_mode(): scenario_v1 = submit_v1() init_config() Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) Config.add_migration_function(\"2.0\", \"my_task\", migrate_skippable_task) scenario_cfg_v2 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg_v2) jobs = _ScenarioManager._submit(scenario_v2) v1 = taipy.get(scenario_v1.id) assert v1.my_task.version == \"2.0\" assert v1.my_task.skippable is True assert_true_after_time(jobs[0].is_completed) core.stop() def test_migrate_scenario(): scenario_v1 = submit_v1() init_config() Config.add_migration_function(\"2.0\", \"my_scenario\", migrate_foo_scenario) submit_v2() v1 = taipy.get(scenario_v1.id) assert v1.version == \"2.0\" assert v1.properties[\"foo\"] == \"bar\" def test_migrate_scenario_in_standalone_mode(): scenario_v1 = submit_v1() init_config() Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) Config.add_migration_function(\"2.0\", \"my_scenario\", migrate_foo_scenario) scenario_cfg_v2 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg_v2) jobs = _ScenarioManager._submit(scenario_v2) v1 = taipy.get(scenario_v1.id) assert v1.version == \"2.0\" assert v1.properties[\"foo\"] == \"bar\" assert_true_after_time(jobs[0].is_completed) core.stop() def test_migrate_all_entities(): scenario_v1 = submit_v1() init_config() Config.add_migration_function(\"2.0\", \"d1\", migrate_pickle_path) Config.add_migration_function(\"2.0\", \"my_task\", migrate_skippable_task) Config.add_migration_function(\"2.0\", \"my_scenario\", migrate_foo_scenario) submit_v2() v1 = taipy.get(scenario_v1.id) assert v1.d1.version == \"2.0\" assert v1.my_task.version == \"2.0\" assert v1.d1.path == \"bar.pkl\" assert v1.my_task.skippable is True assert v1.properties[\"foo\"] == \"bar\" def test_migrate_all_entities_in_standalone_mode(): scenario_v1 = submit_v1() init_config() 
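# Standalone-mode counterpart of test_migrate_all_entities: only the scenario-level migration
# (migrate_foo_scenario) is registered below, so the assertions here only cover the migrated
# scenario's version and its \"foo\" property.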
Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) Config.add_migration_function(\"2.0\", \"my_scenario\", migrate_foo_scenario) scenario_cfg_v2 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg_v2) jobs = _ScenarioManager._submit(scenario_v2) v1 = taipy.get(scenario_v1.id) assert v1.version == \"2.0\" assert v1.properties[\"foo\"] == \"bar\" assert_true_after_time(jobs[0].is_completed) core.stop() def test_migrate_compatible_version(): scenario_cfg = config_scenario_v1() # Production 1.0 with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() scenario_v1 = _ScenarioManager._create(scenario_cfg) _ScenarioManager._submit(scenario_v1) assert scenario_v1.d2.read() == 2 assert len(_DataManager._get_all(version_number=\"all\")) == 2 core.stop() init_config() scenario_cfg = config_scenario_v1() # Production 2.0 is a compatible version with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg) _ScenarioManager._submit(scenario_v2) assert scenario_v2.d2.read() == 2 assert len(_DataManager._get_all(version_number=\"all\")) == 4 core.stop() init_config() # Production 2.1 Config.add_migration_function( target_version=\"2.1\", config=\"d1\", migration_fct=migrate_pickle_path, ) scenario_cfg_v2_1 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() scenario_v2_1 = _ScenarioManager._create(scenario_cfg_v2_1) _ScenarioManager._submit(scenario_v2_1) core.stop() assert scenario_v2_1.d2.read() == 6 assert len(_DataManager._get_all(version_number=\"all\")) == 6 v1 = taipy.get(scenario_v1.id) assert v1.d1.version == \"2.1\" assert v1.d1.path == \"bar.pkl\" v2 = taipy.get(scenario_v2.id) assert v2.d1.version == \"2.1\" assert v2.d1.path == \"bar.pkl\" def submit_v1(): scenario_cfg_v1 = config_scenario_v1() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() scenario_v1 = _ScenarioManager._create(scenario_cfg_v1) _ScenarioManager._submit(scenario_v1) core.stop() return scenario_v1 def submit_v2(): scenario_cfg_v2 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg_v2) _ScenarioManager._submit(scenario_v2) core.stop() return scenario_v2 def config_scenario_v1(): dn1 = Config.configure_pickle_data_node(id=\"d1\", default_data=1) dn2 = Config.configure_pickle_data_node(id=\"d2\") task_cfg = Config.configure_task(\"my_task\", twice, dn1, dn2) scenario_cfg = Config.configure_scenario(\"my_scenario\", [task_cfg]) scenario_cfg.add_sequences({\"my_sequence\": [task_cfg]}) return scenario_cfg def config_scenario_v2(): dn1 = Config.configure_pickle_data_node(id=\"d1\", default_data=2) dn2 = Config.configure_pickle_data_node(id=\"d2\") task_cfg = Config.configure_task(\"my_task\", triple, dn1, dn2) scenario_cfg = Config.configure_scenario(\"my_scenario\", [task_cfg]) scenario_cfg.add_sequences({\"my_scenario\": [task_cfg]}) return scenario_cfg "} {"text": "import os import pytest from src.taipy.core._version._version import _Version from src.taipy.core._version._version_fs_repository import _VersionFSRepository from src.taipy.core._version._version_sql_repository import _VersionSQLRepository from src.taipy.core.exceptions import ModelNotFound class 
TestVersionFSRepository: @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_save_and_load(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) obj = repository._load(_version.id) assert isinstance(obj, _Version) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_exists(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) assert repository._exists(_version.id) assert not repository._exists(\"not-existed-version\") @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_load_all(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_load_all_with_filters(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" _version.name = f\"_version_{i}\" repository._save(_version) objs = repository._load_all(filters=[{\"id\": \"_version_2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) repository._delete(_version.id) with pytest.raises(ModelNotFound): repository._load(_version.id) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete_all(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete_many(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_search(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" _version.name = f\"_version_{i}\" repository._save(_version) assert len(repository._load_all()) == 10 objs = repository._search(\"id\", \"_version_2\") assert len(objs) == 1 assert isinstance(objs[0], _Version) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_export(self, tmpdir, _version, repo, init_sql_repo): repository = repo() repository._save(_version) repository._export(_version.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _VersionFSRepository else os.path.join(tmpdir.strpath, \"version\") assert os.path.exists(os.path.join(dir_path, f\"{_version.id}.json\")) "} {"text": "from time import sleep from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._cli._version_cli import _VersionCLI from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from 
src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from ...conftest import init_config def test_delete_version(caplog): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.1\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.1\"]): core = Core() core.run() core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() core.stop() all_versions = [version.id for version in _VersionManager._get_all()] production_version = _VersionManager._get_production_versions() assert len(all_versions) == 5 assert len(production_version) == 2 assert \"1.0\" in all_versions assert \"1.1\" in all_versions and \"1.1\" in production_version assert \"2.0\" in all_versions assert \"2.1\" in all_versions and \"2.1\" in production_version _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete\", \"1.0\"]): _VersionCLI.parse_arguments() assert \"Successfully delete version 1.0.\" in caplog.text all_versions = [version.id for version in _VersionManager._get_all()] assert len(all_versions) == 4 assert \"1.0\" not in all_versions # Test delete a non-existed version with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete\", \"non_exist_version\"]): _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text # Test delete production version will change the version from production to experiment with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete-production\", \"1.1\"]): _VersionCLI.parse_arguments() assert \"Successfully delete version 1.1 from the production version list.\" in caplog.text all_versions = [version.id for version in _VersionManager._get_all()] production_version = _VersionManager._get_production_versions() assert len(all_versions) == 4 assert \"1.1\" in all_versions and \"1.1\" not in production_version # Test delete a non-existed production version with pytest.raises(SystemExit) as e: with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete-production\", \"non_exist_version\"]): _VersionCLI.parse_arguments() assert str(e.value) == \"Version 'non_exist_version' is not a production version.\" def test_list_versions(capsys): with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() core.stop() sleep(0.05) with 
patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() core.stop() _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--list\"]): _VersionCLI.parse_arguments() out, _ = capsys.readouterr() version_list = str(out).strip().split(\"\\n\") assert len(version_list) == 6 # 5 versions with the header assert all(column in version_list[0] for column in [\"Version number\", \"Mode\", \"Creation date\"]) assert all(column in version_list[1] for column in [\"2.1\", \"Production\", \"latest\"]) assert all(column in version_list[2] for column in [\"2.0\", \"Experiment\"]) and \"latest\" not in version_list[2] assert all(column in version_list[3] for column in [\"1.1\", \"Production\"]) and \"latest\" not in version_list[3] assert all(column in version_list[4] for column in [\"1.0\", \"Experiment\"]) and \"latest\" not in version_list[4] assert \"Development\" in version_list[5] and \"latest\" not in version_list[5] def test_rename_version(caplog): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() dev_ver = _VersionManager._get_development_version() _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"non_exist_version\", \"1.1\"]): # This should raise an exception since version \"non_exist_version\" does not exist _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"1.0\", \"2.0\"]): # This should raise an exception since 2.0 already exists _VersionCLI.parse_arguments() assert \"Version name '2.0' is already used.\" in caplog.text _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"1.0\", \"1.1\"]): _VersionCLI.parse_arguments() assert _VersionManager._get(\"1.0\") is None assert [version.id for version in _VersionManager._get_all()].sort() == [dev_ver, \"1.1\", \"2.0\"].sort() # All entities are assigned to the new version assert len(_DataManager._get_all(\"1.1\")) == 2 assert len(_TaskManager._get_all(\"1.1\")) == 1 assert len(_SequenceManager._get_all(\"1.1\")) == 1 assert len(_ScenarioManager._get_all(\"1.1\")) == 1 assert len(_JobManager._get_all(\"1.1\")) == 1 _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"2.0\", \"2.1\"]): _VersionCLI.parse_arguments() assert 
_VersionManager._get(\"2.0\") is None assert [version.id for version in _VersionManager._get_all()].sort() == [dev_ver, \"1.1\", \"2.1\"].sort() assert _VersionManager._get_production_versions() == [\"2.1\"] # All entities are assigned to the new version assert len(_DataManager._get_all(\"2.1\")) == 2 assert len(_TaskManager._get_all(\"2.1\")) == 1 assert len(_SequenceManager._get_all(\"2.1\")) == 1 assert len(_ScenarioManager._get_all(\"2.1\")) == 1 assert len(_JobManager._get_all(\"2.1\")) == 1 def test_compare_version_config(caplog): scenario_config_1 = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_1) _ScenarioManager._submit(scenario) core.stop() init_config() scenario_config_2 = config_scenario() Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"bar.csv\") with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_2) _ScenarioManager._submit(scenario) core.stop() _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"non_exist_version\", \"2.0\"]): # This should raise an exception since version \"non_exist_version\" does not exist _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"non_exist_version\"]): # This should raise an exception since 2.0 already exists _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"1.0\"]): _VersionCLI.parse_arguments() assert \"There is no difference between version 1.0 Configuration and version 1.0 Configuration.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"2.0\"]): _VersionCLI.parse_arguments() expected_message = \"\"\"Differences between version 1.0 Configuration and version 2.0 Configuration: \\tDATA_NODE \"d2\" has attribute \"default_path\" modified: foo.csv -> bar.csv\"\"\" assert expected_message in caplog.text def twice(a): return a * 2 def config_scenario(): data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) scenario_config.add_sequences({\"my_sequence\": [task_config]}) return scenario_config "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import pytest from src.taipy.core._version._version import _Version from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config def test_save_and_get_version_entity(tmpdir): _VersionManager._repository.base_path = tmpdir assert len(_VersionManager._get_all()) == 0 version = _Version(id=\"foo\", config=Config._applied_config) _VersionManager._get_or_create(id=\"foo\", force=False) version_1 = _VersionManager._get(version.id) assert version_1.id == version.id assert Config._serializer._str(version_1.config) == Config._serializer._str(version.config) assert len(_VersionManager._get_all()) == 1 assert _VersionManager._get(version.id) == version "} {"text": "from time import sleep from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._cli._version_cli import _VersionCLI from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from ...conftest import init_config def test_delete_version(caplog, init_sql_repo): _ScenarioManagerFactory._build_manager() scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.1\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.1\"]): core = Core() core.run() core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() core.stop() all_versions = [version.id for version in _VersionManager._get_all()] production_version = _VersionManager._get_production_versions() assert len(all_versions) == 5 assert len(production_version) == 2 assert \"1.0\" in all_versions assert \"1.1\" in all_versions and \"1.1\" in production_version assert \"2.0\" in all_versions assert \"2.1\" in all_versions and \"2.1\" in production_version _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete\", \"1.0\"]): _VersionCLI.parse_arguments() assert \"Successfully delete version 1.0.\" in caplog.text all_versions = [version.id for version in _VersionManager._get_all()] assert len(all_versions) == 4 assert \"1.0\" not in 
all_versions # Test delete a non-existed version with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete\", \"non_exist_version\"]): _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text # Test delete production version will change the version from production to experiment with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete-production\", \"1.1\"]): _VersionCLI.parse_arguments() assert \"Successfully delete version 1.1 from the production version list.\" in caplog.text all_versions = [version.id for version in _VersionManager._get_all()] production_version = _VersionManager._get_production_versions() assert len(all_versions) == 4 assert \"1.1\" in all_versions and \"1.1\" not in production_version # Test delete a non-existed production version with pytest.raises(SystemExit) as e: with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete-production\", \"non_exist_version\"]): _VersionCLI.parse_arguments() assert str(e.value) == \"Version 'non_exist_version' is not a production version.\" def test_list_versions(capsys, init_sql_repo): _ScenarioManagerFactory._build_manager() with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() core.stop() _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--list\"]): _VersionCLI.parse_arguments() out, _ = capsys.readouterr() version_list = str(out).strip().split(\"\\n\") assert len(version_list) == 6 # 5 versions with the header assert all(column in version_list[0] for column in [\"Version number\", \"Mode\", \"Creation date\"]) assert all(column in version_list[1] for column in [\"2.1\", \"Production\", \"latest\"]) assert all(column in version_list[2] for column in [\"2.0\", \"Experiment\"]) and \"latest\" not in version_list[2] assert all(column in version_list[3] for column in [\"1.1\", \"Production\"]) and \"latest\" not in version_list[3] assert all(column in version_list[4] for column in [\"1.0\", \"Experiment\"]) and \"latest\" not in version_list[4] assert \"Development\" in version_list[5] and \"latest\" not in version_list[5] def test_rename_version(caplog, init_sql_repo): _ScenarioManagerFactory._build_manager() scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() dev_ver = _VersionManager._get_development_version() _VersionCLI.create_parser() with pytest.raises(SystemExit): with 
patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"non_exist_version\", \"1.1\"]): # This should raise an exception since version \"non_exist_version\" does not exist _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"1.0\", \"2.0\"]): # This should raise an exception since 2.0 already exists _VersionCLI.parse_arguments() assert \"Version name '2.0' is already used.\" in caplog.text _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"1.0\", \"1.1\"]): _VersionCLI.parse_arguments() assert _VersionManager._get(\"1.0\") is None assert [version.id for version in _VersionManager._get_all()].sort() == [dev_ver, \"1.1\", \"2.0\"].sort() # All entities are assigned to the new version assert len(_DataManager._get_all(\"1.1\")) == 2 assert len(_TaskManager._get_all(\"1.1\")) == 1 assert len(_SequenceManager._get_all(\"1.1\")) == 0 assert len(_ScenarioManager._get_all(\"1.1\")) == 1 assert len(_JobManager._get_all(\"1.1\")) == 1 _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"2.0\", \"2.1\"]): _VersionCLI.parse_arguments() assert _VersionManager._get(\"2.0\") is None assert [version.id for version in _VersionManager._get_all()].sort() == [dev_ver, \"1.1\", \"2.1\"].sort() assert _VersionManager._get_production_versions() == [\"2.1\"] # All entities are assigned to the new version assert len(_DataManager._get_all(\"2.1\")) == 2 assert len(_TaskManager._get_all(\"2.1\")) == 1 assert len(_SequenceManager._get_all(\"2.1\")) == 0 assert len(_ScenarioManager._get_all(\"2.1\")) == 1 assert len(_JobManager._get_all(\"2.1\")) == 1 def test_compare_version_config(caplog, init_sql_repo): _ScenarioManagerFactory._build_manager() scenario_config_1 = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_1) _ScenarioManager._submit(scenario) core.stop() init_config() Config.configure_core(repository_type=\"sql\", repository_properties={\"db_location\": init_sql_repo}) _ScenarioManagerFactory._build_manager() scenario_config_2 = config_scenario() Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"bar.csv\") with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_2) _ScenarioManager._submit(scenario) core.stop() _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"non_exist_version\", \"2.0\"]): # This should raise an exception since version \"non_exist_version\" does not exist _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"non_exist_version\"]): # This should raise an exception since 2.0 already exists _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"1.0\"]): _VersionCLI.parse_arguments() assert \"There is no 
difference between version 1.0 Configuration and version 1.0 Configuration.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"2.0\"]): _VersionCLI.parse_arguments() expected_message = \"\"\"Differences between version 1.0 Configuration and version 2.0 Configuration: \\tDATA_NODE \"d2\" has attribute \"default_path\" modified: foo.csv -> bar.csv\"\"\" assert expected_message in caplog.text def twice(a): return a * 2 def config_scenario(): data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) return scenario_config "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import multiprocessing import random import string from concurrent.futures import ProcessPoolExecutor from datetime import datetime, timedelta from functools import partial from time import sleep import pytest from src.taipy.core import taipy from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from taipy.config import Config from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked from tests.core.utils import assert_true_after_time # ################################ USER FUNCTIONS ################################## def multiply(nb1: float, nb2: float): sleep(0.1) return nb1 * nb2 def lock_multiply(lock, nb1: float, nb2: float): with lock: return multiply(nb1, nb2) def mult_by_2(n): return n * 2 def nothing(): return True def concat(a, b): return a + b def _error(): raise Exception # ################################ TEST METHODS ################################## def test_submit_task(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) before_creation = datetime.now() sleep(0.1) task = _create_task(multiply) output_dn_id = task.output[f\"{task.config_id}_output0\"].id _OrchestratorFactory._build_dispatcher() assert _DataManager._get(output_dn_id).last_edit_date > before_creation assert _DataManager._get(output_dn_id).job_ids == [] assert 
_DataManager._get(output_dn_id).is_ready_for_reading before_submission_creation = datetime.now() sleep(0.1) job = _Orchestrator.submit_task(task) sleep(0.1) after_submission_creation = datetime.now() assert _DataManager._get(output_dn_id).read() == 42 assert _DataManager._get(output_dn_id).last_edit_date > before_submission_creation assert _DataManager._get(output_dn_id).last_edit_date < after_submission_creation assert _DataManager._get(output_dn_id).job_ids == [job.id] assert _DataManager._get(output_dn_id).is_ready_for_reading assert job.is_completed() def test_submit_sequence_generate_unique_submit_id(): dn_1 = InMemoryDataNode(\"dn_config_id_1\", Scope.SCENARIO) dn_2 = InMemoryDataNode(\"dn_config_id_2\", Scope.SCENARIO) task_1 = Task(\"task_config_id_1\", {}, print, [dn_1]) task_2 = Task(\"task_config_id_2\", {}, print, [dn_1], [dn_2]) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_1) _TaskManager._set(task_2) scenario = Scenario(\"scenario\", [task_1, task_2], {}, sequences={\"sequence\": {\"tasks\": [task_1, task_2]}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] jobs_1 = taipy.submit(sequence) jobs_2 = taipy.submit(sequence) assert len(jobs_1) == 2 assert len(jobs_2) == 2 submit_ids_1 = [job.submit_id for job in jobs_1] submit_ids_2 = [job.submit_id for job in jobs_2] assert len(set(submit_ids_1)) == 1 assert len(set(submit_ids_2)) == 1 assert set(submit_ids_1) != set(submit_ids_2) def test_submit_scenario_generate_unique_submit_id(): dn_1 = InMemoryDataNode(\"dn_config_id_1\", Scope.SCENARIO) dn_2 = InMemoryDataNode(\"dn_config_id_2\", Scope.SCENARIO) dn_3 = InMemoryDataNode(\"dn_config_id_3\", Scope.SCENARIO) task_1 = Task(\"task_config_id_1\", {}, print, [dn_1]) task_2 = Task(\"task_config_id_2\", {}, print, [dn_2]) task_3 = Task(\"task_config_id_3\", {}, print, [dn_3]) scenario = Scenario(\"scenario_config_id\", [task_1, task_2, task_3], {}) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _ScenarioManager._set(scenario) jobs_1 = taipy.submit(scenario) jobs_2 = taipy.submit(scenario) assert len(jobs_1) == 3 assert len(jobs_2) == 3 def test_submit_entity_store_entity_id_in_job(): dn_1 = InMemoryDataNode(\"dn_config_id_1\", Scope.SCENARIO) dn_2 = InMemoryDataNode(\"dn_config_id_2\", Scope.SCENARIO) dn_3 = InMemoryDataNode(\"dn_config_id_3\", Scope.SCENARIO) task_1 = Task(\"task_config_id_1\", {}, print, [dn_1]) task_2 = Task(\"task_config_id_2\", {}, print, [dn_2]) task_3 = Task(\"task_config_id_3\", {}, print, [dn_3]) scenario = Scenario(\"scenario_config_id\", [task_1, task_2, task_3], {}) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _ScenarioManager._set(scenario) jobs_1 = taipy.submit(scenario) assert all(job.submit_entity_id == scenario.id for job in jobs_1) job_1 = taipy.submit(task_1) assert job_1.submit_entity_id == task_1.id def test_submit_task_that_return_multiple_outputs(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) def return_2tuple(nb1, nb2): return multiply(nb1, nb2), multiply(nb1, nb2) / 2 def return_list(nb1, nb2): return [multiply(nb1, nb2), multiply(nb1, nb2) / 2] with_tuple = _create_task(return_2tuple, 2) with_list = _create_task(return_list, 2) _OrchestratorFactory._build_dispatcher() _Orchestrator.submit_task(with_tuple) _Orchestrator.submit_task(with_list) assert ( 
with_tuple.output[f\"{with_tuple.config_id}_output0\"].read() == with_list.output[f\"{with_list.config_id}_output0\"].read() == 42 ) assert ( with_tuple.output[f\"{with_tuple.config_id}_output1\"].read() == with_list.output[f\"{with_list.config_id}_output1\"].read() == 21 ) def test_submit_task_returns_single_iterable_output(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) def return_2tuple(nb1, nb2): return multiply(nb1, nb2), multiply(nb1, nb2) / 2 def return_list(nb1, nb2): return [multiply(nb1, nb2), multiply(nb1, nb2) / 2] task_with_tuple = _create_task(return_2tuple, 1) task_with_list = _create_task(return_list, 1) _OrchestratorFactory._build_dispatcher() _Orchestrator.submit_task(task_with_tuple) assert task_with_tuple.output[f\"{task_with_tuple.config_id}_output0\"].read() == (42, 21) assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 _Orchestrator.submit_task(task_with_list) assert task_with_list.output[f\"{task_with_list.config_id}_output0\"].read() == [42, 21] assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 def test_data_node_not_written_due_to_wrong_result_nb(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) def return_2tuple(): return lambda nb1, nb2: (multiply(nb1, nb2), multiply(nb1, nb2) / 2) task = _create_task(return_2tuple(), 3) _OrchestratorFactory._build_dispatcher() job = _Orchestrator.submit_task(task) assert task.output[f\"{task.config_id}_output0\"].read() == 0 assert job.is_failed() assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 def test_scenario_only_submit_same_task_once(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() dn_0 = InMemoryDataNode(\"dn_config_0\", Scope.SCENARIO, properties={\"default_data\": 0}) dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) task_1 = Task(\"task_config_1\", {}, print, input=[dn_0], output=[dn_1], id=\"task_1\") task_2 = Task(\"task_config_2\", {}, print, input=[dn_1], id=\"task_2\") task_3 = Task(\"task_config_3\", {}, print, input=[dn_1], output=[dn_2], id=\"task_3\") scenario_1 = Scenario( \"scenario_config_1\", [task_1, task_2, task_3], {}, \"scenario_1\", sequences={\"sequence_1\": {\"tasks\": [task_1, task_2]}, \"sequence_2\": {\"tasks\": [task_1, task_3]}}, ) sequence_1 = scenario_1.sequences[\"sequence_1\"] sequence_2 = scenario_1.sequences[\"sequence_2\"] jobs = _Orchestrator.submit(scenario_1) assert len(jobs) == 3 assert all([job.is_completed() for job in jobs]) assert all(not _Orchestrator._is_blocked(job) for job in jobs) jobs = _Orchestrator.submit(sequence_1) assert len(jobs) == 2 assert all([job.is_completed() for job in jobs]) assert all(not _Orchestrator._is_blocked(job) for job in jobs) jobs = _Orchestrator.submit(sequence_2) assert len(jobs) == 2 assert all([job.is_completed() for job in jobs]) assert all(not _Orchestrator._is_blocked(job) for job in jobs) def test_update_status_fail_job(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() dn_0 = InMemoryDataNode(\"dn_config_0\", Scope.SCENARIO, properties={\"default_data\": 0}) dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) task_0 = Task(\"task_config_0\", {}, _error, 
output=[dn_0], id=\"task_0\") task_1 = Task(\"task_config_1\", {}, print, input=[dn_0], output=[dn_1], id=\"task_1\") task_2 = Task(\"task_config_2\", {}, print, input=[dn_1], id=\"task_2\") task_3 = Task(\"task_config_3\", {}, print, input=[dn_2], id=\"task_3\") scenario_1 = Scenario(\"scenario_config_1\", [task_0, task_1, task_2, task_3], {}, \"scenario_1\") scenario_2 = Scenario(\"scenario_config_2\", [task_0, task_1, task_2, task_3], {}, \"scenario_2\") _DataManager._set(dn_0) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_0) _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _ScenarioManager._set(scenario_1) _ScenarioManager._set(scenario_2) job = _Orchestrator.submit_task(task_0) assert job.is_failed() jobs = _Orchestrator.submit(scenario_1) tasks_jobs = {job._task.id: job for job in jobs} assert tasks_jobs[\"task_0\"].is_failed() assert all([job.is_abandoned() for job in [tasks_jobs[\"task_1\"], tasks_jobs[\"task_2\"]]]) assert tasks_jobs[\"task_3\"].is_completed() assert all(not _Orchestrator._is_blocked(job) for job in jobs) jobs = _Orchestrator.submit(scenario_2) tasks_jobs = {job._task.id: job for job in jobs} assert tasks_jobs[\"task_0\"].is_failed() assert all([job.is_abandoned() for job in [tasks_jobs[\"task_1\"], tasks_jobs[\"task_2\"]]]) assert tasks_jobs[\"task_3\"].is_completed() assert all(not _Orchestrator._is_blocked(job) for job in jobs) def test_update_status_fail_job_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() dn_0 = InMemoryDataNode(\"dn_config_0\", Scope.SCENARIO, properties={\"default_data\": 0}) dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) task_0 = Task(\"task_config_0\", {}, _error, output=[dn_0], id=\"task_0\") task_1 = Task(\"task_config_1\", {}, print, input=[dn_0], output=[dn_1], id=\"task_1\") task_2 = Task(\"task_config_2\", {}, print, input=[dn_1], id=\"task_2\") task_3 = Task(\"task_config_3\", {}, print, input=[dn_2], id=\"task_3\") scenario_1 = Scenario( \"scenario_config_1\", set([task_0, task_1, task_2, task_3]), {}, set(), \"scenario_1\", sequences={\"sequence_1\": {\"tasks\": [task_0, task_1, task_2]}}, ) scenario_2 = Scenario( \"scenario_config_2\", set([task_0, task_1, task_2, task_3]), {}, set(), \"scenario_2\", ) _DataManager._set(dn_0) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_0) _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _ScenarioManager._set(scenario_1) _ScenarioManager._set(scenario_2) sequence_1 = scenario_1.sequences[\"sequence_1\"] job = _Orchestrator.submit_task(task_0) assert_true_after_time(job.is_failed) jobs = _Orchestrator.submit(sequence_1) tasks_jobs = {job._task.id: job for job in jobs} assert_true_after_time(tasks_jobs[\"task_0\"].is_failed) assert_true_after_time(lambda: all([job.is_abandoned() for job in [tasks_jobs[\"task_1\"], tasks_jobs[\"task_2\"]]])) assert_true_after_time(lambda: all(not _Orchestrator._is_blocked(job) for job in jobs)) jobs = _Orchestrator.submit(scenario_1.sequences[\"sequence_1\"]) tasks_jobs = {job._task.id: job for job in jobs} assert_true_after_time(tasks_jobs[\"task_0\"].is_failed) assert_true_after_time(lambda: all([job.is_abandoned() for job in [tasks_jobs[\"task_1\"], tasks_jobs[\"task_2\"]]])) assert_true_after_time(lambda: all(not 
_Orchestrator._is_blocked(job) for job in jobs)) jobs = _Orchestrator.submit(scenario_1) tasks_jobs = {job._task.id: job for job in jobs} assert_true_after_time(tasks_jobs[\"task_0\"].is_failed) assert_true_after_time(tasks_jobs[\"task_3\"].is_completed) assert_true_after_time(lambda: all([job.is_abandoned() for job in [tasks_jobs[\"task_1\"], tasks_jobs[\"task_2\"]]])) assert_true_after_time(lambda: all(not _Orchestrator._is_blocked(job) for job in jobs)) jobs = _Orchestrator.submit(scenario_2) tasks_jobs = {job._task.id: job for job in jobs} assert_true_after_time(tasks_jobs[\"task_0\"].is_failed) assert_true_after_time(tasks_jobs[\"task_3\"].is_completed) assert_true_after_time(lambda: all([job.is_abandoned() for job in [tasks_jobs[\"task_1\"], tasks_jobs[\"task_2\"]]])) assert_true_after_time(lambda: all(not _Orchestrator._is_blocked(job) for job in jobs)) def test_submit_task_in_parallel(): m = multiprocessing.Manager() lock = m.Lock() Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) task = _create_task(partial(lock_multiply, lock)) _OrchestratorFactory._build_dispatcher() with lock: assert task.output[f\"{task.config_id}_output0\"].read() == 0 job = _Orchestrator.submit_task(task) assert_true_after_time(job.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time(lambda: task.output[f\"{task.config_id}_output0\"].read() == 42) assert_true_after_time(job.is_completed) assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 def test_submit_sequence_in_parallel(): m = multiprocessing.Manager() lock = m.Lock() Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) task = _create_task(partial(lock_multiply, lock)) sequence = Sequence({}, [task], \"sequence_id\") _OrchestratorFactory._build_dispatcher() with lock: assert task.output[f\"{task.config_id}_output0\"].read() == 0 job = _Orchestrator.submit(sequence)[0] assert_true_after_time(job.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time(lambda: task.output[f\"{task.config_id}_output0\"].read() == 42) assert_true_after_time(job.is_completed) assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 def test_submit_scenario_in_parallel(): m = multiprocessing.Manager() lock = m.Lock() Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) task = _create_task(partial(lock_multiply, lock)) scenario = Scenario(\"scenario_config\", [task], {}, [], \"scenario_id\") _OrchestratorFactory._build_dispatcher() with lock: assert task.output[f\"{task.config_id}_output0\"].read() == 0 job = _Orchestrator.submit(scenario)[0] assert_true_after_time(job.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time(lambda: task.output[f\"{task.config_id}_output0\"].read() == 42) assert_true_after_time(job.is_completed) assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 def sleep_fct(seconds): sleep(seconds) def sleep_and_raise_error_fct(seconds): sleep(seconds) raise Exception def test_submit_task_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep, sleep_period)) job = 
_Orchestrator.submit_task(task, wait=True) assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_completed) def test_submit_sequence_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep, sleep_period)) sequence = Sequence({}, [task], \"sequence_id\") job = _Orchestrator.submit(sequence, wait=True)[0] assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_completed) def test_submit_scenario_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep, sleep_period)) scenario = Scenario(\"scenario_config\", [task], {}) job = _Orchestrator.submit(scenario, wait=True)[0] assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_completed) def test_submit_fail_task_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1.0 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep_and_raise_error_fct, sleep_period)) job = _Orchestrator.submit_task(task, wait=True) assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_failed) def test_submit_fail_sequence_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1.0 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep_and_raise_error_fct, sleep_period)) sequence = Sequence({}, [task], \"sequence_id\") job = _Orchestrator.submit(sequence, wait=True)[0] assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_failed) def test_submit_fail_scenario_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1.0 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep_and_raise_error_fct, sleep_period)) scenario = Scenario(\"scenario_config\", [task], {}) job = _Orchestrator.submit(scenario, wait=True)[0] assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_failed) def test_submit_task_synchronously_in_parallel_with_timeout(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() task_duration = 2 timeout_duration = task_duration - 1 task = Task(\"sleep_task\", {}, function=partial(sleep, task_duration)) start_time = datetime.now() job = _Orchestrator.submit_task(task, wait=True, timeout=timeout_duration) end_time = datetime.now() assert timeout_duration <= (end_time - start_time).seconds assert_true_after_time(job.is_completed) def test_submit_task_multithreading_multiple_task(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() task_1 = _create_task(partial(lock_multiply, lock_1)) task_2 = _create_task(partial(lock_multiply, lock_2)) 
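# Each task wraps lock_multiply with its own multiprocessing lock, so the assertions that follow can release lock_2 first and watch job_2 complete while job_1 is still kept running by lock_1.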
_OrchestratorFactory._build_dispatcher() with lock_1: with lock_2: job_1 = _Orchestrator.submit_task(task_1) job_2 = _Orchestrator.submit_task(task_2) assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert task_2.output[f\"{task_2.config_id}_output0\"].read() == 0 assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2) assert_true_after_time(lambda: task_2.output[f\"{task_2.config_id}_output0\"].read() == 42) assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert_true_after_time(job_2.is_completed) assert_true_after_time(job_1.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time(lambda: task_1.output[f\"{task_1.config_id}_output0\"].read() == 42) assert_true_after_time(job_1.is_completed) assert_true_after_time(job_2.is_completed) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) def test_submit_sequence_multithreading_multiple_task(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() task_1 = _create_task(partial(lock_multiply, lock_1)) task_2 = _create_task(partial(lock_multiply, lock_2)) sequence = Sequence({}, [task_1, task_2], \"sequence_id\") _OrchestratorFactory._build_dispatcher() with lock_1: with lock_2: tasks_jobs = {job._task.id: job for job in _Orchestrator.submit(sequence)} job_1 = tasks_jobs[task_1.id] job_2 = tasks_jobs[task_2.id] assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert task_2.output[f\"{task_2.config_id}_output0\"].read() == 0 assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2) assert_true_after_time(lambda: task_2.output[f\"{task_2.config_id}_output0\"].read() == 42) assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert_true_after_time(job_2.is_completed) assert_true_after_time(job_1.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time(lambda: task_1.output[f\"{task_1.config_id}_output0\"].read() == 42) assert_true_after_time(job_1.is_completed) assert_true_after_time(job_2.is_completed) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) def test_submit_scenario_multithreading_multiple_task(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() task_1 = _create_task(partial(lock_multiply, lock_1)) task_2 = _create_task(partial(lock_multiply, lock_2)) scenario = Scenario(\"scenario_config\", [task_1, task_2], {}) _OrchestratorFactory._build_dispatcher() with lock_1: with lock_2: tasks_jobs = {job._task.id: job for job in _Orchestrator.submit(scenario)} job_1 = tasks_jobs[task_1.id] job_2 = tasks_jobs[task_2.id] assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert task_2.output[f\"{task_2.config_id}_output0\"].read() == 0 assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2) assert_true_after_time(lambda: 
task_2.output[f\"{task_2.config_id}_output0\"].read() == 42) assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert_true_after_time(job_2.is_completed) assert_true_after_time(job_1.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time(lambda: task_1.output[f\"{task_1.config_id}_output0\"].read() == 42) assert_true_after_time(job_1.is_completed) assert_true_after_time(job_2.is_completed) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) def test_submit_task_multithreading_multiple_task_in_sync_way_to_check_job_status(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_0 = m.Lock() lock_1 = m.Lock() lock_2 = m.Lock() task_0 = _create_task(partial(lock_multiply, lock_0)) task_1 = _create_task(partial(lock_multiply, lock_1)) task_2 = _create_task(partial(lock_multiply, lock_2)) _OrchestratorFactory._build_dispatcher() with lock_0: job_0 = _Orchestrator.submit_task(task_0) assert_true_after_time(job_0.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) with lock_1: with lock_2: assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert task_2.output[f\"{task_2.config_id}_output0\"].read() == 0 job_2 = _Orchestrator.submit_task(task_2) job_1 = _Orchestrator.submit_task(task_1) assert_true_after_time(job_0.is_running) assert_true_after_time(job_1.is_pending) assert_true_after_time(job_2.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2) assert_true_after_time(lambda: task_2.output[f\"{task_2.config_id}_output0\"].read() == 42) assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert_true_after_time(job_0.is_running) assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_completed) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2) assert_true_after_time(lambda: task_1.output[f\"{task_1.config_id}_output0\"].read() == 42) assert task_0.output[f\"{task_0.config_id}_output0\"].read() == 0 assert_true_after_time(job_0.is_running) assert_true_after_time(job_1.is_completed) assert job_2.is_completed() assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) assert task_0.output[f\"{task_0.config_id}_output0\"].read() == 42 assert job_0.is_completed() assert job_1.is_completed() assert job_2.is_completed() def test_blocked_task(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() foo_cfg = Config.configure_data_node(\"foo\", default_data=1) bar_cfg = Config.configure_data_node(\"bar\") baz_cfg = Config.configure_data_node(\"baz\") _OrchestratorFactory._build_dispatcher() dns = _DataManager._bulk_get_or_create([foo_cfg, bar_cfg, baz_cfg]) foo = dns[foo_cfg] bar = dns[bar_cfg] baz = dns[baz_cfg] task_1 = Task(\"by_2\", {}, partial(lock_multiply, lock_1, 2), [foo], [bar]) task_2 = Task(\"by_3\", {}, partial(lock_multiply, lock_2, 3), [bar], [baz]) assert task_1.foo.is_ready_for_reading # foo is ready assert not task_1.bar.is_ready_for_reading # But bar is not ready assert not task_2.baz.is_ready_for_reading # neither does baz assert 
len(_Orchestrator.blocked_jobs) == 0 job_2 = _Orchestrator.submit_task(task_2) # job 2 is submitted first assert job_2.is_blocked() # since bar is not is_valid the job 2 is blocked assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) assert len(_Orchestrator.blocked_jobs) == 1 with lock_2: with lock_1: job_1 = _Orchestrator.submit_task( task_1, ) # job 1 is submitted and locked assert_true_after_time(job_1.is_running) # so it is still running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert not _DataManager._get(task_1.bar.id).is_ready_for_reading # And bar still not ready assert_true_after_time(job_2.is_blocked) # the job_2 remains blocked assert_true_after_time(job_1.is_completed) # job1 unlocked and can complete assert _DataManager._get(task_1.bar.id).is_ready_for_reading # bar becomes ready assert _DataManager._get(task_1.bar.id).read() == 2 # the data is computed and written assert_true_after_time(job_2.is_running) # And job 2 can start running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert len(_Orchestrator.blocked_jobs) == 0 assert_true_after_time(job_2.is_completed) # job 2 unlocked so it can complete assert _DataManager._get(task_2.baz.id).is_ready_for_reading # baz becomes ready assert _DataManager._get(task_2.baz.id).read() == 6 # the data is computed and written assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) def test_blocked_sequence(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() foo_cfg = Config.configure_data_node(\"foo\", default_data=1) bar_cfg = Config.configure_data_node(\"bar\") baz_cfg = Config.configure_data_node(\"baz\") _OrchestratorFactory._build_dispatcher() dns = _DataManager._bulk_get_or_create([foo_cfg, bar_cfg, baz_cfg]) foo = dns[foo_cfg] bar = dns[bar_cfg] baz = dns[baz_cfg] task_1 = Task(\"by_2\", {}, partial(lock_multiply, lock_1, 2), [foo], [bar]) task_2 = Task(\"by_3\", {}, partial(lock_multiply, lock_2, 3), [bar], [baz]) sequence = Sequence({}, [task_1, task_2], \"sequence_id\") assert task_1.foo.is_ready_for_reading # foo is ready assert not task_1.bar.is_ready_for_reading # But bar is not ready assert not task_2.baz.is_ready_for_reading # neither does baz assert len(_Orchestrator.blocked_jobs) == 0 with lock_2: with lock_1: jobs = _Orchestrator.submit(sequence) # sequence is submitted tasks_jobs = {job._task.id: job for job in jobs} job_1, job_2 = tasks_jobs[task_1.id], tasks_jobs[task_2.id] assert_true_after_time(job_1.is_running) # job 1 is submitted and locked so it is still running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert not _DataManager._get(task_1.bar.id).is_ready_for_reading # And bar still not ready assert_true_after_time(job_2.is_blocked) # the job_2 remains blocked assert_true_after_time(job_1.is_completed) # job1 unlocked and can complete assert _DataManager._get(task_1.bar.id).is_ready_for_reading # bar becomes ready assert _DataManager._get(task_1.bar.id).read() == 2 # the data is computed and written assert_true_after_time(job_2.is_running) # And job 2 can start running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert len(_Orchestrator.blocked_jobs) == 0 assert_true_after_time(job_2.is_completed) # job 2 unlocked 
so it can complete assert _DataManager._get(task_2.baz.id).is_ready_for_reading # baz becomes ready assert _DataManager._get(task_2.baz.id).read() == 6 # the data is computed and written assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) def test_blocked_scenario(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() foo_cfg = Config.configure_data_node(\"foo\", default_data=1) bar_cfg = Config.configure_data_node(\"bar\") baz_cfg = Config.configure_data_node(\"baz\") _OrchestratorFactory._build_dispatcher() dns = _DataManager._bulk_get_or_create([foo_cfg, bar_cfg, baz_cfg]) foo = dns[foo_cfg] bar = dns[bar_cfg] baz = dns[baz_cfg] task_1 = Task(\"by_2\", {}, partial(lock_multiply, lock_1, 2), [foo], [bar]) task_2 = Task(\"by_3\", {}, partial(lock_multiply, lock_2, 3), [bar], [baz]) scenario = Scenario(\"scenario_config\", [task_1, task_2], {}) assert task_1.foo.is_ready_for_reading # foo is ready assert not task_1.bar.is_ready_for_reading # But bar is not ready assert not task_2.baz.is_ready_for_reading # neither does baz assert len(_Orchestrator.blocked_jobs) == 0 with lock_2: with lock_1: jobs = _Orchestrator.submit(scenario) # scenario is submitted tasks_jobs = {job._task.id: job for job in jobs} job_1, job_2 = tasks_jobs[task_1.id], tasks_jobs[task_2.id] assert_true_after_time(job_1.is_running) # job 1 is submitted and locked so it is still running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert not _DataManager._get(task_1.bar.id).is_ready_for_reading # And bar still not ready assert_true_after_time(job_2.is_blocked) # the job_2 remains blocked assert_true_after_time(job_1.is_completed) # job1 unlocked and can complete assert _DataManager._get(task_1.bar.id).is_ready_for_reading # bar becomes ready assert _DataManager._get(task_1.bar.id).read() == 2 # the data is computed and written assert_true_after_time(job_2.is_running) # And job 2 can start running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert len(_Orchestrator.blocked_jobs) == 0 assert_true_after_time(job_2.is_completed) # job 2 unlocked so it can complete assert _DataManager._get(task_2.baz.id).is_ready_for_reading # baz becomes ready assert _DataManager._get(task_2.baz.id).read() == 6 # the data is computed and written assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) def test_task_orchestrator_create_synchronous_dispatcher(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() assert _OrchestratorFactory._dispatcher._nb_available_workers == 1 def test_task_orchestrator_create_standalone_dispatcher(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=3) _OrchestratorFactory._build_dispatcher() assert isinstance(_OrchestratorFactory._dispatcher._executor, ProcessPoolExecutor) assert _OrchestratorFactory._dispatcher._nb_available_workers == 3 def modified_config_task(n): from taipy.config import Config assert_true_after_time(lambda: Config.core.storage_folder == \".my_data/\") assert_true_after_time(lambda: Config.core.custom_property == \"custom_property\") return n * 2 def test_can_exec_task_with_modified_config(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) 
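# Standalone mode dispatches jobs to subprocesses; modified_config_task asserts that the storage_folder and custom_property configured below are also visible from inside the worker process.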
Config.configure_core(storage_folder=\".my_data/\", custom_property=\"custom_property\") dn_input_config = Config.configure_data_node(\"input\", \"pickle\", scope=Scope.SCENARIO, default_data=1) dn_output_config = Config.configure_data_node(\"output\", \"pickle\") task_config = Config.configure_task(\"task_config\", modified_config_task, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) jobs = scenario.submit() assert_true_after_time(jobs[0].is_finished, time=120) assert_true_after_time( jobs[0].is_completed ) # If the job is completed, the assertions inside the task were successful def update_config_task(n): from taipy.config import Config # The exception will be saved to the logger, and there is no way to check for it there, # so it is checked here with pytest.raises(ConfigurationUpdateBlocked): Config.core.storage_folder = \".new_storage_folder/\" with pytest.raises(ConfigurationUpdateBlocked): Config.core.properties = {\"custom_property\": \"new_custom_property\"} Config.core.storage_folder = \".new_storage_folder/\" Config.core.properties = {\"custom_property\": \"new_custom_property\"} return n * 2 def test_cannot_exec_task_that_update_config(): \"\"\" The _ConfigBlocker singleton is not passed to the subprocesses, so on its own the config update would not be blocked there. However, once a new Config is rebuilt in each subprocess, the update should be blocked again. \"\"\" Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) dn_input_config = Config.configure_data_node(\"input\", \"pickle\", scope=Scope.SCENARIO, default_data=1) dn_output_config = Config.configure_data_node(\"output\", \"pickle\") task_config = Config.configure_task(\"task_config\", update_config_task, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) jobs = scenario.submit() # The job should fail because an exception is raised assert_true_after_time(jobs[0].is_failed) def test_can_execute_task_with_development_mode(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"input\", \"pickle\", scope=Scope.SCENARIO, default_data=1) dn_output_config = Config.configure_data_node(\"output\", \"pickle\") task_config = Config.configure_task(\"task_config\", mult_by_2, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) scenario.submit() while scenario.output.edit_in_progress: sleep(1) assert 2 == scenario.output.read() def test_need_to_run_no_output(): hello_cfg = Config.configure_data_node(\"hello\", default_data=\"Hello \") world_cfg = Config.configure_data_node(\"world\", default_data=\"world !\") task_cfg = Config.configure_task(\"name\", input=[hello_cfg, world_cfg], function=concat, output=[]) task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) def test_need_to_run_task_not_skippable(): hello_cfg = Config.configure_data_node(\"hello\", default_data=\"Hello \") world_cfg = Config.configure_data_node(\"world\", default_data=\"world !\") hello_world_cfg = Config.configure_data_node(\"hello_world\") task_cfg = 
Config.configure_task( \"name\", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=False ) task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) def test_need_to_run_skippable_task_no_input(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) hello_world_cfg = Config.configure_data_node(\"hello_world\") task_cfg = Config.configure_task(\"name\", input=[], function=nothing, output=[hello_world_cfg], skippable=True) _OrchestratorFactory._build_dispatcher() task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) _Orchestrator.submit_task(task) assert not _OrchestratorFactory._dispatcher._needs_to_run(task) def test_need_to_run_skippable_task_no_validity_period_on_output(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) hello_cfg = Config.configure_data_node(\"hello\", default_data=\"Hello \") world_cfg = Config.configure_data_node(\"world\", default_data=\"world !\") hello_world_cfg = Config.configure_data_node(\"hello_world\") task_cfg = Config.configure_task( \"name\", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=True ) _OrchestratorFactory._build_dispatcher() task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) _Orchestrator.submit_task(task) assert not _OrchestratorFactory._dispatcher._needs_to_run(task) def test_need_to_run_skippable_task_with_validity_period_is_valid_on_output(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) hello_cfg = Config.configure_data_node(\"hello\", default_data=\"Hello \") world_cfg = Config.configure_data_node(\"world\", default_data=\"world !\") hello_world_cfg = Config.configure_data_node(\"hello_world\", validity_days=1) task_cfg = Config.configure_task( \"name\", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=True ) _OrchestratorFactory._build_dispatcher() task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) job = _Orchestrator.submit_task(task) assert not _OrchestratorFactory._dispatcher._needs_to_run(task) job_skipped = _Orchestrator.submit_task(task) assert job.is_completed() assert job.is_finished() assert job_skipped.is_skipped() assert job_skipped.is_finished() def test_need_to_run_skippable_task_with_validity_period_obsolete_on_output(): hello_cfg = Config.configure_data_node(\"hello\", default_data=\"Hello \") world_cfg = Config.configure_data_node(\"world\", default_data=\"world !\") hello_world_cfg = Config.configure_data_node(\"hello_world\", validity_days=1) task_cfg = Config.configure_task( \"name\", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=True ) task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) _Orchestrator.submit_task(task) output = task.hello_world output._last_edit_date = datetime.now() - timedelta(days=1, minutes=30) _DataManager()._set(output) assert _OrchestratorFactory._dispatcher._needs_to_run(task) # ################################ UTIL METHODS ################################## def _create_task(function, nb_outputs=1): output_dn_config_id = \"\".join(random.choice(string.ascii_lowercase) for _ in range(10)) dn_input_configs = [ Config.configure_data_node(\"input1\", \"pickle\", Scope.SCENARIO, default_data=21), Config.configure_data_node(\"input2\", \"pickle\", Scope.SCENARIO, 
default_data=2), ] dn_output_configs = [ Config.configure_data_node(f\"{output_dn_config_id}_output{i}\", \"pickle\", Scope.SCENARIO, default_data=0) for i in range(nb_outputs) ] input_dn = _DataManager._bulk_get_or_create(dn_input_configs).values() output_dn = _DataManager._bulk_get_or_create(dn_output_configs).values() return Task( output_dn_config_id, {}, function=function, input=input_dn, output=output_dn, ) def _create_task_from_config(task_cfg): return _TaskManager()._bulk_get_or_create([task_cfg])[0] "} {"text": "from unittest import mock import pytest from src.taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _JobDispatcher, _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.exceptions.exceptions import OrchestratorNotBuilt from taipy.config import Config def test_build_orchestrator(): _OrchestratorFactory._orchestrator = None _OrchestratorFactory._dispatcher = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is None orchestrator = _OrchestratorFactory._build_orchestrator() assert orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator == _Orchestrator dispatcher = _OrchestratorFactory._build_dispatcher() assert isinstance(dispatcher, _JobDispatcher) assert isinstance(_OrchestratorFactory._dispatcher, _JobDispatcher) _OrchestratorFactory._orchestrator = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is not None with mock.patch( \"src.taipy.core._orchestrator._orchestrator_factory._OrchestratorFactory._build_dispatcher\" ) as build_dispatcher, mock.patch( \"src.taipy.core._orchestrator._orchestrator._Orchestrator.initialize\" ) as initialize: orchestrator = _OrchestratorFactory._build_orchestrator() assert orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator == _Orchestrator build_dispatcher.assert_not_called() initialize.assert_called_once() def test_build_development_dispatcher(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._orchestrator = None _OrchestratorFactory._dispatcher = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is None with pytest.raises(OrchestratorNotBuilt): _OrchestratorFactory._build_dispatcher() _OrchestratorFactory._build_orchestrator() assert _OrchestratorFactory._orchestrator is not None assert _OrchestratorFactory._dispatcher is None _OrchestratorFactory._build_dispatcher() assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) def test_build_standalone_dispatcher(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher) assert not isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 2 _OrchestratorFactory._dispatcher._nb_available_workers = 1 _OrchestratorFactory._build_dispatcher(force_restart=False) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 1 _OrchestratorFactory._build_dispatcher(force_restart=True) assert 
_OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 2 "} {"text": "import multiprocessing from concurrent.futures import ProcessPoolExecutor from functools import partial from unittest import mock from unittest.mock import MagicMock from pytest import raises from src.taipy.core import DataNodeId, JobId, TaskId from src.taipy.core._orchestrator._dispatcher._development_job_dispatcher import _DevelopmentJobDispatcher from src.taipy.core._orchestrator._dispatcher._standalone_job_dispatcher import _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job.job import Job from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.task.task import Task from taipy.config.config import Config from tests.core.utils import assert_true_after_time def execute(lock): with lock: ... return None def _error(): raise RuntimeError(\"Something bad has happened\") def test_build_development_job_dispatcher(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() dispatcher = _OrchestratorFactory._dispatcher assert isinstance(dispatcher, _DevelopmentJobDispatcher) assert dispatcher._nb_available_workers == 1 with raises(NotImplementedError): assert dispatcher.start() assert dispatcher.is_running() with raises(NotImplementedError): dispatcher.stop() def test_build_standalone_job_dispatcher(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() dispatcher = _OrchestratorFactory._dispatcher assert not isinstance(dispatcher, _DevelopmentJobDispatcher) assert isinstance(dispatcher, _StandaloneJobDispatcher) assert isinstance(dispatcher._executor, ProcessPoolExecutor) assert dispatcher._nb_available_workers == 2 assert_true_after_time(dispatcher.is_running) dispatcher.stop() dispatcher.join() assert_true_after_time(lambda: not dispatcher.is_running()) def test_can_execute_2_workers(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock = m.Lock() task_id = TaskId(\"task_id1\") output = list(_DataManager._bulk_get_or_create([Config.configure_data_node(\"input1\", default_data=21)]).values()) _OrchestratorFactory._build_dispatcher() task = Task( config_id=\"name\", properties={}, input=[], function=partial(execute, lock), output=output, id=task_id, ) job_id = JobId(\"id1\") job = Job(job_id, task, \"submit_id\", task.id) dispatcher = _StandaloneJobDispatcher(_OrchestratorFactory._orchestrator) with lock: assert dispatcher._can_execute() dispatcher._dispatch(job) assert dispatcher._can_execute() dispatcher._dispatch(job) assert not dispatcher._can_execute() assert_true_after_time(lambda: dispatcher._can_execute()) def test_can_execute_synchronous(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") task = Task(config_id=\"name\", properties={}, input=[], function=print, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id) job_id = JobId(\"id1\") job = Job(job_id, task, submission.id, task.id) dispatcher = _OrchestratorFactory._dispatcher assert dispatcher._can_execute() dispatcher._dispatch(job) assert 
dispatcher._can_execute() def test_exception_in_user_function(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") job_id = JobId(\"id1\") task = Task(config_id=\"name\", properties={}, input=[], function=_error, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id) job = Job(job_id, task, submission.id, task.id) dispatcher = _OrchestratorFactory._dispatcher dispatcher._dispatch(job) assert job.is_failed() assert 'RuntimeError(\"Something bad has happened\")' in str(job.stacktrace[0]) def test_exception_in_writing_data(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") job_id = JobId(\"id1\") output = MagicMock() output.id = DataNodeId(\"output_id\") output.config_id = \"my_raising_datanode\" output._is_in_cache = False output.write.side_effect = ValueError() task = Task(config_id=\"name\", properties={}, input=[], function=print, output=[output], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id) job = Job(job_id, task, submission.id, task.id) dispatcher = _OrchestratorFactory._dispatcher with mock.patch(\"src.taipy.core.data._data_manager._DataManager._get\") as get: get.return_value = output dispatcher._dispatch(job) assert job.is_failed() assert \"node\" in job.stacktrace[0] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from unittest import mock import pytest from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.csv import CSVDataNode from src.taipy.core.data.data_node import DataNode from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import InvalidConfigurationId @pytest.fixture def output(): return [DataNode(\"name_1\"), DataNode(\"name_2\"), DataNode(\"name_3\")] @pytest.fixture def output_config(): return [DataNodeConfig(\"name_1\"), DataNodeConfig(\"name_2\"), DataNodeConfig(\"name_3\")] @pytest.fixture def input(): return [DataNode(\"input_name_1\"), DataNode(\"input_name_2\"), DataNode(\"input_name_3\")] @pytest.fixture def input_config(): return [DataNodeConfig(\"input_name_1\"), DataNodeConfig(\"input_name_2\"), DataNodeConfig(\"input_name_3\")] def test_create_task(): name = \"name_1\" task = Task(name, {}, print, [], []) assert f\"TASK_{name}_\" in task.id assert task.config_id == \"name_1\" with pytest.raises(InvalidConfigurationId): Task(\"foo bar\", {}, print, [], []) path = \"my/csv/path\" foo_dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": True}) task = Task(\"name_1\", {}, print, [foo_dn], []) assert task.config_id == \"name_1\" assert task.id is not None assert task.owner_id is None assert task.parent_ids == set() assert task.foo == foo_dn assert task.foo.path == path with pytest.raises(AttributeError): task.bar task = Task(\"name_1\", {}, print, [foo_dn], [], parent_ids={\"parent_id\"}) assert task.parent_ids == {\"parent_id\"} path = \"my/csv/path\" abc_dn = InMemoryDataNode(\"name_1ea\", Scope.SCENARIO, properties={\"path\": path}) task = Task(\"name_1ea\", {}, print, [abc_dn], [], owner_id=\"owner_id\", parent_ids={\"parent_id_1\", \"parent_id_2\"}) assert task.config_id == \"name_1ea\" assert task.id is not None assert task.owner_id == \"owner_id\" assert task.parent_ids == {\"parent_id_1\", \"parent_id_2\"} assert task.name_1ea == abc_dn assert task.name_1ea.path == path with pytest.raises(AttributeError): task.bar with mock.patch(\"src.taipy.core.get\") as get_mck: class MockOwner: label = \"owner_label\" def get_label(self): return self.label get_mck.return_value = MockOwner() assert task.get_label() == \"owner_label > \" + task.config_id assert task.get_simple_label() == task.config_id def test_can_not_change_task_output(output): task = Task(\"name_1\", {}, print, output=output) with pytest.raises(Exception): task.output = {} assert list(task.output.values()) == output output.append(output[0]) assert list(task.output.values()) != output def test_can_not_change_task_input(input): task = Task(\"name_1\", {}, print, input=input) with pytest.raises(Exception): task.input = {} assert list(task.input.values()) == input input.append(input[0]) assert list(task.input.values()) != input def test_can_not_change_task_config_output(output_config): task_config = Config.configure_task(\"name_1\", print, [], output=output_config) assert task_config.output_configs == output_config with pytest.raises(Exception): task_config.output_configs = [] output_config.append(output_config[0]) assert task_config._output != output_config def test_can_not_update_task_output_values(output_config): data_node_cfg = 
Config.configure_data_node(\"data_node_cfg\") task_config = Config.configure_task(\"name_1\", print, [], output=output_config) task_config.output_configs.append(data_node_cfg) assert task_config.output_configs == output_config task_config.output_configs[0] = data_node_cfg assert task_config.output_configs[0] != data_node_cfg def test_can_not_update_task_input_values(input_config): data_node_config = DataNodeConfig(\"data_node\") task_config = Config.configure_task(\"name_1\", print, input=input_config, output=[]) task_config.input_configs.append(data_node_config) assert task_config.input_configs == input_config task_config.input_configs[0] = data_node_config assert task_config.input_configs[0] != data_node_config def mock_func(): pass def test_auto_set_and_reload(data_node): task_1 = Task( config_id=\"foo\", properties={}, function=print, input=None, output=None, owner_id=None, skippable=False ) _DataManager._set(data_node) _TaskManager._set(task_1) task_2 = _TaskManager._get(task_1) # auto set & reload on function attribute assert task_1.function == print assert task_2.function == print task_1.function = sum assert task_1.function == sum assert task_2.function == sum task_2.function = mock_func assert task_1.function == mock_func assert task_2.function == mock_func # auto set & reload on skippable attribute assert not task_1.skippable assert not task_2.skippable task_1.skippable = True assert task_1.skippable assert task_2.skippable task_2.skippable = False assert not task_1.skippable assert not task_2.skippable # auto set & reload on parent_ids attribute (set() object does not have auto set yet) assert task_1.parent_ids == set() assert task_2.parent_ids == set() task_1._parent_ids.update([\"sc2\"]) _TaskManager._set(task_1) assert task_1.parent_ids == {\"sc2\"} assert task_2.parent_ids == {\"sc2\"} task_2._parent_ids.clear() task_2._parent_ids.update([\"sc1\"]) _TaskManager._set(task_2) assert task_1.parent_ids == {\"sc1\"} assert task_2.parent_ids == {\"sc1\"} # auto set & reload on properties attribute assert task_1.properties == {} assert task_2.properties == {} task_1._properties[\"qux\"] = 4 assert task_1.properties[\"qux\"] == 4 assert task_2.properties[\"qux\"] == 4 assert task_1.properties == {\"qux\": 4} assert task_2.properties == {\"qux\": 4} task_2._properties[\"qux\"] = 5 assert task_1.properties[\"qux\"] == 5 assert task_2.properties[\"qux\"] == 5 task_1.properties[\"temp_key_1\"] = \"temp_value_1\" task_1.properties[\"temp_key_2\"] = \"temp_value_2\" assert task_1.properties == { \"qux\": 5, \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } assert task_2.properties == { \"qux\": 5, \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } task_1.properties.pop(\"temp_key_1\") assert \"temp_key_1\" not in task_1.properties.keys() assert \"temp_key_1\" not in task_1.properties.keys() assert task_1.properties == { \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } assert task_2.properties == { \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } task_2.properties.pop(\"temp_key_2\") assert task_1.properties == {\"qux\": 5} assert task_2.properties == {\"qux\": 5} assert \"temp_key_2\" not in task_1.properties.keys() assert \"temp_key_2\" not in task_2.properties.keys() task_1.properties[\"temp_key_3\"] = 0 assert task_1.properties == {\"qux\": 5, \"temp_key_3\": 0} assert task_2.properties == {\"qux\": 5, \"temp_key_3\": 0} task_1.properties.update({\"temp_key_3\": 1}) assert task_1.properties == {\"qux\": 5, \"temp_key_3\": 1} assert task_2.properties 
== {\"qux\": 5, \"temp_key_3\": 1} task_1.properties.update(dict()) assert task_1.properties == {\"qux\": 5, \"temp_key_3\": 1} assert task_2.properties == {\"qux\": 5, \"temp_key_3\": 1} task_1.properties[\"temp_key_4\"] = 0 task_1.properties[\"temp_key_5\"] = 0 with task_1 as task: assert task.config_id == \"foo\" assert task.owner_id is None assert task.function == mock_func assert not task.skippable assert task._is_in_context assert task.properties[\"qux\"] == 5 assert task.properties[\"temp_key_3\"] == 1 assert task.properties[\"temp_key_4\"] == 0 assert task.properties[\"temp_key_5\"] == 0 task.function = print task.skippable = True task.properties[\"qux\"] = 9 task.properties.pop(\"temp_key_3\") task.properties.pop(\"temp_key_4\") task.properties.update({\"temp_key_4\": 1}) task.properties.update({\"temp_key_5\": 2}) task.properties.pop(\"temp_key_5\") task.properties.update(dict()) assert task.config_id == \"foo\" assert task.owner_id is None assert task.function == mock_func assert not task.skippable assert task._is_in_context assert task.properties[\"qux\"] == 5 assert task.properties[\"temp_key_3\"] == 1 assert task.properties[\"temp_key_4\"] == 0 assert task.properties[\"temp_key_5\"] == 0 assert task_1.config_id == \"foo\" assert task_1.owner_id is None assert task_1.function == print assert task.skippable assert not task_1._is_in_context assert task_1.properties[\"qux\"] == 9 assert \"temp_key_3\" not in task_1.properties.keys() assert task_1.properties[\"temp_key_4\"] == 1 assert \"temp_key_5\" not in task_1.properties.keys() def test_get_parents(task): with mock.patch(\"src.taipy.core.get_parents\") as mck: task.get_parents() mck.assert_called_once_with(task) def test_submit_task(task: Task): with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._submit\") as mock_submit: task.submit([], True) mock_submit.assert_called_once_with(task, [], True, False, None) "} {"text": "import os import pytest from src.taipy.core.data._data_sql_repository import _DataSQLRepository from src.taipy.core.exceptions import ModelNotFound from src.taipy.core.task._task_fs_repository import _TaskFSRepository from src.taipy.core.task._task_sql_repository import _TaskSQLRepository from src.taipy.core.task.task import Task, TaskId class TestTaskFSRepository: @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_save_and_load(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) repository._save(task) obj = repository._load(task.id) assert isinstance(obj, Task) @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_exists(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) repository._save(task) assert repository._exists(task.id) assert not repository._exists(\"not-existed-task\") @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_load_all(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) for i in range(10): task.id = TaskId(f\"task-{i}\") repository._save(task) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_load_all_with_filters(self, data_node, repo, 
init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) for i in range(10): task.id = TaskId(f\"task-{i}\") task.owner_id = f\"owner-{i}\" repository._save(task) objs = repository._load_all(filters=[{\"owner_id\": \"owner-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_delete(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) repository._save(task) repository._delete(task.id) with pytest.raises(ModelNotFound): repository._load(task.id) @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_delete_all(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) for i in range(10): task.id = TaskId(f\"task-{i}\") repository._save(task) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_delete_many(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) for i in range(10): task.id = TaskId(f\"task-{i}\") repository._save(task) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_delete_by(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): task.id = TaskId(f\"task-{i}\") task._version = f\"{(i+1) // 5}.0\" repository._save(task) objs = repository._load_all() assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_search(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node], version=\"random_version_number\") for i in range(10): task.id = TaskId(f\"task-{i}\") task.owner_id = f\"owner-{i}\" repository._save(task) assert len(repository._load_all()) == 10 objs = repository._search(\"owner_id\", \"owner-2\") assert len(objs) == 1 assert isinstance(objs[0], Task) objs = repository._search(\"owner_id\", \"owner-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], Task) assert repository._search(\"owner_id\", \"owner-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_export(self, tmpdir, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) repository._save(task) repository._export(task.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _TaskFSRepository else os.path.join(tmpdir.strpath, \"task\") assert os.path.exists(os.path.join(dir_path, f\"{task.id}.json\")) "} {"text": 
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import uuid from unittest import mock import pytest from src.taipy.core import taipy from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import ModelNotFound, NonExistingTask from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.scope import Scope from taipy.config.config import Config def test_create_and_save(): input_configs = [Config.configure_data_node(\"my_input\", \"in_memory\")] output_configs = Config.configure_data_node(\"my_output\", \"in_memory\") task_config = Config.configure_task(\"foo\", print, input_configs, output_configs) task = _create_task_from_config(task_config) assert task.id is not None assert task.config_id == \"foo\" assert len(task.input) == 1 assert len(_DataManager._get_all()) == 2 assert task.my_input.id is not None assert task.my_input.config_id == \"my_input\" assert task.my_output.id is not None assert task.my_output.config_id == \"my_output\" assert task.function == print assert task.parent_ids == set() task_retrieved_from_manager = _TaskManager._get(task.id) assert task_retrieved_from_manager.id == task.id assert task_retrieved_from_manager.config_id == task.config_id assert len(task_retrieved_from_manager.input) == len(task.input) assert task_retrieved_from_manager.my_input.id is not None assert task_retrieved_from_manager.my_input.config_id == task.my_input.config_id assert task_retrieved_from_manager.my_output.id is not None assert task_retrieved_from_manager.my_output.config_id == task.my_output.config_id assert task_retrieved_from_manager.function == task.function assert task_retrieved_from_manager.parent_ids == set() def test_do_not_recreate_existing_data_node(): input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO) output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) _DataManager._create_and_set(input_config, \"scenario_id\", \"task_id\") assert len(_DataManager._get_all()) == 1 task_config = Config.configure_task(\"foo\", print, input_config, output_config) _create_task_from_config(task_config, scenario_id=\"scenario_id\") assert len(_DataManager._get_all()) == 2 def test_assign_task_as_parent_of_datanode(): dn_config_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.SCENARIO) dn_config_3 = Config.configure_data_node(\"dn_3\", \"in_memory\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"task_1\", print, dn_config_1, dn_config_2) task_config_2 = 
Config.configure_task(\"task_2\", print, dn_config_2, dn_config_3) tasks = _TaskManager._bulk_get_or_create([task_config_1, task_config_2], \"cycle_id\", \"scenario_id\") assert len(_DataManager._get_all()) == 3 assert len(_TaskManager._get_all()) == 2 assert len(tasks) == 2 dns = {dn.config_id: dn for dn in _DataManager._get_all()} assert dns[\"dn_1\"].parent_ids == {tasks[0].id} assert dns[\"dn_2\"].parent_ids == set([tasks[0].id, tasks[1].id]) assert dns[\"dn_3\"].parent_ids == {tasks[1].id} def test_do_not_recreate_existing_task(): input_config_scope_scenario = Config.configure_data_node(\"my_input_1\", \"in_memory\", Scope.SCENARIO) output_config_scope_scenario = Config.configure_data_node(\"my_output_1\", \"in_memory\", Scope.SCENARIO) task_config_1 = Config.configure_task(\"bar\", print, input_config_scope_scenario, output_config_scope_scenario) # task_config_2 scope is Scenario task_1 = _create_task_from_config(task_config_1) assert len(_TaskManager._get_all()) == 1 task_2 = _create_task_from_config(task_config_1) # Do not create. It already exists for None scenario assert len(_TaskManager._get_all()) == 1 assert task_1.id == task_2.id task_3 = _create_task_from_config(task_config_1, None, None) # Do not create. It already exists for None scenario assert len(_TaskManager._get_all()) == 1 assert task_1.id == task_2.id assert task_2.id == task_3.id task_4 = _create_task_from_config(task_config_1, None, \"scenario_1\") # Create even if sequence is the same. assert len(_TaskManager._get_all()) == 2 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id task_5 = _create_task_from_config( task_config_1, None, \"scenario_1\" ) # Do not create. It already exists for scenario_1 assert len(_TaskManager._get_all()) == 2 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id assert task_4.id == task_5.id task_6 = _create_task_from_config(task_config_1, None, \"scenario_2\") assert len(_TaskManager._get_all()) == 3 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id assert task_4.id == task_5.id assert task_5.id != task_6.id assert task_3.id != task_6.id input_config_scope_cycle = Config.configure_data_node(\"my_input_2\", \"in_memory\", Scope.CYCLE) output_config_scope_cycle = Config.configure_data_node(\"my_output_2\", \"in_memory\", Scope.CYCLE) task_config_2 = Config.configure_task(\"xyz\", print, input_config_scope_cycle, output_config_scope_cycle) # task_config_3 scope is Cycle task_7 = _create_task_from_config(task_config_2) assert len(_TaskManager._get_all()) == 4 task_8 = _create_task_from_config(task_config_2) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id task_9 = _create_task_from_config(task_config_2, None, None) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id task_10 = _create_task_from_config( task_config_2, None, \"scenario\" ) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id task_11 = _create_task_from_config( task_config_2, None, \"scenario\" ) # Do not create. 
It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id task_12 = _create_task_from_config(task_config_2, \"cycle\", None) assert len(_TaskManager._get_all()) == 5 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id assert task_11.id != task_12.id task_13 = _create_task_from_config(task_config_2, \"cycle\", None) assert len(_TaskManager._get_all()) == 5 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id assert task_11.id != task_12.id assert task_12.id == task_13.id def test_set_and_get_task(): task_id_1 = TaskId(\"id1\") first_task = Task(\"name_1\", {}, print, [], [], task_id_1) task_id_2 = TaskId(\"id2\") second_task = Task(\"name_2\", {}, print, [], [], task_id_2) third_task_with_same_id_as_first_task = Task(\"name_is_not_1_anymore\", {}, print, [], [], task_id_1) # No task at initialization assert len(_TaskManager._get_all()) == 0 assert _TaskManager._get(task_id_1) is None assert _TaskManager._get(first_task) is None assert _TaskManager._get(task_id_2) is None assert _TaskManager._get(second_task) is None # Save one task. We expect to have only one task stored _TaskManager._set(first_task) assert len(_TaskManager._get_all()) == 1 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2) is None assert _TaskManager._get(second_task) is None # Save a second task. Now, we expect to have a total of two tasks stored _TaskManager._set(second_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id # We save the first task again. We expect nothing to change _TaskManager._set(first_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id # We save a third task with same id as the first one. 
# We expect the first task to be updated _TaskManager._set(third_task_with_same_id_as_first_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == third_task_with_same_id_as_first_task.id assert _TaskManager._get(task_id_1).config_id == third_task_with_same_id_as_first_task.config_id assert _TaskManager._get(first_task).id == third_task_with_same_id_as_first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id def test_get_all_on_multiple_versions_environment(): # Create 5 tasks with 2 versions each # Only version 1.0 has the task with config_id = \"config_id_1\" # Only version 2.0 has the task with config_id = \"config_id_6\" for version in range(1, 3): for i in range(5): _TaskManager._set( Task( f\"config_id_{i+version}\", {}, print, [], [], id=TaskId(f\"id{i}_v{version}\"), version=f\"{version}.0\" ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"1.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"2.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_ensure_conservation_of_order_of_data_nodes_on_task_creation(): embedded_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.SCENARIO) embedded_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.SCENARIO) embedded_3 = Config.configure_data_node(\"a_dn_3\", \"in_memory\", scope=Scope.SCENARIO) embedded_4 = Config.configure_data_node(\"dn_4\", \"in_memory\", scope=Scope.SCENARIO) embedded_5 = Config.configure_data_node(\"dn_5\", \"in_memory\", scope=Scope.SCENARIO) input = [embedded_1, embedded_2, embedded_3] output = [embedded_4, embedded_5] task_config_1 = Config.configure_task(\"name_1\", print, input, output) task_config_2 = Config.configure_task(\"name_2\", print, input, output) task_1, task_2 = _TaskManager._bulk_get_or_create([task_config_1, task_config_2]) assert [i.config_id for i in task_1.input.values()] == [embedded_1.id, embedded_2.id, embedded_3.id] assert [o.config_id for o in task_1.output.values()] == [embedded_4.id, embedded_5.id] assert [i.config_id for i in task_2.input.values()] == [embedded_1.id, embedded_2.id, embedded_3.id] assert [o.config_id for o in task_2.output.values()] == [embedded_4.id, embedded_5.id] def test_delete_raise_exception(): dn_input_config_1 = Config.configure_data_node( \"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\" ) dn_output_config_1 = 
Config.configure_data_node(\"my_output_1\", \"in_memory\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_input_config_1, dn_output_config_1) task_1 = _create_task_from_config(task_config_1) _TaskManager._delete(task_1.id) with pytest.raises(ModelNotFound): _TaskManager._delete(task_1.id) def test_hard_delete(): dn_input_config_1 = Config.configure_data_node( \"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\" ) dn_output_config_1 = Config.configure_data_node(\"my_output_1\", \"in_memory\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_input_config_1, dn_output_config_1) task_1 = _create_task_from_config(task_config_1) assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 _TaskManager._hard_delete(task_1.id) assert len(_TaskManager._get_all()) == 0 assert len(_DataManager._get_all()) == 2 def test_is_submittable(): assert len(_TaskManager._get_all()) == 0 dn_config = Config.configure_in_memory_data_node(\"dn\", 10) task_config = Config.configure_task(\"task\", print, [dn_config]) task = _TaskManager._bulk_get_or_create([task_config])[0] assert len(_TaskManager._get_all()) == 1 assert _TaskManager._is_submittable(task) assert _TaskManager._is_submittable(task.id) assert not _TaskManager._is_submittable(\"Task_temp\") task.input[\"dn\"].edit_in_progress = True assert not _TaskManager._is_submittable(task) assert not _TaskManager._is_submittable(task.id) task.input[\"dn\"].edit_in_progress = False assert _TaskManager._is_submittable(task) assert _TaskManager._is_submittable(task.id) def test_submit_task(): data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") task_1 = Task( \"grault\", {}, print, [data_node_1], [data_node_2], TaskId(\"t1\"), ) class MockOrchestrator(_Orchestrator): submit_calls = [] submit_ids = [] def submit_task(self, task, callbacks=None, force=False, wait=False, timeout=None): submit_id = f\"SUBMISSION_{str(uuid.uuid4())}\" self.submit_calls.append(task) self.submit_ids.append(submit_id) return None with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._orchestrator\", new=MockOrchestrator): # Task does not exist, we expect an exception with pytest.raises(NonExistingTask): _TaskManager._submit(task_1) with pytest.raises(NonExistingTask): _TaskManager._submit(task_1.id) _TaskManager._set(task_1) _TaskManager._submit(task_1) call_ids = [call.id for call in MockOrchestrator.submit_calls] assert call_ids == [task_1.id] assert len(MockOrchestrator.submit_ids) == 1 _TaskManager._submit(task_1) assert len(MockOrchestrator.submit_ids) == 2 assert len(MockOrchestrator.submit_ids) == len(set(MockOrchestrator.submit_ids)) _TaskManager._submit(task_1) assert len(MockOrchestrator.submit_ids) == 3 assert len(MockOrchestrator.submit_ids) == len(set(MockOrchestrator.submit_ids)) def my_print(a, b): print(a + b) def test_submit_task_with_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_path=\"wrong_path.pickle\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_manager = _TaskManagerFactory._build_manager() tasks = task_manager._bulk_get_or_create([task_cfg]) task = 
tasks[0] taipy.submit(task) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in task.input.values() ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in task.output.values() ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_submit_task_with_one_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"pickle_file_path\", default_data=\"value\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_manager = _TaskManagerFactory._build_manager() tasks = task_manager._bulk_get_or_create([task_cfg]) task = tasks[0] taipy.submit(task) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [task.input[\"wrong_csv_file_path\"]] ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [task.input[\"pickle_file_path\"], task.output[\"wrong_parquet_file_path\"]] ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_get_tasks_by_config_id(): dn_config = Config.configure_data_node(\"dn\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"t1\", print, dn_config) task_config_2 = Config.configure_task(\"t2\", print, dn_config) task_config_3 = Config.configure_task(\"t3\", print, dn_config) t_1_1 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] t_1_2 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] t_1_3 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] assert len(_TaskManager._get_all()) == 3 t_2_1 = _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] t_2_2 = _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_all()) == 5 t_3_1 = _TaskManager._bulk_get_or_create([task_config_3], scenario_id=\"scenario_6\")[0] assert len(_TaskManager._get_all()) == 6 t1_tasks = _TaskManager._get_by_config_id(task_config_1.id) assert len(t1_tasks) == 3 assert sorted([t_1_1.id, t_1_2.id, t_1_3.id]) == sorted([task.id for task in t1_tasks]) t2_tasks = _TaskManager._get_by_config_id(task_config_2.id) assert len(t2_tasks) == 2 assert sorted([t_2_1.id, t_2_2.id]) == sorted([task.id for task in t2_tasks]) t3_tasks = _TaskManager._get_by_config_id(task_config_3.id) assert len(t3_tasks) == 1 assert sorted([t_3_1.id]) == sorted([task.id for task in t3_tasks]) def test_get_scenarios_by_config_id_in_multiple_versions_environment(): dn_config = Config.configure_data_node(\"dn\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"t1\", print, dn_config) 
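# Because dn_config is SCENARIO-scoped, each distinct scenario_id used below yields its own task entity,
# and _get_by_config_id only returns the tasks that belong to the currently set version.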
task_config_2 = Config.configure_task(\"t2\", print, dn_config) _VersionManager._set_experiment_version(\"1.0\") _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_by_config_id(task_config_1.id)) == 3 assert len(_TaskManager._get_by_config_id(task_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_by_config_id(task_config_1.id)) == 3 assert len(_TaskManager._get_by_config_id(task_config_2.id)) == 2 def _create_task_from_config(task_config, *args, **kwargs): return _TaskManager._bulk_get_or_create([task_config], *args, **kwargs)[0] "} {"text": "from src.taipy.core.data import InMemoryDataNode from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.task._task_model import _TaskModel from taipy.config.common.scope import Scope def test_none_properties_attribute_compatible(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"parent_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"output_id\"], \"version\": \"latest\", \"skippable\": False, } ) assert len(model.properties) == 0 def test_skippable_compatibility_with_non_existing_output(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"output_id\"], \"version\": \"latest\", \"skippable\": False, } ) assert not model.skippable def test_skippable_compatibility_with_no_output(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [], \"version\": \"latest\", \"skippable\": False, } ) assert not model.skippable def test_skippable_compatibility_with_one_output(): manager = _DataManagerFactory._build_manager() manager._set(InMemoryDataNode(\"cfg_id\", Scope.SCENARIO, id=\"dn_id\")) model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"dn_id\"], \"version\": \"latest\", \"skippable\": True, } ) assert model.skippable def test_skippable_compatibility_with_many_outputs(): manager = _DataManagerFactory._build_manager() manager._set(InMemoryDataNode(\"cfg_id\", Scope.SCENARIO, id=\"dn_id\")) 
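# Register a second in-memory data node so that both ids listed in output_ids below refer to data nodes
# that actually exist in the data manager.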
manager._set(InMemoryDataNode(\"cfg_id_2\", Scope.SCENARIO, id=\"dn_2_id\")) model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"dn_id\", \"dn_2_id\"], \"version\": \"latest\", \"skippable\": True, } ) assert model.skippable "} {"text": "import uuid from unittest import mock import pytest from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import ModelNotFound, NonExistingTask from src.taipy.core.job._job_manager_factory import _JobManagerFactory from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.scope import Scope from taipy.config.config import Config def init_managers(): _JobManagerFactory._build_manager()._delete_all() _TaskManagerFactory._build_manager()._delete_all() _DataManagerFactory._build_manager()._delete_all() def test_create_and_save(init_sql_repo): init_managers() input_configs = [Config.configure_data_node(\"my_input\", \"in_memory\")] output_configs = Config.configure_data_node(\"my_output\", \"in_memory\") task_config = Config.configure_task(\"foo\", print, input_configs, output_configs) task = _create_task_from_config(task_config) assert task.id is not None assert task.config_id == \"foo\" assert len(task.input) == 1 assert len(_DataManager._get_all()) == 2 assert task.my_input.id is not None assert task.my_input.config_id == \"my_input\" assert task.my_output.id is not None assert task.my_output.config_id == \"my_output\" assert task.function == print assert task.parent_ids == set() task_retrieved_from_manager = _TaskManager._get(task.id) assert task_retrieved_from_manager.id == task.id assert task_retrieved_from_manager.config_id == task.config_id assert len(task_retrieved_from_manager.input) == len(task.input) assert task_retrieved_from_manager.my_input.id is not None assert task_retrieved_from_manager.my_input.config_id == task.my_input.config_id assert task_retrieved_from_manager.my_output.id is not None assert task_retrieved_from_manager.my_output.config_id == task.my_output.config_id assert task_retrieved_from_manager.function == task.function assert task_retrieved_from_manager.parent_ids == set() def test_do_not_recreate_existing_data_node(init_sql_repo): init_managers() input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO) output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) _DataManager._create_and_set(input_config, \"scenario_id\", \"task_id\") assert len(_DataManager._get_all()) == 1 task_config = Config.configure_task(\"foo\", print, input_config, output_config) _create_task_from_config(task_config, scenario_id=\"scenario_id\") assert len(_DataManager._get_all()) == 2 def test_do_not_recreate_existing_task(init_sql_repo): init_managers() assert len(_TaskManager._get_all()) == 0 input_config_scope_scenario = Config.configure_data_node(\"my_input_1\", 
\"in_memory\", Scope.SCENARIO) output_config_scope_scenario = Config.configure_data_node(\"my_output_1\", \"in_memory\", Scope.SCENARIO) task_config_1 = Config.configure_task(\"bar\", print, input_config_scope_scenario, output_config_scope_scenario) # task_config_1 scope is Scenario task_1 = _create_task_from_config(task_config_1) assert len(_TaskManager._get_all()) == 1 task_2 = _create_task_from_config(task_config_1) # Do not create. It already exists for None scenario assert len(_TaskManager._get_all()) == 1 assert task_1.id == task_2.id task_3 = _create_task_from_config(task_config_1, None, None) # Do not create. It already exists for None scenario assert len(_TaskManager._get_all()) == 1 assert task_1.id == task_2.id assert task_2.id == task_3.id task_4 = _create_task_from_config(task_config_1, None, \"scenario_1\") # Create even if sequence is the same. assert len(_TaskManager._get_all()) == 2 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id task_5 = _create_task_from_config( task_config_1, None, \"scenario_1\" ) # Do not create. It already exists for scenario_1 assert len(_TaskManager._get_all()) == 2 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id assert task_4.id == task_5.id task_6 = _create_task_from_config(task_config_1, None, \"scenario_2\") assert len(_TaskManager._get_all()) == 3 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id assert task_4.id == task_5.id assert task_5.id != task_6.id assert task_3.id != task_6.id input_config_scope_cycle = Config.configure_data_node(\"my_input_2\", \"in_memory\", Scope.CYCLE) output_config_scope_cycle = Config.configure_data_node(\"my_output_2\", \"in_memory\", Scope.CYCLE) task_config_2 = Config.configure_task(\"xyz\", print, input_config_scope_cycle, output_config_scope_cycle) # task_config_3 scope is Cycle task_7 = _create_task_from_config(task_config_2) assert len(_TaskManager._get_all()) == 4 task_8 = _create_task_from_config(task_config_2) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id task_9 = _create_task_from_config(task_config_2, None, None) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id task_10 = _create_task_from_config( task_config_2, None, \"scenario\" ) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id task_11 = _create_task_from_config( task_config_2, None, \"scenario\" ) # Do not create. 
It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id task_12 = _create_task_from_config(task_config_2, \"cycle\", None) assert len(_TaskManager._get_all()) == 5 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id assert task_11.id != task_12.id task_13 = _create_task_from_config(task_config_2, \"cycle\", None) assert len(_TaskManager._get_all()) == 5 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id assert task_11.id != task_12.id assert task_12.id == task_13.id def test_set_and_get_task(init_sql_repo): init_managers() task_id_1 = TaskId(\"id1\") first_task = Task(\"name_1\", {}, print, [], [], task_id_1) task_id_2 = TaskId(\"id2\") second_task = Task(\"name_2\", {}, print, [], [], task_id_2) third_task_with_same_id_as_first_task = Task(\"name_is_not_1_anymore\", {}, print, [], [], task_id_1) # No task at initialization assert len(_TaskManager._get_all()) == 0 assert _TaskManager._get(task_id_1) is None assert _TaskManager._get(first_task) is None assert _TaskManager._get(task_id_2) is None assert _TaskManager._get(second_task) is None # Save one task. We expect to have only one task stored _TaskManager._set(first_task) assert len(_TaskManager._get_all()) == 1 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2) is None assert _TaskManager._get(second_task) is None # Save a second task. Now, we expect to have a total of two tasks stored _TaskManager._set(second_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id # We save the first task again. We expect nothing to change _TaskManager._set(first_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id # We save a third task with same id as the first one. 
# We expect the first task to be updated _TaskManager._set(third_task_with_same_id_as_first_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == third_task_with_same_id_as_first_task.id assert _TaskManager._get(task_id_1).config_id == third_task_with_same_id_as_first_task.config_id assert _TaskManager._get(first_task).id == third_task_with_same_id_as_first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id def test_get_all_on_multiple_versions_environment(init_sql_repo): Config.configure_global_app(repository_type=\"sql\") init_managers() # Create 5 tasks with 2 versions each # Only version 1.0 has the task with config_id = \"config_id_1\" # Only version 2.0 has the task with config_id = \"config_id_6\" for version in range(1, 3): for i in range(5): _TaskManager._set( Task( f\"config_id_{i+version}\", {}, print, [], [], id=TaskId(f\"id{i}_v{version}\"), version=f\"{version}.0\" ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"1.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"2.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_ensure_conservation_of_order_of_data_nodes_on_task_creation(init_sql_repo): init_managers() embedded_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.SCENARIO) embedded_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.SCENARIO) embedded_3 = Config.configure_data_node(\"a_dn_3\", \"in_memory\", scope=Scope.SCENARIO) embedded_4 = Config.configure_data_node(\"dn_4\", \"in_memory\", scope=Scope.SCENARIO) embedded_5 = Config.configure_data_node(\"dn_5\", \"in_memory\", scope=Scope.SCENARIO) input = [embedded_1, embedded_2, embedded_3] output = [embedded_4, embedded_5] task_config_1 = Config.configure_task(\"name_1\", print, input, output) task_config_2 = Config.configure_task(\"name_2\", print, input, output) task_1, task_2 = _TaskManager._bulk_get_or_create([task_config_1, task_config_2]) assert [i.config_id for i in task_1.input.values()] == [embedded_1.id, embedded_2.id, embedded_3.id] assert [o.config_id for o in task_1.output.values()] == [embedded_4.id, embedded_5.id] assert [i.config_id for i in task_2.input.values()] == [embedded_1.id, embedded_2.id, embedded_3.id] assert [o.config_id for o in task_2.output.values()] == [embedded_4.id, embedded_5.id] def test_delete_raise_exception(init_sql_repo): init_managers() dn_input_config_1 = 
Config.configure_data_node( \"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\" ) dn_output_config_1 = Config.configure_data_node(\"my_output_1\", \"in_memory\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_input_config_1, dn_output_config_1) task_1 = _create_task_from_config(task_config_1) _TaskManager._delete(task_1.id) with pytest.raises(ModelNotFound): _TaskManager._delete(task_1.id) def test_hard_delete(init_sql_repo): init_managers() dn_input_config_1 = Config.configure_data_node( \"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\" ) dn_output_config_1 = Config.configure_data_node(\"my_output_1\", \"in_memory\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_input_config_1, dn_output_config_1) task_1 = _create_task_from_config(task_config_1) assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 _TaskManager._hard_delete(task_1.id) assert len(_TaskManager._get_all()) == 0 assert len(_DataManager._get_all()) == 2 def test_submit_task(): data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") task_1 = Task( \"grault\", {}, print, [data_node_1], [data_node_2], TaskId(\"t1\"), ) class MockOrchestrator(_Orchestrator): submit_calls = [] submit_ids = [] def submit_task(self, task, callbacks=None, force=False, wait=False, timeout=None): submit_id = f\"SUBMISSION_{str(uuid.uuid4())}\" self.submit_calls.append(task) self.submit_ids.append(submit_id) return None with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._orchestrator\", new=MockOrchestrator): # Task does not exist, we expect an exception with pytest.raises(NonExistingTask): _TaskManager._submit(task_1) with pytest.raises(NonExistingTask): _TaskManager._submit(task_1.id) _TaskManager._set(task_1) _TaskManager._submit(task_1) call_ids = [call.id for call in MockOrchestrator.submit_calls] assert call_ids == [task_1.id] assert len(MockOrchestrator.submit_ids) == 1 _TaskManager._submit(task_1) assert len(MockOrchestrator.submit_ids) == 2 assert len(MockOrchestrator.submit_ids) == len(set(MockOrchestrator.submit_ids)) _TaskManager._submit(task_1) assert len(MockOrchestrator.submit_ids) == 3 assert len(MockOrchestrator.submit_ids) == len(set(MockOrchestrator.submit_ids)) def test_get_tasks_by_config_id(init_sql_repo): init_managers() dn_config = Config.configure_data_node(\"dn\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"t1\", print, dn_config) task_config_2 = Config.configure_task(\"t2\", print, dn_config) task_config_3 = Config.configure_task(\"t3\", print, dn_config) t_1_1 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] t_1_2 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] t_1_3 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] assert len(_TaskManager._get_all()) == 3 t_2_1 = _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] t_2_2 = _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_all()) == 5 t_3_1 = _TaskManager._bulk_get_or_create([task_config_3], scenario_id=\"scenario_6\")[0] assert len(_TaskManager._get_all()) == 6 t1_tasks = _TaskManager._get_by_config_id(task_config_1.id) assert len(t1_tasks) == 3 assert sorted([t_1_1.id, t_1_2.id, t_1_3.id]) == sorted([task.id for task in t1_tasks]) t2_tasks = 
_TaskManager._get_by_config_id(task_config_2.id) assert len(t2_tasks) == 2 assert sorted([t_2_1.id, t_2_2.id]) == sorted([task.id for task in t2_tasks]) t3_tasks = _TaskManager._get_by_config_id(task_config_3.id) assert len(t3_tasks) == 1 assert sorted([t_3_1.id]) == sorted([task.id for task in t3_tasks]) def test_get_scenarios_by_config_id_in_multiple_versions_environment(init_sql_repo): init_managers() dn_config = Config.configure_data_node(\"dn\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"t1\", print, dn_config) task_config_2 = Config.configure_task(\"t2\", print, dn_config) _VersionManager._set_experiment_version(\"1.0\") _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_by_config_id(task_config_1.id)) == 3 assert len(_TaskManager._get_by_config_id(task_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_by_config_id(task_config_1.id)) == 3 assert len(_TaskManager._get_by_config_id(task_config_2.id)) == 2 def _create_task_from_config(task_config, *args, **kwargs): return _TaskManager._bulk_get_or_create([task_config], *args, **kwargs)[0] "} {"text": "from datetime import timedelta from time import sleep from typing import Union from unittest import mock from unittest.mock import MagicMock import pytest from src.taipy.core import JobId, Sequence, SequenceId, TaskId from src.taipy.core._orchestrator._dispatcher._development_job_dispatcher import _DevelopmentJobDispatcher from src.taipy.core._orchestrator._dispatcher._standalone_job_dispatcher import _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.job.job import Job from src.taipy.core.job.status import Status from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from taipy.config.common.scope import Scope from taipy.config.config import Config @pytest.fixture def task_id(): return TaskId(\"task_id1\") @pytest.fixture def task(task_id): return Task(config_id=\"name\", properties={}, function=print, input=[], output=[], id=task_id) @pytest.fixture def job_id(): return JobId(\"id1\") @pytest.fixture(scope=\"class\") def scenario(): return Scenario( \"scenario_config\", [], {}, [], \"SCENARIO_scenario_config\", version=\"random_version_number\", ) @pytest.fixture def job(task, job_id): return Job(job_id, task, 
\"submit_id\", \"SCENARIO_scenario_config\") @pytest.fixture def replace_in_memory_write_fct(): default_write = InMemoryDataNode.write InMemoryDataNode.write = _error yield InMemoryDataNode.write = default_write def _foo(): return 42 def _error(): raise RuntimeError(\"Something bad has happened\") def test_create_job(scenario, task, job): from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory _ScenarioManagerFactory._build_manager()._set(scenario) assert job.id == \"id1\" assert task in job assert job.is_submitted() assert job.submit_id is not None assert job.submit_entity_id == \"SCENARIO_scenario_config\" assert job.submit_entity == scenario with mock.patch(\"src.taipy.core.get\") as get_mck: get_mck.return_value = task assert job.get_label() == \"name > \" + job.id assert job.get_simple_label() == job.id def test_comparison(task): job_id_1 = JobId(\"id1\") job_id_2 = JobId(\"id2\") job_1 = Job(job_id_1, task, \"submit_id\", \"scenario_entity_id\") sleep(0.01) # Comparison is based on time, precision on Windows is not enough important job_2 = Job(job_id_2, task, \"submit_id\", \"scenario_entity_id\") assert job_1 < job_2 assert job_2 > job_1 assert job_1 <= job_2 assert job_1 <= job_1 assert job_2 >= job_1 assert job_1 >= job_1 assert job_1 == job_1 assert job_1 != job_2 def test_status_job(task): submission = _SubmissionManagerFactory._build_manager()._create(task.id) job = Job(\"job_id\", task, submission.id, \"SCENARIO_scenario_config\") submission.jobs = [job] assert job.is_submitted() assert job.is_skipped() is False assert job.is_pending() is False assert job.is_blocked() is False assert job.is_canceled() is False assert job.is_failed() is False assert job.is_completed() is False assert job.is_running() is False job.canceled() assert job.is_canceled() job.failed() assert job.is_failed() job.running() assert job.is_running() job.completed() assert job.is_completed() job.pending() assert job.is_pending() job.blocked() assert job.is_blocked() job.skipped() assert job.is_skipped() def test_notification_job(task): subscribe = MagicMock() submission = _SubmissionManagerFactory._build_manager()._create(task.id) job = Job(\"job_id\", task, submission.id, \"SCENARIO_scenario_config\") submission.jobs = [job] job._on_status_change(subscribe) job.running() subscribe.assert_called_once_with(job) subscribe.reset_mock() job.completed() subscribe.assert_called_once_with(job) subscribe.reset_mock() job.skipped() subscribe.assert_called_once_with(job) def test_handle_exception_in_user_function(task_id, job_id): task = Task(config_id=\"name\", properties={}, input=[], function=_error, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task.id) job = Job(job_id, task, submission.id, \"scenario_entity_id\") submission.jobs = [job] _dispatch(task, job) job = _JobManager._get(job_id) assert job.is_failed() assert 'raise RuntimeError(\"Something bad has happened\")' in str(job.stacktrace[0]) def test_handle_exception_in_input_data_node(task_id, job_id): data_node = InMemoryDataNode(\"data_node\", scope=Scope.SCENARIO) task = Task(config_id=\"name\", properties={}, input=[data_node], function=print, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task.id) job = Job(job_id, task, submission.id, \"scenario_entity_id\") submission.jobs = [job] _dispatch(task, job) job = _JobManager._get(job_id) assert job.is_failed() assert \"taipy.core.exceptions.exceptions.NoData\" in str(job.stacktrace[0]) def 
test_handle_exception_in_ouptut_data_node(replace_in_memory_write_fct, task_id, job_id): data_node = InMemoryDataNode(\"data_node\", scope=Scope.SCENARIO) task = Task(config_id=\"name\", properties={}, input=[], function=_foo, output=[data_node], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task.id) job = Job(job_id, task, submission.id, \"scenario_entity_id\") submission.jobs = [job] _dispatch(task, job) job = _JobManager._get(job_id) assert job.is_failed() assert \"taipy.core.exceptions.exceptions.DataNodeWritingError\" in str(job.stacktrace[0]) def test_auto_set_and_reload(current_datetime, job_id): task_1 = Task(config_id=\"name_1\", properties={}, function=_foo, id=TaskId(\"task_1\")) task_2 = Task(config_id=\"name_2\", properties={}, function=_foo, id=TaskId(\"task_2\")) submission = _SubmissionManagerFactory._build_manager()._create(task_1.id) job_1 = Job(job_id, task_1, submission.id, \"scenario_entity_id\") submission.jobs = [job_1] _TaskManager._set(task_1) _TaskManager._set(task_2) _JobManager._set(job_1) job_2 = _JobManager._get(job_1, \"submit_id_2\") # auto set & reload on task attribute assert job_1.task.id == task_1.id assert job_2.task.id == task_1.id job_1.task = task_2 assert job_1.task.id == task_2.id assert job_2.task.id == task_2.id job_2.task = task_1 assert job_1.task.id == task_1.id assert job_2.task.id == task_1.id # auto set & reload on force attribute assert not job_1.force assert not job_2.force job_1.force = True assert job_1.force assert job_2.force job_2.force = False assert not job_1.force assert not job_2.force # auto set & reload on status attribute assert job_1.status == Status.SUBMITTED assert job_2.status == Status.SUBMITTED job_1.status = Status.CANCELED assert job_1.status == Status.CANCELED assert job_2.status == Status.CANCELED job_2.status = Status.BLOCKED assert job_1.status == Status.BLOCKED assert job_2.status == Status.BLOCKED # auto set & reload on creation_date attribute new_datetime = current_datetime + timedelta(1) new_datetime_1 = current_datetime + timedelta(1) job_1.creation_date = new_datetime_1 assert job_1.creation_date == new_datetime_1 assert job_2.creation_date == new_datetime_1 job_2.creation_date = new_datetime assert job_1.creation_date == new_datetime assert job_2.creation_date == new_datetime with job_1 as job: assert job.task.id == task_1.id assert not job.force assert job.status == Status.BLOCKED assert job.creation_date == new_datetime assert job._is_in_context new_datetime_2 = new_datetime + timedelta(1) job.task = task_2 job.force = True job.status = Status.COMPLETED job.creation_date = new_datetime_2 assert job.task.id == task_1.id assert not job.force assert job.status == Status.BLOCKED assert job.creation_date == new_datetime assert job._is_in_context assert job_1.task.id == task_2.id assert job_1.force assert job_1.status == Status.COMPLETED assert job_1.creation_date == new_datetime_2 assert not job_1._is_in_context def _dispatch(task: Task, job: Job, mode=JobConfig._DEVELOPMENT_MODE): Config.configure_job_executions(mode=mode) _OrchestratorFactory._build_dispatcher() _TaskManager._set(task) _JobManager._set(job) dispatcher: Union[_StandaloneJobDispatcher, _DevelopmentJobDispatcher] = _StandaloneJobDispatcher( _OrchestratorFactory._orchestrator ) if mode == JobConfig._DEVELOPMENT_MODE: dispatcher = _DevelopmentJobDispatcher(_OrchestratorFactory._orchestrator) dispatcher._dispatch(job) def test_is_deletable(): with 
mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_deletable\") as mock_submit: task = Task(config_id=\"name_1\", properties={}, function=_foo, id=TaskId(\"task_1\")) job = Job(job_id, task, \"submit_id_1\", \"scenario_entity_id\") job.is_deletable() mock_submit.assert_called_once_with(job) "} {"text": "import multiprocessing import random import string from functools import partial from time import sleep from unittest import mock import pytest from src.taipy.core._orchestrator._dispatcher._job_dispatcher import _JobDispatcher from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import JobNotDeletedException from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.job.job_id import JobId from src.taipy.core.job.status import Status from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils import assert_true_after_time def multiply(nb1: float, nb2: float): return nb1 * nb2 def lock_multiply(lock, nb1: float, nb2: float): with lock: return multiply(1 or nb1, 2 or nb2) def test_create_jobs(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task = _create_task(multiply, name=\"get_job\") _OrchestratorFactory._build_dispatcher() job_1 = _JobManager._create(task, [print], \"submit_id\", \"secnario_id\", True) assert _JobManager._get(job_1.id) == job_1 assert job_1.is_submitted() assert task.config_id in job_1.id assert job_1.task.id == task.id assert job_1.submit_id == \"submit_id\" assert job_1.submit_entity_id == \"secnario_id\" assert job_1.force job_2 = _JobManager._create(task, [print], \"submit_id_1\", \"secnario_id\", False) assert _JobManager._get(job_2.id) == job_2 assert job_2.is_submitted() assert task.config_id in job_2.id assert job_2.task.id == task.id assert job_2.submit_id == \"submit_id_1\" assert job_2.submit_entity_id == \"secnario_id\" assert not job_2.force def test_get_job(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task = _create_task(multiply, name=\"get_job\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get(job_1.id) == job_1 assert _JobManager._get(job_1.id).submit_entity_id == task.id job_2 = _OrchestratorFactory._orchestrator.submit_task(task) assert job_1 != job_2 assert _JobManager._get(job_1.id).id == job_1.id assert _JobManager._get(job_2.id).id == job_2.id assert _JobManager._get(job_2.id).submit_entity_id == task.id def test_get_latest_job(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task = _create_task(multiply, name=\"get_latest_job\") task_2 = _create_task(multiply, name=\"get_latest_job_2\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get_latest(task) == job_1 assert _JobManager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important job_2 = _OrchestratorFactory._orchestrator.submit_task(task_2) assert 
_JobManager._get_latest(task).id == job_1.id assert _JobManager._get_latest(task_2).id == job_2.id sleep(0.01) # Comparison is based on time, precision on Windows is not enough important job_1_bis = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get_latest(task).id == job_1_bis.id assert _JobManager._get_latest(task_2).id == job_2.id def test_get_job_unknown(): assert _JobManager._get(JobId(\"Unknown\")) is None def test_get_jobs(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task = _create_task(multiply, name=\"get_all_jobs\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) job_2 = _OrchestratorFactory._orchestrator.submit_task(task) assert {job.id for job in _JobManager._get_all()} == {job_1.id, job_2.id} def test_delete_job(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task = _create_task(multiply, name=\"delete_job\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) job_2 = _OrchestratorFactory._orchestrator.submit_task(task) _JobManager._delete(job_1) assert [job.id for job in _JobManager._get_all()] == [job_2.id] assert _JobManager._get(job_1.id) is None m = multiprocessing.Manager() lock = m.Lock() def inner_lock_multiply(nb1: float, nb2: float): with lock: return multiply(1 or nb1, 2 or nb2) def test_raise_when_trying_to_delete_unfinished_job(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock = m.Lock() dnm = _DataManagerFactory._build_manager() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dnm._set(dn_1) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dnm._set(dn_2) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO) dnm._set(dn_3) task = Task( \"task_config_1\", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id=\"raise_when_delete_unfinished\" ) _OrchestratorFactory._build_dispatcher() with lock: job = _OrchestratorFactory._orchestrator.submit_task(task) assert_true_after_time(lambda: len(_JobDispatcher._dispatched_processes) == 1) assert_true_after_time(job.is_running) with pytest.raises(JobNotDeletedException): _JobManager._delete(job) with pytest.raises(JobNotDeletedException): _JobManager._delete(job, force=False) assert_true_after_time(job.is_completed) _JobManager._delete(job) def test_force_deleting_unfinished_job(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock = m.Lock() dnm = _DataManagerFactory._build_manager() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dnm._set(dn_1) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dnm._set(dn_2) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO) dnm._set(dn_3) task = Task( \"task_config_1\", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id=\"force_deleting_unfinished_job\" ) _OrchestratorFactory._build_dispatcher() with lock: job = _OrchestratorFactory._orchestrator.submit_task(task) assert_true_after_time(job.is_running) with pytest.raises(JobNotDeletedException): _JobManager._delete(job, force=False) _JobManager._delete(job, force=True) assert _JobManager._get(job.id) is None def test_cancel_single_job(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) task = 
_create_task(multiply, name=\"cancel_single_job\") _OrchestratorFactory._build_dispatcher() assert_true_after_time(_OrchestratorFactory._dispatcher.is_running) _OrchestratorFactory._dispatcher.stop() assert_true_after_time(lambda: not _OrchestratorFactory._dispatcher.is_running()) job = _OrchestratorFactory._orchestrator.submit_task(task) assert_true_after_time(job.is_pending) assert_true_after_time(lambda: len(_JobDispatcher._dispatched_processes) == 0) _JobManager._cancel(job.id) assert_true_after_time(job.is_canceled) assert_true_after_time(job.is_canceled) @mock.patch( \"src.taipy.core._orchestrator._orchestrator._Orchestrator._orchestrate_job_to_run_or_block\", return_value=\"orchestrated_job\", ) @mock.patch(\"src.taipy.core._orchestrator._orchestrator._Orchestrator._cancel_jobs\") def test_cancel_canceled_abandoned_failed_jobs(cancel_jobs, orchestrated_job): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) task = _create_task(multiply, name=\"test_cancel_canceled_abandoned_failed_jobs\") _OrchestratorFactory._build_dispatcher() assert_true_after_time(_OrchestratorFactory._dispatcher.is_running) _OrchestratorFactory._dispatcher.stop() assert_true_after_time(lambda: not _OrchestratorFactory._dispatcher.is_running()) job = _OrchestratorFactory._orchestrator.submit_task(task) job.canceled() assert job.is_canceled() _JobManager._cancel(job) cancel_jobs.assert_not_called() assert job.is_canceled() job = _OrchestratorFactory._orchestrator.submit_task(task) job.failed() assert job.is_failed() _JobManager._cancel(job) cancel_jobs.assert_not_called() assert job.is_failed() job = _OrchestratorFactory._orchestrator.submit_task(task) job.abandoned() assert job.is_abandoned() _JobManager._cancel(job) cancel_jobs.assert_not_called() assert job.is_abandoned() @mock.patch( \"src.taipy.core._orchestrator._orchestrator._Orchestrator._orchestrate_job_to_run_or_block\", return_value=\"orchestrated_job\", ) @mock.patch(\"src.taipy.core.job.job.Job.canceled\") def test_cancel_completed_skipped_jobs(cancel_jobs, orchestrated_job): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) task = _create_task(multiply, name=\"cancel_single_job\") _OrchestratorFactory._build_dispatcher() assert_true_after_time(_OrchestratorFactory._dispatcher.is_running) _OrchestratorFactory._dispatcher.stop() assert_true_after_time(lambda: not _OrchestratorFactory._dispatcher.is_running()) job = _OrchestratorFactory._orchestrator.submit_task(task) job.completed() assert job.is_completed() cancel_jobs.assert_not_called() _JobManager._cancel(job) assert job.is_completed() job = _OrchestratorFactory._orchestrator.submit_task(task) job.failed() assert job.is_failed() cancel_jobs.assert_not_called() _JobManager._cancel(job) assert job.is_failed() job = _OrchestratorFactory._orchestrator.submit_task(task) job.skipped() assert job.is_skipped() cancel_jobs.assert_not_called() _JobManager._cancel(job) assert job.is_skipped() def test_cancel_single_running_job(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock = m.Lock() dnm = _DataManagerFactory._build_manager() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dnm._set(dn_1) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dnm._set(dn_2) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO) dnm._set(dn_3) task = Task(\"task_config_1\", {}, partial(lock_multiply, 
lock), [dn_1, dn_2], [dn_3], id=\"cancel_single_job\") _OrchestratorFactory._build_dispatcher() assert_true_after_time(_OrchestratorFactory._dispatcher.is_running) assert_true_after_time(lambda: _OrchestratorFactory._dispatcher._nb_available_workers == 2) with lock: job = _OrchestratorFactory._orchestrator.submit_task(task) assert_true_after_time(lambda: len(_JobDispatcher._dispatched_processes) == 1) assert_true_after_time(lambda: _OrchestratorFactory._dispatcher._nb_available_workers == 1) assert_true_after_time(job.is_running) _JobManager._cancel(job) assert_true_after_time(job.is_running) assert_true_after_time(lambda: len(_JobDispatcher._dispatched_processes) == 0) assert_true_after_time(lambda: _OrchestratorFactory._dispatcher._nb_available_workers == 2) assert_true_after_time(job.is_completed) def test_cancel_subsequent_jobs(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) _OrchestratorFactory._build_dispatcher() orchestrator = _OrchestratorFactory._orchestrator submission_manager = _SubmissionManagerFactory._build_manager() lock_0 = m.Lock() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO, properties={\"default_data\": 3}) dn_4 = InMemoryDataNode(\"dn_config_4\", Scope.SCENARIO, properties={\"default_data\": 4}) task_1 = Task(\"task_config_1\", {}, partial(lock_multiply, lock_0), [dn_1, dn_2], [dn_3], id=\"task_1\") task_2 = Task(\"task_config_2\", {}, multiply, [dn_1, dn_3], [dn_4], id=\"task_2\") task_3 = Task(\"task_config_3\", {}, print, [dn_4], id=\"task_3\") # Can't get tasks under 1 scenario due to partial not serializable submission_1 = submission_manager._create(\"scenario_id\") submission_2 = submission_manager._create(\"scenario_id\") _DataManager._set(dn_1) _DataManager._set(dn_2) _DataManager._set(dn_3) _DataManager._set(dn_4) with lock_0: job_1 = orchestrator._lock_dn_output_and_create_job( task_1, submit_id=submission_1.id, submit_entity_id=submission_1.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_1]) job_2 = orchestrator._lock_dn_output_and_create_job( task_2, submit_id=submission_1.id, submit_entity_id=submission_1.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_2]) job_3 = orchestrator._lock_dn_output_and_create_job( task_3, submit_id=submission_1.id, submit_entity_id=submission_1.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_3]) submission_1.jobs = [job_1, job_2, job_3] assert_true_after_time(lambda: _OrchestratorFactory._orchestrator.jobs_to_run.qsize() == 0) assert_true_after_time(lambda: len(_OrchestratorFactory._orchestrator.blocked_jobs) == 2) assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_blocked) assert_true_after_time(job_3.is_blocked) job_4 = _OrchestratorFactory._orchestrator._lock_dn_output_and_create_job( task_1, submit_id=submission_2.id, submit_entity_id=submission_2.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_4]) job_5 = _OrchestratorFactory._orchestrator._lock_dn_output_and_create_job( task_2, submit_id=submission_2.id, submit_entity_id=submission_2.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_5]) job_6 = _OrchestratorFactory._orchestrator._lock_dn_output_and_create_job( task_3, submit_id=submission_2.id, submit_entity_id=submission_2.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_6]) 
submission_2.jobs = [job_4, job_5, job_6] assert_true_after_time(job_4.is_pending) assert_true_after_time(job_5.is_blocked) assert_true_after_time(job_6.is_blocked) assert _OrchestratorFactory._orchestrator.jobs_to_run.qsize() == 1 assert len(_OrchestratorFactory._orchestrator.blocked_jobs) == 4 _JobManager._cancel(job_4) assert_true_after_time(job_4.is_canceled) assert_true_after_time(job_5.is_abandoned) assert_true_after_time(job_6.is_abandoned) assert _OrchestratorFactory._orchestrator.jobs_to_run.qsize() == 0 assert len(_OrchestratorFactory._orchestrator.blocked_jobs) == 2 _JobManager._cancel(job_1) assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_abandoned) assert_true_after_time(job_3.is_abandoned) assert_true_after_time(job_1.is_completed) assert_true_after_time(job_2.is_abandoned) assert_true_after_time(job_3.is_abandoned) assert_true_after_time(job_4.is_canceled) assert_true_after_time(job_5.is_abandoned) assert_true_after_time(job_6.is_abandoned) assert_true_after_time( lambda: all( not _OrchestratorFactory._orchestrator._is_blocked(job) for job in [job_1, job_2, job_3, job_4, job_5, job_6] ) ) assert_true_after_time(lambda: _OrchestratorFactory._orchestrator.jobs_to_run.qsize() == 0) def test_is_deletable(): assert len(_JobManager._get_all()) == 0 task = _create_task(print, 0, \"task\") job = _OrchestratorFactory._orchestrator.submit_task(task) assert job.is_completed() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.abandoned() assert job.is_abandoned() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.canceled() assert job.is_canceled() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.failed() assert job.is_failed() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.skipped() assert job.is_skipped() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.blocked() assert job.is_blocked() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.running() assert job.is_running() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.pending() assert job.is_pending() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.status = Status.SUBMITTED assert job.is_submitted() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) def _create_task(function, nb_outputs=1, name=None): input1_dn_config = Config.configure_data_node(\"input1\", \"pickle\", Scope.SCENARIO, default_data=21) input2_dn_config = Config.configure_data_node(\"input2\", \"pickle\", Scope.SCENARIO, default_data=2) output_dn_configs = [ Config.configure_data_node(f\"output{i}\", \"pickle\", Scope.SCENARIO, default_data=0) for i in range(nb_outputs) ] _DataManager._bulk_get_or_create({cfg for cfg in output_dn_configs}) name = name or \"\".join(random.choice(string.ascii_lowercase) for _ in range(10)) task_config = Config.configure_task( id=name, function=function, input=[input1_dn_config, input2_dn_config], output=output_dn_configs, ) return _TaskManager._bulk_get_or_create([task_config])[0] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import multiprocessing import random import string from functools import partial from time import sleep import pytest from src.taipy.core import Task from src.taipy.core._orchestrator._dispatcher._job_dispatcher import _JobDispatcher from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._repository.db._sql_connection import _build_connection, _SQLConnection from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data import InMemoryDataNode from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.exceptions.exceptions import JobNotDeletedException from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.job._job_manager_factory import _JobManagerFactory from src.taipy.core.job.job_id import JobId from src.taipy.core.job.status import Status from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils import assert_true_after_time def multiply(nb1: float, nb2: float): return nb1 * nb2 def lock_multiply(lock, nb1: float, nb2: float): with lock: return multiply(nb1 or 1, nb2 or 2) def init_managers(): _TaskManagerFactory._build_manager()._delete_all() _DataManagerFactory._build_manager()._delete_all() _JobManagerFactory._build_manager()._delete_all() def test_create_jobs(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() task = _create_task(multiply, name=\"get_job\") _OrchestratorFactory._build_dispatcher() job_1 = _JobManager._create(task, [print], \"submit_id\", \"secnario_id\", True) assert _JobManager._get(job_1.id) == job_1 assert job_1.is_submitted() assert task.config_id in job_1.id assert job_1.task.id == task.id assert job_1.submit_id == \"submit_id\" assert job_1.submit_entity_id == \"secnario_id\" assert job_1.force job_2 = _JobManager._create(task, [print], \"submit_id_1\", \"secnario_id\", False) assert _JobManager._get(job_2.id) == job_2 assert job_2.is_submitted() assert task.config_id in job_2.id assert job_2.task.id == task.id assert job_2.submit_id == \"submit_id_1\" assert job_2.submit_entity_id == \"secnario_id\" assert not job_2.force def test_get_job(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() task = _create_task(multiply, name=\"get_job\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get(job_1.id) == job_1 assert _JobManager._get(job_1.id).submit_entity_id == task.id job_2 = _OrchestratorFactory._orchestrator.submit_task(task) assert job_1 != job_2 assert _JobManager._get(job_1.id).id == job_1.id assert _JobManager._get(job_2.id).id == job_2.id assert _JobManager._get(job_2.id).submit_entity_id == task.id def test_get_latest_job(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() task = 
_create_task(multiply, name=\"get_latest_job\") task_2 = _create_task(multiply, name=\"get_latest_job_2\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get_latest(task) == job_1 assert _JobManager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important job_2 = _OrchestratorFactory._orchestrator.submit_task(task_2) assert _JobManager._get_latest(task).id == job_1.id assert _JobManager._get_latest(task_2).id == job_2.id sleep(0.01) # Comparison is based on time, precision on Windows is not enough important job_1_bis = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get_latest(task).id == job_1_bis.id assert _JobManager._get_latest(task_2).id == job_2.id def test_get_job_unknown(init_sql_repo): init_managers() assert _JobManager._get(JobId(\"Unknown\")) is None def test_get_jobs(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() task = _create_task(multiply, name=\"get_all_jobs\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) job_2 = _OrchestratorFactory._orchestrator.submit_task(task) assert {job.id for job in _JobManager._get_all()} == {job_1.id, job_2.id} def test_delete_job(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() task = _create_task(multiply, name=\"delete_job\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) job_2 = _OrchestratorFactory._orchestrator.submit_task(task) _JobManager._delete(job_1) assert [job.id for job in _JobManager._get_all()] == [job_2.id] assert _JobManager._get(job_1.id) is None def test_raise_when_trying_to_delete_unfinished_job(init_sql_repo): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) init_managers() m = multiprocessing.Manager() lock = m.Lock() dnm = _DataManagerFactory._build_manager() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dnm._set(dn_1) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dnm._set(dn_2) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO) dnm._set(dn_3) task = Task(\"task_cfg\", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id=\"raise_when_delete_unfinished\") _OrchestratorFactory._build_dispatcher() with lock: job = _OrchestratorFactory._orchestrator.submit_task(task) assert_true_after_time(lambda: len(_JobDispatcher._dispatched_processes) == 1) assert_true_after_time(job.is_running) with pytest.raises(JobNotDeletedException): _JobManager._delete(job) with pytest.raises(JobNotDeletedException): _JobManager._delete(job, force=False) assert_true_after_time(job.is_completed) _JobManager._delete(job) def test_force_deleting_unfinished_job(init_sql_repo): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) init_managers() m = multiprocessing.Manager() lock = m.Lock() dnm = _DataManagerFactory._build_manager() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dnm._set(dn_1) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dnm._set(dn_2) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO) dnm._set(dn_3) task_1 = Task( \"task_config_1\", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], 
id=\"delete_force_unfinished_job\" ) reference_last_edit_date = dn_3.last_edit_date _OrchestratorFactory._build_dispatcher() with lock: job = _OrchestratorFactory._orchestrator.submit_task(task_1) assert_true_after_time(job.is_running) with pytest.raises(JobNotDeletedException): _JobManager._delete(job, force=False) _JobManager._delete(job, force=True) assert _JobManager._get(job.id) is None assert_true_after_time(lambda: reference_last_edit_date != dn_3.last_edit_date) def test_is_deletable(init_sql_repo): init_managers() assert len(_JobManager._get_all()) == 0 task = _create_task(print, 0, \"task\") job = _OrchestratorFactory._orchestrator.submit_task(task) assert job.is_completed() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.abandoned() assert job.is_abandoned() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.canceled() assert job.is_canceled() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.failed() assert job.is_failed() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.skipped() assert job.is_skipped() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.blocked() assert job.is_blocked() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.running() assert job.is_running() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.pending() assert job.is_pending() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.status = Status.SUBMITTED assert job.is_submitted() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) def _create_task(function, nb_outputs=1, name=None): input1_dn_config = Config.configure_data_node(\"input1\", scope=Scope.SCENARIO, default_data=21) input2_dn_config = Config.configure_data_node(\"input2\", scope=Scope.SCENARIO, default_data=2) output_dn_configs = [ Config.configure_data_node(f\"output{i}\", scope=Scope.SCENARIO, default_data=0) for i in range(nb_outputs) ] _DataManager._bulk_get_or_create({cfg for cfg in output_dn_configs}) name = name or \"\".join(random.choice(string.ascii_lowercase) for _ in range(10)) task_config = Config.configure_task( id=name, function=function, input=[input1_dn_config, input2_dn_config], output=output_dn_configs, ) return _TaskManager._bulk_get_or_create([task_config])[0] "} {"text": "import os import pytest from src.taipy.core.data._data_sql_repository import _DataSQLRepository from src.taipy.core.exceptions import ModelNotFound from src.taipy.core.job._job_fs_repository import _JobFSRepository from src.taipy.core.job._job_sql_repository import _JobSQLRepository from src.taipy.core.job.job import Job, JobId, Task from src.taipy.core.task._task_sql_repository import _TaskSQLRepository class TestJobRepository: @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_save_and_load(self, data_node, job, repo, init_sql_repo): _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task repository = repo() repository._save(job) obj = repository._load(job.id) assert isinstance(obj, Job) @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_exists(self, data_node, job, repo, init_sql_repo): _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, 
[data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task repository = repo() repository._save(job) assert repository._exists(job.id) assert not repository._exists(\"not-existed-job\") @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_load_all(self, data_node, job, repo, init_sql_repo): _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task repository = repo() for i in range(10): job.id = JobId(f\"job-{i}\") repository._save(job) jobs = repository._load_all() assert len(jobs) == 10 @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_load_all_with_filters(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task for i in range(10): job.id = JobId(f\"job-{i}\") repository._save(job) objs = repository._load_all(filters=[{\"id\": \"job-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_delete(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task repository._save(job) repository._delete(job.id) with pytest.raises(ModelNotFound): repository._load(job.id) @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_delete_all(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task for i in range(10): job.id = JobId(f\"job-{i}\") repository._save(job) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_delete_many(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task for i in range(10): job.id = JobId(f\"job-{i}\") repository._save(job) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_delete_by(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): job.id = JobId(f\"job-{i}\") job._version = f\"{(i+1) // 5}.0\" repository._save(job) objs = repository._load_all() assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_search(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task for i in range(10): job.id = 
JobId(f\"job-{i}\") repository._save(job) assert len(repository._load_all()) == 10 objs = repository._search(\"id\", \"job-2\") assert len(objs) == 1 assert isinstance(objs[0], Job) objs = repository._search(\"id\", \"job-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], Job) assert repository._search(\"id\", \"job-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_export(self, tmpdir, job, repo, init_sql_repo): repository = repo() repository._save(job) repository._export(job.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _JobFSRepository else os.path.join(tmpdir.strpath, \"job\") assert os.path.exists(os.path.join(dir_path, f\"{job.id}.json\")) "} {"text": "from unittest import mock import pytest from src.taipy.core.common._utils import _Subscriber from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.data_node import DataNode from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.data.pickle import PickleDataNode from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task, TaskId from taipy.config.common.scope import Scope def test_create_sequence(): input = InMemoryDataNode(\"foo\", Scope.SCENARIO) output = InMemoryDataNode(\"bar\", Scope.SCENARIO) task = Task(\"baz\", {}, print, [input], [output], TaskId(\"task_id\")) sequence = Sequence({\"description\": \"description\"}, [task], sequence_id=SequenceId(\"name_1\")) assert sequence.id == \"name_1\" assert sequence.owner_id is None assert sequence.description == \"description\" assert sequence.foo == input assert sequence.bar == output assert sequence.baz.id == task.id assert sequence.tasks == {task.config_id: task} assert sequence.data_nodes == {\"foo\": input, \"bar\": output} assert sequence.parent_ids == set() with pytest.raises(AttributeError): sequence.qux assert sequence.get_label() == sequence.id assert sequence.get_simple_label() == sequence.id input_1 = InMemoryDataNode(\"input\", Scope.SCENARIO) output_1 = InMemoryDataNode(\"output\", Scope.SCENARIO) task_1 = Task(\"task_1\", {}, print, [input_1], [output_1], TaskId(\"task_id_1\")) sequence_1 = Sequence( {\"description\": \"description\"}, [task_1], owner_id=\"owner_id\", parent_ids={\"scenario_id\"}, sequence_id=SequenceId(\"name_1\"), ) assert sequence_1.id == \"name_1\" assert sequence_1.owner_id == \"owner_id\" assert sequence_1.description == \"description\" assert sequence_1.input == input_1 assert sequence_1.output == output_1 assert sequence_1.task_1 == task_1 assert sequence_1.tasks == {task_1.config_id: task_1} assert sequence_1.data_nodes == {\"input\": input_1, \"output\": output_1} assert sequence_1.parent_ids == {\"scenario_id\"} assert sequence_1.id is not None with mock.patch(\"src.taipy.core.get\") as get_mck: class MockOwner: label = \"owner_label\" def get_label(self): return self.label get_mck.return_value = MockOwner() assert sequence_1.get_label() == \"owner_label > \" + sequence_1.id assert sequence_1.get_simple_label() == sequence_1.id sequence_2 = Sequence( {\"description\": 
\"description\", \"name\": \"Name\"}, [task, task_1], owner_id=\"owner_id\", parent_ids={\"parent_id_1\", \"parent_id_2\"}, sequence_id=SequenceId(\"name_2\"), ) assert sequence_2.owner_id == \"owner_id\" assert sequence_2.id == \"name_2\" assert sequence_2.description == \"description\" assert sequence_2.tasks == {task.config_id: task, task_1.config_id: task_1} assert sequence_2.data_nodes == {\"foo\": input, \"bar\": output, \"input\": input_1, \"output\": output_1} assert sequence_2.parent_ids == {\"parent_id_1\", \"parent_id_2\"} with mock.patch(\"src.taipy.core.get\") as get_mck: class MockOwner: label = \"owner_label\" def get_label(self): return self.label get_mck.return_value = MockOwner() assert sequence_2.get_label() == \"owner_label > \" + sequence_2.name assert sequence_2.get_simple_label() == sequence_2.name def test_check_consistency(): sequence_1 = Sequence({}, [], \"name_1\") assert sequence_1._is_consistent() input_2 = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO) task_2 = Task(\"tfoo\", {}, print, [input_2], [output_2], TaskId(\"task_id_2\")) sequence_2 = Sequence({}, [task_2], \"name_2\") assert sequence_2._is_consistent() data_node_3 = InMemoryDataNode(\"foo\", Scope.SCENARIO) task_3 = Task(\"tfoo\", {}, print, [data_node_3], [data_node_3], TaskId(\"task_id_3\")) sequence_3 = Sequence({}, [task_3], \"name_3\") assert not sequence_3._is_consistent() # Not a dag input_4 = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_4 = InMemoryDataNode(\"bar\", Scope.SCENARIO) task_4_1 = Task(\"tfoo\", {}, print, [input_4], [output_4], TaskId(\"task_id_4_1\")) task_4_2 = Task(\"tbar\", {}, print, [output_4], [input_4], TaskId(\"task_id_4_2\")) sequence_4 = Sequence({}, [task_4_1, task_4_2], \"name_4\") assert not sequence_4._is_consistent() # Not a Dag class FakeDataNode: config_id = \"config_id_of_a_fake_dn\" input_6 = DataNode(\"foo\", Scope.SCENARIO, \"input_id_5\") output_6 = DataNode(\"bar\", Scope.SCENARIO, \"output_id_5\") task_6_1 = Task(\"tfoo\", {}, print, [input_6], [output_6], TaskId(\"task_id_5_1\")) task_6_2 = Task(\"tbar\", {}, print, [output_6], [FakeDataNode()], TaskId(\"task_id_5_2\")) sequence_6 = Sequence({}, [task_6_1, task_6_2], \"name_5\") assert not sequence_6._is_consistent() # Not a DataNode intermediate_7 = DataNode(\"foo\", Scope.SCENARIO, \"intermediate_id_7\") output_7 = DataNode(\"bar\", Scope.SCENARIO, \"output_id_7\") task_7_1 = Task(\"tfoo\", {}, print, [], [intermediate_7], TaskId(\"task_id_7_1\")) task_7_2 = Task(\"tbar\", {}, print, [intermediate_7], [output_7], TaskId(\"task_id_7_2\")) sequence_7 = Sequence({}, [task_7_1, task_7_2], \"name_7\") assert sequence_7._is_consistent() input_8 = DataNode(\"foo\", Scope.SCENARIO, \"output_id_8\") intermediate_8 = DataNode(\"bar\", Scope.SCENARIO, \"intermediate_id_8\") task_8_1 = Task(\"tfoo\", {}, print, [input_8], [intermediate_8], TaskId(\"task_id_8_1\")) task_8_2 = Task(\"tbar\", {}, print, [intermediate_8], [], TaskId(\"task_id_8_2\")) sequence_8 = Sequence({}, [task_8_1, task_8_2], \"name_8\") assert sequence_8._is_consistent() input_9_1 = DataNode(\"foo\", Scope.SCENARIO, \"input_id_9_1\") output_9_1 = DataNode(\"bar\", Scope.SCENARIO, \"output_id_9_1\") input_9_2 = DataNode(\"baz\", Scope.SCENARIO, \"input_id_9_2\") output_9_2 = DataNode(\"qux\", Scope.SCENARIO, \"output_id_9_2\") task_9_1 = Task(\"tfoo\", {}, print, [input_9_1], [output_9_1], TaskId(\"task_id_9_1\")) task_9_2 = Task(\"tbar\", {}, print, [input_9_2], [output_9_2], 
TaskId(\"task_id_9_2\")) sequence_9 = Sequence({}, [task_9_1, task_9_2], \"name_9\") assert not sequence_9._is_consistent() # Not connected input_10_1 = DataNode(\"foo\", Scope.SCENARIO, \"output_id_10_1\") intermediate_10_1 = DataNode(\"bar\", Scope.SCENARIO, \"intermediate_id_10_1\") intermediate_10_2 = DataNode(\"baz\", Scope.SCENARIO, \"intermediate_id_10_2\") output_10 = DataNode(\"qux\", Scope.SCENARIO, \"output_id_10\") post_10 = DataNode(\"quux\", Scope.SCENARIO, \"post_id_10\") task_10_1 = Task(\"tfoo\", {}, print, [input_10_1], [intermediate_10_1], TaskId(\"task_id_10_1\")) task_10_2 = Task(\"tbar\", {}, print, [], [intermediate_10_2], TaskId(\"task_id_10_2\")) task_10_3 = Task(\"tbaz\", {}, print, [intermediate_10_1, intermediate_10_2], [output_10], TaskId(\"task_id_10_3\")) task_10_4 = Task(\"tqux\", {}, print, [output_10], [post_10], TaskId(\"task_id_10_4\")) task_10_5 = Task(\"tquux\", {}, print, [output_10], [], TaskId(\"task_id_10_5\")) sequence_10 = Sequence({}, [task_10_1, task_10_2, task_10_3, task_10_4, task_10_5], \"name_10\") assert sequence_10._is_consistent() def test_get_sorted_tasks(): def assert_equal(tasks_a, tasks_b) -> bool: if len(tasks_a) != len(tasks_b): return False for i in range(len(tasks_a)): task_a, task_b = tasks_a[i], tasks_b[i] if isinstance(task_a, list) and isinstance(task_b, list): if not assert_equal(task_a, task_b): return False elif isinstance(task_a, list) or isinstance(task_b, list): return False else: index_task_b = tasks_b.index(task_a) if any([isinstance(task_b, list) for task_b in tasks_b[i : index_task_b + 1]]): return False return True data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert assert_equal(sequence._get_sorted_tasks(), [[task_1], [task_2, task_4], [task_3]]) data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- t2 ---> s5 ------ # | | # |---> t1 ---| -----> t3 ---> s6 # | | | # s2 --- ---> s4 
---> t4 ---> s7 assert assert_equal(sequence._get_sorted_tasks(), [[task_2, task_1], [task_4, task_3]]) data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert assert_equal(sequence._get_sorted_tasks(), [[task_2, task_1], [task_4, task_3]]) data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert assert_equal(sequence._get_sorted_tasks(), [[task_2, task_1], [task_4, task_3]]) data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = DataNode(\"hugh\", Scope.SCENARIO, \"s8\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_8], None, TaskId(\"t5\")) sequence = Sequence({}, [task_5, task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 # t2 ---> s5 # s8 ---> t5 assert assert_equal(sequence._get_sorted_tasks(), [[task_5, task_2, task_1], [task_4, task_3]]) def test_get_inputs(): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, 
[data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert sequence.get_inputs() == {data_node_1, data_node_2} assert sequence.get_outputs() == {data_node_6, data_node_7} assert sequence.get_intermediate() == {data_node_3, data_node_4, data_node_5} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- t2 ---> s5 ------ # | | # |---> t1 ---| -----> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert sequence.get_inputs() == {data_node_1, data_node_2} assert sequence.get_outputs() == {data_node_6, data_node_7} assert sequence.get_intermediate() == {data_node_4, data_node_5} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert sequence.get_inputs() == {data_node_1, data_node_2, data_node_6} assert sequence.get_outputs() == {data_node_7} assert sequence.get_intermediate() == {data_node_4, data_node_5} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = DataNode(\"hugh\", Scope.SCENARIO, \"s8\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4, 
data_node_6], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_8], None, TaskId(\"t5\")) sequence = Sequence({}, [task_5, task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 # t2 ---> s5 | # s8 ---> t5 s6 --| assert sequence.get_inputs() == {data_node_1, data_node_2, data_node_8, data_node_6} assert sequence.get_outputs() == {data_node_5, data_node_7} assert sequence.get_intermediate() == {data_node_4} def test_is_ready_to_run(): data_node_1 = PickleDataNode(\"foo\", Scope.SCENARIO, \"s1\", properties={\"default_data\": 1}) data_node_2 = PickleDataNode(\"bar\", Scope.SCENARIO, \"s2\", properties={\"default_data\": 2}) data_node_4 = PickleDataNode(\"qux\", Scope.SCENARIO, \"s4\", properties={\"default_data\": 4}) data_node_5 = PickleDataNode(\"quux\", Scope.SCENARIO, \"s5\", properties={\"default_data\": 5}) data_node_6 = PickleDataNode(\"quuz\", Scope.SCENARIO, \"s6\", properties={\"default_data\": 6}) data_node_7 = PickleDataNode(\"corge\", Scope.SCENARIO, \"s7\", properties={\"default_data\": 7}) task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 data_manager = _DataManagerFactory._build_manager() for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7]: data_manager._set(dn) assert sequence.is_ready_to_run() data_node_1.edit_in_progress = True assert not sequence.is_ready_to_run() data_node_2.edit_in_progress = True data_node_6.edit_in_progress = True assert not sequence.is_ready_to_run() data_node_1.edit_in_progress = False data_node_2.edit_in_progress = False data_node_6.edit_in_progress = False assert sequence.is_ready_to_run() def test_data_nodes_being_edited(): data_node_1 = PickleDataNode(\"foo\", Scope.SCENARIO, \"s1\", properties={\"default_data\": 1}) data_node_2 = PickleDataNode(\"bar\", Scope.SCENARIO, \"s2\", properties={\"default_data\": 2}) data_node_4 = PickleDataNode(\"qux\", Scope.SCENARIO, \"s4\", properties={\"default_data\": 4}) data_node_5 = PickleDataNode(\"quux\", Scope.SCENARIO, \"s5\", properties={\"default_data\": 5}) data_node_6 = PickleDataNode(\"quuz\", Scope.SCENARIO, \"s6\", properties={\"default_data\": 6}) data_node_7 = PickleDataNode(\"corge\", Scope.SCENARIO, \"s7\", properties={\"default_data\": 7}) task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 data_manager = _DataManagerFactory._build_manager() for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7]: data_manager._set(dn) assert len(sequence.data_nodes_being_edited()) == 0 assert 
sequence.data_nodes_being_edited() == set() data_node_1.edit_in_progress = True assert len(sequence.data_nodes_being_edited()) == 1 assert sequence.data_nodes_being_edited() == {data_node_1} data_node_2.edit_in_progress = True data_node_6.edit_in_progress = True assert len(sequence.data_nodes_being_edited()) == 3 assert sequence.data_nodes_being_edited() == {data_node_1, data_node_2, data_node_6} data_node_4.edit_in_progress = True data_node_5.edit_in_progress = True assert len(sequence.data_nodes_being_edited()) == 5 assert sequence.data_nodes_being_edited() == {data_node_1, data_node_2, data_node_4, data_node_5, data_node_6} data_node_1.edit_in_progress = False data_node_2.edit_in_progress = False data_node_6.edit_in_progress = False assert len(sequence.data_nodes_being_edited()) == 2 assert sequence.data_nodes_being_edited() == {data_node_4, data_node_5} data_node_4.edit_in_progress = False data_node_5.edit_in_progress = False data_node_7.edit_in_progress = True assert len(sequence.data_nodes_being_edited()) == 1 assert sequence.data_nodes_being_edited() == {data_node_7} data_node_7.edit_in_progress = False assert len(sequence.data_nodes_being_edited()) == 0 assert sequence.data_nodes_being_edited() == set() def test_get_tasks(): task_1 = Task(\"grault\", {}, print, id=TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, id=TaskId(\"t3\")) sequence_1 = Sequence({}, [task_1, task_2, task_3], SequenceId(\"p1\")) assert sequence_1.tasks == {\"grault\": task_1, \"garply\": task_2, \"waldo\": task_3} def test_get_set_of_tasks(): task_1 = Task(\"grault\", {}, print, id=TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, id=TaskId(\"t3\")) sequence_1 = Sequence({}, [task_1, task_2, task_3], SequenceId(\"p1\")) assert sequence_1._get_set_of_tasks() == {task_1, task_2, task_3} def test_auto_set_and_reload(task): tmp_task = Task(\"tmp_task_config_id\", {}, print, list(task.output.values()), [], TaskId(\"tmp_task_id\")) scenario = Scenario(\"scenario\", [task, tmp_task], {}, sequences={\"foo\": {}}) _TaskManager._set(task) _TaskManager._set(tmp_task) _ScenarioManager._set(scenario) sequence_1 = scenario.sequences[\"foo\"] sequence_2 = _SequenceManager._get(sequence_1) # auto set & reload on tasks attribute assert len(sequence_1.tasks) == 0 assert len(sequence_2.tasks) == 0 sequence_1.tasks = [tmp_task] assert len(sequence_1.tasks) == 1 assert sequence_1.tasks[tmp_task.config_id].id == tmp_task.id assert len(sequence_2.tasks) == 1 assert sequence_2.tasks[tmp_task.config_id].id == tmp_task.id sequence_2.tasks = [task] assert len(sequence_1.tasks) == 1 assert sequence_1.tasks[task.config_id].id == task.id assert len(sequence_2.tasks) == 1 assert sequence_2.tasks[task.config_id].id == task.id assert sequence_1.owner_id == scenario.id assert sequence_2.owner_id == scenario.id # auto set & reload on subscribers attribute assert len(sequence_1.subscribers) == 0 assert len(sequence_2.subscribers) == 0 sequence_1.subscribers.append(print) assert len(sequence_1.subscribers) == 1 assert len(sequence_2.subscribers) == 1 sequence_2.subscribers.append(print) assert len(sequence_1.subscribers) == 2 assert len(sequence_2.subscribers) == 2 sequence_1.subscribers.clear() assert len(sequence_1.subscribers) == 0 assert len(sequence_2.subscribers) == 0 sequence_1.subscribers.extend([print, map]) assert len(sequence_1.subscribers) == 2 assert len(sequence_2.subscribers) == 2 
sequence_1.subscribers.remove(_Subscriber(print, [])) assert len(sequence_1.subscribers) == 1 assert len(sequence_2.subscribers) == 1 sequence_2.subscribers.clear() assert len(sequence_1.subscribers) == 0 assert len(sequence_2.subscribers) == 0 sequence_1.subscribers + print + len assert len(sequence_1.subscribers) == 2 assert len(sequence_2.subscribers) == 2 sequence_1.subscribers = [] assert len(sequence_1.subscribers) == 0 assert len(sequence_2.subscribers) == 0 # auto set & reload on properties attribute assert sequence_1.properties == {\"name\": \"foo\"} assert sequence_2.properties == {\"name\": \"foo\"} sequence_1.properties[\"qux\"] = 4 assert sequence_1.properties[\"qux\"] == 4 assert sequence_2.properties[\"qux\"] == 4 sequence_2.properties[\"qux\"] = 5 assert sequence_1.properties[\"qux\"] == 5 assert sequence_2.properties[\"qux\"] == 5 sequence_1.properties[\"temp_key_1\"] = \"temp_value_1\" sequence_1.properties[\"temp_key_2\"] = \"temp_value_2\" assert sequence_1.properties == { \"qux\": 5, \"name\": \"foo\", \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } assert sequence_2.properties == { \"qux\": 5, \"name\": \"foo\", \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } sequence_1.properties.pop(\"temp_key_1\") assert \"temp_key_1\" not in sequence_1.properties.keys() assert \"temp_key_1\" not in sequence_2.properties.keys() assert sequence_1.properties == { \"qux\": 5, \"name\": \"foo\", \"temp_key_2\": \"temp_value_2\", } assert sequence_2.properties == { \"qux\": 5, \"name\": \"foo\", \"temp_key_2\": \"temp_value_2\", } sequence_2.properties.pop(\"temp_key_2\") assert sequence_1.properties == {\"name\": \"foo\", \"qux\": 5} assert sequence_2.properties == {\"name\": \"foo\", \"qux\": 5} assert \"temp_key_2\" not in sequence_1.properties.keys() assert \"temp_key_2\" not in sequence_2.properties.keys() sequence_1.properties[\"temp_key_3\"] = 0 assert sequence_1.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 0} assert sequence_2.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 0} sequence_1.properties.update({\"temp_key_3\": 1}) assert sequence_1.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 1} assert sequence_2.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 1} sequence_1.properties.update(dict()) assert sequence_1.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 1} assert sequence_2.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 1} sequence_1.properties[\"temp_key_4\"] = 0 sequence_1.properties[\"temp_key_5\"] = 0 with sequence_1 as sequence: assert len(sequence.tasks) == 1 assert sequence.tasks[task.config_id].id == task.id assert len(sequence.subscribers) == 0 assert sequence._is_in_context assert sequence.properties[\"qux\"] == 5 assert sequence.properties[\"temp_key_3\"] == 1 assert sequence.properties[\"temp_key_4\"] == 0 assert sequence.properties[\"temp_key_5\"] == 0 sequence.tasks = [] sequence.subscribers = [print] sequence.properties[\"qux\"] = 9 sequence.properties.pop(\"temp_key_3\") sequence.properties.pop(\"temp_key_4\") sequence.properties.update({\"temp_key_4\": 1}) sequence.properties.update({\"temp_key_5\": 2}) sequence.properties.pop(\"temp_key_5\") sequence.properties.update(dict()) assert len(sequence.tasks) == 1 assert sequence.tasks[task.config_id].id == task.id assert len(sequence.subscribers) == 0 assert sequence._is_in_context assert sequence.properties[\"qux\"] == 5 assert sequence.properties[\"temp_key_3\"] == 1 assert 
sequence.properties[\"temp_key_4\"] == 0 assert sequence.properties[\"temp_key_5\"] == 0 assert len(sequence_1.tasks) == 0 assert len(sequence_1.subscribers) == 1 assert not sequence_1._is_in_context assert sequence_1.properties[\"qux\"] == 9 assert \"temp_key_3\" not in sequence_1.properties.keys() assert sequence_1.properties[\"temp_key_4\"] == 1 assert \"temp_key_5\" not in sequence_1.properties.keys() def test_get_parents(sequence): with mock.patch(\"src.taipy.core.get_parents\") as mck: sequence.get_parents() mck.assert_called_once_with(sequence) def test_subscribe_sequence(): with mock.patch(\"src.taipy.core.subscribe_sequence\") as mck: sequence = Sequence({}, [], \"id\") sequence.subscribe(None) mck.assert_called_once_with(None, None, sequence) def test_unsubscribe_sequence(): with mock.patch(\"src.taipy.core.unsubscribe_sequence\") as mck: sequence = Sequence({}, [], \"id\") sequence.unsubscribe(None) mck.assert_called_once_with(None, None, sequence) def test_submit_sequence(): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._submit\") as mck: sequence = Sequence({}, [], \"id\") sequence.submit(None, False) mck.assert_called_once_with(sequence, None, False, False, None) "} {"text": "from src.taipy.core.sequence._sequence_converter import _SequenceConverter from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.task.task import Task def test_entity_to_model(sequence): sequence_model_1 = _SequenceConverter._entity_to_model(sequence) expected_sequence_model_1 = { \"id\": \"sequence_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id_1\", \"parent_id_2\"], \"properties\": {}, \"tasks\": [], \"subscribers\": [], \"version\": \"random_version_number\", } sequence_model_1[\"parent_ids\"] = sorted(sequence_model_1[\"parent_ids\"]) assert sequence_model_1 == expected_sequence_model_1 task_1 = Task(\"task_1\", {}, print) task_2 = Task(\"task_2\", {}, print) sequence_2 = Sequence( {\"name\": \"sequence_2\"}, [task_1, task_2], \"SEQUENCE_sq_1_SCENARIO_sc\", \"SCENARIO_sc\", [\"SCENARIO_sc\"], [], \"random_version\", ) sequence_model_2 = _SequenceConverter._entity_to_model(sequence_2) expected_sequence_model_2 = { \"id\": \"SEQUENCE_sq_1_SCENARIO_sc\", \"owner_id\": \"SCENARIO_sc\", \"parent_ids\": [\"SCENARIO_sc\"], \"properties\": {\"name\": \"sequence_2\"}, \"tasks\": [task_1.id, task_2.id], \"subscribers\": [], \"version\": \"random_version\", } assert sequence_model_2 == expected_sequence_model_2 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import pytest from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.conftest import init_managers def test_set_and_get_sequence(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() _OrchestratorFactory._build_dispatcher() input_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO) task = Task(\"task\", {}, print, [input_dn], [output_dn], TaskId(\"task_id\")) scenario = Scenario(\"scenario\", {task}, {}, set()) _ScenarioManager._set(scenario) sequence_name_1 = \"p1\" sequence_id_1 = SequenceId(f\"SEQUENCE_{sequence_name_1}_{scenario.id}\") sequence_name_2 = \"p2\" sequence_id_2 = SequenceId(f\"SEQUENCE_{sequence_name_2}_{scenario.id}\") # No existing Sequence assert _SequenceManager._get(sequence_id_1) is None assert _SequenceManager._get(sequence_id_2) is None scenario.add_sequences({sequence_name_1: []}) sequence_1 = scenario.sequences[sequence_name_1] # Save one sequence. We expect to have only one sequence stored _SequenceManager._set(sequence_1) assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2) is None # Save a second sequence. Now, we expect to have a total of two sequences stored _TaskManager._set(task) scenario.add_sequences({sequence_name_2: [task]}) sequence_2 = scenario.sequences[sequence_name_2] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 # We save the first sequence again. 
We expect nothing to change scenario.add_sequences({sequence_name_1: {}}) sequence_1 = scenario.sequences[sequence_name_1] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 # We save a third sequence with same id as the first one. # We expect the first sequence to be updated scenario.add_sequences({sequence_name_1: [task]}) sequence_3 = scenario.sequences[sequence_name_1] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert _SequenceManager._get(sequence_id_1).id == sequence_3.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 1 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 1 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 assert _TaskManager._get(task.id).id == task.id def test_get_all_on_multiple_versions_environment(init_sql_repo): init_managers() # Create 5 sequences from Scenario with 2 versions each for version in range(1, 3): for i in range(5): _ScenarioManager._set( Scenario( f\"config_id_{i+version}\", [], {}, [], f\"SCENARIO_id_{i}_v{version}\", version=f\"{version}.0\", sequences={\"sequence\": {}}, ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 1 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) _VersionManager._set_experiment_version(\"2.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 1 ) _VersionManager._set_development_version(\"1.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 1 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 0 ) _VersionManager._set_development_version(\"2.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 1 ) def mult_by_two(nb: int): return nb * 2 def mult_by_3(nb: int): return nb * 3 def test_get_or_create_data(init_sql_repo): # only create intermediate data node once Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() dn_config_1 = Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1) dn_config_2 = 
Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0) dn_config_6 = Config.configure_data_node(\"baz\", \"in_memory\", Scope.SCENARIO, default_data=0) task_config_mult_by_two = Config.configure_task(\"mult_by_two\", mult_by_two, [dn_config_1], dn_config_2) task_config_mult_by_3 = Config.configure_task(\"mult_by_3\", mult_by_3, [dn_config_2], dn_config_6) # dn_1 ---> mult_by_two ---> dn_2 ---> mult_by_3 ---> dn_6 scenario_config = Config.configure_scenario(\"scenario\", [task_config_mult_by_two, task_config_mult_by_3]) _OrchestratorFactory._build_dispatcher() assert len(_DataManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 0 scenario = _ScenarioManager._create(scenario_config) scenario.add_sequences({\"by_6\": list(scenario.tasks.values())}) sequence = scenario.sequences[\"by_6\"] assert sequence.name == \"by_6\" assert len(_DataManager._get_all()) == 3 assert len(_TaskManager._get_all()) == 2 assert len(sequence._get_sorted_tasks()) == 2 assert sequence.foo.read() == 1 assert sequence.bar.read() == 0 assert sequence.baz.read() == 0 assert sequence._get_sorted_tasks()[0][0].config_id == task_config_mult_by_two.id assert sequence._get_sorted_tasks()[1][0].config_id == task_config_mult_by_3.id _SequenceManager._submit(sequence.id) assert sequence.foo.read() == 1 assert sequence.bar.read() == 2 assert sequence.baz.read() == 6 sequence.foo.write(\"new data value\") assert sequence.foo.read() == \"new data value\" assert sequence.bar.read() == 2 assert sequence.baz.read() == 6 sequence.bar.write(7) assert sequence.foo.read() == \"new data value\" assert sequence.bar.read() == 7 assert sequence.baz.read() == 6 with pytest.raises(AttributeError): sequence.WRONG.write(7) def test_hard_delete_one_single_sequence_with_scenario_data_nodes(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create([task_config]) scenario = Scenario(\"scenario\", set(tasks), {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] sequence.submit() assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 _SequenceManager._hard_delete(sequence.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 def test_hard_delete_one_single_sequence_with_cycle_data_nodes(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.CYCLE) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create([task_config]) scenario = 
Scenario(\"scenario\", tasks, {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] sequence.submit() assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 _SequenceManager._hard_delete(sequence.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 def test_hard_delete_shared_entities(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() input_dn = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") intermediate_dn = Config.configure_data_node(\"my_inter\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") output_dn = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") task_1 = Config.configure_task(\"task_1\", print, input_dn, intermediate_dn) task_2 = Config.configure_task(\"task_2\", print, intermediate_dn, output_dn) _OrchestratorFactory._build_dispatcher() tasks_scenario_1 = _TaskManager._bulk_get_or_create([task_1, task_2], scenario_id=\"scenario_id_1\") tasks_scenario_2 = _TaskManager._bulk_get_or_create([task_1, task_2], scenario_id=\"scenario_id_2\") scenario_1 = Scenario(\"scenario_1\", tasks_scenario_1, {}, sequences={\"sequence\": {\"tasks\": tasks_scenario_1}}) scenario_2 = Scenario(\"scenario_2\", tasks_scenario_2, {}, sequences={\"sequence\": {\"tasks\": tasks_scenario_2}}) _ScenarioManager._set(scenario_1) _ScenarioManager._set(scenario_2) sequence_1 = scenario_1.sequences[\"sequence\"] sequence_2 = scenario_2.sequences[\"sequence\"] _SequenceManager._submit(sequence_1.id) _SequenceManager._submit(sequence_2.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 3 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 4 _SequenceManager._hard_delete(sequence_1.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 3 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 4 "} {"text": "import json from pathlib import Path from typing import Callable, Iterable, Optional from unittest import mock from unittest.mock import ANY import pytest from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.common import _utils from src.taipy.core.common._utils import _Subscriber from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import ( InvalidSequenceId, ModelNotFound, NonExistingSequence, NonExistingTask, SequenceBelongsToNonExistingScenario, ) from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.sequence._sequence_manager import _SequenceManager from 
src.taipy.core.sequence._sequence_manager_factory import _SequenceManagerFactory from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils.NotifyMock import NotifyMock def test_breakdown_sequence_id(): with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"scenario_id\") with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"sequence_id\") with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"SEQUENCE_sequence_id\") with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"SCENARIO_scenario_id\") with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"sequence_SCENARIO_scenario_id\") with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"SEQUENCE_sequence_scenario_id\") sequence_name, scenario_id = _SequenceManager._breakdown_sequence_id(\"SEQUENCE_sequence_SCENARIO_scenario\") assert sequence_name == \"sequence\" and scenario_id == \"SCENARIO_scenario\" sequence_name, scenario_id = _SequenceManager._breakdown_sequence_id(\"SEQUENCEsequenceSCENARIO_scenario\") assert sequence_name == \"sequence\" and scenario_id == \"SCENARIO_scenario\" def test_raise_sequence_does_not_belong_to_scenario(): with pytest.raises(SequenceBelongsToNonExistingScenario): sequence = Sequence({\"name\": \"sequence_name\"}, [], \"SEQUENCE_sequence_name_SCENARIO_scenario_id\") _SequenceManager._set(sequence) def __init(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() input_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO) task = Task(\"task\", {}, print, [input_dn], [output_dn], TaskId(\"task_id\")) scenario = Scenario(\"scenario\", set([task]), {}, set()) _ScenarioManager._set(scenario) return scenario, task def test_set_and_get_sequence_no_existing_sequence(): scenario, task = __init() sequence_name_1 = \"p1\" sequence_id_1 = SequenceId(f\"SEQUENCE_{sequence_name_1}_{scenario.id}\") sequence_name_2 = \"p2\" sequence_id_2 = SequenceId(f\"SEQUENCE_{sequence_name_2}_{scenario.id}\") assert _SequenceManager._get(sequence_id_1) is None assert _SequenceManager._get(sequence_id_2) is None assert _SequenceManager._get(\"sequence\") is None def test_set_and_get(): scenario, task = __init() sequence_name_1 = \"p1\" sequence_id_1 = SequenceId(f\"SEQUENCE_{sequence_name_1}_{scenario.id}\") sequence_name_2 = \"p2\" sequence_id_2 = SequenceId(f\"SEQUENCE_{sequence_name_2}_{scenario.id}\") scenario.add_sequences({sequence_name_1: []}) sequence_1 = scenario.sequences[sequence_name_1] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2) is None # Save a second sequence. 
Now, we expect to have a total of two sequences stored _TaskManager._set(task) scenario.add_sequences({sequence_name_2: [task]}) sequence_2 = scenario.sequences[sequence_name_2] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 assert _TaskManager._get(task.id).id == task.id # We save the first sequence again. We expect nothing to change scenario.add_sequence(sequence_name_1, []) sequence_1 = scenario.sequences[sequence_name_1] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 assert _TaskManager._get(task.id).id == task.id # We save a third sequence with same name as the first one. # We expect the first sequence to be updated scenario.add_sequences({sequence_name_1: [task]}) sequence_3 = scenario.sequences[sequence_name_1] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert _SequenceManager._get(sequence_id_1).id == sequence_3.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 1 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 1 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 assert _TaskManager._get(task.id).id == task.id def test_get_all_on_multiple_versions_environment(): # Create 5 sequences from Scenario with 2 versions each for version in range(1, 3): for i in range(5): _ScenarioManager._set( Scenario( f\"config_id_{i+version}\", [], {}, [], f\"SCENARIO_id_{i}_v{version}\", version=f\"{version}.0\", sequences={\"sequence\": {}}, ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 1 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) _VersionManager._set_experiment_version(\"2.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 1 ) _VersionManager._set_development_version(\"1.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 1 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": 
\"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 0 ) _VersionManager._set_development_version(\"2.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 1 ) def test_is_submittable(): dn = InMemoryDataNode(\"dn\", Scope.SCENARIO, properties={\"default_data\": 10}) task = Task(\"task\", {}, print, [dn]) scenario = Scenario(\"scenario\", set([task]), {}, set()) _ScenarioManager._set(scenario) scenario.add_sequences({\"sequence\": list([task])}) sequence = scenario.sequences[\"sequence\"] assert len(_SequenceManager._get_all()) == 1 assert _SequenceManager._is_submittable(sequence) assert _SequenceManager._is_submittable(sequence.id) assert not _SequenceManager._is_submittable(\"Sequence_temp\") assert not _SequenceManager._is_submittable(\"SEQUENCE_temp_SCENARIO_scenario\") scenario.dn.edit_in_progress = True assert not _SequenceManager._is_submittable(sequence) assert not _SequenceManager._is_submittable(sequence.id) scenario.dn.edit_in_progress = False assert _SequenceManager._is_submittable(sequence) assert _SequenceManager._is_submittable(sequence.id) def test_submit(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = InMemoryDataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario = Scenario(\"sce\", {task_1, task_2, task_3, task_4}, {}) sequence_name = \"sequence\" sequence_id = Sequence._new_id(sequence_name, scenario.id) class MockOrchestrator(_Orchestrator): submit_calls = [] @classmethod def _lock_dn_output_and_create_job( cls, task: Task, submit_id: str, submit_entity_id: str, callbacks: Optional[Iterable[Callable]] = None, force: bool = False, ): cls.submit_calls.append(task) return super()._lock_dn_output_and_create_job(task, submit_id, submit_entity_id, callbacks, force) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._orchestrator\", new=MockOrchestrator): # sequence does not exists. We expect an exception to be raised with pytest.raises(NonExistingSequence): _SequenceManager._submit(sequence_id) _ScenarioManager._set(scenario) scenario.add_sequences({sequence_name: [task_4, task_2, task_1, task_3]}) # sequence, and tasks does exist. 
We expect the tasks to be submitted # in a specific order _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _TaskManager._set(task_4) sequence = scenario.sequences[sequence_name] _SequenceManager._submit(sequence.id) calls_ids = [t.id for t in _TaskManager._orchestrator().submit_calls] tasks_ids = [task_1.id, task_2.id, task_4.id, task_3.id] assert calls_ids == tasks_ids _SequenceManager._submit(sequence) calls_ids = [t.id for t in _TaskManager._orchestrator().submit_calls] tasks_ids = tasks_ids * 2 assert set(calls_ids) == set(tasks_ids) def test_assign_sequence_as_parent_of_task(): dn_config_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.SCENARIO) dn_config_3 = Config.configure_data_node(\"dn_3\", \"in_memory\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"task_1\", print, [dn_config_1], [dn_config_2]) task_config_2 = Config.configure_task(\"task_2\", print, [dn_config_2], [dn_config_3]) task_config_3 = Config.configure_task(\"task_3\", print, [dn_config_2], [dn_config_3]) tasks = _TaskManager._bulk_get_or_create([task_config_1, task_config_2, task_config_3], \"scenario_id\") sequence_1 = _SequenceManager._create(\"sequence_1\", [tasks[0], tasks[1]], scenario_id=\"scenario_id\") sequence_2 = _SequenceManager._create(\"sequence_2\", [tasks[0], tasks[2]], scenario_id=\"scenario_id\") tasks_1 = list(sequence_1.tasks.values()) tasks_2 = list(sequence_2.tasks.values()) assert len(tasks_1) == 2 assert len(tasks_2) == 2 assert tasks_1[0].parent_ids == {sequence_1.id, sequence_2.id} assert tasks_2[0].parent_ids == {sequence_1.id, sequence_2.id} assert tasks_1[1].parent_ids == {sequence_1.id} assert tasks_2[1].parent_ids == {sequence_2.id} g = 0 def mock_function_no_input_no_output(): global g g += 1 def mock_function_one_input_no_output(inp): global g g += inp def mock_function_no_input_one_output(): global g return g def test_submit_sequence_from_tasks_with_one_or_no_input_output(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() # test no input and no output Task task_no_input_no_output = Task(\"task_no_input_no_output\", {}, mock_function_no_input_no_output) scenario_1 = Scenario(\"scenario_1\", {task_no_input_no_output}, {}) _TaskManager._set(task_no_input_no_output) _ScenarioManager._set(scenario_1) scenario_1.add_sequences({\"my_sequence_1\": [task_no_input_no_output]}) sequence_1 = scenario_1.sequences[\"my_sequence_1\"] assert len(sequence_1._get_sorted_tasks()) == 1 _SequenceManager._submit(sequence_1) assert g == 1 # test one input and no output Task data_node_input = InMemoryDataNode(\"input_dn\", Scope.SCENARIO, properties={\"default_data\": 2}) task_one_input_no_output = Task( \"task_one_input_no_output\", {}, mock_function_one_input_no_output, input=[data_node_input] ) scenario_2 = Scenario(\"scenario_2\", {task_one_input_no_output}, {}) _DataManager._set(data_node_input) data_node_input.unlock_edit() _TaskManager._set(task_one_input_no_output) _ScenarioManager._set(scenario_2) scenario_2.add_sequences({\"my_sequence_2\": [task_one_input_no_output]}) sequence_2 = scenario_2.sequences[\"my_sequence_2\"] assert len(sequence_2._get_sorted_tasks()) == 1 _SequenceManager._submit(sequence_2) assert g == 3 # test no input and one output Task data_node_output = InMemoryDataNode(\"output_dn\", Scope.SCENARIO, properties={\"default_data\": None}) task_no_input_one_output = Task( 
\"task_no_input_one_output\", {}, mock_function_no_input_one_output, output=[data_node_output] ) scenario_3 = Scenario(\"scenario_3\", {task_no_input_one_output}, {}) _DataManager._set(data_node_output) assert data_node_output.read() is None _TaskManager._set(task_no_input_one_output) _ScenarioManager._set(scenario_3) scenario_3.add_sequences({\"my_sequence_3\": [task_no_input_one_output]}) sequence_3 = scenario_3.sequences[\"my_sequence_3\"] assert len(sequence_2._get_sorted_tasks()) == 1 _SequenceManager._submit(sequence_3) assert data_node_output.read() == 3 def mult_by_two(nb: int): return nb * 2 def mult_by_3(nb: int): return nb * 3 def test_get_or_create_data(): # only create intermediate data node once Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_config_1 = Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1) dn_config_2 = Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0) dn_config_6 = Config.configure_data_node(\"baz\", \"in_memory\", Scope.SCENARIO, default_data=0) task_config_mult_by_two = Config.configure_task(\"mult_by_two\", mult_by_two, [dn_config_1], dn_config_2) task_config_mult_by_3 = Config.configure_task(\"mult_by_3\", mult_by_3, [dn_config_2], dn_config_6) # dn_1 ---> mult_by_two ---> dn_2 ---> mult_by_3 ---> dn_6 scenario_config = Config.configure_scenario(\"scenario\", [task_config_mult_by_two, task_config_mult_by_3]) _OrchestratorFactory._build_dispatcher() assert len(_DataManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 0 scenario = _ScenarioManager._create(scenario_config) scenario.add_sequences({\"by_6\": list(scenario.tasks.values())}) sequence = scenario.sequences[\"by_6\"] assert sequence.name == \"by_6\" assert len(_DataManager._get_all()) == 3 assert len(_TaskManager._get_all()) == 2 assert len(sequence._get_sorted_tasks()) == 2 assert sequence.foo.read() == 1 assert sequence.bar.read() == 0 assert sequence.baz.read() == 0 assert sequence._get_sorted_tasks()[0][0].config_id == task_config_mult_by_two.id assert sequence._get_sorted_tasks()[1][0].config_id == task_config_mult_by_3.id _SequenceManager._submit(sequence.id) assert sequence.foo.read() == 1 assert sequence.bar.read() == 2 assert sequence.baz.read() == 6 sequence.foo.write(\"new data value\") assert sequence.foo.read() == \"new data value\" assert sequence.bar.read() == 2 assert sequence.baz.read() == 6 sequence.bar.write(7) assert sequence.foo.read() == \"new data value\" assert sequence.bar.read() == 7 assert sequence.baz.read() == 6 with pytest.raises(AttributeError): sequence.WRONG.write(7) def notify1(*args, **kwargs): ... def notify2(*args, **kwargs): ... def notify_multi_param(*args, **kwargs): ... 
def test_sequence_notification_subscribe(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) task_configs = [ Config.configure_task( \"mult_by_two\", mult_by_two, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ] _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create(task_configs=task_configs) scenario = Scenario(\"scenario\", set(tasks), {}, sequences={\"by_1\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"by_1\"] notify_1 = NotifyMock(sequence) notify_1.__name__ = \"notify_1\" notify_1.__module__ = \"notify_1\" notify_2 = NotifyMock(sequence) notify_2.__name__ = \"notify_2\" notify_2.__module__ = \"notify_2\" # Mocking this because NotifyMock is a class that does not loads correctly when getting the sequence # from the storage. mocker.patch.object(_utils, \"_load_fct\", side_effect=[notify_1, notify_1, notify_2, notify_2, notify_2, notify_2]) # test subscription callback = mock.MagicMock() _SequenceManager._submit(sequence.id, [callback]) callback.assert_called() # test sequence subscribe notification _SequenceManager._subscribe(callback=notify_1, sequence=sequence) _SequenceManager._submit(sequence.id) notify_1.assert_called_3_times() notify_1.reset() # test sequence unsubscribe notification # test subscribe notification only on new job _SequenceManager._unsubscribe(callback=notify_1, sequence=sequence) _SequenceManager._subscribe(callback=notify_2, sequence=sequence) _SequenceManager._submit(sequence) notify_1.assert_not_called() notify_2.assert_called_3_times() def test_sequence_notification_subscribe_multi_param(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) task_configs = [ Config.configure_task( \"mult_by_two\", mult_by_two, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ] _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create(task_configs) scenario = Scenario(\"scenario\", set(tasks), {}, sequences={\"by_6\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"by_6\"] notify = mocker.Mock() # test sequence subscribe notification _SequenceManager._subscribe(callback=notify, params=[\"foobar\", 123, 1.2], sequence=sequence) mocker.patch.object(_SequenceManager, \"_get\", return_value=sequence) _SequenceManager._submit(sequence.id) # as the callback is called with Sequence/Scenario and Job objects # we can assert that is called with params plus a sequence object that we know # of and a job object that is represented by ANY in this case notify.assert_called_with(\"foobar\", 123, 1.2, sequence, ANY) def test_sequence_notification_unsubscribe(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) task_configs = [ Config.configure_task( \"mult_by_two\", mult_by_two, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ] 
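# The rest of this test builds a scenario around the task config above and checks
# that unsubscribing a callback that is not currently registered on the sequence
# raises a ValueError, while unsubscribing a registered callback succeeds.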
_OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create(task_configs) scenario = Scenario(\"scenario\", set(tasks), {}, sequences={\"by_6\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"by_6\"] notify_1 = notify1 notify_2 = notify2 _SequenceManager._subscribe(callback=notify_1, sequence=sequence) _SequenceManager._unsubscribe(callback=notify_1, sequence=sequence) _SequenceManager._subscribe(callback=notify_2, sequence=sequence) _SequenceManager._submit(sequence.id) with pytest.raises(ValueError): _SequenceManager._unsubscribe(callback=notify_1, sequence=sequence) _SequenceManager._unsubscribe(callback=notify_2, sequence=sequence) def test_sequence_notification_unsubscribe_multi_param(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task_configs = [ Config.configure_task( \"mult_by_two\", mult_by_two, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ] _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create(task_configs) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"by_6\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"by_6\"] _SequenceManager._subscribe(callback=notify_multi_param, params=[\"foobar\", 123, 0], sequence=sequence) _SequenceManager._subscribe(callback=notify_multi_param, params=[\"foobar\", 123, 1], sequence=sequence) _SequenceManager._subscribe(callback=notify_multi_param, params=[\"foobar\", 123, 2], sequence=sequence) assert len(sequence.subscribers) == 3 sequence.unsubscribe(notify_multi_param) assert len(sequence.subscribers) == 2 assert _Subscriber(notify_multi_param, [\"foobar\", 123, 0]) not in sequence.subscribers sequence.unsubscribe(notify_multi_param, [\"foobar\", 123, 2]) assert len(sequence.subscribers) == 1 assert _Subscriber(notify_multi_param, [\"foobar\", 123, 2]) not in sequence.subscribers with pytest.raises(ValueError): sequence.unsubscribe(notify_multi_param, [\"foobar\", 123, 10000]) def test_sequence_notification_subscribe_all(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task_configs = [ Config.configure_task( \"mult_by_two\", mult_by_two, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ] _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create(task_configs) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"by_6\": {\"tasks\": tasks}, \"other_sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"by_6\"] other_sequence = scenario.sequences[\"other_sequence\"] notify_1 = NotifyMock(sequence) _SequenceManager._subscribe(notify_1) assert len(_SequenceManager._get(sequence.id).subscribers) == 1 assert len(_SequenceManager._get(other_sequence.id).subscribers) == 1 def test_delete(): sequence_id = \"SEQUENCE_sequence_SCENARIO_scenario_id_1\" with pytest.raises(ModelNotFound): _SequenceManager._delete(sequence_id) scenario_1 = Scenario(\"scenario_1\", [], {}, scenario_id=\"SCENARIO_scenario_id_1\") scenario_2 = Scenario(\"scenario_2\", [], {}, scenario_id=\"SCENARIO_scenario_id_2\") _ScenarioManager._set(scenario_1) _ScenarioManager._set(scenario_2) with pytest.raises(ModelNotFound): _SequenceManager._delete(sequence_id) scenario_1.add_sequences({\"sequence\": 
{}}) assert len(_SequenceManager._get_all()) == 1 _SequenceManager._delete(sequence_id) assert len(_SequenceManager._get_all()) == 0 scenario_1.add_sequences({\"sequence\": {}, \"sequence_1\": {}}) assert len(_SequenceManager._get_all()) == 2 _SequenceManager._delete(sequence_id) assert len(_SequenceManager._get_all()) == 1 scenario_1.add_sequences({\"sequence_1\": {}, \"sequence_2\": {}, \"sequence_3\": {}}) scenario_2.add_sequences({\"sequence_1_2\": {}, \"sequence_2_2\": {}}) assert len(_SequenceManager._get_all()) == 5 _SequenceManager._delete_all() assert len(_SequenceManager._get_all()) == 0 scenario_1.add_sequences({\"sequence_1\": {}, \"sequence_2\": {}, \"sequence_3\": {}, \"sequence_4\": {}}) scenario_2.add_sequences({\"sequence_1_2\": {}, \"sequence_2_2\": {}}) assert len(_SequenceManager._get_all()) == 6 _SequenceManager._delete_many( [ \"SEQUENCE_sequence_1_SCENARIO_scenario_id_1\", \"SEQUENCE_sequence_2_SCENARIO_scenario_id_1\", \"SEQUENCE_sequence_1_2_SCENARIO_scenario_id_2\", ] ) assert len(_SequenceManager._get_all()) == 3 with pytest.raises(ModelNotFound): _SequenceManager._delete_many( [\"SEQUENCE_sequence_1_SCENARIO_scenario_id_1\", \"SEQUENCE_sequence_2_SCENARIO_scenario_id_1\"] ) def test_delete_version(): scenario_1_0 = Scenario( \"scenario_config\", [], {}, scenario_id=\"SCENARIO_id_1_v1_0\", version=\"1.0\", sequences={\"sequence_1\": {}, \"sequence_2\": {}}, ) scenario_1_1 = Scenario( \"scenario_config\", [], {}, scenario_id=\"SCENARIO_id_1_v1_1\", version=\"1.1\", sequences={\"sequence_1\": {}, \"sequence_2\": {}}, ) _ScenarioManager._set(scenario_1_0) _ScenarioManager._set(scenario_1_1) _VersionManager._set_experiment_version(\"1.1\") assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 2 _VersionManager._set_experiment_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 2 _SequenceManager._delete_by_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 assert len(scenario_1_0.sequences) == 0 assert len(scenario_1_1.sequences) == 2 _VersionManager._set_experiment_version(\"1.1\") assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 2 assert len(scenario_1_0.sequences) == 0 assert len(scenario_1_1.sequences) == 2 _SequenceManager._delete_by_version(\"1.1\") assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 def test_exists(): scenario = Scenario(\"scenario\", [], {}, scenario_id=\"SCENARIO_scenario\", sequences={\"sequence\": {}}) _ScenarioManager._set(scenario) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert not _SequenceManager._exists(\"SEQUENCE_sequence_not_exist_SCENARIO_scenario\") assert not _SequenceManager._exists(\"SEQUENCE_sequence_SCENARIO_scenario_id\") assert _SequenceManager._exists(\"SEQUENCE_sequence_SCENARIO_scenario\") assert _SequenceManager._exists(scenario.sequences[\"sequence\"]) def test_export(tmpdir_factory): path = tmpdir_factory.mktemp(\"data\") task = Task(\"task\", {}, print, id=TaskId(\"task_id\")) scenario = Scenario( \"scenario\", set([task]), {}, set(), version=\"1.0\", sequences={\"sequence_1\": {}, \"sequence_2\": {\"tasks\": [task], \"properties\": {\"xyz\": \"acb\"}}}, ) _TaskManager._set(task) _ScenarioManager._set(scenario) sequence_1 = scenario.sequences[\"sequence_1\"] sequence_2 = scenario.sequences[\"sequence_2\"] _SequenceManager._export(sequence_1.id, 
Path(path)) export_sequence_json_file_path = f\"{path}/sequences/{sequence_1.id}.json\" with open(export_sequence_json_file_path, \"rb\") as f: sequence_json_file = json.load(f) expected_json = { \"id\": sequence_1.id, \"owner_id\": scenario.id, \"parent_ids\": [scenario.id], \"name\": \"sequence_1\", \"tasks\": [], \"properties\": {}, \"subscribers\": [], } assert expected_json == sequence_json_file _SequenceManager._export(sequence_2.id, Path(path)) export_sequence_json_file_path = f\"{path}/sequences/{sequence_2.id}.json\" with open(export_sequence_json_file_path, \"rb\") as f: sequence_json_file = json.load(f) expected_json = { \"id\": sequence_2.id, \"owner_id\": scenario.id, \"parent_ids\": [scenario.id], \"name\": \"sequence_2\", \"tasks\": [task.id], \"properties\": {\"xyz\": \"acb\"}, \"subscribers\": [], } assert expected_json == sequence_json_file def test_hard_delete_one_single_sequence_with_scenario_data_nodes(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create([task_config]) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] sequence.submit() assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 _SequenceManager._hard_delete(sequence.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 def test_hard_delete_one_single_sequence_with_cycle_data_nodes(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.CYCLE) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create([task_config]) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] sequence.submit() assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 _SequenceManager._hard_delete(sequence.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 def test_hard_delete_shared_entities(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) input_dn = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") intermediate_dn = Config.configure_data_node(\"my_inter\", \"in_memory\", 
scope=Scope.GLOBAL, default_data=\"testing\") output_dn = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") task_1 = Config.configure_task(\"task_1\", print, input_dn, intermediate_dn) task_2 = Config.configure_task(\"task_2\", print, intermediate_dn, output_dn) _OrchestratorFactory._build_dispatcher() tasks_scenario_1 = _TaskManager._bulk_get_or_create([task_1, task_2], scenario_id=\"scenario_id_1\") tasks_scenario_2 = _TaskManager._bulk_get_or_create([task_1, task_2], scenario_id=\"scenario_id_2\") scenario_1 = Scenario(\"scenario_1\", tasks_scenario_1, {}, sequences={\"sequence\": {\"tasks\": tasks_scenario_1}}) scenario_2 = Scenario(\"scenario_2\", tasks_scenario_2, {}, sequences={\"sequence\": {\"tasks\": tasks_scenario_2}}) _ScenarioManager._set(scenario_1) _ScenarioManager._set(scenario_2) sequence_1 = scenario_1.sequences[\"sequence\"] sequence_2 = scenario_2.sequences[\"sequence\"] _SequenceManager._submit(sequence_1.id) _SequenceManager._submit(sequence_2.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 3 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 4 _SequenceManager._hard_delete(sequence_1.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 3 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 4 def my_print(a, b): print(a + b) def test_submit_task_with_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_path=\"wrong_path.pickle\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") json_dn_cfg = Config.configure_parquet_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_2_cfg = Config.configure_task(\"task2\", my_print, [csv_dn_cfg, parquet_dn_cfg], json_dn_cfg) tasks = _TaskManager._bulk_get_or_create([task_cfg, task_2_cfg]) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] pip_manager = _SequenceManagerFactory._build_manager() pip_manager._submit(sequence) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in sequence.get_inputs() ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. 
Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in sequence.data_nodes.values() if input_dn not in sequence.get_inputs() ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_submit_task_with_one_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_data=\"value\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") json_dn_cfg = Config.configure_parquet_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_2_cfg = Config.configure_task(\"task2\", my_print, [csv_dn_cfg, parquet_dn_cfg], json_dn_cfg) tasks = _TaskManager._bulk_get_or_create([task_cfg, task_2_cfg]) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] pip_manager = _SequenceManagerFactory._build_manager() pip_manager._submit(sequence) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in sequence.get_inputs() if input_dn.config_id == \"wrong_csv_file_path\" ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in sequence.data_nodes.values() if input_dn.config_id != \"wrong_csv_file_path\" ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) "} {"text": "import datetime import json import os import pathlib from dataclasses import dataclass from enum import Enum from time import sleep import numpy as np import pandas as pd import pytest from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.json import JSONDataNode from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.exceptions.exceptions import NoData from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import InvalidConfigurationId @pytest.fixture(scope=\"function\", autouse=True) def cleanup(): yield path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.json\") if os.path.isfile(path): os.remove(path) class MyCustomObject: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class MyCustomObject2: def __init__(self, id, boolean, text): self.id = id self.boolean = boolean self.text = text class MyEnum(Enum): A = 1 B = 2 C = 3 @dataclass class CustomDataclass: integer: int string: str class MyCustomEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, MyCustomObject): return {\"__type__\": \"MyCustomObject\", \"id\": o.id, \"integer\": o.integer, \"text\": o.text} return super().default(self, o) class MyCustomDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): super().__init__(object_hook=self.object_hook, 
*args, **kwargs) def object_hook(self, o): if o.get(\"__type__\") == \"MyCustomObject\": return MyCustomObject(o[\"id\"], o[\"integer\"], o[\"text\"]) else: return o class TestJSONDataNode: def test_create(self): path = \"data/node/path\" dn = JSONDataNode(\"foo_bar\", Scope.SCENARIO, properties={\"default_path\": path, \"name\": \"super name\"}) assert isinstance(dn, JSONDataNode) assert dn.storage_type() == \"json\" assert dn.config_id == \"foo_bar\" assert dn.name == \"super name\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.last_edit_date is None assert dn.job_ids == [] assert not dn.is_ready_for_reading assert dn.path == path with pytest.raises(InvalidConfigurationId): dn = JSONDataNode( \"foo bar\", Scope.SCENARIO, properties={\"default_path\": path, \"has_header\": False, \"name\": \"super name\"} ) def test_get_user_properties(self, json_file): dn_1 = JSONDataNode(\"dn_1\", Scope.SCENARIO, properties={\"path\": json_file}) assert dn_1._get_user_properties() == {} dn_2 = JSONDataNode( \"dn_2\", Scope.SCENARIO, properties={ \"default_data\": \"foo\", \"default_path\": json_file, \"encoder\": MyCustomEncoder, \"decoder\": MyCustomDecoder, \"foo\": \"bar\", }, ) # default_data, default_path, path, encoder, decoder are filtered out assert dn_2._get_user_properties() == {\"foo\": \"bar\"} def test_new_json_data_node_with_existing_file_is_ready_for_reading(self): not_ready_dn_cfg = Config.configure_data_node( \"not_ready_data_node_config_id\", \"json\", default_path=\"NOT_EXISTING.json\" ) not_ready_dn = _DataManager._bulk_get_or_create([not_ready_dn_cfg])[not_ready_dn_cfg] assert not not_ready_dn.is_ready_for_reading assert not_ready_dn.path == \"NOT_EXISTING.json\" path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_list.json\") ready_dn_cfg = Config.configure_data_node(\"ready_data_node_config_id\", \"json\", default_path=path) ready_dn = _DataManager._bulk_get_or_create([ready_dn_cfg])[ready_dn_cfg] assert ready_dn.is_ready_for_reading def test_read_non_existing_json(self): not_existing_json = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": \"WRONG.json\"}) with pytest.raises(NoData): assert not_existing_json.read() is None not_existing_json.read_or_raise() def test_read(self): path_1 = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_list.json\") dn_1 = JSONDataNode(\"bar\", Scope.SCENARIO, properties={\"default_path\": path_1}) data_1 = dn_1.read() assert isinstance(data_1, list) assert len(data_1) == 4 path_2 = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_dict.json\") dn_2 = JSONDataNode(\"bar\", Scope.SCENARIO, properties={\"default_path\": path_2}) data_2 = dn_2.read() assert isinstance(data_2, dict) assert data_2[\"id\"] == \"1\" path_3 = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_int.json\") dn_3 = JSONDataNode(\"bar\", Scope.SCENARIO, properties={\"default_path\": path_3}) data_3 = dn_3.read() assert isinstance(data_3, int) assert data_3 == 1 path_4 = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_null.json\") dn_4 = JSONDataNode(\"bar\", Scope.SCENARIO, properties={\"default_path\": path_4}) data_4 = dn_4.read() assert data_4 is None def test_read_invalid_json(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/invalid.json.txt\") dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": 
path}) with pytest.raises(ValueError): dn.read() def test_append_to_list(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) original_data = json_dn.read() # Append a dictionary append_data_1 = {\"a\": 1, \"b\": 2, \"c\": 3} json_dn.append(append_data_1) assert json_dn.read() == original_data + [append_data_1] # Append a list of dictionaries append_data_data_2 = [{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}] json_dn.append(append_data_data_2) assert json_dn.read() == original_data + [append_data_1] + append_data_data_2 def test_append_to_a_dictionary(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) original_data = {\"a\": 1, \"b\": 2, \"c\": 3} json_dn.write(original_data) # Append another dictionary append_data_1 = {\"d\": 1, \"e\": 2, \"f\": 3} json_dn.append(append_data_1) assert json_dn.read() == {**original_data, **append_data_1} # Append an overlap dictionary append_data_data_2 = {\"a\": 10, \"b\": 20, \"g\": 30} json_dn.append(append_data_data_2) assert json_dn.read() == {**original_data, **append_data_1, **append_data_data_2} def test_write(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) data = {\"a\": 1, \"b\": 2, \"c\": 3} json_dn.write(data) assert np.array_equal(json_dn.read(), data) def test_write_with_different_encoding(self, json_file): data = {\"\u2265a\": 1, \"b\": 2} utf8_dn = JSONDataNode(\"utf8_dn\", Scope.SCENARIO, properties={\"default_path\": json_file}) utf16_dn = JSONDataNode( \"utf16_dn\", Scope.SCENARIO, properties={\"default_path\": json_file, \"encoding\": \"utf-16\"} ) # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding utf8_dn.write(data) assert np.array_equal(utf8_dn.read(), data) with pytest.raises(UnicodeError): utf16_dn.read() # If a file is written with utf-16 encoding, it can only be read with utf-16, not utf-8 encoding utf16_dn.write(data) assert np.array_equal(utf16_dn.read(), data) with pytest.raises(UnicodeError): utf8_dn.read() def test_write_non_serializable(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) data = {\"a\": 1, \"b\": json_dn} with pytest.raises(TypeError): json_dn.write(data) def test_write_date(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) now = datetime.datetime.now() data = {\"date\": now} json_dn.write(data) read_data = json_dn.read() assert read_data[\"date\"] == now def test_write_enum(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) data = [MyEnum.A, MyEnum.B, MyEnum.C] json_dn.write(data) read_data = json_dn.read() assert read_data == [MyEnum.A, MyEnum.B, MyEnum.C] def test_write_dataclass(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) json_dn.write(CustomDataclass(integer=1, string=\"foo\")) read_data = json_dn.read() assert read_data.integer == 1 assert read_data.string == \"foo\" def test_write_custom_encoder(self, json_file): json_dn = JSONDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": json_file, \"encoder\": MyCustomEncoder} ) data = [MyCustomObject(\"1\", 1, \"abc\"), 100] json_dn.write(data) read_data = json_dn.read() assert read_data[0][\"__type__\"] == \"MyCustomObject\" assert read_data[0][\"id\"] == \"1\" assert 
read_data[0][\"integer\"] == 1 assert read_data[0][\"text\"] == \"abc\" assert read_data[1] == 100 def test_read_write_custom_encoder_decoder(self, json_file): json_dn = JSONDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": json_file, \"encoder\": MyCustomEncoder, \"decoder\": MyCustomDecoder}, ) data = [MyCustomObject(\"1\", 1, \"abc\"), 100] json_dn.write(data) read_data = json_dn.read() assert isinstance(read_data[0], MyCustomObject) assert read_data[0].id == \"1\" assert read_data[0].integer == 1 assert read_data[0].text == \"abc\" assert read_data[1] == 100 def test_filter(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) json_dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, {\"KWARGS_KEY\": \"KWARGS_VALUE\"}, ] ) assert len(json_dn.filter((\"foo\", 1, Operator.EQUAL))) == 3 assert len(json_dn.filter((\"foo\", 1, Operator.NOT_EQUAL))) == 3 assert len(json_dn.filter((\"bar\", 2, Operator.EQUAL))) == 3 assert len(json_dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)) == 4 assert json_dn[0] == {\"foo\": 1, \"bar\": 1} assert json_dn[2] == {\"foo\": 1} assert json_dn[:2] == [{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}] @pytest.mark.parametrize( [\"properties\", \"exists\"], [ ({}, False), ({\"default_data\": {\"foo\": \"bar\"}}, True), ], ) def test_create_with_default_data(self, properties, exists): dn = JSONDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) assert os.path.exists(dn.path) is exists def test_set_path(self): dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": \"foo.json\"}) assert dn.path == \"foo.json\" dn.path = \"bar.json\" assert dn.path == \"bar.json\" def test_read_write_after_modify_path(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_dict.json\") new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.json\") dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path}) read_data = dn.read() assert read_data is not None dn.path = new_path with pytest.raises(FileNotFoundError): dn.read() dn.write({\"other\": \"stuff\"}) assert dn.read() == {\"other\": \"stuff\"} def test_get_system_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.json\")) pd.DataFrame([]).to_json(temp_file_path) dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path}) dn.write([1, 2, 3]) previous_edit_date = dn.last_edit_date sleep(0.1) pd.DataFrame([4, 5, 6]).to_json(temp_file_path) new_edit_date = datetime.datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert previous_edit_date < dn.last_edit_date assert new_edit_date == dn.last_edit_date sleep(0.1) dn.write([1, 2, 3]) assert new_edit_date < dn.last_edit_date os.unlink(temp_file_path) "} {"text": "from importlib import util from unittest.mock import patch import modin.pandas as modin_pd import numpy as np import pandas as pd import pytest from modin.pandas.test.utils import df_equals from pandas.testing import assert_frame_equal from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.data.sql import SQLDataNode from src.taipy.core.exceptions.exceptions import MissingAppendQueryBuilder, MissingRequiredProperty from 
taipy.config.common.scope import Scope class MyCustomObject: def __init__(self, foo=None, bar=None, *args, **kwargs): self.foo = foo self.bar = bar self.args = args self.kwargs = kwargs def my_write_query_builder_with_pandas(data: pd.DataFrame): insert_data = data.to_dict(\"records\") return [\"DELETE FROM example\", (\"INSERT INTO example VALUES (:foo, :bar)\", insert_data)] def my_write_query_builder_with_modin(data: modin_pd.DataFrame): insert_data = data.to_dict(\"records\") return [\"DELETE FROM example\", (\"INSERT INTO example VALUES (:foo, :bar)\", insert_data)] def my_append_query_builder_with_pandas(data: pd.DataFrame): insert_data = data.to_dict(\"records\") return [(\"INSERT INTO example VALUES (:foo, :bar)\", insert_data)] def my_append_query_builder_with_modin(data: modin_pd.DataFrame): insert_data = data.to_dict(\"records\") return [(\"INSERT INTO example VALUES (:foo, :bar)\", insert_data)] def single_write_query_builder(data): return \"DELETE FROM example\" class TestSQLDataNode: __pandas_properties = [ { \"db_name\": \"taipy.sqlite3\", \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", \"other\": \"value\", }, }, ] __modin_properties = [ { \"db_name\": \"taipy.sqlite3\", \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_modin, \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", \"other\": \"value\", }, }, ] if util.find_spec(\"pyodbc\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mssql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mssql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_modin, \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) if util.find_spec(\"pymysql\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mysql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mysql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_modin, \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) if util.find_spec(\"psycopg2\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"postgresql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"postgresql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_modin, \"exposed_type\": \"modin\", 
\"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @pytest.mark.parametrize(\"modin_properties\", __modin_properties) def test_create(self, pandas_properties, modin_properties): dn = SQLDataNode( \"foo_bar\", Scope.SCENARIO, properties=pandas_properties, ) assert isinstance(dn, SQLDataNode) assert dn.storage_type() == \"sql\" assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.exposed_type == \"pandas\" assert dn.read_query == \"SELECT * FROM example\" assert dn.write_query_builder == my_write_query_builder_with_pandas dn = SQLDataNode( \"foo_bar\", Scope.SCENARIO, properties=modin_properties, ) assert isinstance(dn, SQLDataNode) assert dn.storage_type() == \"sql\" assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.exposed_type == \"modin\" assert dn.read_query == \"SELECT * FROM example\" assert dn.write_query_builder == my_write_query_builder_with_modin @pytest.mark.parametrize(\"properties\", __pandas_properties + __modin_properties) def test_get_user_properties(self, properties): custom_properties = properties.copy() custom_properties[\"foo\"] = \"bar\" dn = SQLDataNode( \"foo_bar\", Scope.SCENARIO, properties=custom_properties, ) assert dn._get_user_properties() == {\"foo\": \"bar\"} @pytest.mark.parametrize( \"properties\", [ {}, {\"db_username\": \"foo\"}, {\"db_username\": \"foo\", \"db_password\": \"foo\"}, {\"db_username\": \"foo\", \"db_password\": \"foo\", \"db_name\": \"foo\"}, {\"engine\": \"sqlite\"}, {\"engine\": \"mssql\", \"db_name\": \"foo\"}, {\"engine\": \"mysql\", \"db_username\": \"foo\"}, {\"engine\": \"postgresql\", \"db_username\": \"foo\", \"db_password\": \"foo\"}, ], ) def test_create_with_missing_parameters(self, properties): with pytest.raises(MissingRequiredProperty): SQLDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(MissingRequiredProperty): SQLDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @pytest.mark.parametrize(\"modin_properties\", __modin_properties) def test_write_query_builder(self, pandas_properties, modin_properties): custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLDataNode(\"foo_bar\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: # mock connection execute dn.write(pd.DataFrame({\"foo\": [1, 2, 3], \"bar\": [4, 5, 6]})) assert len(engine_mock.mock_calls[4].args) == 1 assert engine_mock.mock_calls[4].args[0].text == \"DELETE FROM example\" assert len(engine_mock.mock_calls[5].args) == 2 assert engine_mock.mock_calls[5].args[0].text == \"INSERT INTO example VALUES (:foo, :bar)\" assert engine_mock.mock_calls[5].args[1] == [ {\"foo\": 1, \"bar\": 4}, {\"foo\": 2, \"bar\": 5}, {\"foo\": 3, \"bar\": 6}, ] custom_properties[\"write_query_builder\"] = single_write_query_builder dn = SQLDataNode(\"foo_bar\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: # mock connection execute dn.write(pd.DataFrame({\"foo\": [1, 2, 3], \"bar\": [4, 5, 6]})) assert len(engine_mock.mock_calls[4].args) == 1 assert 
engine_mock.mock_calls[4].args[0].text == \"DELETE FROM example\" custom_properties = modin_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLDataNode(\"foo_bar\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: # mock connection execute dn.write(modin_pd.DataFrame({\"foo\": [1, 2, 3], \"bar\": [4, 5, 6]})) assert len(engine_mock.mock_calls[4].args) == 1 assert engine_mock.mock_calls[4].args[0].text == \"DELETE FROM example\" assert len(engine_mock.mock_calls[5].args) == 2 assert engine_mock.mock_calls[5].args[0].text == \"INSERT INTO example VALUES (:foo, :bar)\" assert engine_mock.mock_calls[5].args[1] == [ {\"foo\": 1, \"bar\": 4}, {\"foo\": 2, \"bar\": 5}, {\"foo\": 3, \"bar\": 6}, ] custom_properties[\"write_query_builder\"] = single_write_query_builder dn = SQLDataNode(\"foo_bar\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: # mock connection execute dn.write(modin_pd.DataFrame({\"foo\": [1, 2, 3], \"bar\": [4, 5, 6]})) assert len(engine_mock.mock_calls[4].args) == 1 assert engine_mock.mock_calls[4].args[0].text == \"DELETE FROM example\" @pytest.mark.parametrize( \"tmp_sqlite_path\", [ \"tmp_sqlite_db_file_path\", \"tmp_sqlite_sqlite3_file_path\", ], ) def test_sqlite_read_file_with_different_extension(self, tmp_sqlite_path, request): tmp_sqlite_path = request.getfixturevalue(tmp_sqlite_path) folder_path, db_name, file_extension = tmp_sqlite_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * from example\", \"write_query_builder\": single_write_query_builder, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, } dn = SQLDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) data = dn.read() assert data.equals(pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}])) def test_sqlite_append_pandas(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"append_query_builder\": my_append_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, } dn = SQLDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) original_data = pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]) data = dn.read() assert_frame_equal(data, original_data) append_data_1 = pd.DataFrame([{\"foo\": 5, \"bar\": 6}, {\"foo\": 7, \"bar\": 8}]) dn.append(append_data_1) assert_frame_equal(dn.read(), pd.concat([original_data, append_data_1]).reset_index(drop=True)) def test_sqlite_append_modin(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"append_query_builder\": my_append_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"modin\", } dn = SQLDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) original_data = modin_pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]) data = dn.read() df_equals(data, original_data) append_data_1 = modin_pd.DataFrame([{\"foo\": 5, \"bar\": 6}, 
{\"foo\": 7, \"bar\": 8}]) dn.append(append_data_1) df_equals(dn.read(), modin_pd.concat([original_data, append_data_1]).reset_index(drop=True)) def test_sqlite_append_without_append_query_builder(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, } dn = SQLDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) with pytest.raises(MissingAppendQueryBuilder): dn.append(pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}])) def test_filter_pandas_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"pandas\", } dn = SQLDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, 2, 2])) assert dn[\"bar\"].equals(pd.Series([1, 2, 3, 1, 2, 3])) assert dn[:2].equals(pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = pd.DataFrame([{\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, ] ) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_modin_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_modin, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"modin\", } dn = SQLDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, 
{\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, 2, 2])) assert dn[\"bar\"].equals(pd.Series([1, 2, 3, 1, 2, 3])) assert dn[:2].equals(modin_pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = modin_pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = modin_pd.DataFrame([{\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = modin_pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, ] ) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_numpy_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"numpy\", } dn = SQLDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert np.array_equal(dn[0], np.array([1, 1])) assert np.array_equal(dn[1], np.array([1, 2])) assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]])) # Test filter data assert np.array_equal(dn.filter((\"foo\", 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn.filter((\"foo\", 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn.filter((\"bar\", 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]])) assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]])) assert np.array_equal( dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR), np.array([[1, 1], [1, 2], [2, 1], [2, 2]]), ) assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]])) def test_filter_does_not_read_all_entities(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * 
FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"numpy\", } dn = SQLDataNode(\"foo\", Scope.SCENARIO, properties=properties) # SQLDataNode.filter() should not call the MongoCollectionDataNode._read() method with patch.object(SQLDataNode, \"_read\") as read_mock: dn.filter((\"foo\", 1, Operator.EQUAL)) dn.filter((\"bar\", 2, Operator.NOT_EQUAL)) dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) assert read_mock[\"_read\"].call_count == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. import os import pathlib import pytest from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.csv import CSVDataNode from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.data.pickle import PickleDataNode from src.taipy.core.exceptions.exceptions import InvalidDataNodeType, ModelNotFound from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def file_exists(file_path: str) -> bool: return os.path.exists(file_path) class TestDataManager: def test_create_data_node_and_modify_properties_does_not_modify_config(self): dn_config = Config.configure_data_node(id=\"name\", foo=\"bar\") dn = _DataManager._create_and_set(dn_config, None, None) assert dn_config.properties.get(\"foo\") == \"bar\" assert dn_config.properties.get(\"baz\") is None dn.properties[\"baz\"] = \"qux\" _DataManager._set(dn) assert dn_config.properties.get(\"foo\") == \"bar\" assert dn_config.properties.get(\"baz\") is None assert dn.properties.get(\"foo\") == \"bar\" assert dn.properties.get(\"baz\") == \"qux\" def test_create_data_node_with_name_provided(self): dn_config = Config.configure_data_node(id=\"dn\", foo=\"bar\", name=\"acb\") dn = _DataManager._create_and_set(dn_config, None, None) assert dn.name == \"acb\" def test_create_and_get_csv_data_node(self): # Test we can instantiate a CsvDataNode from DataNodeConfig with : # - a csv type # - a default scenario scope # - No owner_id csv_dn_config = Config.configure_data_node(id=\"foo\", storage_type=\"csv\", path=\"bar\", has_header=True) csv_dn = _DataManager._create_and_set(csv_dn_config, None, None) assert isinstance(csv_dn, CSVDataNode) assert isinstance(_DataManager._get(csv_dn.id), CSVDataNode) assert _DataManager._exists(csv_dn.id) assert _DataManager._get(csv_dn.id) is not None assert _DataManager._get(csv_dn.id).id == csv_dn.id assert _DataManager._get(csv_dn.id).config_id == \"foo\" assert _DataManager._get(csv_dn.id).config_id == csv_dn.config_id assert _DataManager._get(csv_dn.id).scope == Scope.SCENARIO assert _DataManager._get(csv_dn.id).scope == csv_dn.scope 
assert _DataManager._get(csv_dn.id).owner_id is None assert _DataManager._get(csv_dn.id).owner_id == csv_dn.owner_id assert _DataManager._get(csv_dn.id).parent_ids == set() assert _DataManager._get(csv_dn.id).parent_ids == csv_dn.parent_ids assert _DataManager._get(csv_dn.id).last_edit_date is None assert _DataManager._get(csv_dn.id).last_edit_date == csv_dn.last_edit_date assert _DataManager._get(csv_dn.id).job_ids == [] assert _DataManager._get(csv_dn.id).job_ids == csv_dn.job_ids assert not _DataManager._get(csv_dn.id).is_ready_for_reading assert _DataManager._get(csv_dn.id).is_ready_for_reading == csv_dn.is_ready_for_reading assert len(_DataManager._get(csv_dn.id).properties) == 4 assert _DataManager._get(csv_dn.id).properties.get(\"path\") == \"bar\" assert _DataManager._get(csv_dn.id).properties.get(\"encoding\") == \"utf-8\" assert _DataManager._get(csv_dn.id).properties.get(\"has_header\") is True assert _DataManager._get(csv_dn.id).properties.get(\"exposed_type\") == \"pandas\" assert _DataManager._get(csv_dn.id).properties == csv_dn.properties assert _DataManager._get(csv_dn.id).edit_in_progress is False assert _DataManager._get(csv_dn.id)._editor_id is None assert _DataManager._get(csv_dn.id)._editor_expiration_date is None assert _DataManager._get(csv_dn) is not None assert _DataManager._get(csv_dn).id == csv_dn.id assert _DataManager._get(csv_dn).config_id == \"foo\" assert _DataManager._get(csv_dn).config_id == csv_dn.config_id assert _DataManager._get(csv_dn).scope == Scope.SCENARIO assert _DataManager._get(csv_dn).scope == csv_dn.scope assert _DataManager._get(csv_dn).owner_id is None assert _DataManager._get(csv_dn).owner_id == csv_dn.owner_id assert _DataManager._get(csv_dn).parent_ids == set() assert _DataManager._get(csv_dn).parent_ids == csv_dn.parent_ids assert _DataManager._get(csv_dn).last_edit_date is None assert _DataManager._get(csv_dn).last_edit_date == csv_dn.last_edit_date assert _DataManager._get(csv_dn).job_ids == [] assert _DataManager._get(csv_dn).job_ids == csv_dn.job_ids assert not _DataManager._get(csv_dn).is_ready_for_reading assert _DataManager._get(csv_dn).is_ready_for_reading == csv_dn.is_ready_for_reading assert len(_DataManager._get(csv_dn).properties) == 4 assert _DataManager._get(csv_dn).properties.get(\"path\") == \"bar\" assert _DataManager._get(csv_dn).properties.get(\"encoding\") == \"utf-8\" assert _DataManager._get(csv_dn).properties.get(\"has_header\") is True assert _DataManager._get(csv_dn.id).properties.get(\"exposed_type\") == \"pandas\" assert _DataManager._get(csv_dn).properties == csv_dn.properties assert _DataManager._get(csv_dn.id).edit_in_progress is False assert _DataManager._get(csv_dn.id)._editor_id is None assert _DataManager._get(csv_dn.id)._editor_expiration_date is None def test_edit_and_get_data_node(self): config = Config.configure_pickle_data_node(id=\"foo\") dn = _DataManager._create_and_set(config, None, None) assert _DataManager._get(dn.id).last_edit_date is None assert len(_DataManager._get(dn.id).properties) == 1 assert _DataManager._get(dn.id).properties.get(\"is_generated\") assert not _DataManager._get(dn.id).edit_in_progress assert _DataManager._get(dn.id)._editor_id is None assert _DataManager._get(dn.id)._editor_expiration_date is None dn.lock_edit(\"foo\") assert _DataManager._get(dn.id).last_edit_date is None assert len(_DataManager._get(dn.id).properties) == 1 assert _DataManager._get(dn.id).properties.get(\"is_generated\") assert _DataManager._get(dn.id).edit_in_progress assert 
_DataManager._get(dn.id).editor_id == \"foo\" assert _DataManager._get(dn.id).editor_expiration_date is not None dn.unlock_edit(\"foo\") assert _DataManager._get(dn.id).last_edit_date is None assert len(_DataManager._get(dn.id).properties) == 1 assert _DataManager._get(dn.id).properties.get(\"is_generated\") assert not _DataManager._get(dn.id).edit_in_progress assert _DataManager._get(dn.id).editor_id is None assert _DataManager._get(dn.id).editor_expiration_date is None def test_create_and_get_in_memory_data_node(self): # Test we can instantiate an InMemoryDataNode from DataNodeConfig with : # - an in_memory type # - a scenario scope # - an owner id # - some default data in_memory_dn_config = Config.configure_data_node( id=\"baz\", storage_type=\"in_memory\", scope=Scope.SCENARIO, default_data=\"qux\", other_data=\"foo\" ) in_mem_dn = _DataManager._create_and_set(in_memory_dn_config, \"Scenario_id\", {\"task_id\"}) assert isinstance(in_mem_dn, InMemoryDataNode) assert isinstance(_DataManager._get(in_mem_dn.id), InMemoryDataNode) assert _DataManager._exists(in_mem_dn.id) assert _DataManager._get(in_mem_dn.id) is not None assert _DataManager._get(in_mem_dn.id).id == in_mem_dn.id assert _DataManager._get(in_mem_dn.id).config_id == \"baz\" assert _DataManager._get(in_mem_dn.id).config_id == in_mem_dn.config_id assert _DataManager._get(in_mem_dn.id).scope == Scope.SCENARIO assert _DataManager._get(in_mem_dn.id).scope == in_mem_dn.scope assert _DataManager._get(in_mem_dn.id).owner_id == \"Scenario_id\" assert _DataManager._get(in_mem_dn.id).owner_id == in_mem_dn.owner_id assert _DataManager._get(in_mem_dn.id).parent_ids == {\"task_id\"} assert _DataManager._get(in_mem_dn.id).parent_ids == in_mem_dn.parent_ids assert _DataManager._get(in_mem_dn.id).last_edit_date is not None assert _DataManager._get(in_mem_dn.id).last_edit_date == in_mem_dn.last_edit_date assert _DataManager._get(in_mem_dn.id).job_ids == [] assert _DataManager._get(in_mem_dn.id).job_ids == in_mem_dn.job_ids assert _DataManager._get(in_mem_dn.id).is_ready_for_reading assert _DataManager._get(in_mem_dn.id).is_ready_for_reading == in_mem_dn.is_ready_for_reading assert len(_DataManager._get(in_mem_dn.id).properties) == 1 assert _DataManager._get(in_mem_dn.id).properties.get(\"other_data\") == \"foo\" assert _DataManager._get(in_mem_dn.id).properties == in_mem_dn.properties assert _DataManager._get(in_mem_dn) is not None assert _DataManager._get(in_mem_dn).id == in_mem_dn.id assert _DataManager._get(in_mem_dn).config_id == \"baz\" assert _DataManager._get(in_mem_dn).config_id == in_mem_dn.config_id assert _DataManager._get(in_mem_dn).scope == Scope.SCENARIO assert _DataManager._get(in_mem_dn).scope == in_mem_dn.scope assert _DataManager._get(in_mem_dn).owner_id == \"Scenario_id\" assert _DataManager._get(in_mem_dn).owner_id == in_mem_dn.owner_id assert _DataManager._get(in_mem_dn).parent_ids == {\"task_id\"} assert _DataManager._get(in_mem_dn).parent_ids == in_mem_dn.parent_ids assert _DataManager._get(in_mem_dn).last_edit_date is not None assert _DataManager._get(in_mem_dn).last_edit_date == in_mem_dn.last_edit_date assert _DataManager._get(in_mem_dn).job_ids == [] assert _DataManager._get(in_mem_dn).job_ids == in_mem_dn.job_ids assert _DataManager._get(in_mem_dn).is_ready_for_reading assert _DataManager._get(in_mem_dn).is_ready_for_reading == in_mem_dn.is_ready_for_reading assert len(_DataManager._get(in_mem_dn).properties) == 1 assert _DataManager._get(in_mem_dn).properties.get(\"other_data\") == \"foo\" assert 
_DataManager._get(in_mem_dn).properties == in_mem_dn.properties def test_create_and_get_pickle_data_node(self): # Test we can instantiate a PickleDataNode from DataNodeConfig with : # - an in_memory type # - a business cycle scope # - No owner id # - no default data dn_config = Config.configure_data_node(id=\"plop\", storage_type=\"pickle\", scope=Scope.CYCLE) pickle_dn = _DataManager._create_and_set(dn_config, None, {\"task_id_1\", \"task_id_2\"}) assert isinstance(pickle_dn, PickleDataNode) assert isinstance(_DataManager._get(pickle_dn.id), PickleDataNode) assert _DataManager._exists(pickle_dn.id) assert _DataManager._get(pickle_dn.id) is not None assert _DataManager._get(pickle_dn.id).id == pickle_dn.id assert _DataManager._get(pickle_dn.id).config_id == \"plop\" assert _DataManager._get(pickle_dn.id).config_id == pickle_dn.config_id assert _DataManager._get(pickle_dn.id).scope == Scope.CYCLE assert _DataManager._get(pickle_dn.id).scope == pickle_dn.scope assert _DataManager._get(pickle_dn.id).owner_id is None assert _DataManager._get(pickle_dn.id).owner_id == pickle_dn.owner_id assert _DataManager._get(pickle_dn.id).parent_ids == {\"task_id_1\", \"task_id_2\"} assert _DataManager._get(pickle_dn.id).parent_ids == pickle_dn.parent_ids assert _DataManager._get(pickle_dn.id).last_edit_date is None assert _DataManager._get(pickle_dn.id).last_edit_date == pickle_dn.last_edit_date assert _DataManager._get(pickle_dn.id).job_ids == [] assert _DataManager._get(pickle_dn.id).job_ids == pickle_dn.job_ids assert not _DataManager._get(pickle_dn.id).is_ready_for_reading assert _DataManager._get(pickle_dn.id).is_ready_for_reading == pickle_dn.is_ready_for_reading assert len(_DataManager._get(pickle_dn.id).properties) == 1 assert _DataManager._get(pickle_dn.id).properties == pickle_dn.properties assert _DataManager._get(pickle_dn) is not None assert _DataManager._get(pickle_dn).id == pickle_dn.id assert _DataManager._get(pickle_dn).config_id == \"plop\" assert _DataManager._get(pickle_dn).config_id == pickle_dn.config_id assert _DataManager._get(pickle_dn).scope == Scope.CYCLE assert _DataManager._get(pickle_dn).scope == pickle_dn.scope assert _DataManager._get(pickle_dn).owner_id is None assert _DataManager._get(pickle_dn).owner_id == pickle_dn.owner_id assert _DataManager._get(pickle_dn).parent_ids == {\"task_id_1\", \"task_id_2\"} assert _DataManager._get(pickle_dn).parent_ids == pickle_dn.parent_ids assert _DataManager._get(pickle_dn).last_edit_date is None assert _DataManager._get(pickle_dn).last_edit_date == pickle_dn.last_edit_date assert _DataManager._get(pickle_dn).job_ids == [] assert _DataManager._get(pickle_dn).job_ids == pickle_dn.job_ids assert not _DataManager._get(pickle_dn).is_ready_for_reading assert _DataManager._get(pickle_dn).is_ready_for_reading == pickle_dn.is_ready_for_reading assert len(_DataManager._get(pickle_dn).properties) == 1 assert _DataManager._get(pickle_dn).properties == pickle_dn.properties def test_create_raises_exception_with_wrong_type(self): wrong_type_dn_config = DataNodeConfig(id=\"foo\", storage_type=\"bar\", scope=DataNodeConfig._DEFAULT_SCOPE) with pytest.raises(InvalidDataNodeType): _DataManager._create_and_set(wrong_type_dn_config, None, None) def test_create_from_same_config_generates_new_data_node_and_new_id(self): dn_config = Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") dn = _DataManager._create_and_set(dn_config, None, None) dn_2 = _DataManager._create_and_set(dn_config, None, None) assert dn_2.id != dn.id def 
test_create_uses_overridden_attributes_in_config_file(self): Config.override(os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/config.toml\")) csv_dn_cfg = Config.configure_data_node(id=\"foo\", storage_type=\"csv\", path=\"bar\", has_header=True) csv_dn = _DataManager._create_and_set(csv_dn_cfg, None, None) assert csv_dn.config_id == \"foo\" assert isinstance(csv_dn, CSVDataNode) assert csv_dn._path == \"path_from_config_file\" assert csv_dn.has_header csv_dn_cfg = Config.configure_data_node(id=\"baz\", storage_type=\"csv\", path=\"bar\", has_header=True) csv_dn = _DataManager._create_and_set(csv_dn_cfg, None, None) assert csv_dn.config_id == \"baz\" assert isinstance(csv_dn, CSVDataNode) assert csv_dn._path == \"bar\" assert csv_dn.has_header def test_get_if_not_exists(self): with pytest.raises(ModelNotFound): _DataManager._repository._load(\"test_data_node_2\") def test_get_all(self): assert len(_DataManager._get_all()) == 0 dn_config_1 = Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") _DataManager._create_and_set(dn_config_1, None, None) assert len(_DataManager._get_all()) == 1 dn_config_2 = Config.configure_data_node(id=\"baz\", storage_type=\"in_memory\") _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_all()) == 3 assert len([dn for dn in _DataManager._get_all() if dn.config_id == \"foo\"]) == 1 assert len([dn for dn in _DataManager._get_all() if dn.config_id == \"baz\"]) == 2 def test_get_all_on_multiple_versions_environment(self): # Create 5 data nodes with 2 versions each # Only version 1.0 has the data node with config_id = \"config_id_1\" # Only version 2.0 has the data node with config_id = \"config_id_6\" for version in range(1, 3): for i in range(5): _DataManager._set( InMemoryDataNode( f\"config_id_{i + version}\", Scope.SCENARIO, id=DataNodeId(f\"id{i}_v{version}\"), version=f\"{version}.0\", ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"1.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"2.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_set(self): dn = InMemoryDataNode( \"config_id\", Scope.SCENARIO, id=DataNodeId(\"id\"), owner_id=None, parent_ids={\"task_id_1\"}, last_edit_date=None, edits=[], edit_in_progress=False, properties={\"foo\": \"bar\"}, ) assert len(_DataManager._get_all()) == 0 assert not _DataManager._exists(dn.id) _DataManager._set(dn) assert 
len(_DataManager._get_all()) == 1 assert _DataManager._exists(dn.id) # changing data node attribute dn.config_id = \"foo\" assert dn.config_id == \"foo\" _DataManager._set(dn) assert len(_DataManager._get_all()) == 1 assert dn.config_id == \"foo\" assert _DataManager._get(dn.id).config_id == \"foo\" def test_delete(self): dn_1 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_1\") dn_2 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_2\") dn_3 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_3\") assert len(_DataManager._get_all()) == 0 _DataManager._set(dn_1) _DataManager._set(dn_2) _DataManager._set(dn_3) assert len(_DataManager._get_all()) == 3 assert all(_DataManager._exists(dn.id) for dn in [dn_1, dn_2, dn_3]) _DataManager._delete(dn_1.id) assert len(_DataManager._get_all()) == 2 assert _DataManager._get(dn_2.id).id == dn_2.id assert _DataManager._get(dn_3.id).id == dn_3.id assert _DataManager._get(dn_1.id) is None assert all(_DataManager._exists(dn.id) for dn in [dn_2, dn_3]) assert not _DataManager._exists(dn_1.id) _DataManager._delete_all() assert len(_DataManager._get_all()) == 0 assert not any(_DataManager._exists(dn.id) for dn in [dn_2, dn_3]) def test_get_or_create(self): def _get_or_create_dn(config, *args): return _DataManager._bulk_get_or_create([config], *args)[config] _DataManager._delete_all() global_dn_config = Config.configure_data_node( id=\"test_data_node\", storage_type=\"in_memory\", scope=Scope.GLOBAL, data=\"In memory Data Node\" ) cycle_dn_config = Config.configure_data_node( id=\"test_data_node1\", storage_type=\"in_memory\", scope=Scope.CYCLE, data=\"In memory Data Node\" ) scenario_dn_config = Config.configure_data_node( id=\"test_data_node2\", storage_type=\"in_memory\", scope=Scope.SCENARIO, data=\"In memory scenario\" ) assert len(_DataManager._get_all()) == 0 global_dn = _get_or_create_dn(global_dn_config, None, None) assert len(_DataManager._get_all()) == 1 global_dn_bis = _get_or_create_dn(global_dn_config, None) assert len(_DataManager._get_all()) == 1 assert global_dn.id == global_dn_bis.id scenario_dn = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 scenario_dn_bis = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 assert scenario_dn.id == scenario_dn_bis.id scenario_dn_ter = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 assert scenario_dn.id == scenario_dn_bis.id assert scenario_dn_bis.id == scenario_dn_ter.id scenario_dn_quater = _get_or_create_dn(scenario_dn_config, None, \"scenario_id_2\") assert len(_DataManager._get_all()) == 3 assert scenario_dn.id == scenario_dn_bis.id assert scenario_dn_bis.id == scenario_dn_ter.id assert scenario_dn_ter.id != scenario_dn_quater.id assert len(_DataManager._get_all()) == 3 cycle_dn = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 cycle_dn_1 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_1.id cycle_dn_2 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id\") assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_2.id cycle_dn_3 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_3.id cycle_dn_4 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id\") assert 
len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_4.id cycle_dn_5 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id_2\") assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_5.id assert cycle_dn_1.id == cycle_dn_2.id assert cycle_dn_2.id == cycle_dn_3.id assert cycle_dn_3.id == cycle_dn_4.id assert cycle_dn_4.id == cycle_dn_5.id def test_ensure_persistence_of_data_node(self): dm = _DataManager() dm._delete_all() dn_config_1 = Config.configure_data_node( id=\"data_node_1\", storage_type=\"in_memory\", data=\"In memory sequence 2\" ) dn_config_2 = Config.configure_data_node( id=\"data_node_2\", storage_type=\"in_memory\", data=\"In memory sequence 2\" ) dm._bulk_get_or_create([dn_config_1, dn_config_2]) assert len(dm._get_all()) == 2 # Delete the DataManager to ensure it's get from the storage system del dm dm = _DataManager() dm._bulk_get_or_create([dn_config_1]) assert len(dm._get_all()) == 2 dm._delete_all() def test_clean_generated_pickle_files(self, pickle_file_path): user_pickle_dn_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", path=pickle_file_path, default_data=\"d\" ) generated_pickle_dn_1_config = Config.configure_data_node(id=\"d2\", storage_type=\"pickle\", default_data=\"d\") generated_pickle_dn_2_config = Config.configure_data_node(id=\"d3\", storage_type=\"pickle\", default_data=\"d\") dns = _DataManager._bulk_get_or_create( [user_pickle_dn_config, generated_pickle_dn_1_config, generated_pickle_dn_2_config] ) user_pickle_dn = dns[user_pickle_dn_config] generated_pickle_dn_1 = dns[generated_pickle_dn_1_config] generated_pickle_dn_2 = dns[generated_pickle_dn_2_config] _DataManager._clean_pickle_file(user_pickle_dn.id) assert file_exists(user_pickle_dn.path) _DataManager._clean_pickle_files([generated_pickle_dn_1, generated_pickle_dn_2]) assert not file_exists(generated_pickle_dn_1.path) assert not file_exists(generated_pickle_dn_2.path) def test_delete_does_clean_generated_pickle_files(self, pickle_file_path): user_pickle_dn_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", path=pickle_file_path, default_data=\"d\" ) generated_pickle_dn_config_1 = Config.configure_data_node(id=\"d2\", storage_type=\"pickle\", default_data=\"d\") generated_pickle_dn_config_2 = Config.configure_data_node(id=\"d3\", storage_type=\"pickle\", default_data=\"d\") generated_pickle_dn_config_3 = Config.configure_data_node(id=\"d4\", storage_type=\"pickle\", default_data=\"d\") dns = _DataManager._bulk_get_or_create( [ user_pickle_dn_config, generated_pickle_dn_config_1, generated_pickle_dn_config_2, generated_pickle_dn_config_3, ] ) user_pickle_dn = dns[user_pickle_dn_config] generated_pickle_dn_1 = dns[generated_pickle_dn_config_1] generated_pickle_dn_2 = dns[generated_pickle_dn_config_2] generated_pickle_dn_3 = dns[generated_pickle_dn_config_3] _DataManager._delete(user_pickle_dn.id) assert file_exists(user_pickle_dn.path) _DataManager._delete_many([generated_pickle_dn_1.id, generated_pickle_dn_2.id]) assert not file_exists(generated_pickle_dn_1.path) assert not file_exists(generated_pickle_dn_2.path) _DataManager._delete_all() assert not file_exists(generated_pickle_dn_3.path) def test_create_dn_from_loaded_config_no_scope(self): file_config = NamedTemporaryFile( \"\"\" [TAIPY] [DATA_NODE.a] default_data = \"4:int\" [DATA_NODE.b] [TASK.t] function = \"math.sqrt:function\" inputs = [ \"a:SECTION\",] outputs = [ \"b:SECTION\",] skippable = \"False:bool\" [SCENARIO.s] tasks = [ \"t:SECTION\",] 
sequences.s_sequence = [ \"t:SECTION\",] [SCENARIO.s.comparators] \"\"\" ) from src.taipy import core as tp Config.override(file_config.filename) tp.create_scenario(Config.scenarios[\"s\"]) tp.create_scenario(Config.scenarios[\"s\"]) assert len(tp.get_data_nodes()) == 4 def test_create_dn_from_loaded_config_no_storage_type(self): file_config = NamedTemporaryFile( \"\"\" [TAIPY] [DATA_NODE.input] scope = \"SCENARIO:SCOPE\" default_data = \"21:int\" [DATA_NODE.output] storage_type = \"in_memory\" scope = \"SCENARIO:SCOPE\" [TASK.double] inputs = [ \"input:SECTION\",] function = \"math.sqrt:function\" outputs = [ \"output:SECTION\",] skippable = \"False:bool\" [SCENARIO.my_scenario] tasks = [ \"double:SECTION\",] sequences.my_sequence = [ \"double:SECTION\",] [SCENARIO.my_scenario.comparators] \"\"\" ) from src.taipy import core as tp Config.override(file_config.filename) scenario = tp.create_scenario(Config.scenarios[\"my_scenario\"]) assert isinstance(scenario.input, PickleDataNode) assert isinstance(scenario.output, InMemoryDataNode) def test_create_dn_from_loaded_config_modified_default_config(self): file_config = NamedTemporaryFile( \"\"\" [TAIPY] [DATA_NODE.input] scope = \"SCENARIO:SCOPE\" default_path=\"fake/path.csv\" [DATA_NODE.output] storage_type = \"in_memory\" scope = \"SCENARIO:SCOPE\" [TASK.double] inputs = [ \"input:SECTION\",] function = \"math.sqrt:function\" outputs = [ \"output:SECTION\",] skippable = \"False:bool\" [SCENARIO.my_scenario] tasks = [ \"double:SECTION\",] sequences.my_sequence = [ \"double:SECTION\",] [SCENARIO.my_scenario.comparators] \"\"\" ) from src.taipy import core as tp Config.set_default_data_node_configuration(storage_type=\"csv\") Config.override(file_config.filename) scenario = tp.create_scenario(Config.scenarios[\"my_scenario\"]) assert isinstance(scenario.input, CSVDataNode) assert isinstance(scenario.output, InMemoryDataNode) def test_get_tasks_by_config_id(self): dn_config_1 = Config.configure_data_node(\"dn_1\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", scope=Scope.SCENARIO) dn_config_3 = Config.configure_data_node(\"dn_3\", scope=Scope.SCENARIO) dn_1_1 = _DataManager._create_and_set(dn_config_1, None, None) dn_1_2 = _DataManager._create_and_set(dn_config_1, None, None) dn_1_3 = _DataManager._create_and_set(dn_config_1, None, None) assert len(_DataManager._get_all()) == 3 dn_2_1 = _DataManager._create_and_set(dn_config_2, None, None) dn_2_2 = _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_all()) == 5 dn_3_1 = _DataManager._create_and_set(dn_config_3, None, None) assert len(_DataManager._get_all()) == 6 dn_1_datanodes = _DataManager._get_by_config_id(dn_config_1.id) assert len(dn_1_datanodes) == 3 assert sorted([dn_1_1.id, dn_1_2.id, dn_1_3.id]) == sorted([sequence.id for sequence in dn_1_datanodes]) dn_2_datanodes = _DataManager._get_by_config_id(dn_config_2.id) assert len(dn_2_datanodes) == 2 assert sorted([dn_2_1.id, dn_2_2.id]) == sorted([sequence.id for sequence in dn_2_datanodes]) dn_3_datanodes = _DataManager._get_by_config_id(dn_config_3.id) assert len(dn_3_datanodes) == 1 assert sorted([dn_3_1.id]) == sorted([sequence.id for sequence in dn_3_datanodes]) def test_get_data_nodes_by_config_id_in_multiple_versions_environment(self): dn_config_1 = Config.configure_data_node(\"dn_1\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", scope=Scope.SCENARIO) _VersionManager._set_experiment_version(\"1.0\") _DataManager._create_and_set(dn_config_1, None, 
None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_by_config_id(dn_config_1.id)) == 3 assert len(_DataManager._get_by_config_id(dn_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_by_config_id(dn_config_1.id)) == 3 assert len(_DataManager._get_by_config_id(dn_config_2.id)) == 2 "} {"text": "from typing import Dict, List import numpy as np import pandas as pd import pytest from src.taipy.core.data.operator import JoinOperator, Operator from .utils import ( CustomClass, FakeCustomDataNode, FakeDataframeDataNode, FakeDataNode, FakeListDataNode, FakeMultiSheetExcelCustomDataNode, FakeMultiSheetExcelDataFrameDataNode, FakeNumpyarrayDataNode, ) def test_filter_pandas_exposed_type(default_data_frame): dn = FakeDataNode(\"fake_dn\") dn.write(\"Any data\") with pytest.raises(NotImplementedError): dn.filter(((\"any\", 0, Operator.EQUAL)), JoinOperator.OR) with pytest.raises(NotImplementedError): dn.filter(((\"any\", 0, Operator.NOT_EQUAL)), JoinOperator.OR) with pytest.raises(NotImplementedError): dn.filter(((\"any\", 0, Operator.LESS_THAN)), JoinOperator.AND) with pytest.raises(NotImplementedError): dn.filter(((\"any\", 0, Operator.LESS_OR_EQUAL)), JoinOperator.AND) with pytest.raises(NotImplementedError): dn.filter(((\"any\", 0, Operator.GREATER_THAN))) with pytest.raises(NotImplementedError): dn.filter((\"any\", 0, Operator.GREATER_OR_EQUAL)) df_dn = FakeDataframeDataNode(\"fake_dataframe_dn\", default_data_frame) COLUMN_NAME_1 = \"a\" COLUMN_NAME_2 = \"b\" assert len(df_dn.filter((COLUMN_NAME_1, 1, Operator.EQUAL))) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] == 1] ) assert len(df_dn.filter((COLUMN_NAME_1, 1, Operator.NOT_EQUAL))) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] != 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.EQUAL)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] == 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.NOT_EQUAL)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] != 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.LESS_THAN)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] < 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.LESS_OR_EQUAL)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] <= 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.GREATER_THAN)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] > 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.GREATER_OR_EQUAL)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] >= 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, -1000, Operator.LESS_OR_EQUAL)])) == 0 assert len(df_dn.filter([(COLUMN_NAME_1, 1000, Operator.GREATER_OR_EQUAL)])) == 0 assert len(df_dn.filter([(COLUMN_NAME_1, 4, Operator.EQUAL), (COLUMN_NAME_1, 5, Operator.EQUAL)])) == len( default_data_frame[(default_data_frame[COLUMN_NAME_1] == 4) & (default_data_frame[COLUMN_NAME_1] == 5)] ) assert len( df_dn.filter([(COLUMN_NAME_1, 4, Operator.EQUAL), (COLUMN_NAME_2, 5, Operator.EQUAL)], 
JoinOperator.OR) ) == len(default_data_frame[(default_data_frame[COLUMN_NAME_1] == 4) | (default_data_frame[COLUMN_NAME_2] == 5)]) assert len( df_dn.filter( [(COLUMN_NAME_1, 1, Operator.GREATER_THAN), (COLUMN_NAME_2, 3, Operator.GREATER_THAN)], JoinOperator.AND ) ) == len(default_data_frame[(default_data_frame[COLUMN_NAME_1] > 1) & (default_data_frame[COLUMN_NAME_2] > 3)]) assert len( df_dn.filter( [(COLUMN_NAME_1, 2, Operator.GREATER_THAN), (COLUMN_NAME_1, 3, Operator.GREATER_THAN)], JoinOperator.OR ) ) == len(default_data_frame[(default_data_frame[COLUMN_NAME_1] > 2) | (default_data_frame[COLUMN_NAME_1] > 3)]) assert len( df_dn.filter( [(COLUMN_NAME_1, 10, Operator.GREATER_THAN), (COLUMN_NAME_1, -10, Operator.LESS_THAN)], JoinOperator.AND ) ) == len(default_data_frame[(default_data_frame[COLUMN_NAME_1] > 10) | (default_data_frame[COLUMN_NAME_1] < -10)]) assert len( df_dn.filter( [(COLUMN_NAME_1, 10, Operator.GREATER_THAN), (COLUMN_NAME_1, -10, Operator.LESS_THAN)], JoinOperator.OR ) ) == len(default_data_frame[(default_data_frame[COLUMN_NAME_1] > 10) | (default_data_frame[COLUMN_NAME_1] < -10)]) def test_filter_list(): list_dn = FakeListDataNode(\"fake_list_dn\") KEY_NAME = \"value\" assert len(list_dn.filter((KEY_NAME, 4, Operator.EQUAL))) == 1 assert len(list_dn.filter((KEY_NAME, 4, Operator.NOT_EQUAL))) == 9 assert len(list_dn.filter([(KEY_NAME, 4, Operator.EQUAL)])) == 1 assert len(list_dn.filter([(KEY_NAME, 4, Operator.NOT_EQUAL)])) == 9 assert len(list_dn.filter([(KEY_NAME, 4, Operator.LESS_THAN)])) == 4 assert len(list_dn.filter([(KEY_NAME, 4, Operator.LESS_OR_EQUAL)])) == 5 assert len(list_dn.filter([(KEY_NAME, 4, Operator.GREATER_THAN)])) == 5 assert len(list_dn.filter([(KEY_NAME, 4, Operator.GREATER_OR_EQUAL)])) == 6 assert len(list_dn.filter([(KEY_NAME, -1000, Operator.LESS_OR_EQUAL)])) == 0 assert len(list_dn.filter([(KEY_NAME, 1000, Operator.GREATER_OR_EQUAL)])) == 0 assert len(list_dn.filter([(KEY_NAME, 4, Operator.EQUAL), (KEY_NAME, 5, Operator.EQUAL)])) == 0 assert len(list_dn.filter([(KEY_NAME, 4, Operator.EQUAL), (KEY_NAME, 5, Operator.EQUAL)], JoinOperator.OR)) == 2 assert len(list_dn.filter([(KEY_NAME, 4, Operator.EQUAL), (KEY_NAME, 11, Operator.EQUAL)], JoinOperator.AND)) == 0 assert len(list_dn.filter([(KEY_NAME, 4, Operator.EQUAL), (KEY_NAME, 11, Operator.EQUAL)], JoinOperator.OR)) == 1 assert ( len(list_dn.filter([(KEY_NAME, -10, Operator.LESS_OR_EQUAL), (KEY_NAME, 11, Operator.GREATER_OR_EQUAL)])) == 0 ) assert ( len( list_dn.filter( [ (KEY_NAME, 4, Operator.GREATER_OR_EQUAL), (KEY_NAME, 6, Operator.GREATER_OR_EQUAL), ], JoinOperator.AND, ) ) == 4 ) assert ( len( list_dn.filter( [ (KEY_NAME, 4, Operator.GREATER_OR_EQUAL), (KEY_NAME, 6, Operator.GREATER_OR_EQUAL), (KEY_NAME, 11, Operator.EQUAL), ], JoinOperator.AND, ) ) == 0 ) assert ( len( list_dn.filter( [ (KEY_NAME, 4, Operator.GREATER_OR_EQUAL), (KEY_NAME, 6, Operator.GREATER_OR_EQUAL), (KEY_NAME, 11, Operator.EQUAL), ], JoinOperator.OR, ) ) == 6 ) def test_filter_numpy_exposed_type(default_data_frame): default_array = default_data_frame.to_numpy() df_dn = FakeNumpyarrayDataNode(\"fake_dataframe_dn\", default_array) assert len(df_dn.filter((0, 1, Operator.EQUAL))) == len(default_array[default_array[:, 0] == 1]) assert len(df_dn.filter((0, 1, Operator.NOT_EQUAL))) == len(default_array[default_array[:, 0] != 1]) assert len(df_dn.filter([(0, 1, Operator.EQUAL)])) == len(default_array[default_array[:, 0] == 1]) assert len(df_dn.filter([(0, 1, Operator.NOT_EQUAL)])) == len(default_array[default_array[:, 0] != 1]) 
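For reference, the pandas and NumPy equivalences these filter assertions rely on can be spelled out directly. The following is a minimal standalone sketch using plain pandas/NumPy only (no Taipy objects); the `df`/`arr` values are illustrative and are not taken from the `default_data_frame` fixture used by the tests above.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 30]})
arr = df.to_numpy()

# Operator.EQUAL / Operator.NOT_EQUAL on a pandas-exposed node correspond to boolean masks:
assert len(df[df["a"] == 1]) == 1
assert len(df[df["a"] != 1]) == 2

# JoinOperator.AND / JoinOperator.OR combine the per-condition masks with & and |:
assert len(df[(df["a"] > 1) & (df["b"] > 20)]) == 1
assert len(df[(df["a"] > 1) | (df["b"] > 20)]) == 2

# For a numpy-exposed node, the same comparisons are applied to array columns:
assert len(arr[arr[:, 0] == 1]) == 1
assert len(arr[(arr[:, 0] > 1) & (arr[:, 1] > 20)]) == 1
```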
assert len(df_dn.filter([(0, 1, Operator.LESS_THAN)])) == len(default_array[default_array[:, 0] < 1]) assert len(df_dn.filter([(0, 1, Operator.LESS_OR_EQUAL)])) == len(default_array[default_array[:, 0] <= 1]) assert len(df_dn.filter([(0, 1, Operator.GREATER_THAN)])) == len(default_array[default_array[:, 0] > 1]) assert len(df_dn.filter([(0, 1, Operator.GREATER_OR_EQUAL)])) == len(default_array[default_array[:, 0] >= 1]) assert len(df_dn.filter([(0, -1000, Operator.LESS_OR_EQUAL)])) == 0 assert len(df_dn.filter([(0, 1000, Operator.GREATER_OR_EQUAL)])) == 0 assert len(df_dn.filter([(0, 4, Operator.EQUAL), (0, 5, Operator.EQUAL)])) == len( default_array[(default_array[:, 0] == 4) & (default_array[:, 0] == 5)] ) assert len(df_dn.filter([(0, 4, Operator.EQUAL), (1, 5, Operator.EQUAL)], JoinOperator.OR)) == len( default_array[(default_array[:, 0] == 4) | (default_array[:, 1] == 5)] ) assert len(df_dn.filter([(0, 1, Operator.GREATER_THAN), (1, 3, Operator.GREATER_THAN)], JoinOperator.AND)) == len( default_array[(default_array[:, 0] > 1) & (default_array[:, 1] > 3)] ) assert len(df_dn.filter([(0, 2, Operator.GREATER_THAN), (0, 3, Operator.GREATER_THAN)], JoinOperator.OR)) == len( default_array[(default_array[:, 0] > 2) | (default_array[:, 0] > 3)] ) assert len(df_dn.filter([(0, 10, Operator.GREATER_THAN), (0, -10, Operator.LESS_THAN)], JoinOperator.AND)) == len( default_array[(default_array[:, 0] > 10) | (default_array[:, 0] < -10)] ) assert len(df_dn.filter([(0, 10, Operator.GREATER_THAN), (0, -10, Operator.LESS_THAN)], JoinOperator.OR)) == len( default_array[(default_array[:, 0] > 10) | (default_array[:, 0] < -10)] ) def test_filter_by_get_item(default_data_frame): # get item for DataFrame data_type default_data_frame[1] = [100, 100] df_dn = FakeDataframeDataNode(\"fake_dataframe_dn\", default_data_frame) filtered_df_dn = df_dn[\"a\"] assert isinstance(filtered_df_dn, pd.Series) assert len(filtered_df_dn) == len(default_data_frame[\"a\"]) assert filtered_df_dn.to_dict() == default_data_frame[\"a\"].to_dict() filtered_df_dn = df_dn[1] assert isinstance(filtered_df_dn, pd.Series) assert len(filtered_df_dn) == len(default_data_frame[1]) assert filtered_df_dn.to_dict() == default_data_frame[1].to_dict() filtered_df_dn = df_dn[0:2] assert isinstance(filtered_df_dn, pd.DataFrame) assert filtered_df_dn.shape == default_data_frame[0:2].shape assert len(filtered_df_dn) == 2 bool_df = default_data_frame.copy(deep=True) > 4 filtered_df_dn = df_dn[bool_df] assert isinstance(filtered_df_dn, pd.DataFrame) bool_1d_index = [True, False] filtered_df_dn = df_dn[bool_1d_index] assert isinstance(filtered_df_dn, pd.DataFrame) assert filtered_df_dn.to_dict() == default_data_frame[bool_1d_index].to_dict() assert len(filtered_df_dn) == 1 filtered_df_dn = df_dn[[\"a\", \"b\"]] assert isinstance(filtered_df_dn, pd.DataFrame) assert filtered_df_dn.shape == default_data_frame[[\"a\", \"b\"]].shape assert filtered_df_dn.to_dict() == default_data_frame[[\"a\", \"b\"]].to_dict() # get item for custom data_type custom_dn = FakeCustomDataNode(\"fake_custom_dn\") filtered_custom_dn = custom_dn[\"a\"] assert isinstance(filtered_custom_dn, List) assert len(filtered_custom_dn) == 10 assert filtered_custom_dn == [i for i in range(10)] filtered_custom_dn = custom_dn[0:5] assert isinstance(filtered_custom_dn, List) assert all([isinstance(x, CustomClass) for x in filtered_custom_dn]) assert len(filtered_custom_dn) == 5 bool_1d_index = [True if i < 5 else False for i in range(10)] filtered_custom_dn = custom_dn[bool_1d_index] assert 
isinstance(filtered_custom_dn, List) assert len(filtered_custom_dn) == 5 assert filtered_custom_dn == custom_dn._read()[:5] filtered_custom_dn = custom_dn[[\"a\", \"b\"]] assert isinstance(filtered_custom_dn, List) assert all([isinstance(x, Dict) for x in filtered_custom_dn]) assert len(filtered_custom_dn) == 10 assert filtered_custom_dn == [{\"a\": i, \"b\": i * 2} for i in range(10)] # get item for Multi-sheet Excel data_type multi_sheet_excel_df_dn = FakeMultiSheetExcelDataFrameDataNode(\"fake_multi_sheet_excel_df_dn\", default_data_frame) filtered_multi_sheet_excel_df_dn = multi_sheet_excel_df_dn[\"Sheet1\"] assert isinstance(filtered_multi_sheet_excel_df_dn, pd.DataFrame) assert len(filtered_multi_sheet_excel_df_dn) == len(default_data_frame) assert np.array_equal(filtered_multi_sheet_excel_df_dn.to_numpy(), default_data_frame.to_numpy()) multi_sheet_excel_custom_dn = FakeMultiSheetExcelCustomDataNode(\"fake_multi_sheet_excel_df_dn\") filtered_multi_sheet_excel_custom_dn = multi_sheet_excel_custom_dn[\"Sheet1\"] assert isinstance(filtered_multi_sheet_excel_custom_dn, List) assert len(filtered_multi_sheet_excel_custom_dn) == 10 expected_value = [CustomClass(i, i * 2) for i in range(10)] assert all( [ expected.a == filtered.a and expected.b == filtered.b for expected, filtered in zip(expected_value, filtered_multi_sheet_excel_custom_dn) ] ) "} {"text": "import os import pathlib from datetime import datetime from importlib import util from time import sleep import modin.pandas as modin_pd import numpy as np import pandas as pd import pytest from modin.pandas.test.utils import df_equals from pandas.testing import assert_frame_equal from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.data.parquet import ParquetDataNode from src.taipy.core.exceptions.exceptions import ( InvalidExposedType, NoData, UnknownCompressionAlgorithm, UnknownParquetEngine, ) from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import InvalidConfigurationId @pytest.fixture(scope=\"function\", autouse=True) def cleanup(): yield path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.parquet\") if os.path.isfile(path): os.remove(path) class MyCustomObject: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class MyOtherCustomObject: def __init__(self, id, sentence): self.id = id self.sentence = sentence def create_custom_class(**kwargs): return MyOtherCustomObject(id=kwargs[\"id\"], sentence=kwargs[\"text\"]) class TestParquetDataNode: __engine = [\"pyarrow\"] if util.find_spec(\"fastparquet\"): __engine.append(\"fastparquet\") def test_create(self): path = \"data/node/path\" compression = \"snappy\" dn = ParquetDataNode( \"foo_bar\", Scope.SCENARIO, properties={\"path\": path, \"compression\": compression, \"name\": \"super name\"} ) assert isinstance(dn, ParquetDataNode) assert dn.storage_type() == \"parquet\" assert dn.config_id == \"foo_bar\" assert dn.name == \"super name\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.last_edit_date is None assert dn.job_ids == [] assert not dn.is_ready_for_reading assert dn.path == path assert dn.exposed_type == \"pandas\" assert dn.compression == \"snappy\" assert dn.engine == \"pyarrow\" with pytest.raises(InvalidConfigurationId): dn = 
ParquetDataNode(\"foo bar\", Scope.SCENARIO, properties={\"path\": path, \"name\": \"super name\"}) def test_get_user_properties(self, parquet_file_path): dn_1 = ParquetDataNode(\"dn_1\", Scope.SCENARIO, properties={\"path\": parquet_file_path}) assert dn_1._get_user_properties() == {} dn_2 = ParquetDataNode( \"dn_2\", Scope.SCENARIO, properties={ \"exposed_type\": \"numpy\", \"default_data\": \"foo\", \"default_path\": parquet_file_path, \"engine\": \"pyarrow\", \"compression\": \"snappy\", \"read_kwargs\": {\"columns\": [\"a\", \"b\"]}, \"write_kwargs\": {\"index\": False}, \"foo\": \"bar\", }, ) # exposed_type, default_data, default_path, path, engine, compression, read_kwargs, write_kwargs # are filtered out assert dn_2._get_user_properties() == {\"foo\": \"bar\"} def test_new_parquet_data_node_with_existing_file_is_ready_for_reading(self, parquet_file_path): not_ready_dn_cfg = Config.configure_data_node( \"not_ready_data_node_config_id\", \"parquet\", path=\"NOT_EXISTING.parquet\" ) not_ready_dn = _DataManager._bulk_get_or_create([not_ready_dn_cfg])[not_ready_dn_cfg] assert not not_ready_dn.is_ready_for_reading ready_dn_cfg = Config.configure_data_node(\"ready_data_node_config_id\", \"parquet\", path=parquet_file_path) ready_dn = _DataManager._bulk_get_or_create([ready_dn_cfg])[ready_dn_cfg] assert ready_dn.is_ready_for_reading @pytest.mark.parametrize( [\"properties\", \"exists\"], [ ({}, False), ({\"default_data\": {\"a\": [\"foo\", \"bar\"]}}, True), ], ) def test_create_with_default_data(self, properties, exists): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) assert os.path.exists(dn.path) is exists @pytest.mark.parametrize(\"engine\", __engine) def test_read_file(self, engine, parquet_file_path): not_existing_parquet = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": \"nonexistent.parquet\", \"engine\": engine} ) with pytest.raises(NoData): assert not_existing_parquet.read() is None not_existing_parquet.read_or_raise() df = pd.read_parquet(parquet_file_path) # Create ParquetDataNode without exposed_type (Default is pandas.DataFrame) parquet_data_node_as_pandas = ParquetDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"engine\": engine} ) data_pandas = parquet_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 2 assert data_pandas.equals(df) assert np.array_equal(data_pandas.to_numpy(), df.to_numpy()) # Create ParquetDataNode with modin exposed_type parquet_data_node_as_modin = ParquetDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"modin\", \"engine\": engine} ) data_modin = parquet_data_node_as_modin.read() assert isinstance(data_modin, modin_pd.DataFrame) assert len(data_modin) == 2 assert data_modin.equals(df) assert np.array_equal(data_modin.to_numpy(), df.to_numpy()) # Create ParquetDataNode with numpy exposed_type parquet_data_node_as_numpy = ParquetDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"numpy\", \"engine\": engine} ) data_numpy = parquet_data_node_as_numpy.read() assert isinstance(data_numpy, np.ndarray) assert len(data_numpy) == 2 assert np.array_equal(data_numpy, df.to_numpy()) @pytest.mark.parametrize(\"engine\", __engine) def test_read_folder(self, engine): parquet_folder_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/parquet_example\") df = pd.read_parquet(parquet_folder_path) parquet_data_node_as_pandas 
= ParquetDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": parquet_folder_path, \"engine\": engine} ) data_pandas = parquet_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 5 assert data_pandas.equals(df) assert np.array_equal(data_pandas.to_numpy(), df.to_numpy()) def test_set_path(self): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": \"foo.parquet\"}) assert dn.path == \"foo.parquet\" dn.path = \"bar.parquet\" assert dn.path == \"bar.parquet\" @pytest.mark.parametrize(\"engine\", __engine) def test_read_write_after_modify_path(self, engine): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.parquet\") new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.parquet\") dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"engine\": engine}) read_data = dn.read() assert read_data is not None dn.path = new_path with pytest.raises(FileNotFoundError): dn.read() dn.write(read_data) assert dn.read().equals(read_data) def test_read_custom_exposed_type(self): example_parquet_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.parquet\") dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": example_parquet_path, \"exposed_type\": MyCustomObject} ) assert all([isinstance(obj, MyCustomObject) for obj in dn.read()]) dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": example_parquet_path, \"exposed_type\": create_custom_class} ) assert all([isinstance(obj, MyOtherCustomObject) for obj in dn.read()]) def test_raise_error_unknown_parquet_engine(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.parquet\") with pytest.raises(UnknownParquetEngine): ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"engine\": \"foo\"}) def test_raise_error_unknown_compression_algorithm(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.parquet\") with pytest.raises(UnknownCompressionAlgorithm): ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"compression\": \"foo\"}) def test_raise_error_invalid_exposed_type(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.parquet\") with pytest.raises(InvalidExposedType): ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"foo\"}) def test_read_empty_data(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.parquet\")) empty_df = pd.DataFrame([]) empty_df.to_parquet(temp_file_path) # Pandas dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"pandas\"}) assert dn.read().equals(empty_df) # Numpy dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"numpy\"}) assert np.array_equal(dn.read(), empty_df.to_numpy()) # Custom dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": MyCustomObject}) assert dn.read() == [] def test_get_system_file_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.parquet\")) pd.DataFrame([]).to_parquet(temp_file_path) dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"pandas\"}) dn.write(pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})) 
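Stepping back from the last-edit-date checks for a moment, the `exposed_type` behaviour exercised by the read tests above can be sketched standalone. This is a minimal sketch, not part of the test file: it assumes pyarrow is available, writes a small Parquet file to a temporary path, and uses an illustrative `MyRow` class in place of the tests' `MyCustomObject`.

```python
import os
import tempfile

import pandas as pd

from src.taipy.core.data.parquet import ParquetDataNode
from taipy.config.common.scope import Scope


class MyRow:
    # Each Parquet row is passed to the constructor as keyword arguments
    def __init__(self, id, integer, text):
        self.id, self.integer, self.text = id, integer, text


path = os.path.join(tempfile.mkdtemp(), "example.parquet")
pd.DataFrame({"id": [1, 2], "integer": [10, 20], "text": ["a", "b"]}).to_parquet(path)

# Default exposed_type is "pandas": read() returns a DataFrame
pandas_dn = ParquetDataNode("rows", Scope.SCENARIO, properties={"path": path})
assert isinstance(pandas_dn.read(), pd.DataFrame)

# With a class (or factory callable) as exposed_type, read() returns one object per row
custom_dn = ParquetDataNode("rows", Scope.SCENARIO, properties={"path": path, "exposed_type": MyRow})
assert all(isinstance(row, MyRow) for row in custom_dn.read())
```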
previous_edit_date = dn.last_edit_date sleep(0.1) pd.DataFrame(pd.DataFrame(data={\"col1\": [5, 6], \"col2\": [7, 8]})).to_parquet(temp_file_path) new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert previous_edit_date < dn.last_edit_date assert new_edit_date == dn.last_edit_date sleep(0.1) dn.write(pd.DataFrame(data={\"col1\": [9, 10], \"col2\": [10, 12]})) assert new_edit_date < dn.last_edit_date os.unlink(temp_file_path) def test_get_system_folder_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_folder_path = tmpdir_factory.mktemp(\"data\").strpath temp_file_path = os.path.join(temp_folder_path, \"temp.parquet\") pd.DataFrame([]).to_parquet(temp_file_path) dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_folder_path}) initial_edit_date = dn.last_edit_date # Sleep so that the file can be created successfully on Ubuntu sleep(0.1) pd.DataFrame(pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})).to_parquet(temp_file_path) first_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert dn.last_edit_date > initial_edit_date assert dn.last_edit_date == first_edit_date sleep(0.1) pd.DataFrame(pd.DataFrame(data={\"col1\": [5, 6], \"col2\": [7, 8]})).to_parquet(temp_file_path) second_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert dn.last_edit_date > first_edit_date assert dn.last_edit_date == second_edit_date os.unlink(temp_file_path) @pytest.mark.skipif(not util.find_spec(\"fastparquet\"), reason=\"Append parquet requires fastparquet to be installed\") @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ], ) def test_append_pandas(self, parquet_file_path, default_data_frame, content): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": parquet_file_path}) assert_frame_equal(dn.read(), default_data_frame) dn.append(content) assert_frame_equal( dn.read(), pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index(drop=True), ) @pytest.mark.skipif(not util.find_spec(\"fastparquet\"), reason=\"Append parquet requires fastparquet to be installed\") @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ], ) def test_append_modin(self, parquet_file_path, default_data_frame, content): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"modin\"}) df_equals(dn.read(), modin_pd.DataFrame(default_data_frame)) dn.append(content) df_equals( dn.read(), modin_pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index( drop=True ), ) @pytest.mark.parametrize( \"data\", [ [{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), modin_pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), ], ) def test_write_to_disk(self, tmpdir_factory, data): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.parquet\")) dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path}) dn.write(data) assert pathlib.Path(temp_file_path).exists() assert isinstance(dn.read(), pd.DataFrame) def 
test_filter_pandas_exposed_type(self, parquet_file_path): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"pandas\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_modin_exposed_type(self, parquet_file_path): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"modin\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(modin_pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = modin_pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) 
filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = modin_pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_numpy_exposed_type(self, parquet_file_path): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"numpy\"}) dn.write( [ [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], ] ) # Test datanode indexing and slicing assert np.array_equal(dn[0], np.array([1, 1])) assert np.array_equal(dn[1], np.array([1, 2])) assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]])) # Test filter data assert np.array_equal(dn.filter((0, 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn.filter((0, 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn.filter((1, 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]])) assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]])) assert np.array_equal( dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR), np.array([[1, 1], [1, 2], [2, 1], [2, 2]]), ) assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]])) @pytest.mark.parametrize(\"engine\", __engine) def test_pandas_parquet_config_kwargs(self, engine, tmpdir_factory): read_kwargs = {\"filters\": [(\"integer\", \"<\", 10)], \"columns\": [\"integer\"]} temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.parquet\")) dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"engine\": engine, \"read_kwargs\": read_kwargs} ) df = pd.read_csv(os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\")) dn.write(df) assert set(pd.read_parquet(temp_file_path).columns) == {\"id\", \"integer\", \"text\"} print(dn.read()) assert set(dn.read().columns) == set(read_kwargs[\"columns\"]) # !!! filter doesn't work with `fastparquet` without partition_cols if engine == \"pyarrow\": assert len(dn.read()) != len(df) assert len(dn.read()) == 2 @pytest.mark.parametrize(\"engine\", __engine) def test_kwarg_precedence(self, engine, tmpdir_factory, default_data_frame): # Precedence: # 1. Class read/write methods # 2. Defined in read_kwargs and write_kwargs, in properties # 3. 
Defined top-level in properties temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.parquet\")) temp_file_2_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp_2.parquet\")) df = default_data_frame.copy(deep=True) # Write # 3 comp3 = \"snappy\" dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"engine\": engine, \"compression\": comp3} ) dn.write(df) df.to_parquet(path=temp_file_2_path, compression=comp3, engine=engine) with open(temp_file_2_path, \"rb\") as tf: with pathlib.Path(temp_file_path).open(\"rb\") as f: assert f.read() == tf.read() # 3 and 2 comp2 = \"gzip\" dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={ \"path\": temp_file_path, \"engine\": engine, \"compression\": comp3, \"write_kwargs\": {\"compression\": comp2}, }, ) dn.write(df) df.to_parquet(path=temp_file_2_path, compression=comp2, engine=engine) with open(temp_file_2_path, \"rb\") as tf: with pathlib.Path(temp_file_path).open(\"rb\") as f: assert f.read() == tf.read() # 3, 2 and 1 comp1 = \"brotli\" dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={ \"path\": temp_file_path, \"engine\": engine, \"compression\": comp3, \"write_kwargs\": {\"compression\": comp2}, }, ) dn.write_with_kwargs(df, compression=comp1) df.to_parquet(path=temp_file_2_path, compression=comp1, engine=engine) with open(temp_file_2_path, \"rb\") as tf: with pathlib.Path(temp_file_path).open(\"rb\") as f: assert f.read() == tf.read() # Read df.to_parquet(temp_file_path, engine=engine) # 2 cols2 = [\"a\", \"b\"] dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"engine\": engine, \"read_kwargs\": {\"columns\": cols2}}, ) assert set(dn.read().columns) == set(cols2) # 1 cols1 = [\"a\"] dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"engine\": engine, \"read_kwargs\": {\"columns\": cols2}}, ) assert set(dn.read_with_kwargs(columns=cols1).columns) == set(cols1) def test_partition_cols(self, tmpdir_factory, default_data_frame: pd.DataFrame): temp_dir_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp_dir\")) write_kwargs = {\"partition_cols\": [\"a\", \"b\"]} dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": temp_dir_path, \"write_kwargs\": write_kwargs} ) # type: ignore dn.write(default_data_frame) assert pathlib.Path(temp_dir_path).is_dir() # dtypes change during round-trip with partition_cols pd.testing.assert_frame_equal( dn.read().sort_index(axis=1), default_data_frame.sort_index(axis=1), check_dtype=False, check_categorical=False, ) def test_read_with_kwargs_never_written(self): path = \"data/node/path\" dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path}) assert dn.read_with_kwargs() is None "} {"text": "from importlib import util from unittest.mock import patch import modin.pandas as modin_pd import numpy as np import pandas as pd import pytest from modin.pandas.test.utils import df_equals from pandas.testing import assert_frame_equal from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.data.sql_table import SQLTableDataNode from src.taipy.core.exceptions.exceptions import InvalidExposedType, MissingRequiredProperty from taipy.config.common.scope import Scope class MyCustomObject: def __init__(self, foo=None, bar=None, *args, **kwargs): self.foo = foo self.bar = bar self.args = args self.kwargs = kwargs class TestSQLTableDataNode: __pandas_properties = [ { \"db_name\": 
\"taipy\", \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", \"other\": \"value\", }, }, ] __modin_properties = [ { \"db_name\": \"taipy\", \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", \"other\": \"value\", }, }, ] if util.find_spec(\"pyodbc\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mssql\", \"table_name\": \"example\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mssql\", \"table_name\": \"example\", \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) if util.find_spec(\"pymysql\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mysql\", \"table_name\": \"example\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mysql\", \"table_name\": \"example\", \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) if util.find_spec(\"psycopg2\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"postgresql\", \"table_name\": \"example\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"postgresql\", \"table_name\": \"example\", \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @pytest.mark.parametrize(\"modin_properties\", __modin_properties) def test_create(self, pandas_properties, modin_properties): dn = SQLTableDataNode( \"foo_bar\", Scope.SCENARIO, properties=pandas_properties, ) assert isinstance(dn, SQLTableDataNode) assert dn.storage_type() == \"sql_table\" assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.exposed_type == \"pandas\" assert dn.table_name == \"example\" assert dn._get_base_read_query() == \"SELECT * FROM example\" dn = SQLTableDataNode( \"foo_bar\", Scope.SCENARIO, properties=modin_properties, ) assert isinstance(dn, SQLTableDataNode) assert dn.storage_type() == \"sql_table\" assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.exposed_type == \"modin\" assert dn.table_name == \"example\" assert dn._get_base_read_query() == \"SELECT * FROM example\" @pytest.mark.parametrize(\"properties\", __pandas_properties) def test_get_user_properties(self, properties): custom_properties = properties.copy() custom_properties[\"foo\"] = \"bar\" dn = SQLTableDataNode( \"foo_bar\", Scope.SCENARIO, properties=custom_properties, ) assert dn._get_user_properties() == {\"foo\": \"bar\"} @pytest.mark.parametrize( \"properties\", [ {}, {\"db_username\": \"foo\"}, {\"db_username\": \"foo\", \"db_password\": \"foo\"}, 
{\"db_username\": \"foo\", \"db_password\": \"foo\", \"db_name\": \"foo\"}, ], ) def test_create_with_missing_parameters(self, properties): with pytest.raises(MissingRequiredProperty): SQLTableDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(MissingRequiredProperty): SQLTableDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) @patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._read_as\", return_value=\"custom\") @patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._read_as_pandas_dataframe\", return_value=\"pandas\") @patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._read_as_modin_dataframe\", return_value=\"modin\") @patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._read_as_numpy\", return_value=\"numpy\") @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @pytest.mark.parametrize(\"modin_properties\", __modin_properties) def test_read( self, mock_read_as, mock_read_as_pandas_dataframe, mock_read_as_modin_dataframe, mock_read_as_numpy, pandas_properties, modin_properties, ): custom_properties = pandas_properties.copy() # Create SQLTableDataNode without exposed_type (Default is pandas.DataFrame) sql_data_node_as_pandas = SQLTableDataNode( \"foo\", Scope.SCENARIO, properties=pandas_properties, ) assert sql_data_node_as_pandas.read() == \"pandas\" custom_properties.pop(\"db_extra_args\") custom_properties[\"exposed_type\"] = MyCustomObject # Create the same SQLTableDataNode but with custom exposed_type sql_data_node_as_custom_object = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) assert sql_data_node_as_custom_object.read() == \"custom\" # Create the same SQLDataSource but with numpy exposed_type custom_properties[\"exposed_type\"] = \"numpy\" sql_data_source_as_numpy_object = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) assert sql_data_source_as_numpy_object.read() == \"numpy\" # Create the same SQLDataSource but with modin exposed_type sql_data_source_as_modin_object = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=modin_properties) assert sql_data_source_as_modin_object.properties[\"exposed_type\"] == \"modin\" assert sql_data_source_as_modin_object.read() == \"modin\" @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) def test_read_as(self, pandas_properties): custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") custom_properties[\"exposed_type\"] = MyCustomObject sql_data_node = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.return_value = [ {\"foo\": \"baz\", \"bar\": \"qux\"}, {\"foo\": \"quux\", \"bar\": \"quuz\"}, {\"foo\": \"corge\"}, {\"bar\": \"grault\"}, {\"KWARGS_KEY\": \"KWARGS_VALUE\"}, {}, ] data = sql_data_node._read_as() assert isinstance(data, list) assert isinstance(data[0], MyCustomObject) assert isinstance(data[1], MyCustomObject) assert isinstance(data[2], MyCustomObject) assert isinstance(data[3], MyCustomObject) assert isinstance(data[4], MyCustomObject) assert isinstance(data[5], MyCustomObject) assert data[0].foo == \"baz\" assert data[0].bar == \"qux\" assert data[1].foo == \"quux\" assert data[1].bar == \"quuz\" assert data[2].foo == \"corge\" assert data[2].bar is None assert data[3].foo is None assert data[3].bar == \"grault\" assert data[4].foo is None assert data[4].bar is None assert 
data[4].kwargs[\"KWARGS_KEY\"] == \"KWARGS_VALUE\" assert data[5].foo is None assert data[5].bar is None assert len(data[5].args) == 0 assert len(data[5].kwargs) == 0 with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.return_value = [] data_2 = sql_data_node._read_as() assert isinstance(data_2, list) assert len(data_2) == 0 @pytest.mark.parametrize( \"data,written_data,called_func\", [ ([{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}], [{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}], \"__insert_dicts\"), ({\"a\": 1, \"b\": 2}, [{\"a\": 1, \"b\": 2}], \"__insert_dicts\"), ([(1, 2), (3, 4)], [(1, 2), (3, 4)], \"__insert_tuples\"), ([[1, 2], [3, 4]], [[1, 2], [3, 4]], \"__insert_tuples\"), ((1, 2), [(1, 2)], \"__insert_tuples\"), ([1, 2, 3, 4], [(1,), (2,), (3,), (4,)], \"__insert_tuples\"), (\"foo\", [(\"foo\",)], \"__insert_tuples\"), (None, [(None,)], \"__insert_tuples\"), (np.array([1, 2, 3, 4]), [(1,), (2,), (3,), (4,)], \"__insert_tuples\"), (np.array([np.array([1, 2]), np.array([3, 4])]), [[1, 2], [3, 4]], \"__insert_tuples\"), ], ) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) def test_write_1(self, data, written_data, called_func, pandas_properties): custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock, patch( \"src.taipy.core.data.sql_table.SQLTableDataNode._create_table\" ) as create_table_mock: cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.side_effect = None with patch(f\"src.taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode{called_func}\") as mck: dn.write(data) mck.assert_called_once_with(written_data, create_table_mock.return_value, cursor_mock, True) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) def test_raise_error_invalid_exposed_type(self, pandas_properties): custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") custom_properties[\"exposed_type\"] = \"foo\" with pytest.raises(InvalidExposedType): SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @pytest.mark.parametrize(\"modin_properties\", __modin_properties) def test_write_dataframe(self, pandas_properties, modin_properties): # test write pandas dataframe custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [5, 6, 7, 8]}) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock, patch( \"src.taipy.core.data.sql_table.SQLTableDataNode._create_table\" ): cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.side_effect = None with patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode__insert_dataframe\") as mck: dn.write(df) assert mck.call_args[0][0].equals(df) # test write modin dataframe custom_properties = modin_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) df = modin_pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [5, 6, 7, 8]}) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock, patch( 
\"src.taipy.core.data.sql_table.SQLTableDataNode._create_table\" ): cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.side_effect = None with patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode__insert_dataframe\") as mck: dn.write(df) assert mck.call_args[0][0].equals(df) @pytest.mark.parametrize( \"data\", [ [], np.array([]), ], ) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) def test_write_empty_list(self, data, pandas_properties): custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock, patch( \"src.taipy.core.data.sql_table.SQLTableDataNode._create_table\" ) as create_table_mock: cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.side_effect = None with patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode__delete_all_rows\") as mck: dn.write(data) mck.assert_called_once_with(create_table_mock.return_value, cursor_mock, True) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @patch(\"pandas.read_sql_query\") def test_engine_cache(self, _, pandas_properties): dn = SQLTableDataNode( \"foo\", Scope.SCENARIO, properties=pandas_properties, ) assert dn._engine is None with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock, patch( \"src.taipy.core.data.sql_table.SQLTableDataNode._create_table\" ): cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.side_effect = None dn.read() assert dn._engine is not None dn.db_username = \"foo\" assert dn._engine is None dn.write(1) assert dn._engine is not None dn.some_random_attribute_that_does_not_related_to_engine = \"foo\" assert dn._engine is not None @pytest.mark.parametrize( \"tmp_sqlite_path\", [ \"tmp_sqlite_db_file_path\", \"tmp_sqlite_sqlite3_file_path\", ], ) def test_sqlite_read_file_with_different_extension(self, tmp_sqlite_path, request): tmp_sqlite_path = request.getfixturevalue(tmp_sqlite_path) folder_path, db_name, file_extension = tmp_sqlite_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, } dn = SQLTableDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) data = dn.read() assert data.equals(pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}])) def test_sqlite_append_pandas(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, } dn = SQLTableDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) original_data = pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]) data = dn.read() assert_frame_equal(data, original_data) append_data_1 = pd.DataFrame([{\"foo\": 5, \"bar\": 6}, {\"foo\": 7, \"bar\": 8}]) dn.append(append_data_1) assert_frame_equal(dn.read(), pd.concat([original_data, append_data_1]).reset_index(drop=True)) def test_sqlite_append_modin(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, 
\"sqlite_file_extension\": file_extension, \"exposed_type\": \"modin\", } dn = SQLTableDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) original_data = modin_pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]) data = dn.read() df_equals(data, original_data) append_data_1 = modin_pd.DataFrame([{\"foo\": 5, \"bar\": 6}, {\"foo\": 7, \"bar\": 8}]) dn.append(append_data_1) df_equals(dn.read(), modin_pd.concat([original_data, append_data_1]).reset_index(drop=True)) def test_filter_pandas_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"pandas\", } dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, 2, 2])) assert dn[\"bar\"].equals(pd.Series([1, 2, 3, 1, 2, 3])) assert dn[:2].equals(pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = pd.DataFrame([{\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, ] ) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_modin_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"modin\", } dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, 2, 2])) assert dn[\"bar\"].equals(pd.Series([1, 2, 3, 1, 2, 3])) assert dn[:2].equals(modin_pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) 
filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = modin_pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = modin_pd.DataFrame([{\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = modin_pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, ] ) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_numpy_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"numpy\", } dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert np.array_equal(dn[0], np.array([1, 1])) assert np.array_equal(dn[1], np.array([1, 2])) assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]])) # Test filter data assert np.array_equal(dn.filter((\"foo\", 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn.filter((\"foo\", 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn.filter((\"bar\", 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]])) assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]])) assert np.array_equal( dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR), np.array([[1, 1], [1, 2], [2, 1], [2, 2]]), ) assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]])) def test_filter_does_not_read_all_entities(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"numpy\", } dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=properties) # SQLTableDataNode.filter() should not call the MongoCollectionDataNode._read() method with patch.object(SQLTableDataNode, \"_read\") as read_mock: dn.filter((\"foo\", 1, Operator.EQUAL)) dn.filter((\"bar\", 2, Operator.NOT_EQUAL)) dn.filter([(\"bar\", 1, 
Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) assert read_mock[\"_read\"].call_count == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os from datetime import datetime, timedelta from time import sleep from unittest import mock import pytest import src.taipy.core as tp from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.data_node import DataNode from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import DataNodeIsBeingEdited, NoData from src.taipy.core.job.job_id import JobId from taipy.config import Config from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId from .utils import FakeDataNode def funct_a_b(input: str): print(\"task_a_b\") return \"B\" def funct_b_c(input: str): print(\"task_b_c\") return \"C\" def funct_b_d(input: str): print(\"task_b_d\") return \"D\" class TestDataNode: def test_create_with_default_values(self): dn = DataNode(\"foo_bar\") assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.name is None assert dn.owner_id is None assert dn.parent_ids == set() assert dn.last_edit_date is None assert dn.job_ids == [] assert not dn.is_ready_for_reading assert len(dn.properties) == 0 def test_create(self): a_date = datetime.now() dn = DataNode( \"foo_bar\", Scope.SCENARIO, DataNodeId(\"an_id\"), \"a_scenario_id\", {\"a_parent_id\"}, a_date, [dict(job_id=\"a_job_id\")], edit_in_progress=False, prop=\"erty\", name=\"a name\", ) assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id == \"an_id\" assert dn.name == \"a name\" assert dn.owner_id == \"a_scenario_id\" assert dn.parent_ids == {\"a_parent_id\"} assert dn.last_edit_date == a_date assert dn.job_ids == [\"a_job_id\"] assert dn.is_ready_for_reading assert len(dn.properties) == 2 assert dn.properties == {\"prop\": \"erty\", \"name\": \"a name\"} with pytest.raises(InvalidConfigurationId): DataNode(\"foo bar\") def test_read_write(self): dn = FakeDataNode(\"foo_bar\") with pytest.raises(NoData): assert dn.read() is None dn.read_or_raise() assert dn.write_has_been_called == 0 assert dn.read_has_been_called == 0 assert not dn.is_ready_for_reading assert dn.last_edit_date is None assert dn.job_ids == [] assert dn.edits == [] dn.write(\"Any data\") assert dn.write_has_been_called == 1 assert dn.read_has_been_called == 0 assert dn.last_edit_date is not None first_edition = dn.last_edit_date assert dn.is_ready_for_reading assert dn.job_ids == [] assert len(dn.edits) == 1 assert dn.get_last_edit()[\"timestamp\"] == dn.last_edit_date sleep(0.1) dn.write(\"Any other data\", job_id := JobId(\"a_job_id\")) assert dn.write_has_been_called == 2 assert dn.read_has_been_called == 0 second_edition 
= dn.last_edit_date assert first_edition < second_edition assert dn.is_ready_for_reading assert dn.job_ids == [job_id] assert len(dn.edits) == 2 assert dn.get_last_edit()[\"timestamp\"] == dn.last_edit_date dn.read() assert dn.write_has_been_called == 2 assert dn.read_has_been_called == 1 second_edition = dn.last_edit_date assert first_edition < second_edition assert dn.is_ready_for_reading assert dn.job_ids == [job_id] def test_lock_initialization(self): dn = InMemoryDataNode(\"dn\", Scope.SCENARIO) assert not dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None def test_locked_dn_unlockable_only_by_same_editor(self): dn = InMemoryDataNode(\"dn\", Scope.SCENARIO) dn.lock_edit(\"user_1\") assert dn.edit_in_progress assert dn._editor_id == \"user_1\" assert dn._editor_expiration_date is not None with pytest.raises(DataNodeIsBeingEdited): dn.lock_edit(\"user_2\") with pytest.raises(DataNodeIsBeingEdited): dn.unlock_edit(\"user_2\") dn.unlock_edit(\"user_1\") assert not dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None def test_none_editor_can_lock_a_locked_dn(self): dn = InMemoryDataNode(\"dn\", Scope.SCENARIO) dn.lock_edit(\"user\") assert dn.edit_in_progress assert dn._editor_id == \"user\" assert dn._editor_expiration_date is not None dn.lock_edit() assert dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None def test_none_editor_can_unlock_a_locked_dn(self): dn = InMemoryDataNode(\"dn\", Scope.SCENARIO) dn.lock_edit(\"user\") assert dn.edit_in_progress assert dn._editor_id == \"user\" assert dn._editor_expiration_date is not None dn.unlock_edit() assert not dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None dn.lock_edit() assert dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None dn.unlock_edit() assert not dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None def test_ready_for_reading(self): dn = InMemoryDataNode(\"foo_bar\", Scope.CYCLE) assert dn.last_edit_date is None assert not dn.is_ready_for_reading assert dn.job_ids == [] dn.lock_edit() assert dn.last_edit_date is None assert not dn.is_ready_for_reading assert dn.job_ids == [] dn.unlock_edit() assert dn.last_edit_date is None assert not dn.is_ready_for_reading assert dn.job_ids == [] dn.lock_edit() assert dn.last_edit_date is None assert not dn.is_ready_for_reading assert dn.job_ids == [] dn.write(\"toto\", job_id := JobId(\"a_job_id\")) assert dn.last_edit_date is not None assert dn.is_ready_for_reading assert dn.job_ids == [job_id] def test_is_valid_no_validity_period(self): # Test Never been writen dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"id\"), \"name\", \"owner_id\") assert not dn.is_valid # test has been writen dn.write(\"My data\") assert dn.is_valid def test_is_valid_with_30_min_validity_period(self): # Test Never been writen dn = InMemoryDataNode( \"foo\", Scope.SCENARIO, DataNodeId(\"id\"), \"name\", \"owner_id\", validity_period=timedelta(minutes=30) ) assert dn.is_valid is False # Has been writen less than 30 minutes ago dn.write(\"My data\") assert dn.is_valid is True # Has been writen more than 30 minutes ago dn.last_edit_date = datetime.now() + timedelta(days=-1) assert dn.is_valid is False def test_is_valid_with_5_days_validity_period(self): # Test Never been writen dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, validity_period=timedelta(days=5)) assert dn.is_valid 
is False # Has been writen less than 30 minutes ago dn.write(\"My data\") assert dn.is_valid is True # Has been writen more than 30 minutes ago dn._last_edit_date = datetime.now() - timedelta(days=6) _DataManager()._set(dn) assert dn.is_valid is False def test_is_up_to_date(self, current_datetime): dn_confg_1 = Config.configure_in_memory_data_node(\"dn_1\") dn_confg_2 = Config.configure_in_memory_data_node(\"dn_2\") dn_confg_3 = Config.configure_in_memory_data_node(\"dn_3\", scope=Scope.GLOBAL) task_config_1 = Config.configure_task(\"t1\", print, [dn_confg_1], [dn_confg_2]) task_config_2 = Config.configure_task(\"t2\", print, [dn_confg_2], [dn_confg_3]) scenario_config = Config.configure_scenario(\"sc\", [task_config_1, task_config_2]) scenario_1 = tp.create_scenario(scenario_config) assert len(_DataManager._get_all()) == 3 dn_1_1 = scenario_1.data_nodes[\"dn_1\"] dn_2_1 = scenario_1.data_nodes[\"dn_2\"] dn_3_1 = scenario_1.data_nodes[\"dn_3\"] assert dn_1_1.last_edit_date is None assert dn_2_1.last_edit_date is None assert dn_3_1.last_edit_date is None dn_1_1.last_edit_date = current_datetime + timedelta(1) dn_2_1.last_edit_date = current_datetime + timedelta(2) dn_3_1.last_edit_date = current_datetime + timedelta(3) assert dn_1_1.is_up_to_date assert dn_2_1.is_up_to_date assert dn_3_1.is_up_to_date dn_2_1.last_edit_date = current_datetime + timedelta(4) assert dn_1_1.is_up_to_date assert dn_2_1.is_up_to_date assert not dn_3_1.is_up_to_date dn_1_1.last_edit_date = current_datetime + timedelta(5) assert dn_1_1.is_up_to_date assert not dn_2_1.is_up_to_date assert not dn_3_1.is_up_to_date dn_1_1.last_edit_date = current_datetime + timedelta(1) dn_2_1.last_edit_date = current_datetime + timedelta(2) dn_3_1.last_edit_date = current_datetime + timedelta(3) def test_is_up_to_date_across_scenarios(self, current_datetime): dn_confg_1 = Config.configure_in_memory_data_node(\"dn_1\", scope=Scope.SCENARIO) dn_confg_2 = Config.configure_in_memory_data_node(\"dn_2\", scope=Scope.SCENARIO) dn_confg_3 = Config.configure_in_memory_data_node(\"dn_3\", scope=Scope.GLOBAL) task_config_1 = Config.configure_task(\"t1\", print, [dn_confg_1], [dn_confg_2]) task_config_2 = Config.configure_task(\"t2\", print, [dn_confg_2], [dn_confg_3]) scenario_config = Config.configure_scenario(\"sc\", [task_config_1, task_config_2]) scenario_1 = tp.create_scenario(scenario_config) scenario_2 = tp.create_scenario(scenario_config) assert len(_DataManager._get_all()) == 5 dn_1_1 = scenario_1.data_nodes[\"dn_1\"] dn_2_1 = scenario_1.data_nodes[\"dn_2\"] dn_1_2 = scenario_2.data_nodes[\"dn_1\"] dn_2_2 = scenario_2.data_nodes[\"dn_2\"] dn_3 = scenario_1.data_nodes[\"dn_3\"] assert dn_3 == scenario_2.data_nodes[\"dn_3\"] assert dn_1_1.last_edit_date is None assert dn_2_1.last_edit_date is None assert dn_1_2.last_edit_date is None assert dn_2_2.last_edit_date is None assert dn_3.last_edit_date is None dn_1_1.last_edit_date = current_datetime + timedelta(1) dn_2_1.last_edit_date = current_datetime + timedelta(2) dn_1_2.last_edit_date = current_datetime + timedelta(3) dn_2_2.last_edit_date = current_datetime + timedelta(4) dn_3.last_edit_date = current_datetime + timedelta(5) assert dn_1_1.is_up_to_date assert dn_2_1.is_up_to_date assert dn_1_2.is_up_to_date assert dn_2_2.is_up_to_date assert dn_3.is_up_to_date dn_2_1.last_edit_date = current_datetime + timedelta(6) assert dn_1_1.is_up_to_date assert dn_2_1.is_up_to_date assert dn_1_2.is_up_to_date assert dn_2_2.is_up_to_date assert not dn_3.is_up_to_date dn_2_1.last_edit_date = 
current_datetime + timedelta(2) dn_2_2.last_edit_date = current_datetime + timedelta(6) assert dn_1_1.is_up_to_date assert dn_2_1.is_up_to_date assert dn_1_2.is_up_to_date assert dn_2_2.is_up_to_date assert not dn_3.is_up_to_date dn_2_2.last_edit_date = current_datetime + timedelta(4) dn_1_1.last_edit_date = current_datetime + timedelta(6) assert dn_1_1.is_up_to_date assert not dn_2_1.is_up_to_date assert dn_1_2.is_up_to_date assert dn_2_2.is_up_to_date assert not dn_3.is_up_to_date dn_1_2.last_edit_date = current_datetime + timedelta(6) assert dn_1_1.is_up_to_date assert not dn_2_1.is_up_to_date assert dn_1_2.is_up_to_date assert not dn_2_2.is_up_to_date assert not dn_3.is_up_to_date def test_do_not_recompute_data_node_valid_but_continue_sequence_execution(self): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) a = Config.configure_data_node(\"A\", \"pickle\", default_data=\"A\") b = Config.configure_data_node(\"B\", \"pickle\") c = Config.configure_data_node(\"C\", \"pickle\") d = Config.configure_data_node(\"D\", \"pickle\") task_a_b = Config.configure_task(\"task_a_b\", funct_a_b, input=a, output=b, skippable=True) task_b_c = Config.configure_task(\"task_b_c\", funct_b_c, input=b, output=c) task_b_d = Config.configure_task(\"task_b_d\", funct_b_d, input=b, output=d) scenario_cfg = Config.configure_scenario(\"scenario\", [task_a_b, task_b_c, task_b_d]) _OrchestratorFactory._build_dispatcher() scenario = tp.create_scenario(scenario_cfg) scenario.submit() assert scenario.A.read() == \"A\" assert scenario.B.read() == \"B\" assert scenario.C.read() == \"C\" assert scenario.D.read() == \"D\" scenario.submit() assert len(tp.get_jobs()) == 6 jobs_and_status = [(job.task.config_id, job.status) for job in tp.get_jobs()] assert (\"task_a_b\", tp.Status.COMPLETED) in jobs_and_status assert (\"task_a_b\", tp.Status.SKIPPED) in jobs_and_status assert (\"task_b_c\", tp.Status.COMPLETED) in jobs_and_status assert (\"task_b_d\", tp.Status.COMPLETED) in jobs_and_status def test_data_node_update_after_writing(self): dn = FakeDataNode(\"foo\") _DataManager._set(dn) assert not _DataManager._get(dn.id).is_ready_for_reading dn.write(\"Any data\") assert dn.is_ready_for_reading assert _DataManager._get(dn.id).is_ready_for_reading def test_expiration_date_raise_if_never_write(self): dn = FakeDataNode(\"foo\") with pytest.raises(NoData): dn.expiration_date def test_validity_null_if_never_write(self): dn = FakeDataNode(\"foo\") assert dn.validity_period is None def test_auto_set_and_reload(self, current_datetime): dn_1 = InMemoryDataNode( \"foo\", scope=Scope.GLOBAL, id=DataNodeId(\"an_id\"), owner_id=None, parent_ids=None, last_edit_date=current_datetime, edits=[dict(job_id=\"a_job_id\")], edit_in_progress=False, validity_period=None, properties={ \"name\": \"foo\", }, ) dm = _DataManager() dm._set(dn_1) dn_2 = dm._get(dn_1) # auto set & reload on scope attribute assert dn_1.scope == Scope.GLOBAL assert dn_2.scope == Scope.GLOBAL dn_1.scope = Scope.CYCLE assert dn_1.scope == Scope.CYCLE assert dn_2.scope == Scope.CYCLE dn_2.scope = Scope.SCENARIO assert dn_1.scope == Scope.SCENARIO assert dn_2.scope == Scope.SCENARIO new_datetime = current_datetime + timedelta(1) new_datetime_1 = current_datetime + timedelta(3) # auto set & reload on last_edit_date attribute assert dn_1.last_edit_date == current_datetime assert dn_2.last_edit_date == current_datetime dn_1.last_edit_date = new_datetime_1 assert dn_1.last_edit_date == new_datetime_1 assert dn_2.last_edit_date == new_datetime_1 dn_2.last_edit_date 
= new_datetime assert dn_1.last_edit_date == new_datetime assert dn_2.last_edit_date == new_datetime # auto set & reload on name attribute assert dn_1.name == \"foo\" assert dn_2.name == \"foo\" dn_1.name = \"fed\" assert dn_1.name == \"fed\" assert dn_2.name == \"fed\" dn_2.name = \"def\" assert dn_1.name == \"def\" assert dn_2.name == \"def\" # auto set & reload on parent_ids attribute (set() object does not have auto set yet) assert dn_1.parent_ids == set() assert dn_2.parent_ids == set() dn_1._parent_ids.update([\"sc2\"]) _DataManager._set(dn_1) assert dn_1.parent_ids == {\"sc2\"} assert dn_2.parent_ids == {\"sc2\"} dn_2._parent_ids.clear() dn_2._parent_ids.update([\"sc1\"]) _DataManager._set(dn_2) assert dn_1.parent_ids == {\"sc1\"} assert dn_2.parent_ids == {\"sc1\"} # auto set & reload on edit_in_progress attribute assert not dn_2.edit_in_progress assert not dn_1.edit_in_progress dn_1.edit_in_progress = True assert dn_1.edit_in_progress assert dn_2.edit_in_progress dn_2.unlock_edit() assert not dn_1.edit_in_progress assert not dn_2.edit_in_progress dn_1.lock_edit() assert dn_1.edit_in_progress assert dn_2.edit_in_progress # auto set & reload on validity_period attribute time_period_1 = timedelta(1) time_period_2 = timedelta(5) assert dn_1.validity_period is None assert dn_2.validity_period is None dn_1.validity_period = time_period_1 assert dn_1.validity_period == time_period_1 assert dn_2.validity_period == time_period_1 dn_2.validity_period = time_period_2 assert dn_1.validity_period == time_period_2 assert dn_2.validity_period == time_period_2 # auto set & reload on properties attribute assert dn_1.properties == {\"name\": \"def\"} assert dn_2.properties == {\"name\": \"def\"} dn_1._properties[\"qux\"] = 4 assert dn_1.properties[\"qux\"] == 4 assert dn_2.properties[\"qux\"] == 4 assert dn_1.properties == {\"qux\": 4, \"name\": \"def\"} assert dn_2.properties == {\"qux\": 4, \"name\": \"def\"} dn_2._properties[\"qux\"] = 5 assert dn_1.properties[\"qux\"] == 5 assert dn_2.properties[\"qux\"] == 5 dn_1.properties[\"temp_key_1\"] = \"temp_value_1\" dn_1.properties[\"temp_key_2\"] = \"temp_value_2\" assert dn_1.properties == { \"name\": \"def\", \"qux\": 5, \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } assert dn_2.properties == { \"name\": \"def\", \"qux\": 5, \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } dn_1.properties.pop(\"temp_key_1\") assert \"temp_key_1\" not in dn_1.properties.keys() assert \"temp_key_1\" not in dn_1.properties.keys() assert dn_1.properties == { \"name\": \"def\", \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } assert dn_2.properties == { \"name\": \"def\", \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } dn_2.properties.pop(\"temp_key_2\") assert dn_1.properties == { \"qux\": 5, \"name\": \"def\", } assert dn_2.properties == { \"qux\": 5, \"name\": \"def\", } assert \"temp_key_2\" not in dn_1.properties.keys() assert \"temp_key_2\" not in dn_2.properties.keys() dn_1.properties[\"temp_key_3\"] = 0 assert dn_1.properties == { \"qux\": 5, \"temp_key_3\": 0, \"name\": \"def\", } assert dn_2.properties == { \"qux\": 5, \"temp_key_3\": 0, \"name\": \"def\", } dn_1.properties.update({\"temp_key_3\": 1}) assert dn_1.properties == { \"qux\": 5, \"temp_key_3\": 1, \"name\": \"def\", } assert dn_2.properties == { \"qux\": 5, \"temp_key_3\": 1, \"name\": \"def\", } dn_1.properties.update(dict()) assert dn_1.properties == { \"qux\": 5, \"temp_key_3\": 1, \"name\": \"def\", } assert dn_2.properties == { \"qux\": 5, 
\"temp_key_3\": 1, \"name\": \"def\", } dn_1.properties[\"temp_key_4\"] = 0 dn_1.properties[\"temp_key_5\"] = 0 dn_1.last_edit_date = new_datetime assert len(dn_1.job_ids) == 1 assert len(dn_2.job_ids) == 1 with dn_1 as dn: assert dn.config_id == \"foo\" assert dn.owner_id is None assert dn.scope == Scope.SCENARIO assert dn.last_edit_date == new_datetime assert dn.name == \"def\" assert dn.edit_in_progress assert dn.validity_period == time_period_2 assert len(dn.job_ids) == 1 assert dn._is_in_context assert dn.properties[\"qux\"] == 5 assert dn.properties[\"temp_key_3\"] == 1 assert dn.properties[\"temp_key_4\"] == 0 assert dn.properties[\"temp_key_5\"] == 0 new_datetime_2 = new_datetime + timedelta(5) dn.scope = Scope.CYCLE dn.last_edit_date = new_datetime_2 dn.name = \"abc\" dn.edit_in_progress = False dn.validity_period = None dn.properties[\"qux\"] = 9 dn.properties.pop(\"temp_key_3\") dn.properties.pop(\"temp_key_4\") dn.properties.update({\"temp_key_4\": 1}) dn.properties.update({\"temp_key_5\": 2}) dn.properties.pop(\"temp_key_5\") dn.properties.update(dict()) assert dn.config_id == \"foo\" assert dn.owner_id is None assert dn.scope == Scope.SCENARIO assert dn.last_edit_date == new_datetime assert dn.name == \"def\" assert dn.edit_in_progress assert dn.validity_period == time_period_2 assert len(dn.job_ids) == 1 assert dn.properties[\"qux\"] == 5 assert dn.properties[\"temp_key_3\"] == 1 assert dn.properties[\"temp_key_4\"] == 0 assert dn.properties[\"temp_key_5\"] == 0 assert dn_1.config_id == \"foo\" assert dn_1.owner_id is None assert dn_1.scope == Scope.CYCLE assert dn_1.last_edit_date == new_datetime_2 assert dn_1.name == \"abc\" assert not dn_1.edit_in_progress assert dn_1.validity_period is None assert not dn_1._is_in_context assert len(dn_1.job_ids) == 1 assert dn_1.properties[\"qux\"] == 9 assert \"temp_key_3\" not in dn_1.properties.keys() assert dn_1.properties[\"temp_key_4\"] == 1 assert \"temp_key_5\" not in dn_1.properties.keys() def test_get_parents(self, data_node): with mock.patch(\"src.taipy.core.get_parents\") as mck: data_node.get_parents() mck.assert_called_once_with(data_node) def test_cacheable_deprecated_false(self): dn = FakeDataNode(\"foo\") with pytest.warns(DeprecationWarning): dn.cacheable assert dn.cacheable is False def test_cacheable_deprecated_true(self): dn = FakeDataNode(\"foo\", properties={\"cacheable\": True}) with pytest.warns(DeprecationWarning): dn.cacheable assert dn.cacheable is True def test_data_node_with_env_variable_value_not_stored(self): dn_config = Config.configure_data_node(\"A\", prop=\"ENV[FOO]\") with mock.patch.dict(os.environ, {\"FOO\": \"bar\"}): dn = _DataManager._bulk_get_or_create([dn_config])[dn_config] assert dn._properties.data[\"prop\"] == \"ENV[FOO]\" assert dn.properties[\"prop\"] == \"bar\" assert dn.prop == \"bar\" def test_path_populated_with_config_default_path(self): dn_config = Config.configure_data_node(\"data_node\", \"pickle\", default_path=\"foo.p\") assert dn_config.default_path == \"foo.p\" data_node = _DataManager._bulk_get_or_create([dn_config])[dn_config] assert data_node.path == \"foo.p\" data_node.path = \"baz.p\" assert data_node.path == \"baz.p\" def test_track_edit(self): dn_config = Config.configure_data_node(\"A\") data_node = _DataManager._bulk_get_or_create([dn_config])[dn_config] data_node.write(data=\"1\", job_id=\"job_1\") data_node.write(data=\"2\", job_id=\"job_1\") data_node.write(data=\"3\", job_id=\"job_1\") assert len(data_node.edits) == 3 assert len(data_node.job_ids) == 3 assert 
data_node.edits[-1] == data_node.get_last_edit() assert data_node.last_edit_date == data_node.get_last_edit().get(\"timestamp\") date = datetime(2050, 1, 1, 12, 12) data_node.write(data=\"4\", timestamp=date, message=\"This is a comment on this edit\", env=\"staging\") assert len(data_node.edits) == 4 assert len(data_node.job_ids) == 3 assert data_node.edits[-1] == data_node.get_last_edit() last_edit = data_node.get_last_edit() assert last_edit[\"message\"] == \"This is a comment on this edit\" assert last_edit[\"env\"] == \"staging\" assert last_edit[\"timestamp\"] == date def test_label(self): a_date = datetime.now() dn = DataNode( \"foo_bar\", Scope.SCENARIO, DataNodeId(\"an_id\"), \"a_scenario_id\", {\"a_parent_id\"}, a_date, [dict(job_id=\"a_job_id\")], edit_in_progress=False, prop=\"erty\", name=\"a name\", ) with mock.patch(\"src.taipy.core.get\") as get_mck: class MockOwner: label = \"owner_label\" def get_label(self): return self.label get_mck.return_value = MockOwner() assert dn.get_label() == \"owner_label > \" + dn.name assert dn.get_simple_label() == dn.name def test_explicit_label(self): a_date = datetime.now() dn = DataNode( \"foo_bar\", Scope.SCENARIO, DataNodeId(\"an_id\"), \"a_scenario_id\", {\"a_parent_id\"}, a_date, [dict(job_id=\"a_job_id\")], edit_in_progress=False, label=\"a label\", name=\"a name\", ) assert dn.get_label() == \"a label\" assert dn.get_simple_label() == \"a label\" def test_change_data_node_name(self): cgf = Config.configure_data_node(\"foo\", scope=Scope.GLOBAL) dn = tp.create_global_data_node(cgf) dn.name = \"bar\" assert dn.name == \"bar\" # This new syntax will be the only one allowed: https://github.com/Avaiga/taipy-core/issues/806 dn.properties[\"name\"] = \"baz\" assert dn.name == \"baz\" "} {"text": "import os import pathlib from datetime import datetime from time import sleep import modin.pandas as modin_pd import numpy as np import pandas as pd import pytest from modin.pandas.test.utils import df_equals from pandas.testing import assert_frame_equal from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.csv import CSVDataNode from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.exceptions.exceptions import InvalidExposedType, NoData from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import InvalidConfigurationId @pytest.fixture(scope=\"function\", autouse=True) def cleanup(): yield path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.csv\") if os.path.isfile(path): os.remove(path) class MyCustomObject: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class TestCSVDataNode: def test_create(self): path = \"data/node/path\" dn = CSVDataNode( \"foo_bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"name\": \"super name\"} ) assert isinstance(dn, CSVDataNode) assert dn.storage_type() == \"csv\" assert dn.config_id == \"foo_bar\" assert dn.name == \"super name\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.last_edit_date is None assert dn.job_ids == [] assert not dn.is_ready_for_reading assert dn.path == path assert dn.has_header is False assert dn.exposed_type == \"pandas\" with pytest.raises(InvalidConfigurationId): dn = CSVDataNode( \"foo bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, 
\"name\": \"super name\"} ) def test_get_user_properties(self, csv_file): dn_1 = CSVDataNode(\"dn_1\", Scope.SCENARIO, properties={\"path\": \"data/node/path\"}) assert dn_1._get_user_properties() == {} dn_2 = CSVDataNode( \"dn_2\", Scope.SCENARIO, properties={ \"exposed_type\": \"numpy\", \"default_data\": \"foo\", \"default_path\": csv_file, \"has_header\": False, \"foo\": \"bar\", }, ) # exposed_type, default_data, default_path, path, has_header, sheet_name are filtered out assert dn_2._get_user_properties() == {\"foo\": \"bar\"} def test_new_csv_data_node_with_existing_file_is_ready_for_reading(self): not_ready_dn_cfg = Config.configure_data_node(\"not_ready_data_node_config_id\", \"csv\", path=\"NOT_EXISTING.csv\") not_ready_dn = _DataManager._bulk_get_or_create([not_ready_dn_cfg])[not_ready_dn_cfg] assert not not_ready_dn.is_ready_for_reading path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") ready_dn_cfg = Config.configure_data_node(\"ready_data_node_config_id\", \"csv\", path=path) ready_dn = _DataManager._bulk_get_or_create([ready_dn_cfg])[ready_dn_cfg] assert ready_dn.is_ready_for_reading @pytest.mark.parametrize( [\"properties\", \"exists\"], [ ({}, False), ({\"default_data\": [\"foo\", \"bar\"]}, True), ], ) def test_create_with_default_data(self, properties, exists): dn = CSVDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) assert os.path.exists(dn.path) is exists def test_read_with_header(self): not_existing_csv = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.csv\", \"has_header\": True}) with pytest.raises(NoData): assert not_existing_csv.read() is None not_existing_csv.read_or_raise() path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") # # Create CSVDataNode without exposed_type (Default is pandas.DataFrame) csv_data_node_as_pandas = CSVDataNode(\"bar\", Scope.SCENARIO, properties={\"path\": path}) data_pandas = csv_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 10 assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path).to_numpy()) # Create CSVDataNode with modin exposed_type csv_data_node_as_modin = CSVDataNode(\"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"modin\"}) data_modin = csv_data_node_as_modin.read() assert isinstance(data_modin, modin_pd.DataFrame) assert len(data_modin) == 10 assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path).to_numpy()) # Create CSVDataNode with numpy exposed_type csv_data_node_as_numpy = CSVDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": True, \"exposed_type\": \"numpy\"} ) data_numpy = csv_data_node_as_numpy.read() assert isinstance(data_numpy, np.ndarray) assert len(data_numpy) == 10 assert np.array_equal(data_numpy, pd.read_csv(path).to_numpy()) # Create the same CSVDataNode but with custom exposed_type csv_data_node_as_custom_object = CSVDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": MyCustomObject} ) data_custom = csv_data_node_as_custom_object.read() assert isinstance(data_custom, list) assert len(data_custom) == 10 for (index, row_pandas), row_custom in zip(data_pandas.iterrows(), data_custom): assert isinstance(row_custom, MyCustomObject) assert row_pandas[\"id\"] == row_custom.id assert str(row_pandas[\"integer\"]) == row_custom.integer assert row_pandas[\"text\"] == row_custom.text def test_read_without_header(self): not_existing_csv = 
CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.csv\", \"has_header\": False}) with pytest.raises(NoData): assert not_existing_csv.read() is None not_existing_csv.read_or_raise() path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") # Create CSVDataNode without exposed_type (Default is pandas.DataFrame) csv_data_node_as_pandas = CSVDataNode(\"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False}) data_pandas = csv_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 11 assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path, header=None).to_numpy()) # Create CSVDataNode with modin exposed_type csv_data_node_as_modin = CSVDataNode( \"baz\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": \"modin\"} ) data_modin = csv_data_node_as_modin.read() assert isinstance(data_modin, modin_pd.DataFrame) assert len(data_modin) == 11 assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path, header=None).to_numpy()) # Create CSVDataNode with numpy exposed_type csv_data_node_as_numpy = CSVDataNode( \"qux\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": \"numpy\"} ) data_numpy = csv_data_node_as_numpy.read() assert isinstance(data_numpy, np.ndarray) assert len(data_numpy) == 11 assert np.array_equal(data_numpy, pd.read_csv(path, header=None).to_numpy()) # Create the same CSVDataNode but with custom exposed_type csv_data_node_as_custom_object = CSVDataNode( \"quux\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": MyCustomObject} ) data_custom = csv_data_node_as_custom_object.read() assert isinstance(data_custom, list) assert len(data_custom) == 11 for (index, row_pandas), row_custom in zip(data_pandas.iterrows(), data_custom): assert isinstance(row_custom, MyCustomObject) assert row_pandas[0] == row_custom.id assert str(row_pandas[1]) == row_custom.integer assert row_pandas[2] == row_custom.text @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append(self, csv_file, default_data_frame, content): csv_dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file}) assert_frame_equal(csv_dn.read(), default_data_frame) csv_dn.append(content) assert_frame_equal( csv_dn.read(), pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index(drop=True), ) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_modin(self, csv_file, default_data_frame, content): csv_dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"modin\"}) df_equals(csv_dn.read(), modin_pd.DataFrame(default_data_frame)) csv_dn.append(content) df_equals( csv_dn.read(), modin_pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index( drop=True ), ) @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"]), ], ) def 
test_write(self, csv_file, default_data_frame, content, columns): csv_dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file}) assert np.array_equal(csv_dn.read().values, default_data_frame.values) if not columns: csv_dn.write(content) df = pd.DataFrame(content) else: csv_dn.write_with_column_names(content, columns) df = pd.DataFrame(content, columns=columns) assert np.array_equal(csv_dn.read().values, df.values) csv_dn.write(None) assert len(csv_dn.read()) == 0 def test_write_with_different_encoding(self, csv_file): data = pd.DataFrame([{\"\u2265a\": 1, \"b\": 2}]) utf8_dn = CSVDataNode(\"utf8_dn\", Scope.SCENARIO, properties={\"default_path\": csv_file}) utf16_dn = CSVDataNode(\"utf16_dn\", Scope.SCENARIO, properties={\"default_path\": csv_file, \"encoding\": \"utf-16\"}) # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding utf8_dn.write(data) assert np.array_equal(utf8_dn.read(), data) with pytest.raises(UnicodeError): utf16_dn.read() # If a file is written with utf-16 encoding, it can only be read with utf-16, not utf-8 encoding utf16_dn.write(data) assert np.array_equal(utf16_dn.read(), data) with pytest.raises(UnicodeError): utf8_dn.read() @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"]), ], ) def test_write_modin(self, csv_file, default_data_frame, content, columns): default_data_frame = modin_pd.DataFrame(default_data_frame) csv_dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"modin\"}) assert np.array_equal(csv_dn.read().values, default_data_frame.values) if not columns: csv_dn.write(content) df = pd.DataFrame(content) else: csv_dn.write_with_column_names(content, columns) df = pd.DataFrame(content, columns=columns) assert np.array_equal(csv_dn.read().values, df.values) csv_dn.write(None) assert len(csv_dn.read()) == 0 def test_write_modin_with_different_encoding(self, csv_file): data = pd.DataFrame([{\"\u2265a\": 1, \"b\": 2}]) utf8_dn = CSVDataNode(\"utf8_dn\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"modin\"}) utf16_dn = CSVDataNode( \"utf16_dn\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"modin\", \"encoding\": \"utf-16\"} ) # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding utf8_dn.write(data) assert np.array_equal(utf8_dn.read(), data) with pytest.raises(UnicodeError): utf16_dn.read() # If a file is written with utf-16 encoding, it can only be read with utf-16, not utf-8 encoding utf16_dn.write(data) assert np.array_equal(utf16_dn.read(), data) with pytest.raises(UnicodeError): utf8_dn.read() def test_set_path(self): dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": \"foo.csv\"}) assert dn.path == \"foo.csv\" dn.path = \"bar.csv\" assert dn.path == \"bar.csv\" def test_read_write_after_modify_path(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.csv\") dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path}) read_data = dn.read() assert read_data is not None dn.path = new_path with pytest.raises(FileNotFoundError): dn.read() dn.write(read_data) assert dn.read().equals(read_data) def test_pandas_exposed_type(self): path = 
os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"pandas\"}) assert isinstance(dn.read(), pd.DataFrame) def test_filter_pandas_exposed_type(self, csv_file): dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"pandas\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_modin_exposed_type(self, csv_file): dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"modin\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(modin_pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = modin_pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) 
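# The modin-backed results are compared with df_equals (imported above from
# modin.pandas.test.utils) rather than pandas.testing.assert_frame_equal, because the
# filtered values in this test are modin DataFrames, not pandas ones.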
df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = modin_pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_numpy_exposed_type(self, csv_file): dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"numpy\"}) dn.write( [ [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], ] ) # Test datanode indexing and slicing assert np.array_equal(dn[0], np.array([1, 1])) assert np.array_equal(dn[1], np.array([1, 2])) assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]])) # Test filter data assert np.array_equal(dn.filter((0, 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn.filter((0, 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn.filter((1, 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]])) assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]])) assert np.array_equal( dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR), np.array([[1, 1], [1, 2], [2, 1], [2, 2]]), ) assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]])) def test_raise_error_invalid_exposed_type(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") with pytest.raises(InvalidExposedType): CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"foo\"}) def test_get_system_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.csv\")) pd.DataFrame([]).to_csv(temp_file_path) dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"pandas\"}) dn.write(pd.DataFrame([1, 2, 3])) previous_edit_date = dn.last_edit_date sleep(0.1) pd.DataFrame([4, 5, 6]).to_csv(temp_file_path) new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert previous_edit_date < dn.last_edit_date assert new_edit_date == dn.last_edit_date sleep(0.1) dn.write(pd.DataFrame([7, 8, 9])) assert new_edit_date < dn.last_edit_date os.unlink(temp_file_path) "} {"text": "import os import pathlib from datetime import datetime from time import sleep import modin.pandas as modin_pd import pandas as pd import pytest from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.pickle import PickleDataNode 
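# The tests below construct PickleDataNode instances directly, typically as
# PickleDataNode("foo", Scope.SCENARIO, properties={"default_data": ...}), and check
# reading, writing, path overrides, and last-edit-date handling against temporary .p files.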
from src.taipy.core.exceptions.exceptions import NoData from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import InvalidConfigurationId @pytest.fixture(scope=\"function\", autouse=True) def cleanup(): yield path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.p\") if os.path.isfile(path): os.remove(path) class TestPickleDataNodeEntity: @pytest.fixture(scope=\"function\", autouse=True) def remove_pickle_files(self): yield import glob for f in glob.glob(\"*.p\"): print(f\"deleting file {f}\") os.remove(f) def test_create(self): dn = PickleDataNode(\"foobar_bazxyxea\", Scope.SCENARIO, properties={\"default_data\": \"Data\"}) assert os.path.isfile(Config.core.storage_folder + \"pickles/\" + dn.id + \".p\") assert isinstance(dn, PickleDataNode) assert dn.storage_type() == \"pickle\" assert dn.config_id == \"foobar_bazxyxea\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.name is None assert dn.owner_id is None assert dn.last_edit_date is not None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.read() == \"Data\" assert dn.last_edit_date is not None assert dn.job_ids == [] with pytest.raises(InvalidConfigurationId): PickleDataNode(\"foobar bazxyxea\", Scope.SCENARIO, properties={\"default_data\": \"Data\"}) def test_get_user_properties(self, pickle_file_path): dn_1 = PickleDataNode(\"dn_1\", Scope.SCENARIO, properties={\"path\": pickle_file_path}) assert dn_1._get_user_properties() == {} dn_2 = PickleDataNode( \"dn_2\", Scope.SCENARIO, properties={ \"default_data\": \"foo\", \"default_path\": pickle_file_path, \"foo\": \"bar\", }, ) # default_data, default_path, path, is_generated are filtered out assert dn_2._get_user_properties() == {\"foo\": \"bar\"} def test_new_pickle_data_node_with_existing_file_is_ready_for_reading(self): not_ready_dn_cfg = Config.configure_data_node(\"not_ready_data_node_config_id\", \"pickle\", path=\"NOT_EXISTING.p\") path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.p\") ready_dn_cfg = Config.configure_data_node(\"ready_data_node_config_id\", \"pickle\", path=path) dns = _DataManager._bulk_get_or_create([not_ready_dn_cfg, ready_dn_cfg]) assert not dns[not_ready_dn_cfg].is_ready_for_reading assert dns[ready_dn_cfg].is_ready_for_reading def test_create_with_file_name(self): dn = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": \"bar\", \"path\": \"foo.FILE.p\"}) assert os.path.isfile(\"foo.FILE.p\") assert dn.read() == \"bar\" dn.write(\"qux\") assert dn.read() == \"qux\" dn.write(1998) assert dn.read() == 1998 def test_read_and_write(self): no_data_dn = PickleDataNode(\"foo\", Scope.SCENARIO) with pytest.raises(NoData): assert no_data_dn.read() is None no_data_dn.read_or_raise() pickle_str = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": \"bar\"}) assert isinstance(pickle_str.read(), str) assert pickle_str.read() == \"bar\" pickle_str.properties[\"default_data\"] = \"baz\" # this modifies the default data value but not the data itself assert pickle_str.read() == \"bar\" pickle_str.write(\"qux\") assert pickle_str.read() == \"qux\" pickle_str.write(1998) assert pickle_str.read() == 1998 assert isinstance(pickle_str.read(), int) pickle_int = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": 197}) assert isinstance(pickle_int.read(), int) assert pickle_int.read() == 197 pickle_dict = PickleDataNode( \"foo\", Scope.SCENARIO, 
properties={\"default_data\": {\"bar\": 12, \"baz\": \"qux\", \"quux\": [13]}} ) assert isinstance(pickle_dict.read(), dict) assert pickle_dict.read() == {\"bar\": 12, \"baz\": \"qux\", \"quux\": [13]} default_pandas = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}) new_pandas_df = pd.DataFrame({\"c\": [7, 8, 9], \"d\": [10, 11, 12]}) default_modin = modin_pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}) new_modin_df = modin_pd.DataFrame({\"c\": [7, 8, 9], \"d\": [10, 11, 12]}) pickle_pandas = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": default_pandas}) assert isinstance(pickle_pandas.read(), pd.DataFrame) assert default_pandas.equals(pickle_pandas.read()) pickle_pandas.write(new_pandas_df) assert new_pandas_df.equals(pickle_pandas.read()) assert isinstance(pickle_pandas.read(), pd.DataFrame) pickle_pandas.write(new_modin_df) assert new_modin_df.equals(pickle_pandas.read()) assert isinstance(pickle_pandas.read(), modin_pd.DataFrame) pickle_pandas.write(1998) assert pickle_pandas.read() == 1998 assert isinstance(pickle_pandas.read(), int) pickle_modin = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": default_modin}) assert isinstance(pickle_modin.read(), modin_pd.DataFrame) assert default_modin.equals(pickle_modin.read()) pickle_modin.write(new_modin_df) assert new_modin_df.equals(pickle_modin.read()) assert isinstance(pickle_modin.read(), modin_pd.DataFrame) pickle_modin.write(new_pandas_df) assert new_pandas_df.equals(pickle_modin.read()) assert isinstance(pickle_modin.read(), pd.DataFrame) pickle_modin.write(1998) assert pickle_modin.read() == 1998 assert isinstance(pickle_modin.read(), int) def test_path_overrides_default_path(self): dn = PickleDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_data\": \"bar\", \"default_path\": \"foo.FILE.p\", \"path\": \"bar.FILE.p\", }, ) assert dn.path == \"bar.FILE.p\" def test_set_path(self): dn = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": \"foo.p\"}) assert dn.path == \"foo.p\" dn.path = \"bar.p\" assert dn.path == \"bar.p\" def test_is_generated(self): dn = PickleDataNode(\"foo\", Scope.SCENARIO, properties={}) assert dn.is_generated dn.path = \"bar.p\" assert not dn.is_generated def test_read_write_after_modify_path(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.p\") new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.p\") dn = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path}) read_data = dn.read() assert read_data is not None dn.path = new_path with pytest.raises(FileNotFoundError): dn.read() dn.write({\"other\": \"stuff\"}) assert dn.read() == {\"other\": \"stuff\"} def test_get_system_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.pickle\")) pd.DataFrame([]).to_pickle(temp_file_path) dn = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"pandas\"}) dn.write(pd.DataFrame([1, 2, 3])) previous_edit_date = dn.last_edit_date sleep(0.1) pd.DataFrame([4, 5, 6]).to_pickle(temp_file_path) new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert previous_edit_date < dn.last_edit_date assert new_edit_date == dn.last_edit_date sleep(0.1) dn.write(pd.DataFrame([7, 8, 9])) assert new_edit_date < dn.last_edit_date os.unlink(temp_file_path) "} {"text": "import pytest from src.taipy.core.data.data_node import DataNode from 
src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.generic import GenericDataNode from src.taipy.core.exceptions.exceptions import MissingReadFunction, MissingRequiredProperty, MissingWriteFunction from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId def read_fct(): return TestGenericDataNode.data def read_fct_with_args(inp): return [i + inp for i in TestGenericDataNode.data] def write_fct(data): data.append(data[-1] + 1) def write_fct_with_args(data, inp): for _ in range(inp): data.append(data[-1] + 1) def read_fct_modify_data_node_name(data_node_id: DataNodeId, name: str): import src.taipy.core as tp data_node = tp.get(data_node_id) assert isinstance(data_node, DataNode) data_node.name = name # type:ignore return data_node def reset_data(): TestGenericDataNode.data = [i for i in range(10)] class TestGenericDataNode: data = [i for i in range(10)] def test_create(self): dn = GenericDataNode( \"foo_bar\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"write_fct\": write_fct, \"name\": \"super name\"} ) assert isinstance(dn, GenericDataNode) assert dn.storage_type() == \"generic\" assert dn.config_id == \"foo_bar\" assert dn.name == \"super name\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.last_edit_date is not None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.properties[\"read_fct\"] == read_fct assert dn.properties[\"write_fct\"] == write_fct dn_1 = GenericDataNode( \"foo\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"write_fct\": None, \"name\": \"foo\"} ) assert isinstance(dn, GenericDataNode) assert dn_1.storage_type() == \"generic\" assert dn_1.config_id == \"foo\" assert dn_1.name == \"foo\" assert dn_1.scope == Scope.SCENARIO assert dn_1.id is not None assert dn_1.owner_id is None assert dn_1.last_edit_date is not None assert dn_1.job_ids == [] assert dn_1.is_ready_for_reading assert dn_1.properties[\"read_fct\"] == read_fct assert dn_1.properties[\"write_fct\"] is None dn_2 = GenericDataNode( \"xyz\", Scope.SCENARIO, properties={\"read_fct\": None, \"write_fct\": write_fct, \"name\": \"xyz\"} ) assert isinstance(dn, GenericDataNode) assert dn_2.storage_type() == \"generic\" assert dn_2.config_id == \"xyz\" assert dn_2.name == \"xyz\" assert dn_2.scope == Scope.SCENARIO assert dn_2.id is not None assert dn_2.owner_id is None assert dn_2.last_edit_date is not None assert dn_2.job_ids == [] assert dn_2.is_ready_for_reading assert dn_2.properties[\"read_fct\"] is None assert dn_2.properties[\"write_fct\"] == write_fct dn_3 = GenericDataNode(\"xyz\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"name\": \"xyz\"}) assert isinstance(dn, GenericDataNode) assert dn_3.storage_type() == \"generic\" assert dn_3.config_id == \"xyz\" assert dn_3.name == \"xyz\" assert dn_3.scope == Scope.SCENARIO assert dn_3.id is not None assert dn_3.owner_id is None assert dn_3.last_edit_date is not None assert dn_3.job_ids == [] assert dn_3.is_ready_for_reading assert dn_3.properties[\"read_fct\"] == read_fct assert dn_3.properties[\"write_fct\"] is None dn_4 = GenericDataNode(\"xyz\", Scope.SCENARIO, properties={\"write_fct\": write_fct, \"name\": \"xyz\"}) assert isinstance(dn, GenericDataNode) assert dn_4.storage_type() == \"generic\" assert dn_4.config_id == \"xyz\" assert dn_4.name == \"xyz\" assert dn_4.scope == Scope.SCENARIO assert dn_4.id is not None assert dn_4.owner_id is None assert dn_4.last_edit_date is not None 
assert dn_4.job_ids == [] assert dn_4.is_ready_for_reading assert dn_4.properties[\"read_fct\"] is None assert dn_4.properties[\"write_fct\"] == write_fct with pytest.raises(InvalidConfigurationId): GenericDataNode(\"foo bar\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"write_fct\": write_fct}) def test_get_user_properties(self): dn_1 = GenericDataNode( \"dn_1\", Scope.SCENARIO, properties={ \"read_fct\": read_fct, \"write_fct\": write_fct, \"read_fct_args\": 1, \"write_fct_args\": 2, \"foo\": \"bar\", }, ) # read_fct, read_fct_args, write_fct, write_fct_args are filtered out assert dn_1._get_user_properties() == {\"foo\": \"bar\"} def test_create_with_missing_parameters(self): with pytest.raises(MissingRequiredProperty): GenericDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(MissingRequiredProperty): GenericDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties={}) def test_read_write_generic_datanode(self): generic_dn = GenericDataNode(\"foo\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"write_fct\": write_fct}) assert generic_dn.read() == self.data assert len(generic_dn.read()) == 10 generic_dn.write(self.data) assert generic_dn.read() == self.data assert len(generic_dn.read()) == 11 generic_dn_1 = GenericDataNode(\"bar\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"write_fct\": None}) assert generic_dn_1.read() == self.data assert len(generic_dn_1.read()) == 11 with pytest.raises(MissingWriteFunction): generic_dn_1.write(self.data) generic_dn_2 = GenericDataNode(\"xyz\", Scope.SCENARIO, properties={\"read_fct\": None, \"write_fct\": write_fct}) generic_dn_2.write(self.data) assert len(self.data) == 12 with pytest.raises(MissingReadFunction): generic_dn_2.read() generic_dn_3 = GenericDataNode(\"bar\", Scope.SCENARIO, properties={\"read_fct\": None, \"write_fct\": None}) with pytest.raises(MissingReadFunction): generic_dn_3.read() with pytest.raises(MissingWriteFunction): generic_dn_3.write(self.data) reset_data() def test_read_write_generic_datanode_with_arguments(self): generic_dn = GenericDataNode( \"foo\", Scope.SCENARIO, properties={ \"read_fct\": read_fct_with_args, \"write_fct\": write_fct_with_args, \"read_fct_args\": [1], \"write_fct_args\": [2], }, ) assert all([a + 1 == b for a, b in zip(self.data, generic_dn.read())]) assert len(generic_dn.read()) == 10 generic_dn.write(self.data) assert len(generic_dn.read()) == 12 reset_data() def test_read_write_generic_datanode_with_non_list_arguments(self): generic_dn = GenericDataNode( \"foo\", Scope.SCENARIO, properties={ \"read_fct\": read_fct_with_args, \"write_fct\": write_fct_with_args, \"read_fct_args\": 1, \"write_fct_args\": 2, }, ) assert all([a + 1 == b for a, b in zip(self.data, generic_dn.read())]) assert len(generic_dn.read()) == 10 generic_dn.write(self.data) assert len(generic_dn.read()) == 12 reset_data() def test_save_data_node_when_read(self): generic_dn = GenericDataNode( \"foo\", Scope.SCENARIO, properties={\"read_fct\": read_fct_modify_data_node_name, \"write_fct\": write_fct} ) generic_dn._properties[\"read_fct_args\"] = (generic_dn.id, \"bar\") generic_dn.read() assert generic_dn.name == \"bar\" "} {"text": " from dataclasses import dataclass from datetime import datetime from unittest.mock import patch import mongomock import pymongo import pytest from bson import ObjectId from bson.errors import InvalidDocument from src.taipy.core import MongoDefaultDocument from src.taipy.core.common._mongo_connector import _connect_mongodb from 
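A hedged sketch of the read/write delegation that the generic data node tests above rely on: `read_fct` is called with `read_fct_args`, while `write_fct` receives the written data first, then `write_fct_args`. The backing list and function names are illustrative only.

```python
from src.taipy.core.data.generic import GenericDataNode
from taipy.config.common.scope import Scope

store = [0, 1, 2]

def read_store(offset):
    # read_fct is called with read_fct_args only.
    return [v + offset for v in store]

def extend_store(data, count):
    # write_fct receives the written data first, then write_fct_args.
    for _ in range(count):
        data.append(data[-1] + 1)

dn = GenericDataNode(
    "sketch_generic",
    Scope.SCENARIO,
    properties={
        "read_fct": read_store, "read_fct_args": [10],
        "write_fct": extend_store, "write_fct_args": [2],
    },
)
assert dn.read() == [10, 11, 12]
dn.write(store)            # appends two values to the backing list
assert store == [0, 1, 2, 3, 4]
```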
src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.mongo import MongoCollectionDataNode from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.exceptions.exceptions import InvalidCustomDocument, MissingRequiredProperty from taipy.config.common.scope import Scope @pytest.fixture(scope=\"function\", autouse=True) def clear_mongo_connection_cache(): _connect_mongodb.cache_clear() @dataclass class CustomObjectWithoutArgs: def __init__(self, foo=None, bar=None): self.foo = foo self.bar = bar class CustomObjectWithCustomEncoder: def __init__(self, _id=None, integer=None, text=None, time=None): self.id = _id self.integer = integer self.text = text self.time = time def encode(self): return {\"_id\": self.id, \"integer\": self.integer, \"text\": self.text, \"time\": self.time.isoformat()} class CustomObjectWithCustomEncoderDecoder(CustomObjectWithCustomEncoder): @classmethod def decode(cls, data): return cls(data[\"_id\"], data[\"integer\"], data[\"text\"], datetime.fromisoformat(data[\"time\"])) class TestMongoCollectionDataNode: __properties = [ { \"db_username\": \"\", \"db_password\": \"\", \"db_name\": \"taipy\", \"collection_name\": \"foo\", \"custom_document\": MongoDefaultDocument, \"db_extra_args\": { \"ssl\": \"true\", \"retrywrites\": \"false\", \"maxIdleTimeMS\": \"120000\", }, } ] @pytest.mark.parametrize(\"properties\", __properties) def test_create(self, properties): mongo_dn = MongoCollectionDataNode( \"foo_bar\", Scope.SCENARIO, properties=properties, ) assert isinstance(mongo_dn, MongoCollectionDataNode) assert mongo_dn.storage_type() == \"mongo_collection\" assert mongo_dn.config_id == \"foo_bar\" assert mongo_dn.scope == Scope.SCENARIO assert mongo_dn.id is not None assert mongo_dn.owner_id is None assert mongo_dn.job_ids == [] assert mongo_dn.is_ready_for_reading assert mongo_dn.custom_document == MongoDefaultDocument @pytest.mark.parametrize(\"properties\", __properties) def test_get_user_properties(self, properties): custom_properties = properties.copy() custom_properties[\"foo\"] = \"bar\" mongo_dn = MongoCollectionDataNode( \"foo_bar\", Scope.SCENARIO, properties=custom_properties, ) assert mongo_dn._get_user_properties() == {\"foo\": \"bar\"} @pytest.mark.parametrize( \"properties\", [ {}, {\"db_username\": \"foo\"}, {\"db_username\": \"foo\", \"db_password\": \"foo\"}, {\"db_username\": \"foo\", \"db_password\": \"foo\", \"db_name\": \"foo\"}, ], ) def test_create_with_missing_parameters(self, properties): with pytest.raises(MissingRequiredProperty): MongoCollectionDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(MissingRequiredProperty): MongoCollectionDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) @pytest.mark.parametrize(\"properties\", __properties) def test_raise_error_invalid_custom_document(self, properties): custom_properties = properties.copy() custom_properties[\"custom_document\"] = \"foo\" with pytest.raises(InvalidCustomDocument): MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=custom_properties, ) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def test_read(self, properties): mock_client = pymongo.MongoClient(\"localhost\") mock_client[properties[\"db_name\"]][properties[\"collection_name\"]].insert_many( [ {\"foo\": \"baz\", \"bar\": \"qux\"}, {\"foo\": \"quux\", \"bar\": \"quuz\"}, {\"foo\": \"corge\"}, {\"bar\": \"grault\"}, {\"KWARGS_KEY\": \"KWARGS_VALUE\"}, {}, ] ) 
mongo_dn = MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=properties, ) data = mongo_dn.read() assert isinstance(data, list) assert isinstance(data[0], MongoDefaultDocument) assert isinstance(data[1], MongoDefaultDocument) assert isinstance(data[2], MongoDefaultDocument) assert isinstance(data[3], MongoDefaultDocument) assert isinstance(data[4], MongoDefaultDocument) assert isinstance(data[5], MongoDefaultDocument) assert isinstance(data[0]._id, ObjectId) assert data[0].foo == \"baz\" assert data[0].bar == \"qux\" assert isinstance(data[1]._id, ObjectId) assert data[1].foo == \"quux\" assert data[1].bar == \"quuz\" assert isinstance(data[2]._id, ObjectId) assert data[2].foo == \"corge\" assert isinstance(data[3]._id, ObjectId) assert data[3].bar == \"grault\" assert isinstance(data[4]._id, ObjectId) assert data[4].KWARGS_KEY == \"KWARGS_VALUE\" assert isinstance(data[5]._id, ObjectId) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def test_read_empty_as(self, properties): mongo_dn = MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=properties, ) data = mongo_dn.read() assert isinstance(data, list) assert len(data) == 0 @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) @pytest.mark.parametrize( \"data\", [ ([{\"foo\": 1, \"a\": 2}, {\"foo\": 3, \"bar\": 4}]), ({\"a\": 1, \"bar\": 2}), ], ) def test_read_wrong_object_properties_name(self, properties, data): custom_properties = properties.copy() custom_properties[\"custom_document\"] = CustomObjectWithoutArgs mongo_dn = MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=custom_properties, ) mongo_dn.write(data) with pytest.raises(TypeError): data = mongo_dn.read() @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) @pytest.mark.parametrize( \"data\", [ ([{\"foo\": 11, \"bar\": 22}, {\"foo\": 33, \"bar\": 44}]), ({\"foz\": 1, \"baz\": 2}), ], ) def test_append(self, properties, data): mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=properties) mongo_dn.append(data) original_data = [{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}] mongo_dn.write(original_data) mongo_dn.append(data) assert len(mongo_dn.read()) == len(data if isinstance(data, list) else [data]) + len(original_data) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) @pytest.mark.parametrize( \"data,written_data\", [ ([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}], [{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]), ({\"foo\": 1, \"bar\": 2}, [{\"foo\": 1, \"bar\": 2}]), ], ) def test_write(self, properties, data, written_data): mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=properties) mongo_dn.write(data) read_objects = mongo_dn.read() for read_object, written_dict in zip(read_objects, written_data): assert isinstance(read_object._id, ObjectId) assert read_object.foo == written_dict[\"foo\"] assert read_object.bar == written_dict[\"bar\"] @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) @pytest.mark.parametrize( \"data\", [ [], ], ) def test_write_empty_list(self, properties, data): mongo_dn = MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=properties, ) mongo_dn.write(data) assert len(mongo_dn.read()) == 0 @mongomock.patch(servers=((\"localhost\", 27017),)) 
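A hedged sketch of the write-then-read behaviour the Mongo collection tests above verify: plain dicts go in, `MongoDefaultDocument` objects come back. The credentials are dummy values and mongomock stands in for a real server, exactly as in the suite.

```python
import mongomock

from src.taipy.core import MongoDefaultDocument
from src.taipy.core.data.mongo import MongoCollectionDataNode
from taipy.config.common.scope import Scope

@mongomock.patch(servers=(("localhost", 27017),))
def demo_mongo_read_write():
    dn = MongoCollectionDataNode(
        "sketch_mongo",
        Scope.SCENARIO,
        properties={
            "db_username": "", "db_password": "",
            "db_name": "taipy", "collection_name": "sketch",
            "custom_document": MongoDefaultDocument,
        },
    )
    dn.write([{"foo": 1, "bar": 2}, {"foo": 3}])
    docs = dn.read()
    # Each document comes back as a MongoDefaultDocument whose attributes mirror its fields.
    assert docs[0].foo == 1 and docs[0].bar == 2
    assert docs[1].foo == 3

demo_mongo_read_write()
```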
@pytest.mark.parametrize(\"properties\", __properties) def test_write_non_serializable(self, properties): mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=properties) data = {\"a\": 1, \"b\": mongo_dn} with pytest.raises(InvalidDocument): mongo_dn.write(data) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def test_write_custom_encoder(self, properties): custom_properties = properties.copy() custom_properties[\"custom_document\"] = CustomObjectWithCustomEncoder mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) data = [ CustomObjectWithCustomEncoder(\"1\", 1, \"abc\", datetime.now()), CustomObjectWithCustomEncoder(\"2\", 2, \"def\", datetime.now()), ] mongo_dn.write(data) read_data = mongo_dn.read() assert isinstance(read_data[0], CustomObjectWithCustomEncoder) assert isinstance(read_data[1], CustomObjectWithCustomEncoder) assert read_data[0].id == \"1\" assert read_data[0].integer == 1 assert read_data[0].text == \"abc\" assert isinstance(read_data[0].time, str) assert read_data[1].id == \"2\" assert read_data[1].integer == 2 assert read_data[1].text == \"def\" assert isinstance(read_data[1].time, str) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def test_write_custom_encoder_decoder(self, properties): custom_properties = properties.copy() custom_properties[\"custom_document\"] = CustomObjectWithCustomEncoderDecoder mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) data = [ CustomObjectWithCustomEncoderDecoder(\"1\", 1, \"abc\", datetime.now()), CustomObjectWithCustomEncoderDecoder(\"2\", 2, \"def\", datetime.now()), ] mongo_dn.write(data) read_data = mongo_dn.read() assert isinstance(read_data[0], CustomObjectWithCustomEncoderDecoder) assert isinstance(read_data[1], CustomObjectWithCustomEncoderDecoder) assert read_data[0].id == \"1\" assert read_data[0].integer == 1 assert read_data[0].text == \"abc\" assert isinstance(read_data[0].time, datetime) assert read_data[1].id == \"2\" assert read_data[1].integer == 2 assert read_data[1].text == \"def\" assert isinstance(read_data[1].time, datetime) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def test_filter(self, properties): mock_client = pymongo.MongoClient(\"localhost\") mock_client[properties[\"db_name\"]][properties[\"collection_name\"]].insert_many( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, {\"KWARGS_KEY\": \"KWARGS_VALUE\"}, ] ) mongo_dn = MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=properties, ) assert len(mongo_dn.filter((\"foo\", 1, Operator.EQUAL))) == 3 assert len(mongo_dn.filter((\"foo\", 1, Operator.NOT_EQUAL))) == 3 assert len(mongo_dn.filter((\"bar\", 2, Operator.EQUAL))) == 3 assert len(mongo_dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)) == 4 assert mongo_dn[\"foo\"] == [1, 1, 1, 2, None, None] assert mongo_dn[\"bar\"] == [1, 2, None, 2, 2, None] assert [m.__dict__ for m in mongo_dn[:3]] == [m.__dict__ for m in mongo_dn.read()[:3]] assert mongo_dn[[\"foo\", \"bar\"]] == [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, {}, ] @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def 
test_filter_does_not_read_all_entities(self, properties): mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=properties) # MongoCollectionDataNode.filter() should not call the MongoCollectionDataNode._read() method with patch.object(MongoCollectionDataNode, \"_read\") as read_mock: mongo_dn.filter((\"foo\", 1, Operator.EQUAL)) mongo_dn.filter((\"bar\", 2, Operator.NOT_EQUAL)) mongo_dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) assert read_mock[\"_read\"].call_count == 0 "} {"text": "from src.taipy.core.data.data_node import DataNode from src.taipy.core.data.in_memory import InMemoryDataNode from taipy.config.common.scope import Scope class FakeDataNode(InMemoryDataNode): read_has_been_called = 0 write_has_been_called = 0 def __init__(self, config_id, **kwargs): scope = kwargs.pop(\"scope\", Scope.SCENARIO) super().__init__(config_id=config_id, scope=scope, **kwargs) def _read(self, query=None): self.read_has_been_called += 1 def _write(self, data): self.write_has_been_called += 1 @classmethod def storage_type(cls) -> str: return \"fake_inmemory\" write = DataNode.write # Make sure that the writing behavior comes from DataNode class FakeDataframeDataNode(DataNode): COLUMN_NAME_1 = \"a\" COLUMN_NAME_2 = \"b\" def __init__(self, config_id, default_data_frame, **kwargs): super().__init__(config_id, **kwargs) self.data = default_data_frame def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_df_dn\" class FakeNumpyarrayDataNode(DataNode): def __init__(self, config_id, default_array, **kwargs): super().__init__(config_id, **kwargs) self.data = default_array def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_np_dn\" class FakeListDataNode(DataNode): class Row: def __init__(self, value): self.value = value def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = [self.Row(i) for i in range(10)] def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_list_dn\" class CustomClass: def __init__(self, a, b): self.a = a self.b = b class FakeCustomDataNode(DataNode): def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = [CustomClass(i, i * 2) for i in range(10)] def _read(self): return self.data class FakeMultiSheetExcelDataFrameDataNode(DataNode): def __init__(self, config_id, default_data_frame, **kwargs): super().__init__(config_id, **kwargs) self.data = { \"Sheet1\": default_data_frame, \"Sheet2\": default_data_frame, } def _read(self): return self.data class FakeMultiSheetExcelCustomDataNode(DataNode): def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = { \"Sheet1\": [CustomClass(i, i * 2) for i in range(10)], \"Sheet2\": [CustomClass(i, i * 2) for i in range(10)], } def _read(self): return self.data "} {"text": "import os import pytest from src.taipy.core.data._data_fs_repository import _DataFSRepository from src.taipy.core.data._data_sql_repository import _DataSQLRepository from src.taipy.core.data.data_node import DataNode, DataNodeId from src.taipy.core.exceptions import ModelNotFound class TestDataNodeRepository: @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_save_and_load(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) obj = repository._load(data_node.id) assert isinstance(obj, DataNode) @pytest.mark.parametrize(\"repo\", 
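To complement the Mongo filter tests above, a hedged sketch of `filter()` and column-style access on a Mongo collection data node; as `test_filter_does_not_read_all_entities` checks, the filter is translated to a Mongo query rather than reading the whole collection. Connection values and the collection name are placeholders.

```python
import mongomock

from src.taipy.core import MongoDefaultDocument
from src.taipy.core.data.mongo import MongoCollectionDataNode
from src.taipy.core.data.operator import JoinOperator, Operator
from taipy.config.common.scope import Scope

@mongomock.patch(servers=(("localhost", 27017),))
def demo_mongo_filter():
    dn = MongoCollectionDataNode(
        "sketch_mongo_filter",
        Scope.SCENARIO,
        properties={
            "db_username": "", "db_password": "",
            "db_name": "taipy", "collection_name": "sketch_filter",
            "custom_document": MongoDefaultDocument,
        },
    )
    dn.write([{"foo": 1, "bar": 1}, {"foo": 1, "bar": 2}, {"foo": 2, "bar": 2}])
    # (field, value, Operator) tuples; JoinOperator combines several conditions.
    assert len(dn.filter(("foo", 1, Operator.EQUAL))) == 2
    assert len(dn.filter([("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR)) == 3
    # Column-style access returns the field values in document order.
    assert dn["foo"] == [1, 1, 2]

demo_mongo_filter()
```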
[_DataFSRepository, _DataSQLRepository]) def test_exists(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) assert repository._exists(data_node.id) assert not repository._exists(\"not-existed-data-node\") @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_load_all(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_load_all_with_filters(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node.owner_id = f\"task-{i}\" repository._save(data_node) objs = repository._load_all(filters=[{\"owner_id\": \"task-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) repository._delete(data_node.id) with pytest.raises(ModelNotFound): repository._load(data_node.id) @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_all(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_many(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_by(self, data_node, repo, init_sql_repo): repository = repo() # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node._version = f\"{(i+1) // 5}.0\" repository._save(data_node) objs = repository._load_all() assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_search(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node.owner_id = f\"task-{i}\" repository._save(data_node) assert len(repository._load_all()) == 10 objs = repository._search(\"owner_id\", \"task-2\") assert len(objs) == 1 assert isinstance(objs[0], DataNode) objs = repository._search(\"owner_id\", \"task-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], DataNode) assert repository._search(\"owner_id\", \"task-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_export(self, tmpdir, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) repository._export(data_node.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _DataFSRepository else os.path.join(tmpdir.strpath, \"data_node\") assert 
os.path.exists(os.path.join(dir_path, f\"{data_node.id}.json\")) "} {"text": "import pytest from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import NoData from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId class TestInMemoryDataNodeEntity: def test_create(self): dn = InMemoryDataNode( \"foobar_bazy\", Scope.SCENARIO, DataNodeId(\"id_uio\"), \"owner_id\", properties={\"default_data\": \"In memory Data Node\", \"name\": \"my name\"}, ) assert isinstance(dn, InMemoryDataNode) assert dn.storage_type() == \"in_memory\" assert dn.config_id == \"foobar_bazy\" assert dn.scope == Scope.SCENARIO assert dn.id == \"id_uio\" assert dn.name == \"my name\" assert dn.owner_id == \"owner_id\" assert dn.last_edit_date is not None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.read() == \"In memory Data Node\" dn_2 = InMemoryDataNode(\"foo\", Scope.SCENARIO) assert dn_2.last_edit_date is None assert not dn_2.is_ready_for_reading with pytest.raises(InvalidConfigurationId): InMemoryDataNode(\"foo bar\", Scope.SCENARIO, DataNodeId(\"dn_id\")) def test_get_user_properties(self): dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": 1, \"foo\": \"bar\"}) assert dn._get_user_properties() == {\"foo\": \"bar\"} def test_read_and_write(self): no_data_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(NoData): assert no_data_dn.read() is None no_data_dn.read_or_raise() in_mem_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": \"bar\"}) assert isinstance(in_mem_dn.read(), str) assert in_mem_dn.read() == \"bar\" in_mem_dn.properties[\"default_data\"] = \"baz\" # this modifies the default data value but not the data itself assert in_mem_dn.read() == \"bar\" in_mem_dn.write(\"qux\") assert in_mem_dn.read() == \"qux\" in_mem_dn.write(1998) assert isinstance(in_mem_dn.read(), int) assert in_mem_dn.read() == 1998 "} {"text": "import os import pathlib from datetime import datetime from time import sleep from typing import Dict import modin.pandas as modin_pd import numpy as np import pandas as pd import pytest from modin.pandas.test.utils import df_equals from pandas.testing import assert_frame_equal from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.excel import ExcelDataNode from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.exceptions.exceptions import ( ExposedTypeLengthMismatch, InvalidExposedType, NoData, NonExistingExcelSheet, SheetNameLengthMismatch, ) from taipy.config.common.scope import Scope from taipy.config.config import Config @pytest.fixture(scope=\"function\", autouse=True) def cleanup(): yield path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.xlsx\") if os.path.exists(path): os.remove(path) class MyCustomObject: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class MyCustomObject1: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class MyCustomObject2: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class TestExcelDataNode: def test_new_excel_data_node_with_existing_file_is_ready_for_reading(self): not_ready_dn_cfg = 
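A hedged sketch of the in-memory data node behaviour the tests above assert: `default_data` is served until something else is written, and an empty node distinguishes `read()` from `read_or_raise()` (the latter raising `NoData`). The node id is illustrative.

```python
from src.taipy.core.data.in_memory import InMemoryDataNode
from taipy.config.common.scope import Scope

dn = InMemoryDataNode("sketch_in_memory", Scope.SCENARIO, properties={"default_data": "bar"})
assert dn.read() == "bar"       # default_data is served until something else is written
dn.write(1998)                  # the stored value may change type freely
assert dn.read() == 1998
```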
Config.configure_data_node(\"not_ready_data_node_config_id\", \"excel\", path=\"NOT_EXISTING.csv\") path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") ready_dn_cfg = Config.configure_data_node(\"ready_data_node_config_id\", \"excel\", path=path) dns = _DataManager._bulk_get_or_create([not_ready_dn_cfg, ready_dn_cfg]) assert not dns[not_ready_dn_cfg].is_ready_for_reading assert dns[ready_dn_cfg].is_ready_for_reading def test_create(self): path = \"data/node/path\" sheet_names = [\"sheet_name_1\", \"sheet_name_2\"] dn = ExcelDataNode( \"foo_bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": sheet_names, \"name\": \"super name\"}, ) assert isinstance(dn, ExcelDataNode) assert dn.storage_type() == \"excel\" assert dn.config_id == \"foo_bar\" assert dn.name == \"super name\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.parent_ids == set() assert dn.last_edit_date is None assert dn.job_ids == [] assert not dn.is_ready_for_reading assert dn.path == path assert dn.has_header is False assert dn.sheet_name == sheet_names def test_get_user_properties(self, excel_file): dn_1 = ExcelDataNode(\"dn_1\", Scope.SCENARIO, properties={\"path\": \"data/node/path\"}) assert dn_1._get_user_properties() == {} dn_2 = ExcelDataNode( \"dn_2\", Scope.SCENARIO, properties={ \"exposed_type\": \"numpy\", \"default_data\": \"foo\", \"default_path\": excel_file, \"has_header\": False, \"sheet_name\": [\"sheet_name_1\", \"sheet_name_2\"], \"foo\": \"bar\", }, ) # exposed_type, default_data, default_path, path, has_header are filtered out assert dn_2._get_user_properties() == {\"foo\": \"bar\"} def test_read_with_header(self): with pytest.raises(NoData): not_existing_excel = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.xlsx\"}) assert not_existing_excel.read() is None not_existing_excel.read_or_raise() empty_excel_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/empty.xlsx\") empty_excel = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": empty_excel_path, \"exposed_type\": MyCustomObject, \"has_header\": True}, ) assert len(empty_excel.read()) == 0 path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") # Create ExcelDataNode without exposed_type (Default is pandas.DataFrame) excel_data_node_as_pandas = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": \"Sheet1\"} ) data_pandas = excel_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 5 assert np.array_equal(data_pandas.to_numpy(), pd.read_excel(path).to_numpy()) # Create ExcelDataNode with modin exposed_type excel_data_node_as_modin = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"modin\"} ) data_modin = excel_data_node_as_modin.read() assert isinstance(data_modin, modin_pd.DataFrame) assert len(data_modin) == 5 assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path).to_numpy()) # Create ExcelDataNode with numpy exposed_type excel_data_node_as_numpy = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"numpy\", \"sheet_name\": \"Sheet1\"} ) data_numpy = excel_data_node_as_numpy.read() assert isinstance(data_numpy, np.ndarray) assert len(data_numpy) == 5 assert np.array_equal(data_numpy, pd.read_excel(path).to_numpy()) # Create the same ExcelDataNode but 
with custom exposed_type non_existing_sheet_name_custom = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": \"abc\", \"exposed_type\": MyCustomObject}, ) with pytest.raises(NonExistingExcelSheet): non_existing_sheet_name_custom.read() excel_data_node_as_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": MyCustomObject, \"sheet_name\": \"Sheet1\"}, ) data_custom = excel_data_node_as_custom_object.read() assert isinstance(data_custom, list) assert len(data_custom) == 5 for (_, row_pandas), row_custom in zip(data_pandas.iterrows(), data_custom): assert isinstance(row_custom, MyCustomObject) assert row_pandas[\"id\"] == row_custom.id assert row_pandas[\"integer\"] == row_custom.integer assert row_pandas[\"text\"] == row_custom.text def test_read_without_header(self): not_existing_excel = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.xlsx\", \"has_header\": False} ) with pytest.raises(NoData): assert not_existing_excel.read() is None not_existing_excel.read_or_raise() path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") # Create ExcelDataNode without exposed_type (Default is pandas.DataFrame) excel_data_node_as_pandas = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": \"Sheet1\"} ) data_pandas = excel_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 6 assert np.array_equal(data_pandas.to_numpy(), pd.read_excel(path, header=None).to_numpy()) # Create ExcelDataNode with modin exposed_type excel_data_node_as_modin = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"modin\"}, ) data_modin = excel_data_node_as_modin.read() assert isinstance(data_modin, modin_pd.DataFrame) assert len(data_modin) == 6 assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path, header=None).to_numpy()) # Create ExcelDataNode with numpy exposed_type excel_data_node_as_numpy = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": \"numpy\", \"sheet_name\": \"Sheet1\"}, ) data_numpy = excel_data_node_as_numpy.read() assert isinstance(data_numpy, np.ndarray) assert len(data_numpy) == 6 assert np.array_equal(data_numpy, pd.read_excel(path, header=None).to_numpy()) # Create the same ExcelDataNode but with custom exposed_type non_existing_sheet_name_custom = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": \"abc\", \"exposed_type\": MyCustomObject}, ) with pytest.raises(NonExistingExcelSheet): non_existing_sheet_name_custom.read() excel_data_node_as_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"has_header\": False, \"exposed_type\": MyCustomObject, \"sheet_name\": \"Sheet1\", }, ) data_custom = excel_data_node_as_custom_object.read() assert isinstance(data_custom, list) assert len(data_custom) == 6 for (_, row_pandas), row_custom in zip(data_pandas.iterrows(), data_custom): assert isinstance(row_custom, MyCustomObject) assert row_pandas[0] == row_custom.id assert row_pandas[1] == row_custom.integer assert row_pandas[2] == row_custom.text @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], 
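A hedged sketch of how `exposed_type` changes what a single-sheet `ExcelDataNode.read()` returns, mirroring the header tests above. It assumes the same `data_sample/example.xlsx` workbook the suite uses; any spreadsheet with a header row would behave the same way.

```python
import numpy as np
import pandas as pd

from src.taipy.core.data.excel import ExcelDataNode
from taipy.config.common.scope import Scope

path = "data_sample/example.xlsx"   # sample workbook used by the tests above (assumed present)

as_pandas = ExcelDataNode("sketch_xl_pandas", Scope.SCENARIO,
                          properties={"path": path, "sheet_name": "Sheet1"})
as_numpy = ExcelDataNode("sketch_xl_numpy", Scope.SCENARIO,
                         properties={"path": path, "sheet_name": "Sheet1", "exposed_type": "numpy"})

assert isinstance(as_pandas.read(), pd.DataFrame)   # pandas is the default exposed_type
assert isinstance(as_numpy.read(), np.ndarray)
assert np.array_equal(as_numpy.read(), as_pandas.read().to_numpy())
```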
[\"e\", \"f\", \"g\"]), ], ) def test_write(self, excel_file, default_data_frame, content, columns): excel_dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\"}) assert np.array_equal(excel_dn.read().values, default_data_frame.values) if not columns: excel_dn.write(content) df = pd.DataFrame(content) else: excel_dn.write_with_column_names(content, columns) df = pd.DataFrame(content, columns=columns) assert np.array_equal(excel_dn.read().values, df.values) excel_dn.write(None) assert len(excel_dn.read()) == 0 @pytest.mark.parametrize( \"content,sheet_name\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], \"sheet_name\"), ([[11, 22, 33], [44, 55, 66]], [\"sheet_name\"]), ], ) def test_write_with_sheet_name(self, excel_file_with_sheet_name, default_data_frame, content, sheet_name): excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_sheet_name, \"sheet_name\": sheet_name} ) df = pd.DataFrame(content) if isinstance(sheet_name, str): assert np.array_equal(excel_dn.read().values, default_data_frame.values) else: assert np.array_equal(excel_dn.read()[\"sheet_name\"].values, default_data_frame.values) excel_dn.write(content) if isinstance(sheet_name, str): assert np.array_equal(excel_dn.read().values, df.values) else: assert np.array_equal(excel_dn.read()[\"sheet_name\"].values, df.values) sheet_names = pd.ExcelFile(excel_file_with_sheet_name).sheet_names expected_sheet_name = sheet_name[0] if isinstance(sheet_name, list) else sheet_name assert sheet_names[0] == expected_sheet_name excel_dn.write(None) if isinstance(sheet_name, str): assert len(excel_dn.read()) == 0 else: assert len(excel_dn.read()) == 1 @pytest.mark.parametrize( \"content,sheet_name\", [ ([[11, 22, 33], [44, 55, 66]], [\"sheet_name_1\", \"sheet_name_2\"]), ], ) def test_raise_write_with_sheet_name_length_mismatch( self, excel_file_with_sheet_name, default_data_frame, content, sheet_name ): excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_sheet_name, \"sheet_name\": sheet_name} ) with pytest.raises(SheetNameLengthMismatch): excel_dn.write(content) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), ], ) def test_write_without_sheet_name(self, excel_file_with_sheet_name, default_data_frame, content): excel_dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_sheet_name}) default_data_frame = {\"sheet_name\": default_data_frame} df = {\"Sheet1\": pd.DataFrame(content)} assert np.array_equal(excel_dn.read()[\"sheet_name\"].values, default_data_frame[\"sheet_name\"].values) excel_dn.write(content) assert np.array_equal(excel_dn.read()[\"Sheet1\"].values, df[\"Sheet1\"].values) sheet_names = pd.ExcelFile(excel_file_with_sheet_name).sheet_names expected_sheet_name = \"Sheet1\" assert sheet_names[0] == expected_sheet_name excel_dn.write(None) assert len(excel_dn.read()) == 1 @pytest.mark.parametrize( \"content,columns,sheet_name\", [ ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"], \"sheet_name\"), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"], [\"sheet_name\"]), ], ) def test_write_with_column_and_sheet_name( self, excel_file_with_sheet_name, default_data_frame, content, columns, sheet_name ): excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_sheet_name, \"sheet_name\": sheet_name} ) df = pd.DataFrame(content) if isinstance(sheet_name, str): 
assert np.array_equal(excel_dn.read().values, default_data_frame.values) else: assert np.array_equal(excel_dn.read()[\"sheet_name\"].values, default_data_frame.values) excel_dn.write_with_column_names(content, columns) if isinstance(sheet_name, str): assert np.array_equal(excel_dn.read().values, df.values) else: assert np.array_equal(excel_dn.read()[\"sheet_name\"].values, df.values) sheet_names = pd.ExcelFile(excel_file_with_sheet_name).sheet_names expected_sheet_name = sheet_name[0] if isinstance(sheet_name, list) else sheet_name assert sheet_names[0] == expected_sheet_name excel_dn.write(None) if isinstance(sheet_name, str): assert len(excel_dn.read()) == 0 else: assert len(excel_dn.read()) == 1 @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"]), ], ) def test_write_modin(self, excel_file, default_data_frame, content, columns): excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"modin\"} ) assert np.array_equal(excel_dn.read().values, default_data_frame.values) if not columns: excel_dn.write(content) df = modin_pd.DataFrame(content) else: excel_dn.write_with_column_names(content, columns) df = modin_pd.DataFrame(content, columns=columns) assert np.array_equal(excel_dn.read().values, df.values) excel_dn.write(None) assert len(excel_dn.read()) == 0 def test_read_multi_sheet_with_header(self): not_existing_excel = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.xlsx\", \"sheet_name\": [\"sheet_name_1\", \"sheet_name_2\"]}, ) with pytest.raises(NoData): assert not_existing_excel.read() is None not_existing_excel.read_or_raise() path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") sheet_names = [\"Sheet1\", \"Sheet2\"] # Create ExcelDataNode without exposed_type (Default is pandas.DataFrame) excel_data_node_as_pandas = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names} ) data_pandas = excel_data_node_as_pandas.read() assert isinstance(data_pandas, Dict) assert len(data_pandas) == 2 assert all( len(data_pandas[sheet_name] == 5) and isinstance(data_pandas[sheet_name], pd.DataFrame) for sheet_name in sheet_names ) assert list(data_pandas.keys()) == sheet_names for sheet_name in sheet_names: assert data_pandas[sheet_name].equals(pd.read_excel(path, sheet_name=sheet_name)) excel_data_node_as_pandas_no_sheet_name = ExcelDataNode(\"bar\", Scope.SCENARIO, properties={\"path\": path}) data_pandas_no_sheet_name = excel_data_node_as_pandas_no_sheet_name.read() assert isinstance(data_pandas_no_sheet_name, Dict) for key in data_pandas_no_sheet_name.keys(): assert isinstance(data_pandas_no_sheet_name[key], pd.DataFrame) assert data_pandas[key].equals(data_pandas_no_sheet_name[key]) # Create ExcelDataNode with modin exposed_type excel_data_node_as_modin = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": \"modin\"} ) data_modin = excel_data_node_as_modin.read() assert isinstance(data_modin, Dict) assert len(data_modin) == 2 assert all( len(data_modin[sheet_name] == 5) and isinstance(data_modin[sheet_name], modin_pd.DataFrame) for sheet_name in sheet_names ) assert list(data_modin.keys()) == sheet_names for sheet_name in sheet_names: assert 
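A hedged sketch of the two single-sheet write paths the parametrized tests above cover: `write()` with dicts (the keys become the header) and `write_with_column_names()` with row lists plus explicit columns. The file "sketch.xlsx" is hypothetical and assumed to be created on the first write.

```python
import numpy as np
import pandas as pd

from src.taipy.core.data.excel import ExcelDataNode
from taipy.config.common.scope import Scope

dn = ExcelDataNode("sketch_xl_write", Scope.SCENARIO,
                   properties={"path": "sketch.xlsx", "sheet_name": "Sheet1"})

dn.write([{"a": 11, "b": 22}, {"a": 44, "b": 55}])                 # dict keys provide the header
assert np.array_equal(dn.read().values,
                      pd.DataFrame([{"a": 11, "b": 22}, {"a": 44, "b": 55}]).values)

dn.write_with_column_names([[1, 2], [3, 4]], ["e", "f"])           # rows plus explicit columns
assert np.array_equal(dn.read().values,
                      pd.DataFrame([[1, 2], [3, 4]], columns=["e", "f"]).values)
```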
data_modin[sheet_name].equals(modin_pd.read_excel(path, sheet_name=sheet_name)) excel_data_node_as_pandas_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"modin\"} ) data_modin_no_sheet_name = excel_data_node_as_pandas_no_sheet_name.read() assert isinstance(data_modin_no_sheet_name, Dict) for key in data_modin_no_sheet_name.keys(): assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame) assert data_modin[key].equals(data_modin_no_sheet_name[key]) # Create ExcelDataNode with numpy exposed_type excel_data_node_as_numpy = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": \"numpy\"}, ) data_numpy = excel_data_node_as_numpy.read() assert isinstance(data_numpy, Dict) assert len(data_numpy) == 2 assert all( len(data_numpy[sheet_name] == 5) and isinstance(data_numpy[sheet_name], np.ndarray) for sheet_name in sheet_names ) assert list(data_numpy.keys()) == sheet_names for sheet_name in sheet_names: assert np.array_equal(data_pandas[sheet_name], pd.read_excel(path, sheet_name=sheet_name).to_numpy()) excel_data_node_as_numpy_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"numpy\"}, ) data_numpy_no_sheet_name = excel_data_node_as_numpy_no_sheet_name.read() assert isinstance(data_numpy_no_sheet_name, Dict) for key in data_numpy_no_sheet_name.keys(): assert isinstance(data_numpy_no_sheet_name[key], np.ndarray) assert np.array_equal(data_numpy[key], data_numpy_no_sheet_name[key]) # Create the same ExcelDataNode but with custom exposed_type non_existing_sheet_name_custom = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"sheet_name\": [\"Sheet1\", \"xyz\"], \"exposed_type\": MyCustomObject1, }, ) with pytest.raises(NonExistingExcelSheet): non_existing_sheet_name_custom.read() excel_data_node_as_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": MyCustomObject1}, ) data_custom = excel_data_node_as_custom_object.read() assert isinstance(data_custom, Dict) assert len(data_custom) == 2 assert all(len(data_custom[sheet_name]) == 5 for sheet_name in sheet_names) assert list(data_custom.keys()) == sheet_names for sheet_name in sheet_names: sheet_data_pandas, sheet_data_custom = data_pandas[sheet_name], data_custom[sheet_name] for (_, row_pandas), row_custom in zip(sheet_data_pandas.iterrows(), sheet_data_custom): assert isinstance(row_custom, MyCustomObject1) assert row_pandas[\"id\"] == row_custom.id assert row_pandas[\"integer\"] == row_custom.integer assert row_pandas[\"text\"] == row_custom.text excel_data_node_as_custom_object_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": MyCustomObject1}, ) data_custom_no_sheet_name = excel_data_node_as_custom_object_no_sheet_name.read() assert isinstance(data_custom_no_sheet_name, Dict) assert len(data_custom_no_sheet_name) == 2 assert data_custom.keys() == data_custom_no_sheet_name.keys() for sheet_name in sheet_names: sheet_data_custom_no_sheet_name, sheet_data_custom = ( data_custom_no_sheet_name[sheet_name], data_custom[sheet_name], ) for row_custom_no_sheet_name, row_custom in zip(sheet_data_custom_no_sheet_name, sheet_data_custom): assert isinstance(row_custom_no_sheet_name, MyCustomObject1) assert row_custom_no_sheet_name.id == row_custom.id assert row_custom_no_sheet_name.integer == row_custom.integer assert 
row_custom_no_sheet_name.text == row_custom.text with pytest.raises(ExposedTypeLengthMismatch): dn = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"sheet_name\": [\"Sheet1\"], \"exposed_type\": [MyCustomObject1, MyCustomObject2], }, ) dn.read() custom_class_dict = {\"Sheet1\": MyCustomObject1, \"Sheet2\": MyCustomObject2} excel_data_node_as_multi_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": custom_class_dict}, ) assert excel_data_node_as_multi_custom_object.properties[\"exposed_type\"] == custom_class_dict excel_data_node_as_multi_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": [MyCustomObject1, MyCustomObject2]}, ) assert excel_data_node_as_multi_custom_object.properties[\"exposed_type\"] == [MyCustomObject1, MyCustomObject2] multi_data_custom = excel_data_node_as_multi_custom_object.read() assert isinstance(multi_data_custom, Dict) assert len(multi_data_custom) == 2 assert all(len(multi_data_custom[sheet_name]) == 5 for sheet_name in sheet_names) assert list(multi_data_custom.keys()) == sheet_names for sheet_name, custom_class in custom_class_dict.items(): sheet_data_pandas, sheet_data_custom = data_pandas[sheet_name], multi_data_custom[sheet_name] for (_, row_pandas), row_custom in zip(sheet_data_pandas.iterrows(), sheet_data_custom): assert isinstance(row_custom, custom_class) assert row_pandas[\"id\"] == row_custom.id assert row_pandas[\"integer\"] == row_custom.integer assert row_pandas[\"text\"] == row_custom.text excel_data_node_as_multi_custom_object_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": custom_class_dict}, ) assert excel_data_node_as_multi_custom_object_no_sheet_name.properties[\"exposed_type\"] == custom_class_dict multi_data_custom_no_sheet_name = excel_data_node_as_multi_custom_object_no_sheet_name.read() assert isinstance(multi_data_custom_no_sheet_name, Dict) assert len(multi_data_custom_no_sheet_name) == 2 assert multi_data_custom.keys() == multi_data_custom_no_sheet_name.keys() for sheet_name, custom_class in custom_class_dict.items(): sheet_data_custom_no_sheet_name, sheet_data_custom = ( multi_data_custom_no_sheet_name[sheet_name], multi_data_custom[sheet_name], ) for row_custom_no_sheet_name, row_custom in zip(sheet_data_custom_no_sheet_name, sheet_data_custom): assert isinstance(row_custom_no_sheet_name, custom_class) assert row_custom_no_sheet_name.id == row_custom.id assert row_custom_no_sheet_name.integer == row_custom.integer assert row_custom_no_sheet_name.text == row_custom.text def test_read_multi_sheet_without_header(self): not_existing_excel = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.xlsx\", \"has_header\": False, \"sheet_name\": [\"sheet_name_1\", \"sheet_name_2\"]}, ) with pytest.raises(NoData): assert not_existing_excel.read() is None not_existing_excel.read_or_raise() path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") sheet_names = [\"Sheet1\", \"Sheet2\"] # Create ExcelDataNode without exposed_type (Default is pandas.DataFrame) excel_data_node_as_pandas = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": sheet_names} ) data_pandas = excel_data_node_as_pandas.read() assert isinstance(data_pandas, Dict) assert len(data_pandas) == 2 assert all(len(data_pandas[sheet_name]) == 6 for 
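A hedged sketch of the per-sheet custom `exposed_type` mapping exercised above: a dict maps each sheet name to its own class, and `read()` returns a dict of object lists. `SheetOneRow`/`SheetTwoRow` are illustrative stand-ins for `MyCustomObject1`/`MyCustomObject2`, and the sample workbook path is assumed to exist as in the tests.

```python
from src.taipy.core.data.excel import ExcelDataNode
from taipy.config.common.scope import Scope

class SheetOneRow:
    def __init__(self, id, integer, text):
        self.id, self.integer, self.text = id, integer, text

class SheetTwoRow(SheetOneRow):
    pass

dn = ExcelDataNode(
    "sketch_xl_custom",
    Scope.SCENARIO,
    properties={
        "path": "data_sample/example.xlsx",                       # sample workbook assumed present
        "sheet_name": ["Sheet1", "Sheet2"],
        "exposed_type": {"Sheet1": SheetOneRow, "Sheet2": SheetTwoRow},
    },
)
data = dn.read()                    # dict: sheet name -> list of per-sheet custom objects
assert all(isinstance(row, SheetOneRow) for row in data["Sheet1"])
assert all(isinstance(row, SheetTwoRow) for row in data["Sheet2"])
```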
sheet_name in sheet_names) assert list(data_pandas.keys()) == sheet_names for sheet_name in sheet_names: assert isinstance(data_pandas[sheet_name], pd.DataFrame) assert data_pandas[sheet_name].equals(pd.read_excel(path, header=None, sheet_name=sheet_name)) excel_data_node_as_pandas_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False} ) data_pandas_no_sheet_name = excel_data_node_as_pandas_no_sheet_name.read() assert isinstance(data_pandas_no_sheet_name, Dict) for key in data_pandas_no_sheet_name.keys(): assert isinstance(data_pandas_no_sheet_name[key], pd.DataFrame) assert data_pandas[key].equals(data_pandas_no_sheet_name[key]) # Create ExcelDataNode with modin exposed_type excel_data_node_as_modin = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": sheet_names, \"exposed_type\": \"modin\"}, ) data_modin = excel_data_node_as_modin.read() assert isinstance(data_modin, Dict) assert len(data_modin) == 2 assert all(len(data_modin[sheet_name]) == 6 for sheet_name in sheet_names) assert list(data_modin.keys()) == sheet_names for sheet_name in sheet_names: assert isinstance(data_modin[sheet_name], modin_pd.DataFrame) assert data_modin[sheet_name].equals(pd.read_excel(path, header=None, sheet_name=sheet_name)) excel_data_node_as_modin_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": \"modin\"} ) data_modin_no_sheet_name = excel_data_node_as_modin_no_sheet_name.read() assert isinstance(data_modin_no_sheet_name, Dict) for key in data_modin_no_sheet_name.keys(): assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame) assert data_modin[key].equals(data_modin_no_sheet_name[key]) # Create ExcelDataNode with numpy exposed_type excel_data_node_as_numpy = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": sheet_names, \"exposed_type\": \"numpy\"}, ) data_numpy = excel_data_node_as_numpy.read() assert isinstance(data_numpy, Dict) assert len(data_numpy) == 2 assert all( len(data_numpy[sheet_name] == 6) and isinstance(data_numpy[sheet_name], np.ndarray) for sheet_name in sheet_names ) assert list(data_numpy.keys()) == sheet_names for sheet_name in sheet_names: assert np.array_equal( data_pandas[sheet_name], pd.read_excel(path, header=None, sheet_name=sheet_name).to_numpy() ) excel_data_node_as_numpy_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": \"numpy\"}, ) data_numpy_no_sheet_name = excel_data_node_as_numpy_no_sheet_name.read() assert isinstance(data_numpy_no_sheet_name, Dict) for key in data_numpy_no_sheet_name.keys(): assert isinstance(data_numpy_no_sheet_name[key], np.ndarray) assert np.array_equal(data_numpy[key], data_numpy_no_sheet_name[key]) # Create the same ExcelDataNode but with custom exposed_type non_existing_sheet_name_custom = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"has_header\": False, \"sheet_name\": [\"Sheet1\", \"xyz\"], \"exposed_type\": MyCustomObject1, }, ) with pytest.raises(NonExistingExcelSheet): non_existing_sheet_name_custom.read() excel_data_node_as_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"has_header\": False, \"sheet_name\": sheet_names, \"exposed_type\": MyCustomObject1, }, ) data_custom = excel_data_node_as_custom_object.read() assert 
excel_data_node_as_custom_object.exposed_type == MyCustomObject1 assert isinstance(data_custom, Dict) assert len(data_custom) == 2 assert all(len(data_custom[sheet_name]) == 6 for sheet_name in sheet_names) assert list(data_custom.keys()) == sheet_names for sheet_name in sheet_names: sheet_data_pandas, sheet_data_custom = data_pandas[sheet_name], data_custom[sheet_name] for (_, row_pandas), row_custom in zip(sheet_data_pandas.iterrows(), sheet_data_custom): assert isinstance(row_custom, MyCustomObject1) assert row_pandas[0] == row_custom.id assert row_pandas[1] == row_custom.integer assert row_pandas[2] == row_custom.text excel_data_node_as_custom_object_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": MyCustomObject1}, ) data_custom_no_sheet_name = excel_data_node_as_custom_object_no_sheet_name.read() assert isinstance(data_custom_no_sheet_name, Dict) assert len(data_custom_no_sheet_name) == 2 assert data_custom.keys() == data_custom_no_sheet_name.keys() for sheet_name in sheet_names: sheet_data_custom_no_sheet_name, sheet_data_custom = ( data_custom_no_sheet_name[sheet_name], data_custom[sheet_name], ) for row_custom_no_sheet_name, row_custom in zip(sheet_data_custom_no_sheet_name, sheet_data_custom): assert isinstance(row_custom_no_sheet_name, MyCustomObject1) assert row_custom_no_sheet_name.id == row_custom.id assert row_custom_no_sheet_name.integer == row_custom.integer assert row_custom_no_sheet_name.text == row_custom.text with pytest.raises(ExposedTypeLengthMismatch): dn = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"sheet_name\": [\"Sheet1\"], \"exposed_type\": [MyCustomObject1, MyCustomObject2], \"has_header\": False, }, ) dn.read() custom_class_dict = {\"Sheet1\": MyCustomObject1, \"Sheet2\": MyCustomObject2} excel_data_node_as_multi_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": custom_class_dict, \"has_header\": False, }, ) assert excel_data_node_as_multi_custom_object.properties[\"exposed_type\"] == custom_class_dict excel_data_node_as_multi_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": [MyCustomObject1, MyCustomObject2], \"has_header\": False, }, ) assert excel_data_node_as_multi_custom_object.properties[\"exposed_type\"] == [MyCustomObject1, MyCustomObject2] multi_data_custom = excel_data_node_as_multi_custom_object.read() assert isinstance(multi_data_custom, Dict) assert len(multi_data_custom) == 2 assert all(len(multi_data_custom[sheet_name]) == 6 for sheet_name in sheet_names) assert list(multi_data_custom.keys()) == sheet_names for sheet_name, custom_class in custom_class_dict.items(): sheet_data_pandas, sheet_data_custom = data_pandas[sheet_name], multi_data_custom[sheet_name] for (_, row_pandas), row_custom in zip(sheet_data_pandas.iterrows(), sheet_data_custom): assert isinstance(row_custom, custom_class) assert row_pandas[0] == row_custom.id assert row_pandas[1] == row_custom.integer assert row_pandas[2] == row_custom.text excel_data_node_as_multi_custom_object_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": custom_class_dict}, ) multi_data_custom_no_sheet_name = excel_data_node_as_multi_custom_object_no_sheet_name.read() assert isinstance(multi_data_custom_no_sheet_name, Dict) assert len(multi_data_custom_no_sheet_name) 
== 2 assert multi_data_custom.keys() == multi_data_custom_no_sheet_name.keys() for sheet_name, custom_class in custom_class_dict.items(): sheet_data_custom_no_sheet_name, sheet_data_custom = ( multi_data_custom_no_sheet_name[sheet_name], multi_data_custom[sheet_name], ) for row_custom_no_sheet_name, row_custom in zip(sheet_data_custom_no_sheet_name, sheet_data_custom): assert isinstance(row_custom_no_sheet_name, custom_class) assert row_custom_no_sheet_name.id == row_custom.id assert row_custom_no_sheet_name.integer == row_custom.integer assert row_custom_no_sheet_name.text == row_custom.text @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"]), ], ) def test_write_multi_sheet(self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content, columns): sheet_names = [\"Sheet1\", \"Sheet2\"] excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_multi_sheet, \"sheet_name\": sheet_names}, ) for sheet_name in sheet_names: assert np.array_equal(excel_dn.read()[sheet_name].values, default_multi_sheet_data_frame[sheet_name].values) multi_sheet_content = {sheet_name: pd.DataFrame(content) for sheet_name in sheet_names} excel_dn.write(multi_sheet_content) for sheet_name in sheet_names: assert np.array_equal(excel_dn.read()[sheet_name].values, multi_sheet_content[sheet_name].values) def test_write_multi_sheet_numpy(self, excel_file_with_multi_sheet): sheet_names = [\"Sheet1\", \"Sheet2\"] excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_multi_sheet, \"sheet_name\": sheet_names, \"exposed_type\": \"numpy\"}, ) sheets_data = [[11, 22, 33], [44, 55, 66]] data = { sheet_name: pd.DataFrame(sheet_data).to_numpy() for sheet_name, sheet_data in zip(sheet_names, sheets_data) } excel_dn.write(data) read_data = excel_dn.read() assert all(np.array_equal(data[sheet_name], read_data[sheet_name]) for sheet_name in sheet_names) @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"]), ], ) def test_write_multi_sheet_with_modin( self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content, columns ): sheet_names = [\"Sheet1\", \"Sheet2\"] excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_multi_sheet, \"sheet_name\": sheet_names, \"exposed_type\": \"modin\"}, ) for sheet_name in sheet_names: assert np.array_equal(excel_dn.read()[sheet_name].values, default_multi_sheet_data_frame[sheet_name].values) multi_sheet_content = {sheet_name: modin_pd.DataFrame(content) for sheet_name in sheet_names} excel_dn.write(multi_sheet_content) for sheet_name in sheet_names: assert np.array_equal(excel_dn.read()[sheet_name].values, multi_sheet_content[sheet_name].values) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_pandas_with_sheetname(self, excel_file, default_data_frame, content): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\"}) assert_frame_equal(dn.read(), default_data_frame) dn.append(content) 
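# After dn.append(content), the sheet should contain the original default_data_frame
# rows followed by the appended rows; the assertion below models this with
# pd.concat(...).reset_index(drop=True), coercing the appended content to columns
# ["a", "b", "c"] whether it arrived as dicts, a DataFrame, or plain lists.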
assert_frame_equal( dn.read(), pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index(drop=True), ) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_pandas_without_sheetname(self, excel_file, default_data_frame, content): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file}) assert_frame_equal(dn.read()[\"Sheet1\"], default_data_frame) dn.append(content) assert_frame_equal( dn.read()[\"Sheet1\"], pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index(drop=True), ) @pytest.mark.parametrize( \"content\", [ ( { \"Sheet1\": pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}]), \"Sheet2\": pd.DataFrame([{\"a\": 44, \"b\": 55, \"c\": 66}]), } ), ( { \"Sheet1\": pd.DataFrame({\"a\": [11, 44], \"b\": [22, 55], \"c\": [33, 66]}), \"Sheet2\": pd.DataFrame([{\"a\": 77, \"b\": 88, \"c\": 99}]), } ), ({\"Sheet1\": np.array([[11, 22, 33], [44, 55, 66]]), \"Sheet2\": np.array([[77, 88, 99]])}), ], ) def test_append_pandas_multisheet(self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_multi_sheet, \"sheet_name\": [\"Sheet1\", \"Sheet2\"]} ) assert_frame_equal(dn.read()[\"Sheet1\"], default_multi_sheet_data_frame[\"Sheet1\"]) assert_frame_equal(dn.read()[\"Sheet2\"], default_multi_sheet_data_frame[\"Sheet2\"]) dn.append(content) assert_frame_equal( dn.read()[\"Sheet1\"], pd.concat( [default_multi_sheet_data_frame[\"Sheet1\"], pd.DataFrame(content[\"Sheet1\"], columns=[\"a\", \"b\", \"c\"])] ).reset_index(drop=True), ) assert_frame_equal( dn.read()[\"Sheet2\"], pd.concat( [default_multi_sheet_data_frame[\"Sheet2\"], pd.DataFrame(content[\"Sheet2\"], columns=[\"a\", \"b\", \"c\"])] ).reset_index(drop=True), ) @pytest.mark.parametrize( \"content\", [ ({\"Sheet1\": pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}])}), (pd.DataFrame({\"a\": [11, 44], \"b\": [22, 55], \"c\": [33, 66]})), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_only_first_sheet_of_a_multisheet_file( self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content ): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_multi_sheet, \"sheet_name\": [\"Sheet1\", \"Sheet2\"]} ) assert_frame_equal(dn.read()[\"Sheet1\"], default_multi_sheet_data_frame[\"Sheet1\"]) assert_frame_equal(dn.read()[\"Sheet2\"], default_multi_sheet_data_frame[\"Sheet2\"]) dn.append(content) appended_content = content[\"Sheet1\"] if isinstance(content, dict) else content assert_frame_equal( dn.read()[\"Sheet1\"], pd.concat( [default_multi_sheet_data_frame[\"Sheet1\"], pd.DataFrame(appended_content, columns=[\"a\", \"b\", \"c\"])] ).reset_index(drop=True), ) assert_frame_equal(dn.read()[\"Sheet2\"], default_multi_sheet_data_frame[\"Sheet2\"]) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (modin_pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_modin_with_sheetname(self, excel_file, default_data_frame, content): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"modin\"} ) 
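# With exposed_type="modin", dn.read() returns a modin DataFrame, so the comparisons
# below go through the df_equals helper used elsewhere in this module rather than
# pandas' assert_frame_equal.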
df_equals(dn.read(), modin_pd.DataFrame(default_data_frame)) dn.append(content) df_equals( dn.read(), modin_pd.concat( [modin_pd.DataFrame(default_data_frame), modin_pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])] ).reset_index(drop=True), ) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (modin_pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_modin_without_sheetname(self, excel_file, default_data_frame, content): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"exposed_type\": \"modin\"}) df_equals(dn.read()[\"Sheet1\"], default_data_frame) dn.append(content) df_equals( dn.read()[\"Sheet1\"], modin_pd.concat([default_data_frame, modin_pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index( drop=True ), ) @pytest.mark.parametrize( \"content\", [ ( { \"Sheet1\": modin_pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}]), \"Sheet2\": modin_pd.DataFrame([{\"a\": 44, \"b\": 55, \"c\": 66}]), } ), ( { \"Sheet1\": modin_pd.DataFrame({\"a\": [11, 44], \"b\": [22, 55], \"c\": [33, 66]}), \"Sheet2\": modin_pd.DataFrame([{\"a\": 77, \"b\": 88, \"c\": 99}]), } ), ({\"Sheet1\": np.array([[11, 22, 33], [44, 55, 66]]), \"Sheet2\": np.array([[77, 88, 99]])}), ], ) def test_append_modin_multisheet(self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"path\": excel_file_with_multi_sheet, \"sheet_name\": [\"Sheet1\", \"Sheet2\"], \"exposed_type\": \"modin\", }, ) df_equals(dn.read()[\"Sheet1\"], default_multi_sheet_data_frame[\"Sheet1\"]) df_equals(dn.read()[\"Sheet2\"], default_multi_sheet_data_frame[\"Sheet2\"]) dn.append(content) df_equals( dn.read()[\"Sheet1\"], modin_pd.concat( [ default_multi_sheet_data_frame[\"Sheet1\"], modin_pd.DataFrame(content[\"Sheet1\"], columns=[\"a\", \"b\", \"c\"]), ] ).reset_index(drop=True), ) df_equals( dn.read()[\"Sheet2\"], modin_pd.concat( [ default_multi_sheet_data_frame[\"Sheet2\"], modin_pd.DataFrame(content[\"Sheet2\"], columns=[\"a\", \"b\", \"c\"]), ] ).reset_index(drop=True), ) def test_filter_pandas_exposed_type_with_sheetname(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"pandas\"} ) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), 
expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_pandas_exposed_type_without_sheetname(self, excel_file): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"exposed_type\": \"pandas\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"Sheet1\"]) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"Sheet1\"]) == 4 assert dn[\"Sheet1\"][\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"Sheet1\"][\"bar\"].equals(pd.Series([1, 2, None, 2, 2])) assert dn[\"Sheet1\"][:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) def test_filter_pandas_exposed_type_multisheet(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": [\"sheet_1\", \"sheet_2\"], \"exposed_type\": \"pandas\"}, ) dn.write( { \"sheet_1\": pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ), \"sheet_2\": pd.DataFrame( [ {\"foo\": 1, \"bar\": 3}, {\"foo\": 1, \"bar\": 4}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 4}, {\"bar\": 4}, ] ), } ) assert len(dn.filter((\"foo\", 1, Operator.EQUAL))) == 2 assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"sheet_2\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))) == 2 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"sheet_1\"]) == 2 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"sheet_2\"]) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"sheet_2\"]) == 0 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)) == 2 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_1\"]) == 4 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_2\"]) == 0 assert dn[\"sheet_1\"][\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"sheet_2\"][\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"sheet_1\"][\"bar\"].equals(pd.Series([1, 2, None, 2, 2])) assert dn[\"sheet_2\"][\"bar\"].equals(pd.Series([3, 4, None, 4, 4])) assert 
dn[\"sheet_1\"][:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) assert dn[\"sheet_2\"][:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 3.0}, {\"foo\": 1.0, \"bar\": 4.0}])) def test_filter_modin_exposed_type_with_sheetname(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"modin\"} ) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(modin_pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = modin_pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = modin_pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_modin_exposed_type_without_sheetname(self, excel_file): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"exposed_type\": \"modin\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"Sheet1\"]) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"Sheet1\"]) == 4 assert dn[\"Sheet1\"][\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"Sheet1\"][\"bar\"].equals(modin_pd.Series([1, 2, None, 2, 2])) assert dn[\"Sheet1\"][:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) def test_filter_modin_exposed_type_multisheet(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": [\"sheet_1\", \"sheet_2\"], \"exposed_type\": \"modin\"}, ) dn.write( { \"sheet_1\": pd.DataFrame( [ {\"foo\": 
1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ), \"sheet_2\": pd.DataFrame( [ {\"foo\": 1, \"bar\": 3}, {\"foo\": 1, \"bar\": 4}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 4}, {\"bar\": 4}, ] ), } ) assert len(dn.filter((\"foo\", 1, Operator.EQUAL))) == 2 assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"sheet_2\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))) == 2 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"sheet_1\"]) == 2 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"sheet_2\"]) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"sheet_2\"]) == 0 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)) == 2 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_1\"]) == 4 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_2\"]) == 0 assert dn[\"sheet_1\"][\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"sheet_2\"][\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"sheet_1\"][\"bar\"].equals(modin_pd.Series([1, 2, None, 2, 2])) assert dn[\"sheet_2\"][\"bar\"].equals(modin_pd.Series([3, 4, None, 4, 4])) assert dn[\"sheet_1\"][:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) assert dn[\"sheet_2\"][:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 3.0}, {\"foo\": 1.0, \"bar\": 4.0}])) def test_filter_numpy_exposed_type_with_sheetname(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"numpy\"} ) dn.write( [ [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], ] ) # Test datanode indexing and slicing assert np.array_equal(dn[0], np.array([1, 1])) assert np.array_equal(dn[1], np.array([1, 2])) assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]])) # Test filter data assert np.array_equal(dn.filter((0, 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn.filter((0, 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn.filter((1, 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]])) assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]])) assert np.array_equal( dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR), np.array([[1, 1], [1, 2], [2, 1], [2, 2]]), ) assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]])) def test_filter_numpy_exposed_type_without_sheetname(self, excel_file): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"exposed_type\": \"numpy\"}) dn.write( [ [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], ] ) assert len(dn.filter((0, 1, Operator.EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter((0, 1, Operator.NOT_EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter((1, 2, Operator.EQUAL))[\"Sheet1\"]) == 2 assert len(dn.filter([(0, 1, 
Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR)[\"Sheet1\"]) == 4 assert np.array_equal(dn[\"Sheet1\"][0], np.array([1, 1])) assert np.array_equal(dn[\"Sheet1\"][1], np.array([1, 2])) assert np.array_equal(dn[\"Sheet1\"][:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[\"Sheet1\"][:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[\"Sheet1\"][1:4, :1], np.array([[1], [1], [2]])) def test_filter_numpy_exposed_type_multisheet(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": [\"sheet_1\", \"sheet_2\"], \"exposed_type\": \"numpy\"}, ) dn.write( { \"sheet_1\": pd.DataFrame( [ [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], ] ), \"sheet_2\": pd.DataFrame( [ [1, 4], [1, 5], [1, 6], [2, 4], [2, 5], [2, 6], ] ), } ) assert len(dn.filter((0, 1, Operator.EQUAL))) == 2 assert len(dn.filter((0, 1, Operator.EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((0, 1, Operator.EQUAL))[\"sheet_2\"]) == 3 assert len(dn.filter((0, 1, Operator.NOT_EQUAL))) == 2 assert len(dn.filter((0, 1, Operator.NOT_EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((0, 1, Operator.NOT_EQUAL))[\"sheet_2\"]) == 3 assert len(dn.filter((1, 2, Operator.EQUAL))) == 2 assert len(dn.filter((1, 2, Operator.EQUAL))[\"sheet_1\"]) == 2 assert len(dn.filter((1, 2, Operator.EQUAL))[\"sheet_2\"]) == 0 assert len(dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR)) == 2 assert len(dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_1\"]) == 4 assert len(dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_2\"]) == 0 assert np.array_equal(dn[\"sheet_1\"][0], np.array([1, 1])) assert np.array_equal(dn[\"sheet_2\"][0], np.array([1, 4])) assert np.array_equal(dn[\"sheet_1\"][1], np.array([1, 2])) assert np.array_equal(dn[\"sheet_2\"][1], np.array([1, 5])) assert np.array_equal(dn[\"sheet_1\"][:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[\"sheet_2\"][:3], np.array([[1, 4], [1, 5], [1, 6]])) assert np.array_equal(dn[\"sheet_1\"][:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[\"sheet_2\"][:, 1], np.array([4, 5, 6, 4, 5, 6])) assert np.array_equal(dn[\"sheet_1\"][1:4, :1], np.array([[1], [1], [2]])) assert np.array_equal(dn[\"sheet_2\"][1:4, 1:2], np.array([[5], [6], [4]])) def test_set_path(self): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": \"foo.xlsx\"}) assert dn.path == \"foo.xlsx\" dn.path = \"bar.xlsx\" assert dn.path == \"bar.xlsx\" @pytest.mark.parametrize( [\"properties\", \"exists\"], [ ({}, False), ({\"default_data\": {\"a\": [\"foo\", \"bar\"]}}, True), ], ) def test_create_with_default_data(self, properties, exists): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) assert os.path.exists(dn.path) is exists def test_read_write_after_modify_path(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.xlsx\") dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path}) read_data = dn.read() assert read_data is not None dn.path = new_path with pytest.raises(FileNotFoundError): dn.read() dn.write(read_data) for sheet, df in dn.read().items(): assert np.array_equal(df.values, read_data[sheet].values) def test_exposed_type_custom_class_after_modify_path(self): path = 
os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") # [\"Sheet1\", \"Sheet2\"] new_path = os.path.join( pathlib.Path(__file__).parent.resolve(), \"data_sample/example_2.xlsx\" ) # [\"Sheet1\", \"Sheet2\", \"Sheet3\"] dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path, \"exposed_type\": MyCustomObject1}) assert dn.exposed_type == MyCustomObject1 dn.read() dn.path = new_path dn.read() dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": path, \"exposed_type\": MyCustomObject1, \"sheet_name\": [\"Sheet4\"]}, ) assert dn.exposed_type == MyCustomObject1 with pytest.raises(NonExistingExcelSheet): dn.read() def test_exposed_type_dict(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") # [\"Sheet1\", \"Sheet2\"] dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": path, \"exposed_type\": { \"Sheet1\": MyCustomObject1, \"Sheet2\": MyCustomObject2, \"Sheet3\": MyCustomObject1, }, }, ) data = dn.read() assert isinstance(data, Dict) assert isinstance(data[\"Sheet1\"][0], MyCustomObject1) assert isinstance(data[\"Sheet2\"][0], MyCustomObject2) def test_exposed_type_list(self): path_1 = os.path.join( pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\" ) # [\"Sheet1\", \"Sheet2\"] path_2 = os.path.join( pathlib.Path(__file__).parent.resolve(), \"data_sample/example_2.xlsx\" ) # [\"Sheet1\", \"Sheet2\", \"Sheet3\"] dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": path_1, \"exposed_type\": [MyCustomObject1, MyCustomObject2]}, ) data = dn.read() assert isinstance(data, Dict) assert isinstance(data[\"Sheet1\"][0], MyCustomObject1) assert isinstance(data[\"Sheet2\"][0], MyCustomObject2) dn.path = path_2 with pytest.raises(ExposedTypeLengthMismatch): dn.read() def test_not_trying_to_read_sheet_names_when_exposed_type_is_set(self): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": \"notexistyet.xlsx\", \"exposed_type\": MyCustomObject1} ) assert dn.path == \"notexistyet.xlsx\" assert dn.exposed_type == MyCustomObject1 dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": \"notexistyet.xlsx\", \"exposed_type\": [MyCustomObject1, MyCustomObject2]}, ) assert dn.path == \"notexistyet.xlsx\" assert dn.exposed_type == [MyCustomObject1, MyCustomObject2] dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": \"notexistyet.xlsx\", \"exposed_type\": {\"Sheet1\": MyCustomObject1, \"Sheet2\": MyCustomObject2}, }, ) assert dn.path == \"notexistyet.xlsx\" assert dn.exposed_type == {\"Sheet1\": MyCustomObject1, \"Sheet2\": MyCustomObject2} def test_exposed_type_default(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path, \"sheet_name\": \"Sheet1\"}) assert dn.exposed_type == \"pandas\" data = dn.read() assert isinstance(data, pd.DataFrame) def test_pandas_exposed_type(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": path, \"exposed_type\": \"pandas\", \"sheet_name\": \"Sheet1\"} ) assert dn.exposed_type == \"pandas\" data = dn.read() assert isinstance(data, pd.DataFrame) def test_complex_exposed_type_dict(self): # [\"Sheet1\", \"Sheet2\", \"Sheet3\", \"Sheet4\", \"Sheet5\"] path = 
os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example_4.xlsx\") dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": path, \"exposed_type\": { \"Sheet1\": MyCustomObject1, \"Sheet2\": \"numpy\", \"Sheet3\": \"pandas\", }, \"sheet_name\": [\"Sheet1\", \"Sheet2\", \"Sheet3\", \"Sheet4\"], }, ) data = dn.read() assert isinstance(data, dict) assert isinstance(data[\"Sheet1\"], list) assert isinstance(data[\"Sheet1\"][0], MyCustomObject1) assert isinstance(data[\"Sheet2\"], np.ndarray) assert isinstance(data[\"Sheet3\"], pd.DataFrame) assert isinstance(data[\"Sheet4\"], pd.DataFrame) assert data.get(\"Sheet5\") is None def test_complex_exposed_type_list(self): # [\"Sheet1\", \"Sheet2\", \"Sheet3\", \"Sheet4\",\"Sheet5\"] path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example_4.xlsx\") dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": path, \"exposed_type\": [MyCustomObject1, \"numpy\", \"pandas\"], \"sheet_name\": [\"Sheet1\", \"Sheet2\", \"Sheet3\"], }, ) data = dn.read() assert isinstance(data, dict) assert isinstance(data[\"Sheet1\"], list) assert isinstance(data[\"Sheet1\"][0], MyCustomObject1) assert isinstance(data[\"Sheet2\"], np.ndarray) assert isinstance(data[\"Sheet3\"], pd.DataFrame) def test_invalid_exposed_type(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") with pytest.raises(InvalidExposedType): ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": path, \"exposed_type\": \"invalid\", \"sheet_name\": \"Sheet1\"}, ) with pytest.raises(InvalidExposedType): ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": path, \"exposed_type\": [\"numpy\", \"invalid\", \"pandas\"], \"sheet_name\": \"Sheet1\", }, ) with pytest.raises(InvalidExposedType): ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": path, \"exposed_type\": {\"Sheet1\": \"pandas\", \"Sheet2\": \"invalid\"}, \"sheet_name\": \"Sheet1\", }, ) def test_get_system_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.xlsx\")) pd.DataFrame([]).to_excel(temp_file_path) dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"pandas\"}) dn.write(pd.DataFrame([1, 2, 3])) previous_edit_date = dn.last_edit_date sleep(0.1) pd.DataFrame([4, 5, 6]).to_excel(temp_file_path) new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert previous_edit_date < dn.last_edit_date assert new_edit_date == dn.last_edit_date sleep(0.1) dn.write(pd.DataFrame([7, 8, 9])) assert new_edit_date < dn.last_edit_date os.unlink(temp_file_path) "} {"text": "import os import pathlib import pytest from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.csv import CSVDataNode from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import InvalidDataNodeType, ModelNotFound from taipy.config.common.scope import Scope from taipy.config.config import Config def file_exists(file_path: str) -> bool: return os.path.exists(file_path) def init_managers(): _DataManagerFactory._build_manager()._delete_all() 
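# init_managers() wipes the SQL-backed data node repository so that every test in the
# class below starts from an empty state. A minimal sketch of the same cleanup wrapped
# in a pytest fixture (hypothetical name and wiring, not part of this suite):
@pytest.fixture
def clean_data_repository():
    # Assumes the _DataManagerFactory imported above; clears all stored data nodes,
    # then yields control to the test.
    _DataManagerFactory._build_manager()._delete_all()
    yield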
class TestDataManager: def test_create_data_node_and_modify_properties_does_not_modify_config(self, init_sql_repo): init_managers() dn_config = Config.configure_data_node(id=\"name\", foo=\"bar\") dn = _DataManager._create_and_set(dn_config, None, None) assert dn_config.properties.get(\"foo\") == \"bar\" assert dn_config.properties.get(\"baz\") is None dn.properties[\"baz\"] = \"qux\" _DataManager._set(dn) assert dn_config.properties.get(\"foo\") == \"bar\" assert dn_config.properties.get(\"baz\") is None assert dn.properties.get(\"foo\") == \"bar\" assert dn.properties.get(\"baz\") == \"qux\" def test_create_raises_exception_with_wrong_type(self, init_sql_repo): init_managers() wrong_type_dn_config = DataNodeConfig(id=\"foo\", storage_type=\"bar\", scope=DataNodeConfig._DEFAULT_SCOPE) with pytest.raises(InvalidDataNodeType): _DataManager._create_and_set(wrong_type_dn_config, None, None) def test_create_from_same_config_generates_new_data_node_and_new_id(self, init_sql_repo): init_managers() dn_config = Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") dn = _DataManager._create_and_set(dn_config, None, None) dn_2 = _DataManager._create_and_set(dn_config, None, None) assert dn_2.id != dn.id def test_create_uses_overridden_attributes_in_config_file(self, init_sql_repo): init_managers() Config.override(os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/config.toml\")) csv_dn_cfg = Config.configure_data_node(id=\"foo\", storage_type=\"csv\", path=\"bar\", has_header=True) csv_dn = _DataManager._create_and_set(csv_dn_cfg, None, None) assert csv_dn.config_id == \"foo\" assert isinstance(csv_dn, CSVDataNode) assert csv_dn._path == \"path_from_config_file\" assert csv_dn.has_header csv_dn_cfg = Config.configure_data_node(id=\"baz\", storage_type=\"csv\", path=\"bar\", has_header=True) csv_dn = _DataManager._create_and_set(csv_dn_cfg, None, None) assert csv_dn.config_id == \"baz\" assert isinstance(csv_dn, CSVDataNode) assert csv_dn._path == \"bar\" assert csv_dn.has_header def test_get_if_not_exists(self, init_sql_repo): init_managers() with pytest.raises(ModelNotFound): _DataManager._repository._load(\"test_data_node_2\") def test_get_all(self, init_sql_repo): init_managers() _DataManager._delete_all() assert len(_DataManager._get_all()) == 0 dn_config_1 = Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") _DataManager._create_and_set(dn_config_1, None, None) assert len(_DataManager._get_all()) == 1 dn_config_2 = Config.configure_data_node(id=\"baz\", storage_type=\"in_memory\") _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_all()) == 3 assert len([dn for dn in _DataManager._get_all() if dn.config_id == \"foo\"]) == 1 assert len([dn for dn in _DataManager._get_all() if dn.config_id == \"baz\"]) == 2 def test_get_all_on_multiple_versions_environment(self, init_sql_repo): init_managers() # Create 5 data nodes with 2 versions each # Only version 1.0 has the data node with config_id = \"config_id_1\" # Only version 2.0 has the data node with config_id = \"config_id_6\" for version in range(1, 3): for i in range(5): _DataManager._set( InMemoryDataNode( f\"config_id_{i+version}\", Scope.SCENARIO, id=DataNodeId(f\"id{i}_v{version}\"), version=f\"{version}.0\", ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert 
len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"1.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"2.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_set(self, init_sql_repo): init_managers() dn = InMemoryDataNode( \"config_id\", Scope.SCENARIO, id=DataNodeId(\"id\"), owner_id=None, parent_ids={\"task_id_1\"}, last_edit_date=None, edits=[], edit_in_progress=False, properties={\"foo\": \"bar\"}, ) assert len(_DataManager._get_all()) == 0 assert not _DataManager._exists(dn.id) _DataManager._set(dn) assert len(_DataManager._get_all()) == 1 assert _DataManager._exists(dn.id) # changing data node attribute dn.config_id = \"foo\" assert dn.config_id == \"foo\" _DataManager._set(dn) assert len(_DataManager._get_all()) == 1 assert dn.config_id == \"foo\" assert _DataManager._get(dn.id).config_id == \"foo\" def test_delete(self, init_sql_repo): init_managers() _DataManager._delete_all() dn_1 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_1\") dn_2 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_2\") dn_3 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_3\") assert len(_DataManager._get_all()) == 0 _DataManager._set(dn_1) _DataManager._set(dn_2) _DataManager._set(dn_3) assert len(_DataManager._get_all()) == 3 assert all(_DataManager._exists(dn.id) for dn in [dn_1, dn_2, dn_3]) _DataManager._delete(dn_1.id) assert len(_DataManager._get_all()) == 2 assert _DataManager._get(dn_2.id).id == dn_2.id assert _DataManager._get(dn_3.id).id == dn_3.id assert _DataManager._get(dn_1.id) is None assert all(_DataManager._exists(dn.id) for dn in [dn_2, dn_3]) assert not _DataManager._exists(dn_1.id) _DataManager._delete_all() assert len(_DataManager._get_all()) == 0 assert not any(_DataManager._exists(dn.id) for dn in [dn_2, dn_3]) def test_get_or_create(self, init_sql_repo): def _get_or_create_dn(config, *args): return _DataManager._bulk_get_or_create([config], *args)[config] init_managers() global_dn_config = Config.configure_data_node( id=\"test_data_node\", storage_type=\"in_memory\", scope=Scope.GLOBAL, data=\"In memory Data Node\" ) cycle_dn_config = Config.configure_data_node( id=\"test_data_node1\", storage_type=\"in_memory\", scope=Scope.CYCLE, data=\"In memory scenario\" ) scenario_dn_config = Config.configure_data_node( id=\"test_data_node2\", storage_type=\"in_memory\", scope=Scope.SCENARIO, data=\"In memory scenario\" ) _DataManager._delete_all() assert len(_DataManager._get_all()) == 0 global_dn = _get_or_create_dn(global_dn_config, None, None) assert len(_DataManager._get_all()) == 1 global_dn_bis = _get_or_create_dn(global_dn_config, None) assert len(_DataManager._get_all()) == 1 assert global_dn.id == 
global_dn_bis.id scenario_dn = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 scenario_dn_bis = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 assert scenario_dn.id == scenario_dn_bis.id scenario_dn_ter = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 assert scenario_dn.id == scenario_dn_bis.id assert scenario_dn_bis.id == scenario_dn_ter.id scenario_dn_quater = _get_or_create_dn(scenario_dn_config, None, \"scenario_id_2\") assert len(_DataManager._get_all()) == 3 assert scenario_dn.id == scenario_dn_bis.id assert scenario_dn_bis.id == scenario_dn_ter.id assert scenario_dn_ter.id != scenario_dn_quater.id assert len(_DataManager._get_all()) == 3 cycle_dn = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 cycle_dn_1 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_1.id cycle_dn_2 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id\") assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_2.id cycle_dn_3 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_3.id cycle_dn_4 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id\") assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_4.id cycle_dn_5 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id_2\") assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_5.id assert cycle_dn_1.id == cycle_dn_2.id assert cycle_dn_2.id == cycle_dn_3.id assert cycle_dn_3.id == cycle_dn_4.id assert cycle_dn_4.id == cycle_dn_5.id def test_get_data_nodes_by_config_id(self, init_sql_repo): init_managers() dn_config_1 = Config.configure_data_node(\"dn_1\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", scope=Scope.SCENARIO) dn_config_3 = Config.configure_data_node(\"dn_3\", scope=Scope.SCENARIO) dn_1_1 = _DataManager._create_and_set(dn_config_1, None, None) dn_1_2 = _DataManager._create_and_set(dn_config_1, None, None) dn_1_3 = _DataManager._create_and_set(dn_config_1, None, None) assert len(_DataManager._get_all()) == 3 dn_2_1 = _DataManager._create_and_set(dn_config_2, None, None) dn_2_2 = _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_all()) == 5 dn_3_1 = _DataManager._create_and_set(dn_config_3, None, None) assert len(_DataManager._get_all()) == 6 dn_1_datanodes = _DataManager._get_by_config_id(dn_config_1.id) assert len(dn_1_datanodes) == 3 assert sorted([dn_1_1.id, dn_1_2.id, dn_1_3.id]) == sorted([sequence.id for sequence in dn_1_datanodes]) dn_2_datanodes = _DataManager._get_by_config_id(dn_config_2.id) assert len(dn_2_datanodes) == 2 assert sorted([dn_2_1.id, dn_2_2.id]) == sorted([sequence.id for sequence in dn_2_datanodes]) dn_3_datanodes = _DataManager._get_by_config_id(dn_config_3.id) assert len(dn_3_datanodes) == 1 assert sorted([dn_3_1.id]) == sorted([sequence.id for sequence in dn_3_datanodes]) def test_get_data_nodes_by_config_id_in_multiple_versions_environment(self, init_sql_repo): init_managers() dn_config_1 = Config.configure_data_node(\"dn_1\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", scope=Scope.SCENARIO) _VersionManager._set_experiment_version(\"1.0\") _DataManager._create_and_set(dn_config_1, None, 
None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_by_config_id(dn_config_1.id)) == 3 assert len(_DataManager._get_by_config_id(dn_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_by_config_id(dn_config_1.id)) == 3 assert len(_DataManager._get_by_config_id(dn_config_2.id)) == 2 "} {"text": "import os import pytest from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.exceptions import ModelNotFound from src.taipy.core.job._job_manager_factory import _JobManagerFactory from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task from taipy.config.config import Config from tests.conftest import init_sql_repo def configure_fs_repo(): Config.configure_core(repository_type=\"default\") def configure_sql_repo(): init_sql_repo class TestSubmissionRepository: @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_save_and_load(self, data_node, job, configure_repo): configure_repo() _DataManagerFactory._build_manager()._repository._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskManagerFactory._build_manager()._repository._save(task) job._task = task _JobManagerFactory._build_manager()._repository._save(job) submission = Submission(task.id) submission_repository = _SubmissionManagerFactory._build_manager()._repository submission_repository._save(submission) submission.jobs = [job] obj = submission_repository._load(submission.id) assert isinstance(obj, Submission) @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_exists(self, configure_repo): configure_repo() submission = Submission(\"entity_id\") submission_repository = _SubmissionManagerFactory._build_manager()._repository submission_repository._save(submission) assert submission_repository._exists(submission.id) assert not submission_repository._exists(\"not-existed-submission\") @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_load_all(self, configure_repo): configure_repo() repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\") for i in range(10): submission.id = f\"submission-{i}\" repository._save(submission) submissions = repository._load_all() assert len(submissions) == 10 @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_delete(self, configure_repo): configure_repo() repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\") repository._save(submission) repository._delete(submission.id) with pytest.raises(ModelNotFound): repository._load(submission.id) @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_delete_all(self, 
configure_repo): configure_repo() submission_repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\") for i in range(10): submission.id = f\"submission-{i}\" submission_repository._save(submission) assert len(submission_repository._load_all()) == 10 submission_repository._delete_all() assert len(submission_repository._load_all()) == 0 @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_delete_many(self, configure_repo): configure_repo() submission = Submission(\"entity_id\") submission_repository = _SubmissionManagerFactory._build_manager()._repository for i in range(10): submission.id = f\"submission-{i}\" submission_repository._save(submission) objs = submission_repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] submission_repository._delete_many(ids) assert len(submission_repository._load_all()) == 7 @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_delete_by(self, configure_repo): configure_repo() # Create 5 entities with version 1.0 and 5 entities with version 2.0 submission_repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\") for i in range(10): submission.id = f\"submission-{i}\" submission._version = f\"{(i+1) // 5}.0\" submission_repository._save(submission) objs = submission_repository._load_all() assert len(objs) == 10 submission_repository._delete_by(\"version\", \"1.0\") assert len(submission_repository._load_all()) == 5 @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_search(self, configure_repo): configure_repo() submission_repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\", version=\"random_version_number\") for i in range(10): submission.id = f\"submission-{i}\" submission_repository._save(submission) assert len(submission_repository._load_all()) == 10 objs = submission_repository._search(\"id\", \"submission-2\") assert len(objs) == 1 assert isinstance(objs[0], Submission) objs = submission_repository._search(\"id\", \"submission-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], Submission) assert submission_repository._search(\"id\", \"submission-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_export(self, tmpdir, configure_repo): configure_repo() repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\") repository._save(submission) repository._export(submission.id, tmpdir.strpath) dir_path = ( repository.dir_path if Config.core.repository_type == \"default\" else os.path.join(tmpdir.strpath, \"submission\") ) assert os.path.exists(os.path.join(dir_path, f\"{submission.id}.json\")) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from datetime import datetime from time import sleep from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.submission.submission_status import SubmissionStatus from src.taipy.core.task.task import Task def test_create_submission(scenario): submission_1 = _SubmissionManagerFactory._build_manager()._create(scenario.id) assert submission_1.id is not None assert submission_1.entity_id == scenario.id assert submission_1.jobs == [] assert isinstance(submission_1.creation_date, datetime) assert submission_1._submission_status == SubmissionStatus.SUBMITTED def test_get_submission(): submission_manager = _SubmissionManagerFactory._build_manager() assert submission_manager._get(\"random_submission_id\") is None submission_1 = submission_manager._create(\"entity_id\") submission_2 = submission_manager._get(submission_1.id) assert submission_1.id == submission_2.id assert submission_1.entity_id == submission_2.entity_id == \"entity_id\" assert submission_1.jobs == submission_2.jobs assert submission_1.creation_date == submission_2.creation_date assert submission_1.submission_status == submission_2.submission_status def test_get_all_submission(): submission_manager = _SubmissionManagerFactory._build_manager() version_manager = _VersionManagerFactory._build_manager() submission_manager._set(Submission(\"entity_id\", \"submission_id\", version=version_manager._get_latest_version())) for version_name in [\"abc\", \"xyz\"]: for i in range(10): submission_manager._set( Submission(\"entity_id\", f\"submission_{version_name}_{i}\", version=f\"{version_name}\") ) assert len(submission_manager._get_all()) == 1 version_manager._set_experiment_version(\"xyz\") version_manager._set_experiment_version(\"abc\") assert len(submission_manager._get_all()) == 10 assert len(submission_manager._get_all(\"abc\")) == 10 assert len(submission_manager._get_all(\"xyz\")) == 10 def test_get_latest_submission(): task_1 = Task(\"task_config_1\", {}, print, id=\"task_id_1\") task_2 = Task(\"task_config_2\", {}, print, id=\"task_id_2\") submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(task_1.id) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_2 = submission_manager._create(task_2.id) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_3 = submission_manager._create(task_1.id) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_4 = submission_manager._create(task_2.id) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_4 def test_delete_submission(): submission_manager = _SubmissionManagerFactory._build_manager() submission = Submission(\"entity_id\", \"submission_id\") submission_manager._set(submission) for i in range(10): submission_manager._set(Submission(\"entity_id\", f\"submission_{i}\")) 
assert len(submission_manager._get_all()) == 11 assert isinstance(submission_manager._get(submission.id), Submission) submission_manager._delete(submission.id) assert len(submission_manager._get_all()) == 10 assert submission_manager._get(submission.id) is None submission_manager._delete_all() assert len(submission_manager._get_all()) == 0 "} {"text": "from datetime import datetime from functools import partial from typing import Union from unittest import mock from unittest.mock import patch import pytest from src.taipy.core import TaskId from src.taipy.core.job._job_manager_factory import _JobManagerFactory from src.taipy.core.job.job import Job from src.taipy.core.job.status import Status from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.submission.submission_status import SubmissionStatus from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task def test_create_submission(scenario, job, current_datetime): submission_1 = Submission(scenario.id) assert submission_1.id is not None assert submission_1.entity_id == scenario.id assert submission_1.jobs == [] assert isinstance(submission_1.creation_date, datetime) assert submission_1._submission_status == SubmissionStatus.SUBMITTED assert submission_1._version is not None submission_2 = Submission( scenario.id, \"submission_id\", [job], current_datetime, SubmissionStatus.COMPLETED, \"version_id\" ) assert submission_2.id == \"submission_id\" assert submission_2.entity_id == scenario.id assert submission_2._jobs == [job] assert submission_2.creation_date == current_datetime assert submission_2._submission_status == SubmissionStatus.COMPLETED assert submission_2._version == \"version_id\" class MockJob: def __init__(self, id: str, status): self.status = status self.id = id def is_failed(self): return self.status == Status.FAILED def is_canceled(self): return self.status == Status.CANCELED def is_blocked(self): return self.status == Status.BLOCKED def is_pending(self): return self.status == Status.PENDING def is_running(self): return self.status == Status.RUNNING def is_completed(self): return self.status == Status.COMPLETED def is_skipped(self): return self.status == Status.SKIPPED def is_abandoned(self): return self.status == Status.ABANDONED def is_submitted(self): return self.status == Status.SUBMITTED def mock_get_jobs(job_ids): jobs = { \"job0_submitted\": MockJob(\"job0_submitted\", Status.SUBMITTED), \"job1_failed\": MockJob(\"job1_failed\", Status.FAILED), \"job2_canceled\": MockJob(\"job2_canceled\", Status.CANCELED), \"job3_blocked\": MockJob(\"job3_blocked\", Status.BLOCKED), \"job4_pending\": MockJob(\"job4_pending\", Status.PENDING), \"job5_running\": MockJob(\"job5_running\", Status.RUNNING), \"job6_completed\": MockJob(\"job6_completed\", Status.COMPLETED), \"job7_skipped\": MockJob(\"job7_skipped\", Status.SKIPPED), \"job8_abandoned\": MockJob(\"job8_abandoned\", Status.ABANDONED), } return [jobs[job_id] for job_id in job_ids] def __test_update_submission_status(job_ids, expected_submission_status): with ( patch( \"src.taipy.core.submission.submission.Submission.jobs\", new_callable=mock.PropertyMock, return_value=(mock_get_jobs(job_ids)), ) ): submission = Submission(\"submission_id\") submission._update_submission_status(None) assert submission.submission_status == expected_submission_status @pytest.mark.parametrize( \"job_ids, 
expected_submission_status\", [ ([\"job1_failed\"], SubmissionStatus.FAILED), ([\"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job3_blocked\"], SubmissionStatus.BLOCKED), ([\"job4_pending\"], SubmissionStatus.PENDING), ([\"job5_running\"], SubmissionStatus.RUNNING), ([\"job6_completed\"], SubmissionStatus.COMPLETED), ([\"job7_skipped\"], SubmissionStatus.COMPLETED), ([\"job8_abandoned\"], SubmissionStatus.UNDEFINED), ], ) def test_update_single_submission_status(job_ids, expected_submission_status): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job1_failed\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job2_canceled\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job3_blocked\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job4_pending\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job5_running\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job6_completed\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job7_skipped\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job8_abandoned\"], SubmissionStatus.FAILED), ([\"job2_canceled\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job3_blocked\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job4_pending\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job5_running\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job6_completed\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job7_skipped\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job8_abandoned\", \"job1_failed\"], SubmissionStatus.FAILED), ], ) def test_update_submission_status_with_one_failed_job_in_jobs(job_ids, expected_submission_status): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job2_canceled\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job3_blocked\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job4_pending\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job5_running\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job6_completed\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job7_skipped\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job8_abandoned\"], SubmissionStatus.CANCELED), ([\"job3_blocked\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job4_pending\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job5_running\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job6_completed\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job7_skipped\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job8_abandoned\", \"job2_canceled\"], SubmissionStatus.CANCELED), ], ) def test_update_submission_status_with_one_canceled_job_in_jobs(job_ids, expected_submission_status): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job4_pending\", \"job3_blocked\"], SubmissionStatus.PENDING), ([\"job4_pending\", \"job4_pending\"], SubmissionStatus.PENDING), ([\"job4_pending\", \"job6_completed\"], SubmissionStatus.PENDING), ([\"job4_pending\", \"job7_skipped\"], SubmissionStatus.PENDING), ([\"job3_blocked\", \"job4_pending\"], SubmissionStatus.PENDING), ([\"job6_completed\", \"job4_pending\"], SubmissionStatus.PENDING), ([\"job7_skipped\", \"job4_pending\"], SubmissionStatus.PENDING), ], ) def 
test_update_submission_status_with_no_failed_or_cancel_one_pending_in_jobs(job_ids, expected_submission_status): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job5_running\", \"job3_blocked\"], SubmissionStatus.RUNNING), ([\"job5_running\", \"job4_pending\"], SubmissionStatus.RUNNING), ([\"job5_running\", \"job5_running\"], SubmissionStatus.RUNNING), ([\"job5_running\", \"job6_completed\"], SubmissionStatus.RUNNING), ([\"job5_running\", \"job7_skipped\"], SubmissionStatus.RUNNING), ([\"job3_blocked\", \"job5_running\"], SubmissionStatus.RUNNING), ([\"job4_pending\", \"job5_running\"], SubmissionStatus.RUNNING), ([\"job6_completed\", \"job5_running\"], SubmissionStatus.RUNNING), ([\"job7_skipped\", \"job5_running\"], SubmissionStatus.RUNNING), ], ) def test_update_submission_status_with_no_failed_cancel_nor_pending_one_running_in_jobs( job_ids, expected_submission_status ): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job3_blocked\", \"job3_blocked\"], SubmissionStatus.BLOCKED), ([\"job3_blocked\", \"job6_completed\"], SubmissionStatus.BLOCKED), ([\"job3_blocked\", \"job7_skipped\"], SubmissionStatus.BLOCKED), ([\"job6_completed\", \"job3_blocked\"], SubmissionStatus.BLOCKED), ([\"job7_skipped\", \"job3_blocked\"], SubmissionStatus.BLOCKED), ], ) def test_update_submission_status_with_no_failed_cancel_pending_nor_running_one_blocked_in_jobs( job_ids, expected_submission_status ): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job6_completed\", \"job6_completed\"], SubmissionStatus.COMPLETED), ([\"job6_completed\", \"job7_skipped\"], SubmissionStatus.COMPLETED), ([\"job7_skipped\", \"job6_completed\"], SubmissionStatus.COMPLETED), ([\"job7_skipped\", \"job7_skipped\"], SubmissionStatus.COMPLETED), ], ) def test_update_submission_status_with_only_completed_or_skipped_in_jobs(job_ids, expected_submission_status): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job3_blocked\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job4_pending\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job5_running\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job6_completed\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job7_skipped\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job3_blocked\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job4_pending\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job5_running\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job6_completed\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job7_skipped\"], SubmissionStatus.UNDEFINED), ], ) def test_update_submission_status_with_wrong_case_abandoned_without_cancel_or_failed_in_jobs( job_ids, expected_submission_status ): __test_update_submission_status(job_ids, expected_submission_status) def test_auto_set_and_reload(): task = Task(config_id=\"name_1\", properties={}, function=print, id=TaskId(\"task_1\")) submission_1 = Submission(task.id) job_1 = Job(\"job_1\", task, submission_1.id, submission_1.entity_id) job_2 = Job(\"job_2\", task, submission_1.id, submission_1.entity_id) 
_TaskManagerFactory._build_manager()._set(task) _SubmissionManagerFactory._build_manager()._set(submission_1) _JobManagerFactory._build_manager()._set(job_1) _JobManagerFactory._build_manager()._set(job_2) submission_2 = _SubmissionManagerFactory._build_manager()._get(submission_1) assert submission_1.id == submission_2.id assert submission_1.entity_id == submission_2.entity_id assert submission_1.creation_date == submission_2.creation_date assert submission_1.submission_status == submission_2.submission_status # auto set & reload on jobs attribute assert submission_1.jobs == [] assert submission_2.jobs == [] submission_1.jobs = [job_1] assert submission_1.jobs == [job_1] assert submission_2.jobs == [job_1] submission_2.jobs = [job_2] assert submission_1.jobs == [job_2] assert submission_2.jobs == [job_2] submission_1.jobs = [job_1, job_2] assert submission_1.jobs == [job_1, job_2] assert submission_2.jobs == [job_1, job_2] submission_2.jobs = [job_2, job_1] assert submission_1.jobs == [job_2, job_1] assert submission_2.jobs == [job_2, job_1] # auto set & reload on submission_status attribute assert submission_1.submission_status == SubmissionStatus.SUBMITTED assert submission_2.submission_status == SubmissionStatus.SUBMITTED submission_1.submission_status = SubmissionStatus.BLOCKED assert submission_1.submission_status == SubmissionStatus.BLOCKED assert submission_2.submission_status == SubmissionStatus.BLOCKED submission_2.submission_status = SubmissionStatus.COMPLETED assert submission_1.submission_status == SubmissionStatus.COMPLETED assert submission_2.submission_status == SubmissionStatus.COMPLETED with submission_1 as submission: assert submission.jobs == [job_2, job_1] assert submission.submission_status == SubmissionStatus.COMPLETED submission.jobs = [job_1] submission.submission_status = SubmissionStatus.PENDING assert submission.jobs == [job_2, job_1] assert submission.submission_status == SubmissionStatus.COMPLETED assert submission_1.jobs == [job_1] assert submission_1.submission_status == SubmissionStatus.PENDING assert submission_2.jobs == [job_1] assert submission_2.submission_status == SubmissionStatus.PENDING "} {"text": "from datetime import datetime from time import sleep from src.taipy.core import Task from src.taipy.core._repository.db._sql_connection import _SQLConnection from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.submission.submission_status import SubmissionStatus def init_managers(): _VersionManagerFactory._build_manager()._delete_all() _SubmissionManagerFactory._build_manager()._delete_all() def test_create_submission(scenario, init_sql_repo): init_managers() submission_1 = _SubmissionManagerFactory._build_manager()._create(scenario.id) assert submission_1.id is not None assert submission_1.entity_id == scenario.id assert submission_1.jobs == [] assert isinstance(submission_1.creation_date, datetime) assert submission_1._submission_status == SubmissionStatus.SUBMITTED def test_get_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(\"entity_id\") submission_2 = submission_manager._get(submission_1.id) assert submission_1.id == submission_2.id assert submission_1.entity_id == submission_2.entity_id == \"entity_id\" assert submission_1.jobs == submission_2.jobs 
assert submission_1.creation_date == submission_2.creation_date assert submission_1.submission_status == submission_2.submission_status def test_get_all_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() version_manager = _VersionManagerFactory._build_manager() submission_manager._set(Submission(\"entity_id\", \"submission_id\", version=version_manager._get_latest_version())) for version_name in [\"abc\", \"xyz\"]: for i in range(10): submission_manager._set( Submission(\"entity_id\", f\"submission_{version_name}_{i}\", version=f\"{version_name}\") ) assert len(submission_manager._get_all()) == 1 version_manager._set_experiment_version(\"xyz\") version_manager._set_experiment_version(\"abc\") assert len(submission_manager._get_all()) == 10 assert len(submission_manager._get_all(\"abc\")) == 10 assert len(submission_manager._get_all(\"xyz\")) == 10 def test_get_latest_submission(init_sql_repo): init_managers() task_1 = Task(\"task_config_1\", {}, print, id=\"task_id_1\") task_2 = Task(\"task_config_2\", {}, print, id=\"task_id_2\") submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(task_1.id) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_2 = submission_manager._create(task_2.id) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_3 = submission_manager._create(task_1.id) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_4 = submission_manager._create(task_2.id) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_4 def test_delete_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() submission = Submission(\"entity_id\", \"submission_id\") submission_manager._set(submission) for i in range(10): submission_manager._set(Submission(\"entity_id\", f\"submission_{i}\")) assert len(submission_manager._get_all()) == 11 assert isinstance(submission_manager._get(submission.id), Submission) submission_manager._delete(submission.id) assert len(submission_manager._get_all()) == 10 assert submission_manager._get(submission.id) is None submission_manager._delete_all() assert len(submission_manager._get_all()) == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from importlib.util import find_spec if find_spec(\"taipy\"): if find_spec(\"taipy.config\"): from taipy.config._init import * # type: ignore if find_spec(\"taipy.gui\"): from taipy.gui._init import * # type: ignore if find_spec(\"taipy.core\"): from taipy.core._init import * # type: ignore if find_spec(\"taipy.rest\"): from taipy.rest._init import * # type: ignore if find_spec(\"taipy.gui_core\"): from taipy.gui_core._init import * # type: ignore if find_spec(\"taipy.enterprise\"): from taipy.enterprise._init import * # type: ignore if find_spec(\"taipy._run\"): from taipy._run import _run as run # type: ignore "} {"text": "from ._core import Core from ._entity.submittable import Submittable from .cycle.cycle import Cycle from .cycle.cycle_id import CycleId from .data.data_node import DataNode from .data.data_node_id import DataNodeId from .job.job import Job from .job.job_id import JobId from .job.status import Status from .scenario.scenario import Scenario from .scenario.scenario_id import ScenarioId from .sequence.sequence import Sequence from .sequence.sequence_id import SequenceId from .taipy import ( cancel_job, clean_all_entities_by_version, compare_scenarios, create_global_data_node, create_scenario, delete, delete_job, delete_jobs, exists, export_scenario, get, get_cycles, get_cycles_scenarios, get_data_nodes, get_entities_by_config_id, get_jobs, get_latest_job, get_parents, get_primary, get_primary_scenarios, get_scenarios, get_sequences, get_tasks, is_deletable, is_editable, is_promotable, is_readable, is_submittable, set, set_primary, submit, subscribe_scenario, subscribe_sequence, tag, unsubscribe_scenario, unsubscribe_sequence, untag, ) from .task.task import Task from .task.task_id import TaskId "} {"text": "\"\"\"# Taipy Core The Taipy Core package is a Python library designed to build powerful, customized, data-driven back-end applications. It provides the tools to help Python developers transform their algorithms into a complete back-end application. More details on the [Taipy Core](../../core/index.md) functionalities are available in the user manual. To build a Taipy Core application, the first step consists of setting up the Taipy configuration to design your application's characteristics and behaviors. Import `Config^` from the `taipy.config^` module, then use the various methods of the `Config^` singleton class to configure your core application. In particular, configure the [data nodes](../../core/config/data-node-config.md), [tasks](../../core/config/task-config.md), and [scenarios](../../core/config/scenario-config.md). Please refer to the [Core configuration user manual](../../core/config/index.md) for more information and detailed examples. Once your application is configured, import module `import taipy as tp` so you can use any function described in the following section on [Functionc](#functions). In particular, the most used functions are `tp.create_scenario()`, `tp.get_scenarios()`, `tp.get_data_nodes()`, `tp.submit()`, used to get, create, and submit entities. !!! Note Taipy Core provides a runnable service, `Core^` that runs as a service in a dedicated thread. The purpose is to have a dedicated thread responsible for dispatching the submitted jobs to an available executor for their execution. In particular, this `Core^` service is automatically run when Core is used with Taipy REST or Taipy GUI. See the [running services](../../run-deploy/run/running_services.md) page of the user manual for more details. 
\"\"\" from ._init import * from ._init_version import _read_version from .common.mongo_default_document import MongoDefaultDocument from .data.data_node_id import Edit from .exceptions import exceptions __version__ = _read_version() "} {"text": "import json import os from pathlib import Path def _read_version(): with open(f\"{Path(os.path.abspath(__file__)).parent}{os.sep}version.json\") as version_file: version = json.load(version_file) version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}' if vext := version.get(\"ext\"): version_string = f\"{version_string}.{vext}\" return version_string "} {"text": "import pathlib import shutil from datetime import datetime from typing import Any, Callable, Dict, List, Optional, Set, Union, overload from taipy.config.common.scope import Scope from taipy.logger._taipy_logger import _TaipyLogger from ._entity._entity import _Entity from ._version._version_manager_factory import _VersionManagerFactory from .common._warnings import _warn_no_core_service from .config.data_node_config import DataNodeConfig from .config.scenario_config import ScenarioConfig from .cycle._cycle_manager_factory import _CycleManagerFactory from .cycle.cycle import Cycle from .cycle.cycle_id import CycleId from .data._data_manager_factory import _DataManagerFactory from .data.data_node import DataNode from .data.data_node_id import DataNodeId from .exceptions.exceptions import ( DataNodeConfigIsNotGlobal, ModelNotFound, NonExistingVersion, VersionIsNotProductionVersion, ) from .job._job_manager_factory import _JobManagerFactory from .job.job import Job from .job.job_id import JobId from .scenario._scenario_manager_factory import _ScenarioManagerFactory from .scenario.scenario import Scenario from .scenario.scenario_id import ScenarioId from .sequence._sequence_manager_factory import _SequenceManagerFactory from .sequence.sequence import Sequence from .sequence.sequence_id import SequenceId from .submission._submission_manager_factory import _SubmissionManagerFactory from .submission.submission import Submission from .task._task_manager_factory import _TaskManagerFactory from .task.task import Task from .task.task_id import TaskId __logger = _TaipyLogger._get_logger() def set(entity: Union[DataNode, Task, Sequence, Scenario, Cycle]): \"\"\"Save or update an entity. This function allows you to save or update an entity in Taipy. Parameters: entity (Union[DataNode^, Task^, Sequence^, Scenario^, Cycle^]): The entity to save or update. \"\"\" if isinstance(entity, Cycle): return _CycleManagerFactory._build_manager()._set(entity) if isinstance(entity, Scenario): return _ScenarioManagerFactory._build_manager()._set(entity) if isinstance(entity, Sequence): return _SequenceManagerFactory._build_manager()._set(entity) if isinstance(entity, Task): return _TaskManagerFactory._build_manager()._set(entity) if isinstance(entity, DataNode): return _DataManagerFactory._build_manager()._set(entity) def is_submittable(entity: Union[Scenario, ScenarioId, Sequence, SequenceId, Task, TaskId]) -> bool: \"\"\"Indicate if an entity can be submitted. This function checks if the given entity can be submitted for execution. Returns: True if the given entity can be submitted. False otherwise. 
\"\"\" if isinstance(entity, Scenario) or (isinstance(entity, str) and entity.startswith(Scenario._ID_PREFIX)): return _ScenarioManagerFactory._build_manager()._is_submittable(entity) # type: ignore if isinstance(entity, Sequence) or (isinstance(entity, str) and entity.startswith(Sequence._ID_PREFIX)): return _SequenceManagerFactory._build_manager()._is_submittable(entity) # type: ignore if isinstance(entity, Task) or (isinstance(entity, str) and entity.startswith(Task._ID_PREFIX)): return _TaskManagerFactory._build_manager()._is_submittable(entity) # type: ignore return False def is_editable( entity: Union[ DataNode, Task, Job, Sequence, Scenario, Cycle, DataNodeId, TaskId, JobId, SequenceId, ScenarioId, CycleId ] ) -> bool: \"\"\"Indicate if an entity can be edited. This function checks if the given entity can be edited. Returns: True if the given entity can be edited. False otherwise. \"\"\" if isinstance(entity, Cycle) or (isinstance(entity, str) and entity.startswith(Cycle._ID_PREFIX)): return _CycleManagerFactory._build_manager()._is_editable(entity) # type: ignore if isinstance(entity, Scenario) or (isinstance(entity, str) and entity.startswith(Scenario._ID_PREFIX)): return _ScenarioManagerFactory._build_manager()._is_editable(entity) # type: ignore if isinstance(entity, Sequence) or (isinstance(entity, str) and entity.startswith(Sequence._ID_PREFIX)): return _SequenceManagerFactory._build_manager()._is_editable(entity) # type: ignore if isinstance(entity, Task) or (isinstance(entity, str) and entity.startswith(Task._ID_PREFIX)): return _TaskManagerFactory._build_manager()._is_editable(entity) # type: ignore if isinstance(entity, Job) or (isinstance(entity, str) and entity.startswith(Job._ID_PREFIX)): return _JobManagerFactory._build_manager()._is_editable(entity) # type: ignore if isinstance(entity, DataNode) or (isinstance(entity, str) and entity.startswith(DataNode._ID_PREFIX)): return _DataManagerFactory._build_manager()._is_editable(entity) # type: ignore return False def is_readable( entity: Union[ DataNode, Task, Job, Sequence, Scenario, Cycle, DataNodeId, TaskId, JobId, SequenceId, ScenarioId, CycleId ] ) -> bool: \"\"\"Indicate if an entity can be read. This function checks if the given entity can be read. Returns: True if the given entity can be read. False otherwise. 
\"\"\" if isinstance(entity, Cycle) or (isinstance(entity, str) and entity.startswith(Cycle._ID_PREFIX)): return _CycleManagerFactory._build_manager()._is_readable(entity) # type: ignore if isinstance(entity, Scenario) or (isinstance(entity, str) and entity.startswith(Scenario._ID_PREFIX)): return _ScenarioManagerFactory._build_manager()._is_readable(entity) # type: ignore if isinstance(entity, Sequence) or (isinstance(entity, str) and entity.startswith(Sequence._ID_PREFIX)): return _SequenceManagerFactory._build_manager()._is_readable(entity) # type: ignore if isinstance(entity, Task) or (isinstance(entity, str) and entity.startswith(Task._ID_PREFIX)): return _TaskManagerFactory._build_manager()._is_readable(entity) # type: ignore if isinstance(entity, Job) or (isinstance(entity, str) and entity.startswith(Job._ID_PREFIX)): return _JobManagerFactory._build_manager()._is_readable(entity) # type: ignore if isinstance(entity, DataNode) or (isinstance(entity, str) and entity.startswith(DataNode._ID_PREFIX)): return _DataManagerFactory._build_manager()._is_readable(entity) # type: ignore return False @_warn_no_core_service() def submit( entity: Union[Scenario, Sequence, Task], force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ) -> Union[Job, List[Job]]: \"\"\"Submit a scenario, sequence or task entity for execution. This function submits the given entity for execution and returns the created job(s). If the entity is a sequence or a scenario, all the tasks of the entity are submitted for execution. Parameters: entity (Union[Scenario^, Sequence^, Task^]): The scenario, sequence or task to submit. force (bool): If True, the execution is forced even if for skippable tasks. wait (bool): Wait for the orchestrated jobs created from the submission to be finished in asynchronous mode. timeout (Union[float, int]): The optional maximum number of seconds to wait for the jobs to be finished before returning. Returns: The created `Job^` or a collection of the created `Job^` depends on the submitted entity. - If a `Scenario^` or a `Sequence^` is provided, it will return a list of `Job^`. - If a `Task^` is provided, it will return the created `Job^`. \"\"\" if isinstance(entity, Scenario): return _ScenarioManagerFactory._build_manager()._submit(entity, force=force, wait=wait, timeout=timeout) if isinstance(entity, Sequence): return _SequenceManagerFactory._build_manager()._submit(entity, force=force, wait=wait, timeout=timeout) if isinstance(entity, Task): return _TaskManagerFactory._build_manager()._submit(entity, force=force, wait=wait, timeout=timeout) @overload def exists(entity_id: TaskId) -> bool: ... @overload def exists(entity_id: DataNodeId) -> bool: ... @overload def exists(entity_id: SequenceId) -> bool: ... @overload def exists(entity_id: ScenarioId) -> bool: ... @overload def exists(entity_id: CycleId) -> bool: ... @overload def exists(entity_id: JobId) -> bool: ... @overload def exists(entity_id: str) -> bool: ... def exists(entity_id: Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId, str]) -> bool: \"\"\"Check if an entity with the specified identifier exists. This function checks if an entity with the given identifier exists. It supports various types of entity identifiers, including `TaskId^`, `DataNodeId^`, `SequenceId^`, `ScenarioId^`, `JobId^`, `CycleId^`, and string representations. Parameters: entity_id (Union[DataNodeId^, TaskId^, SequenceId^, ScenarioId^, JobId^, CycleId^]): The identifier of the entity to check for existence. 
Returns: True if the given entity exists. False otherwise. Raises: ModelNotFound: If the entity's type cannot be determined. Note: The function performs checks for various entity types (`Job^`, `Cycle^`, `Scenario^`, `Sequence^`, `Task^`, `DataNode^`) based on their respective identifier prefixes. \"\"\" if entity_id.startswith(Job._ID_PREFIX): return _JobManagerFactory._build_manager()._exists(JobId(entity_id)) if entity_id.startswith(Cycle._ID_PREFIX): return _CycleManagerFactory._build_manager()._exists(CycleId(entity_id)) if entity_id.startswith(Scenario._ID_PREFIX): return _ScenarioManagerFactory._build_manager()._exists(ScenarioId(entity_id)) if entity_id.startswith(Sequence._ID_PREFIX): return _SequenceManagerFactory._build_manager()._exists(SequenceId(entity_id)) if entity_id.startswith(Task._ID_PREFIX): return _TaskManagerFactory._build_manager()._exists(TaskId(entity_id)) if entity_id.startswith(DataNode._ID_PREFIX): return _DataManagerFactory._build_manager()._exists(DataNodeId(entity_id)) raise ModelNotFound(\"NOT_DETERMINED\", entity_id) @overload def get(entity_id: TaskId) -> Task: ... @overload def get(entity_id: DataNodeId) -> DataNode: ... @overload def get(entity_id: SequenceId) -> Sequence: ... @overload def get(entity_id: ScenarioId) -> Scenario: ... @overload def get(entity_id: CycleId) -> Cycle: ... @overload def get(entity_id: JobId) -> Job: ... @overload def get(entity_id: str) -> Union[Task, DataNode, Sequence, Scenario, Job, Cycle]: ... def get( entity_id: Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId, str] ) -> Union[Task, DataNode, Sequence, Scenario, Job, Cycle]: \"\"\"Retrieve an entity by its unique identifier. This function allows you to retrieve an entity by specifying its identifier. The identifier must match the pattern of one of the supported entity types: Task^, DataNode^, Sequence^, Job^, Cycle^, or Scenario^. Parameters: entity_id (Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId, str]): The identifier of the entity to retrieve.
It should conform to the identifier pattern of one of the entities (`Task^`, `DataNode^`, `Sequence^`, `Job^`, `Cycle^` or `Scenario^`). Returns: The entity that corresponds to the provided identifier. Returns None if no matching entity is found. Raises: ModelNotFound^: If the provided *entity_id* does not match any known entity pattern. \"\"\" if entity_id.startswith(Job._ID_PREFIX): return _JobManagerFactory._build_manager()._get(JobId(entity_id)) if entity_id.startswith(Cycle._ID_PREFIX): return _CycleManagerFactory._build_manager()._get(CycleId(entity_id)) if entity_id.startswith(Scenario._ID_PREFIX): return _ScenarioManagerFactory._build_manager()._get(ScenarioId(entity_id)) if entity_id.startswith(Sequence._ID_PREFIX): return _SequenceManagerFactory._build_manager()._get(SequenceId(entity_id)) if entity_id.startswith(Task._ID_PREFIX): return _TaskManagerFactory._build_manager()._get(TaskId(entity_id)) if entity_id.startswith(DataNode._ID_PREFIX): return _DataManagerFactory._build_manager()._get(DataNodeId(entity_id)) raise ModelNotFound(\"NOT_DETERMINED\", entity_id) def get_tasks() -> List[Task]: \"\"\"Retrieve a list of all existing tasks. This function returns a list of all tasks that currently exist. Returns: A list containing all the tasks. \"\"\" return _TaskManagerFactory._build_manager()._get_all() def is_deletable(entity: Union[Scenario, Job, ScenarioId, JobId]) -> bool: \"\"\"Check if a `Scenario^` or a `Job^` can be deleted. This function determines whether a scenario or a job can be safely deleted without causing conflicts or issues. Parameters: entity (Union[Scenario, Job, ScenarioId, JobId]): The scenario or job to check. Returns: True if the given scenario or job can be deleted. False otherwise. \"\"\" if isinstance(entity, str) and entity.startswith(Job._ID_PREFIX) or isinstance(entity, Job): return _JobManagerFactory._build_manager()._is_deletable(entity) # type: ignore if isinstance(entity, str) and entity.startswith(Scenario._ID_PREFIX) or isinstance(entity, Scenario): return _ScenarioManagerFactory._build_manager()._is_deletable(entity) # type: ignore return True def delete(entity_id: Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId]): \"\"\"Delete an entity and its nested entities. This function deletes the specified entity and recursively deletes all its nested entities. The behavior varies depending on the type of entity provided: - If a `CycleId` is provided, the nested scenarios, tasks, data nodes, and jobs are deleted. - If a `ScenarioId` is provided, the nested tasks, data nodes, and jobs are deleted. If the scenario is primary, it can only be deleted if it is the only scenario in the cycle. In that case, its cycle is also deleted. Use the `is_deletable()^` function to check if the scenario can be deleted. - If a `SequenceId` is provided, the related jobs are deleted. - If a `TaskId` is provided, the related data nodes, and jobs are deleted. Parameters: entity_id (Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId]): The identifier of the entity to delete. Raises: ModelNotFound: No entity corresponds to the specified *entity_id*. 
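Example: Assuming `scenario` is an existing, non-primary `Scenario^` entity (an illustrative placeholder), the following instruction deletes it along with its nested tasks, data nodes, and jobs: `tp.delete(scenario.id)`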
\"\"\" if entity_id.startswith(Job._ID_PREFIX): job_manager = _JobManagerFactory._build_manager() return job_manager._delete(job_manager._get(JobId(entity_id))) # type: ignore if entity_id.startswith(Cycle._ID_PREFIX): return _CycleManagerFactory._build_manager()._hard_delete(CycleId(entity_id)) if entity_id.startswith(Scenario._ID_PREFIX): return _ScenarioManagerFactory._build_manager()._hard_delete(ScenarioId(entity_id)) if entity_id.startswith(Sequence._ID_PREFIX): return _SequenceManagerFactory._build_manager()._hard_delete(SequenceId(entity_id)) if entity_id.startswith(Task._ID_PREFIX): return _TaskManagerFactory._build_manager()._hard_delete(TaskId(entity_id)) if entity_id.startswith(DataNode._ID_PREFIX): return _DataManagerFactory._build_manager()._delete(DataNodeId(entity_id)) raise ModelNotFound(\"NOT_DETERMINED\", entity_id) def get_scenarios(cycle: Optional[Cycle] = None, tag: Optional[str] = None) -> List[Scenario]: \"\"\"Retrieve a list of existing scenarios filtered by cycle or tag. This function allows you to retrieve a list of scenarios based on optional filtering criteria. If both a _cycle_ and a _tag_ are provided, the returned list contains scenarios that belong to the specified _cycle_ **and** also have the specified _tag_. Parameters: cycle (Optional[Cycle^]): The optional `Cycle^` to filter scenarios by. tag (Optional[str]): The optional tag to filter scenarios by. Returns: The list of scenarios filtered by cycle or tag. If no filtering criteria are provided, this method returns all existing scenarios. \"\"\" if not cycle and not tag: return _ScenarioManagerFactory._build_manager()._get_all() if cycle and not tag: return _ScenarioManagerFactory._build_manager()._get_all_by_cycle(cycle) if not cycle and tag: return _ScenarioManagerFactory._build_manager()._get_all_by_tag(tag) if cycle and tag: cycles_scenarios = _ScenarioManagerFactory._build_manager()()._get_all_by_cycle(cycle) return [scenario for scenario in cycles_scenarios if scenario.has_tag(tag)] return [] def get_primary(cycle: Cycle) -> Optional[Scenario]: \"\"\"Retrieve the primary scenario associated with a cycle. Parameters: cycle (Cycle^): The cycle for which to retrieve the primary scenario. Returns: The primary scenario of the given _cycle_. If the cycle has no primary scenario, this method returns None. \"\"\" return _ScenarioManagerFactory._build_manager()._get_primary(cycle) def get_primary_scenarios() -> List[Scenario]: \"\"\"Retrieve a list of all primary scenarios. Returns: A list containing all primary scenarios. \"\"\" return _ScenarioManagerFactory._build_manager()._get_primary_scenarios() def is_promotable(scenario: Union[Scenario, ScenarioId]) -> bool: \"\"\"Determine if a scenario can be promoted to become a primary scenario. This function checks whether the given scenario is eligible to be promoted as a primary scenario. Parameters: scenario (Union[Scenario, ScenarioId]): The scenario to be evaluated for promotability. Returns: True if the given scenario can be promoted to be a primary scenario. False otherwise. \"\"\" return _ScenarioManagerFactory._build_manager()._is_promotable_to_primary(scenario) def set_primary(scenario: Scenario): \"\"\"Promote a scenario as the primary scenario of its cycle. This function promotes the given scenario as the primary scenario of its associated cycle. If the cycle already has a primary scenario, that scenario is demoted and is no longer considered the primary scenario for its cycle. 
Parameters: scenario (Scenario^): The scenario to promote as the new _primary_ scenario. \"\"\" return _ScenarioManagerFactory._build_manager()._set_primary(scenario) def tag(scenario: Scenario, tag: str): \"\"\"Add a tag to a scenario. This function adds a user-defined tag to the specified scenario. If another scenario within the same cycle already has the same tag applied, the previous scenario is untagged. Parameters: scenario (Scenario^): The scenario to which the tag will be added. tag (str): The tag to apply to the scenario. \"\"\" return _ScenarioManagerFactory._build_manager()._tag(scenario, tag) def untag(scenario: Scenario, tag: str): \"\"\"Remove a tag from a scenario. This function removes a specified tag from the given scenario. If the scenario does not have the specified tag, this function has no effect. Parameters: scenario (Scenario^): The scenario from which the tag will be removed. tag (str): The tag to remove from the scenario. \"\"\" return _ScenarioManagerFactory._build_manager()._untag(scenario, tag) def compare_scenarios(*scenarios: Scenario, data_node_config_id: Optional[str] = None) -> Dict[str, Any]: \"\"\"Compare the data nodes of several scenarios. You can specify the data node config identifier on which the comparison should be performed. Parameters: *scenarios (*Scenario^): The list of the scenarios to compare. data_node_config_id (Optional[str]): The config identifier of the DataNode to perform the comparison on.
If _data_node_config_id_ is not provided, the scenarios are compared on all defined comparators.
Returns: The comparison results. The key is the data node config identifier used for comparison. Raises: InsufficientScenarioToCompare^: Raised when only one or no scenario for comparison is provided. NonExistingComparator^: Raised when the scenario comparator does not exist. DifferentScenarioConfigs^: Raised when the provided scenarios do not share the same scenario config. NonExistingScenarioConfig^: Raised when the scenario config of the provided scenarios could not be found. \"\"\" return _ScenarioManagerFactory._build_manager()._compare(*scenarios, data_node_config_id=data_node_config_id) def subscribe_scenario( callback: Callable[[Scenario, Job], None], params: Optional[List[Any]] = None, scenario: Optional[Scenario] = None, ): \"\"\"Subscribe a function to be called on job status change. The subscription is applied to all jobs created for the execution of _scenario_. If no scenario is provided, the subscription applies to all scenarios. Parameters: callback (Callable[[Scenario^, Job^], None]): The function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. scenario (Optional[Scenario^]): The scenario to which the callback is applied. If None, the subscription is registered for all scenarios. Note: Notifications are applied only for jobs created **after** this subscription. \"\"\" params = [] if params is None else params return _ScenarioManagerFactory._build_manager()._subscribe(callback, params, scenario) def unsubscribe_scenario( callback: Callable[[Scenario, Job], None], params: Optional[List[Any]] = None, scenario: Optional[Scenario] = None ): \"\"\"Unsubscribe a function that is called when the status of a `Job^` changes. If no scenario is provided, the subscription is removed for all scenarios. Parameters: callback (Callable[[Scenario^, Job^], None]): The function to unsubscribe from. params (Optional[List[Any]]): The parameters to be passed to the callback. scenario (Optional[Scenario]): The scenario to unsubscribe from. If None, it applies to all scenarios. Note: The callback function will continue to be called for ongoing jobs. \"\"\" return _ScenarioManagerFactory._build_manager()._unsubscribe(callback, params, scenario) def subscribe_sequence( callback: Callable[[Sequence, Job], None], params: Optional[List[Any]] = None, sequence: Optional[Sequence] = None ): \"\"\"Subscribe a function to be called on job status change. The subscription is applied to all jobs created for the execution of _sequence_. Parameters: callback (Callable[[Sequence^, Job^], None]): The callable function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. sequence (Optional[Sequence^]): The sequence to subscribe on. If None, the subscription is applied to all sequences. Note: Notifications are applied only for jobs created **after** this subscription. \"\"\" return _SequenceManagerFactory._build_manager()._subscribe(callback, params, sequence) def unsubscribe_sequence( callback: Callable[[Sequence, Job], None], params: Optional[List[Any]] = None, sequence: Optional[Sequence] = None ): \"\"\"Unsubscribe a function that is called when the status of a Job changes. Parameters: callback (Callable[[Sequence^, Job^], None]): The callable function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. sequence (Optional[Sequence^]): The sequence to unsubscribe to. If None, it applies to all sequences. 
Note: The function will continue to be called for ongoing jobs. \"\"\" return _SequenceManagerFactory._build_manager()._unsubscribe(callback, params, sequence) def get_sequences() -> List[Sequence]: \"\"\"Return all existing sequences. Returns: The list of all sequences. \"\"\" return _SequenceManagerFactory._build_manager()._get_all() def get_jobs() -> List[Job]: \"\"\"Return all the existing jobs. Returns: The list of all jobs. \"\"\" return _JobManagerFactory._build_manager()._get_all() def delete_job(job: Job, force=False): \"\"\"Delete a job. This function deletes the specified job. If the job is not completed and *force* is not set to True, a `JobNotDeletedException^` may be raised. Parameters: job (Job^): The job to delete. force (Optional[bool]): If True, forces the deletion of _job_, even if it is not completed yet. Raises: JobNotDeletedException^: If the job is not finished. \"\"\" return _JobManagerFactory._build_manager()._delete(job, force) def delete_jobs(): \"\"\"Delete all jobs.\"\"\" return _JobManagerFactory._build_manager()._delete_all() def cancel_job(job: Union[str, Job]): \"\"\"Cancel a job and set the status of the subsequent jobs to ABANDONED. This function cancels the specified job and sets the status of any subsequent jobs to ABANDONED. Parameters: job (Job^): The job to cancel. \"\"\" _JobManagerFactory._build_manager()._cancel(job) def get_latest_job(task: Task) -> Optional[Job]: \"\"\"Return the latest job of a task. This function retrieves the latest job associated with a task. Parameters: task (Task^): The task to retrieve the latest job from. Returns: The latest job created from _task_, or None if no job has been created from _task_. \"\"\" return _JobManagerFactory._build_manager()._get_latest(task) def get_latest_submission(entity: Union[Scenario, Sequence, Task]) -> Optional[Submission]: \"\"\"Return the latest submission of a scenario, sequence or task. This function retrieves the latest submission associated with a scenario, sequence or task. Parameters: entity (Union[Scenario^, Sequence^, Task^]): The scenario, sequence or task to retrieve the latest submission from. Returns: The latest submission created from _scenario_, _sequence_ and _task_, or None if no submission has been created from _scenario_, _sequence_ and _task_. \"\"\" return _SubmissionManagerFactory._build_manager()._get_latest(entity) def get_data_nodes() -> List[DataNode]: \"\"\"Return all the existing data nodes. Returns: The list of all data nodes. \"\"\" return _DataManagerFactory._build_manager()._get_all() def get_cycles() -> List[Cycle]: \"\"\"Return the list of all existing cycles. Returns: The list of all cycles. \"\"\" return _CycleManagerFactory._build_manager()._get_all() def create_scenario( config: ScenarioConfig, creation_date: Optional[datetime] = None, name: Optional[str] = None, ) -> Scenario: \"\"\"Create and return a new scenario based on a scenario configuration. If the scenario belongs to a cycle, a cycle (corresponding to the _creation_date_ and the configuration frequency attribute) is created if it does not exist yet. Parameters: config (ScenarioConfig^): The scenario configuration used to create a new scenario. creation_date (Optional[datetime.datetime]): The creation date of the scenario. If None, the current date time is used. name (Optional[str]): The displayable name of the scenario. Returns: The new scenario. 
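Example: Assuming `monthly_scenario_cfg` is an existing `ScenarioConfig^` (an illustrative placeholder name), the following instruction creates a new scenario from it with a display name: `tp.create_scenario(monthly_scenario_cfg, name=\"My first scenario\")`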
\"\"\" return _ScenarioManagerFactory._build_manager()._create(config, creation_date, name) def create_global_data_node(config: DataNodeConfig) -> DataNode: \"\"\"Create and return a new GLOBAL data node from a data node configuration. Parameters: config (DataNodeConfig^): The data node configuration. It must have a `GLOBAL` scope. Returns: The new global data node. Raises: DataNodeConfigIsNotGlobal^: If the data node configuration does not have GLOBAL scope. \"\"\" # Check if the data node config has GLOBAL scope if config.scope is not Scope.GLOBAL: raise DataNodeConfigIsNotGlobal(config.id) # type: ignore if dns := _DataManagerFactory._build_manager()._get_by_config_id(config.id): # type: ignore return dns[0] return _DataManagerFactory._build_manager()._create_and_set(config, None, None) def clean_all_entities_by_version(version_number=None) -> bool: \"\"\"Delete all entities of a specific version. This function deletes all entities associated with the specified version. Parameters: version_number (optional[str]): The version number of the entities to be deleted. If None, the default behavior may apply. Returns: True if the operation succeeded, False otherwise. Notes: - If the specified version does not exist, the operation will be aborted, and False will be returned. - This function cleans all entities, including jobs, scenarios, sequences, tasks, and data nodes. - The production version of the specified version is also deleted if it exists. \"\"\" version_manager = _VersionManagerFactory._build_manager() try: version_number = version_manager._replace_version_number(version_number) except NonExistingVersion as e: __logger.warning(f\"{e.message} Abort cleaning the entities of version '{version_number}'.\") return False _JobManagerFactory._build_manager()._delete_by_version(version_number) _ScenarioManagerFactory._build_manager()._delete_by_version(version_number) _SequenceManagerFactory._build_manager()._delete_by_version(version_number) _TaskManagerFactory._build_manager()._delete_by_version(version_number) _DataManagerFactory._build_manager()._delete_by_version(version_number) version_manager._delete(version_number) try: version_manager._delete_production_version(version_number) except VersionIsNotProductionVersion: pass return True def export_scenario( scenario_id: ScenarioId, folder_path: Union[str, pathlib.Path], ): \"\"\"Export all related entities of a scenario to a folder. This function exports all related entities of the specified scenario to the specified folder. Parameters: scenario_id (ScenarioId): The ID of the scenario to export. folder_path (Union[str, pathlib.Path]): The folder path to export the scenario to. 
\"\"\" manager = _ScenarioManagerFactory._build_manager() scenario = manager._get(scenario_id) entity_ids = manager._get_children_entity_ids(scenario) # type: ignore entity_ids.scenario_ids = {scenario_id} entity_ids.cycle_ids = {scenario.cycle.id} shutil.rmtree(folder_path, ignore_errors=True) for data_node_id in entity_ids.data_node_ids: _DataManagerFactory._build_manager()._export(data_node_id, folder_path) for task_id in entity_ids.task_ids: _TaskManagerFactory._build_manager()._export(task_id, folder_path) for sequence_id in entity_ids.sequence_ids: _SequenceManagerFactory._build_manager()._export(sequence_id, folder_path) for cycle_id in entity_ids.cycle_ids: _CycleManagerFactory._build_manager()._export(cycle_id, folder_path) for scenario_id in entity_ids.scenario_ids: _ScenarioManagerFactory._build_manager()._export(scenario_id, folder_path) for job_id in entity_ids.job_ids: _JobManagerFactory._build_manager()._export(job_id, folder_path) def get_parents( entity: Union[TaskId, DataNodeId, SequenceId, Task, DataNode, Sequence], parent_dict=None ) -> Dict[str, Set[_Entity]]: \"\"\"Get the parents of an entity from itself or its identifier. Parameters: entity (Union[TaskId, DataNodeId, SequenceId, Task, DataNode, Sequence]): The entity or its identifier to get the parents. Returns: The dictionary of all parent entities. They are grouped by their type (Scenario^, Sequences^, or tasks^) so each key corresponds to a level of the parents and the value is a set of the parent entities. An empty dictionary is returned if the entity does not have parents.
Example: The following instruction returns all the scenarios that include the data node identified by \"my_datanode_id\": `taipy.get_parents(\"my_datanode_id\")[\"scenario\"]` Raises: ModelNotFound^: If _entity_ does not match a correct entity pattern. \"\"\" def update_parent_dict(parents_set, parent_dict): for k, value in parents_set.items(): if k in parent_dict.keys(): parent_dict[k].update(value) else: parent_dict[k] = value if isinstance(entity, str): entity = get(entity) # type: ignore parent_dict = parent_dict or dict() if isinstance(entity, (Scenario, Cycle)): return parent_dict current_parent_dict: Dict[str, Set] = {} for parent in entity.parent_ids: parent_entity = get(parent) if parent_entity._MANAGER_NAME in current_parent_dict.keys(): current_parent_dict[parent_entity._MANAGER_NAME].add(parent_entity) else: current_parent_dict[parent_entity._MANAGER_NAME] = {parent_entity} if isinstance(entity, Sequence): update_parent_dict(current_parent_dict, parent_dict) if isinstance(entity, Task): parent_entity_key_to_search_next = \"scenario\" update_parent_dict(current_parent_dict, parent_dict) for parent in parent_dict.get(parent_entity_key_to_search_next, []): get_parents(parent, parent_dict) if isinstance(entity, DataNode): parent_entity_key_to_search_next = \"task\" update_parent_dict(current_parent_dict, parent_dict) for parent in parent_dict.get(parent_entity_key_to_search_next, []): get_parents(parent, parent_dict) return parent_dict def get_cycles_scenarios() -> Dict[Optional[Cycle], List[Scenario]]: \"\"\"Get the scenarios grouped by cycles. Returns: The dictionary of all cycles and their corresponding scenarios. \"\"\" cycles_scenarios: Dict[Optional[Cycle], List[Scenario]] = {} for scenario in get_scenarios(): if scenario.cycle in cycles_scenarios.keys(): cycles_scenarios[scenario.cycle].append(scenario) else: cycles_scenarios[scenario.cycle] = [scenario] return cycles_scenarios def get_entities_by_config_id( config_id: str, ) -> Union[List, List[Task], List[DataNode], List[Sequence], List[Scenario]]: \"\"\"Get the entities by their config id. Parameters: config_id (str): The config id of the entities. Returns: The list of all entities matching the config id. \"\"\" entities: List = [] if entities := _ScenarioManagerFactory._build_manager()._get_by_config_id(config_id): return entities if entities := _TaskManagerFactory._build_manager()._get_by_config_id(config_id): return entities if entities := _DataManagerFactory._build_manager()._get_by_config_id(config_id): return entities return entities "} {"text": "from multiprocessing import Lock from typing import Optional from taipy.config import Config from taipy.logger._taipy_logger import _TaipyLogger from ._backup._backup import _init_backup_file_with_storage_folder from ._core_cli import _CoreCLI from ._orchestrator._dispatcher._job_dispatcher import _JobDispatcher from ._orchestrator._orchestrator import _Orchestrator from ._orchestrator._orchestrator_factory import _OrchestratorFactory from ._version._version_manager_factory import _VersionManagerFactory from .config import CoreSection from .exceptions.exceptions import CoreServiceIsAlreadyRunning class Core: \"\"\" Core service \"\"\" _is_running = False __lock_is_running = Lock() __logger = _TaipyLogger._get_logger() _orchestrator: Optional[_Orchestrator] = None _dispatcher: Optional[_JobDispatcher] = None def __init__(self): \"\"\" Initialize a Core service. \"\"\" pass def run(self, force_restart=False): \"\"\" Start a Core service.
This function checks the configuration, manages the application's version, starts a dispatcher, and locks the Config. \"\"\" if self.__class__._is_running: raise CoreServiceIsAlreadyRunning with self.__class__.__lock_is_running: self.__class__._is_running = True self.__update_core_section() self.__manage_version() self.__check_and_block_config() if self._orchestrator is None: self._orchestrator = _OrchestratorFactory._build_orchestrator() self.__start_dispatcher(force_restart) def stop(self): \"\"\" Stop the Core service. This function stops the dispatcher and unblocks the Config for updates. \"\"\" Config.unblock_update() if self._dispatcher: self._dispatcher = _OrchestratorFactory._remove_dispatcher() self.__logger.info(\"Core service has been stopped.\") with self.__class__.__lock_is_running: self.__class__._is_running = False @staticmethod def __update_core_section(): _CoreCLI.create_parser() Config._applied_config._unique_sections[CoreSection.name]._update(_CoreCLI.parse_arguments()) @staticmethod def __manage_version(): _VersionManagerFactory._build_manager()._manage_version() Config._applied_config._unique_sections[CoreSection.name]._update( {\"version_number\": _VersionManagerFactory._build_manager()._get_latest_version()} ) @staticmethod def __check_and_block_config(): Config.check() Config.block_update() _init_backup_file_with_storage_folder() def __start_dispatcher(self, force_restart): if dispatcher := _OrchestratorFactory._build_dispatcher(force_restart=force_restart): self._dispatcher = dispatcher if Config.job_config.is_development: _Orchestrator._check_and_execute_jobs_if_development_mode() "} {"text": "from typing import Dict from taipy._cli._base_cli import _CLI from .config import CoreSection class _CoreCLI: \"\"\"Command-line interface for Taipy Core application.\"\"\" __MODE_ARGS: Dict[str, Dict] = { \"--development\": { \"action\": \"store_true\", \"dest\": \"taipy_development\", \"help\": \"\"\" When running a Taipy application in `development` mode, all entities from the previous development version are deleted before running the new Taipy application. \"\"\", }, \"--experiment\": { \"dest\": \"taipy_experiment\", \"nargs\": \"?\", \"const\": \"\", \"metavar\": \"VERSION\", \"help\": \"\"\" When running a Taipy application in `experiment` mode, the current Taipy application is saved to a new version. If the version name already exists, the current Python Config is checked for compatibility before running the application. If not specified, the version number is a random string. \"\"\", }, \"--production\": { \"dest\": \"taipy_production\", \"nargs\": \"?\", \"const\": \"\", \"metavar\": \"VERSION\", \"help\": \"\"\" When running in `production` mode, the current version is used in production. All production versions should have the same configuration and share all entities. If not specified, the latest version is used. \"\"\", }, } __FORCE_ARGS: Dict[str, Dict] = { \"--force\": { \"dest\": \"taipy_force\", \"action\": \"store_true\", \"help\": \"\"\" Force overriding the configuration of the version if it already exists, and run the application. Defaults to False.
\"\"\", }, \"--no-force\": { \"dest\": \"no_taipy_force\", \"action\": \"store_true\", \"help\": \"Stop the application if any Config conflict exists.\", }, } @classmethod def create_parser(cls): core_parser = _CLI._add_groupparser(\"Taipy Core\", \"Optional arguments for Taipy Core service\") mode_group = core_parser.add_mutually_exclusive_group() for mode_arg, mode_arg_dict in cls.__MODE_ARGS.items(): mode_group.add_argument(mode_arg, cls.__add_taipy_prefix(mode_arg), **mode_arg_dict) force_group = core_parser.add_mutually_exclusive_group() for force_arg, force_arg_dict in cls.__FORCE_ARGS.items(): force_group.add_argument(cls.__add_taipy_prefix(force_arg), **force_arg_dict) @classmethod def create_run_parser(cls): run_parser = _CLI._add_subparser(\"run\", help=\"Run a Taipy application.\") mode_group = run_parser.add_mutually_exclusive_group() for mode_arg, mode_arg_dict in cls.__MODE_ARGS.items(): mode_group.add_argument(mode_arg, **mode_arg_dict) force_group = run_parser.add_mutually_exclusive_group() for force_arg, force_arg_dict in cls.__FORCE_ARGS.items(): force_group.add_argument(force_arg, **force_arg_dict) @classmethod def parse_arguments(cls): args = _CLI._parse() as_dict = {} if args.taipy_development: as_dict[CoreSection._MODE_KEY] = CoreSection._DEVELOPMENT_MODE elif args.taipy_experiment is not None: as_dict[CoreSection._MODE_KEY] = CoreSection._EXPERIMENT_MODE as_dict[CoreSection._VERSION_NUMBER_KEY] = args.taipy_experiment elif args.taipy_production is not None: as_dict[CoreSection._MODE_KEY] = CoreSection._PRODUCTION_MODE as_dict[CoreSection._VERSION_NUMBER_KEY] = args.taipy_production if args.taipy_force: as_dict[CoreSection._FORCE_KEY] = True elif args.no_taipy_force: as_dict[CoreSection._FORCE_KEY] = False return as_dict @classmethod def __add_taipy_prefix(cls, key: str): if key.startswith(\"--no-\"): return key[:5] + \"taipy-\" + key[5:] return key[:2] + \"taipy-\" + key[2:] "} {"text": "import copy import json import pathlib import shutil from typing import Any, Dict, Iterable, Iterator, List, Optional, Type, Union from taipy.config.config import Config from ..common._utils import _retry_read_entity from ..common.typing import Converter, Entity, Json, ModelType from ..exceptions import FileCannotBeRead, InvalidExportPath, ModelNotFound from ._abstract_repository import _AbstractRepository from ._decoder import _Decoder from ._encoder import _Encoder class _FileSystemRepository(_AbstractRepository[ModelType, Entity]): \"\"\" Holds common methods to be used and extended when the need for saving dataclasses as JSON files in local storage emerges. Some lines have type: ignore because MyPy won't recognize some generic attributes. This should be revised in the future. Attributes: model_type (ModelType): Generic dataclass. converter: A class that handles conversion to and from a database backend dir_name (str): Folder that will hold the files for this dataclass model. 
\"\"\" __EXCEPTIONS_TO_RETRY = (FileCannotBeRead,) def __init__(self, model_type: Type[ModelType], converter: Type[Converter], dir_name: str): self.model_type = model_type self.converter = converter self._dir_name = dir_name @property def dir_path(self): return self._storage_folder / self._dir_name @property def _storage_folder(self) -> pathlib.Path: return pathlib.Path(Config.core.storage_folder) ############################### # ## Inherited methods ## # ############################### def _save(self, entity: Entity): self.__create_directory_if_not_exists() model = self.converter._entity_to_model(entity) # type: ignore self.__get_path(model.id).write_text( json.dumps(model.to_dict(), ensure_ascii=False, indent=0, cls=_Encoder, check_circular=False), encoding=\"UTF-8\", ) def _exists(self, entity_id: str) -> bool: return self.__get_path(entity_id).exists() def _load(self, entity_id: str) -> Entity: path = pathlib.Path(self.__get_path(entity_id)) try: file_content = self.__read_file(path) except (FileNotFoundError, FileCannotBeRead): raise ModelNotFound(str(self.dir_path), entity_id) return self.__file_content_to_entity(file_content) def _load_all(self, filters: Optional[List[Dict]] = None) -> List[Entity]: entities = [] try: for f in self.dir_path.iterdir(): if data := self.__filter_by(f, filters): entities.append(self.__file_content_to_entity(data)) except FileNotFoundError: pass return entities def _delete(self, entity_id: str): try: self.__get_path(entity_id).unlink() except FileNotFoundError: raise ModelNotFound(str(self.dir_path), entity_id) def _delete_all(self): shutil.rmtree(self.dir_path, ignore_errors=True) def _delete_many(self, ids: Iterable[str]): for model_id in ids: self._delete(model_id) def _delete_by(self, attribute: str, value: str): filters: List[Dict] = [{}] for fil in filters: fil.update({attribute: value}) try: for f in self.dir_path.iterdir(): if self.__filter_by(f, filters): f.unlink() except FileNotFoundError: pass def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None) -> List[Entity]: return list(self.__search(attribute, value, filters)) def _export(self, entity_id: str, folder_path: Union[str, pathlib.Path]): if isinstance(folder_path, str): folder: pathlib.Path = pathlib.Path(folder_path) else: folder = folder_path if folder.resolve() == self._storage_folder.resolve(): raise InvalidExportPath(\"The export folder must not be the storage folder.\") export_dir = folder / self._dir_name if not export_dir.exists(): export_dir.mkdir(parents=True) export_path = export_dir / f\"{entity_id}.json\" # Delete if exists. if export_path.exists(): export_path.unlink() shutil.copy2(self.__get_path(entity_id), export_path) ########################################### # ## Specific or optimized methods ## # ########################################### def _get_by_configs_and_owner_ids(self, configs_and_owner_ids, filters: Optional[List[Dict]] = None): # Design in order to optimize performance on Entity creation. # Maintainability and readability were impacted. if not filters: filters = [{}] res = {} configs_and_owner_ids = set(configs_and_owner_ids) try: for f in self.dir_path.iterdir(): config_id, owner_id, entity = self.__match_file_and_get_entity( f, configs_and_owner_ids, copy.deepcopy(filters) ) if entity: key = config_id, owner_id res[key] = entity configs_and_owner_ids.remove(key) if len(configs_and_owner_ids) == 0: return res except FileNotFoundError: # Folder with data was not created yet. 
return {} return res def _get_by_config_and_owner_id( self, config_id: str, owner_id: Optional[str], filters: Optional[List[Dict]] = None ) -> Optional[Entity]: if not filters: filters = [{}] else: filters = copy.deepcopy(filters) if owner_id is not None: for fil in filters: fil.update({\"owner_id\": owner_id}) return self.__filter_files_by_config_and_owner_id(config_id, owner_id, filters) ############################# # ## Private methods ## # ############################# def __filter_files_by_config_and_owner_id( self, config_id: str, owner_id: Optional[str], filters: Optional[List[Dict]] = None ): try: files = filter(lambda f: config_id in f.name, self.dir_path.iterdir()) entities = map( lambda f: self.__file_content_to_entity(self.__filter_by(f, filters)), files, ) corresponding_entities = filter( lambda e: e is not None and e.config_id == config_id and e.owner_id == owner_id, # type: ignore entities, ) return next(corresponding_entities, None) # type: ignore except FileNotFoundError: pass return None def __match_file_and_get_entity(self, filepath, config_and_owner_ids, filters): if match := [(c, p) for c, p in config_and_owner_ids if c.id in filepath.name]: for config, owner_id in match: for fil in filters: fil.update({\"config_id\": config.id, \"owner_id\": owner_id}) if data := self.__filter_by(filepath, filters): return config, owner_id, self.__file_content_to_entity(data) return None, None, None def __create_directory_if_not_exists(self): self.dir_path.mkdir(parents=True, exist_ok=True) def __search(self, attribute: str, value: str, filters: Optional[List[Dict]] = None) -> Iterator[Entity]: return filter(lambda e: getattr(e, attribute, None) == value, self._load_all(filters)) def __get_path(self, model_id) -> pathlib.Path: return self.dir_path / f\"{model_id}.json\" def __file_content_to_entity(self, file_content): if not file_content: return None if isinstance(file_content, str): file_content = json.loads(file_content, cls=_Decoder) model = self.model_type.from_dict(file_content) entity = self.converter._model_to_entity(model) return entity def __filter_by(self, filepath: pathlib.Path, filters: Optional[List[Dict]]) -> Optional[Json]: if not filters: filters = [{}] try: file_content = self.__read_file(filepath) except (FileNotFoundError, FileCannotBeRead): return None for _filter in filters: conditions = [ f'\"{key}\": \"{value}\"' if value is not None else f'\"{key}\": null' for key, value in _filter.items() ] if all(condition in file_content for condition in conditions): return json.loads(file_content, cls=_Decoder) return None @_retry_read_entity(__EXCEPTIONS_TO_RETRY) def __read_file(self, filepath: pathlib.Path) -> str: if not filepath.is_file(): raise FileNotFoundError try: with filepath.open(\"r\", encoding=\"UTF-8\") as f: file_content = f.read() return file_content except Exception: raise FileCannotBeRead(str(filepath)) "} {"text": "import json import re from datetime import datetime, timedelta class _Decoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def _str_to_timedelta(self, timedelta_str: str) -> timedelta: \"\"\" Parse a time string e.g. (2h13m) into a timedelta object. :param timedelta_str: A string identifying a duration. (eg. 2h13m) :return datetime.timedelta: A datetime.timedelta object \"\"\" regex = re.compile( r\"^((?P<days>[\\.\\d]+?)d)? *\" r\"((?P<hours>[\\.\\d]+?)h)? *\" r\"((?P<minutes>[\\.\\d]+?)m)? 
*\" r\"((?P[\\.\\d]+?)s)?$\" ) parts = regex.match(timedelta_str) if not parts: raise TypeError(\"Can not deserialize string into timedelta\") time_params = {name: float(param) for name, param in parts.groupdict().items() if param} # mypy has an issue with dynamic keyword parameters, hence the type ignore on the line bellow. return timedelta(**time_params) # type: ignore def object_hook(self, source): if source.get(\"__type__\") == \"Datetime\": return datetime.fromisoformat(source.get(\"__value__\")) if source.get(\"__type__\") == \"Timedelta\": return self._str_to_timedelta(source.get(\"__value__\")) else: return source def loads(d): return json.loads(d, cls=_Decoder) "} {"text": "import pathlib from abc import abstractmethod from typing import Any, Dict, Generic, Iterable, List, Optional, TypeVar, Union ModelType = TypeVar(\"ModelType\") Entity = TypeVar(\"Entity\") class _AbstractRepository(Generic[ModelType, Entity]): @abstractmethod def _save(self, entity: Entity): \"\"\" Save an entity in the repository. Parameters: entity: The data from an object. \"\"\" raise NotImplementedError @abstractmethod def _exists(self, entity_id: str) -> bool: \"\"\" Check if an entity with id entity_id exists in the repository. Parameters: entity_id: The entity id, i.e., its primary key. Returns: True if the entity id exists. \"\"\" raise NotImplementedError @abstractmethod def _load(self, entity_id: str) -> Entity: \"\"\" Retrieve the entity data from the repository. Parameters: entity_id: The entity id, i.e., its primary key. Returns: An entity. \"\"\" raise NotImplementedError @abstractmethod def _load_all(self, filters: Optional[List[Dict]] = None) -> List[Entity]: \"\"\" Retrieve all the entities' data from the repository taking any passed filter into account. Returns: A list of entities. \"\"\" raise NotImplementedError @abstractmethod def _delete(self, entity_id: str): \"\"\" Delete an entity in the repository. Parameters: entity_id: The id of the entity to be deleted. \"\"\" raise NotImplementedError @abstractmethod def _delete_all(self): \"\"\" Delete all entities from the repository. \"\"\" raise NotImplementedError @abstractmethod def _delete_many(self, ids: Iterable[str]): \"\"\" Delete all entities from the list of ids from the repository. Parameters: ids: List of ids to be deleted. \"\"\" raise NotImplementedError @abstractmethod def _delete_by(self, attribute: str, value: str): \"\"\" Delete all entities from the list of ids from the repository. Parameters: attribute: The entity property that is the key to the search. value: The value of the attribute that are being searched. \"\"\" raise NotImplementedError @abstractmethod def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None) -> List[Entity]: \"\"\" Parameters: attribute: The entity property that is the key to the search. value: The value of the attribute that are being searched. Returns: A list of entities that match the search criteria. \"\"\" raise NotImplementedError @abstractmethod def _export(self, entity_id: str, folder_path: Union[str, pathlib.Path]): \"\"\" Export an entity from the repository. Parameters: entity_id (str): The id of the entity to be exported. folder_path (Union[str, pathlib.Path]): The folder path to export the entity to. 
\"\"\" raise NotImplementedError "} {"text": "import json import pathlib from typing import Any, Dict, Iterable, List, Optional, Type, Union from sqlalchemy.dialects import sqlite from sqlalchemy.exc import NoResultFound from .._repository._abstract_repository import _AbstractRepository from ..common.typing import Converter, Entity, ModelType from ..exceptions import ModelNotFound from .db._sql_connection import _SQLConnection class _SQLRepository(_AbstractRepository[ModelType, Entity]): def __init__(self, model_type: Type[ModelType], converter: Type[Converter]): \"\"\" Holds common methods to be used and extended when the need for saving dataclasses in a SqlLite database. Some lines have type: ignore because MyPy won't recognize some generic attributes. This should be revised in the future. Attributes: model_type: Generic dataclass. converter: A class that handles conversion to and from a database backend db: An sqlite3 session object \"\"\" self.db = _SQLConnection.init_db() self.model_type = model_type self.converter = converter self.table = self.model_type.__table__ ############################### # ## Inherited methods ## # ############################### def _save(self, entity: Entity): obj = self.converter._entity_to_model(entity) if self._exists(entity.id): # type: ignore self._update_entry(obj) return self.__insert_model(obj) def _exists(self, entity_id: str): query = self.table.select().filter_by(id=entity_id) return bool(self.db.execute(str(query), [entity_id]).fetchone()) def _load(self, entity_id: str) -> Entity: query = self.table.select().filter_by(id=entity_id) if entry := self.db.execute(str(query.compile(dialect=sqlite.dialect())), [entity_id]).fetchone(): entry = self.model_type.from_dict(entry) return self.converter._model_to_entity(entry) raise ModelNotFound(str(self.model_type.__name__), entity_id) def _load_all(self, filters: Optional[List[Dict]] = None) -> List[Entity]: query = self.table.select() entities: List[Entity] = [] for f in filters or [{}]: filtered_query = query.filter_by(**f) try: entries = self.db.execute( str(filtered_query.compile(dialect=sqlite.dialect())), [self.__serialize_filter_values(val) for val in list(f.values())], ).fetchall() entities.extend([self.converter._model_to_entity(self.model_type.from_dict(m)) for m in entries]) except NoResultFound: continue return entities def _delete(self, entity_id: str): delete_query = self.table.delete().filter_by(id=entity_id) cursor = self.db.execute(str(delete_query.compile(dialect=sqlite.dialect())), [entity_id]) if cursor.rowcount == 0: raise ModelNotFound(str(self.model_type.__name__), entity_id) self.db.commit() def _delete_all(self): self.db.execute(str(self.table.delete().compile(dialect=sqlite.dialect()))) self.db.commit() def _delete_many(self, ids: Iterable[str]): for entity_id in ids: self._delete(entity_id) def _delete_by(self, attribute: str, value: str): delete_by_query = self.table.delete().filter_by(**{attribute: value}) self.db.execute(str(delete_by_query.compile(dialect=sqlite.dialect())), [value]) self.db.commit() def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None) -> List[Entity]: query = self.table.select().filter_by(**{attribute: value}) entities: List[Entity] = [] for f in filters or [{}]: entries = self.db.execute( str(query.filter_by(**f).compile(dialect=sqlite.dialect())), [value] + [self.__serialize_filter_values(val) for val in list(f.values())], ).fetchall() entities.extend([self.converter._model_to_entity(self.model_type.from_dict(m)) for m in 
entries]) return entities def _export(self, entity_id: str, folder_path: Union[str, pathlib.Path]): if isinstance(folder_path, str): folder: pathlib.Path = pathlib.Path(folder_path) else: folder = folder_path export_dir = folder / self.table.name if not export_dir.exists(): export_dir.mkdir(parents=True) export_path = export_dir / f\"{entity_id}.json\" query = self.table.select().filter_by(id=entity_id) if entry := self.db.execute(str(query.compile(dialect=sqlite.dialect())), [entity_id]).fetchone(): with open(export_path, \"w\", encoding=\"utf-8\") as export_file: export_file.write(json.dumps(entry)) else: raise ModelNotFound(self.model_type, entity_id) # type: ignore ########################################### # ## Specific or optimized methods ## # ########################################### def _get_multi(self, *, skip: int = 0, limit: int = 100) -> List[ModelType]: query = self.table.select().offset(skip).limit(limit) return self.db.execute(str(query.compile(dialect=sqlite.dialect()))).fetchall() def _get_by_config(self, config_id: Any) -> Optional[ModelType]: query = self.table.select().filter_by(config_id=config_id) return self.db.execute(str(query.compile(dialect=sqlite.dialect())), [config_id]).fetchall() def _get_by_config_and_owner_id( self, config_id: str, owner_id: Optional[str], filters: Optional[List[Dict]] = None ) -> Optional[Entity]: if not filters: filters = [{}] if entry := self.__get_entities_by_config_and_owner(config_id, owner_id, filters): return self.converter._model_to_entity(entry) return None def _get_by_configs_and_owner_ids(self, configs_and_owner_ids, filters: Optional[List[Dict]] = None): # Design in order to optimize performance on Entity creation. # Maintainability and readability were impacted. if not filters: filters = [{}] res = {} configs_and_owner_ids = set(configs_and_owner_ids) for config, owner in configs_and_owner_ids: entry = self.__get_entities_by_config_and_owner(config.id, owner, filters) if entry: entity = self.converter._model_to_entity(entry) key = config, owner res[key] = entity return res def __get_entities_by_config_and_owner( self, config_id: str, owner_id: Optional[str] = None, filters: Optional[List[Dict]] = None ) -> Optional[ModelType]: if not filters: filters = [] versions = [item.get(\"version\") for item in filters if item.get(\"version\")] query = self.table.select().filter_by(config_id=config_id) parameters: List = [config_id] if owner_id: parameters.append(owner_id) query = query.filter_by(owner_id=owner_id) query = str(query.compile(dialect=sqlite.dialect())) if versions: table_name = self.table.name query = query + f\" AND {table_name}.version IN ({','.join(['?']*len(versions))})\" parameters.extend(versions) if entry := self.db.execute(query, parameters).fetchone(): return self.model_type.from_dict(entry) return None ############################# # ## Private methods ## # ############################# def __insert_model(self, model: ModelType): query = self.table.insert() self.db.execute(str(query.compile(dialect=sqlite.dialect())), model.to_list()) self.db.commit() def _update_entry(self, model): query = self.table.update().filter_by(id=model.id) self.db.execute(str(query.compile(dialect=sqlite.dialect())), model.to_list() + [model.id]) self.db.commit() @staticmethod def __serialize_filter_values(value): if isinstance(value, (dict, list)): return json.dumps(value).replace('\"', \"'\") return value "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in 
compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import json from datetime import datetime, timedelta from enum import Enum from typing import Any class _Encoder(json.JSONEncoder): def _timedelta_to_str(self, obj: timedelta) -> str: total_seconds = obj.total_seconds() return ( f\"{int(total_seconds // 86400)}d\" f\"{int(total_seconds % 86400 // 3600)}h\" f\"{int(total_seconds % 3600 // 60)}m\" f\"{int(total_seconds % 60)}s\" ) def default(self, o: Any): if isinstance(o, Enum): result = o.value elif isinstance(o, datetime): result = {\"__type__\": \"Datetime\", \"__value__\": o.isoformat()} elif isinstance(o, timedelta): result = {\"__type__\": \"Timedelta\", \"__value__\": self._timedelta_to_str(o)} else: result = json.JSONEncoder.default(self, o) return result def dumps(d): return json.dumps(d, cls=_Encoder) "} {"text": "from abc import ABC, abstractmethod class _AbstractConverter(ABC): @classmethod @abstractmethod def _entity_to_model(cls, entity): raise NotImplementedError @classmethod @abstractmethod def _model_to_entity(cls, model): raise NotImplementedError "} {"text": "import dataclasses import enum import json from typing import Any, Dict from sqlalchemy import Table from ._decoder import _Decoder from ._encoder import _Encoder class _BaseModel: __table__: Table def __iter__(self): for attr, value in self.__dict__.items(): yield attr, value def to_dict(self) -> Dict[str, Any]: model_dict = {**dataclasses.asdict(self)} for k, v in model_dict.items(): if isinstance(v, enum.Enum): model_dict[k] = repr(v) return model_dict @staticmethod def _serialize_attribute(value): return json.dumps(value, ensure_ascii=False, cls=_Encoder) @staticmethod def _deserialize_attribute(value): if isinstance(value, str): return json.loads(value.replace(\"'\", '\"'), cls=_Decoder) return value @staticmethod def from_dict(data: Dict[str, Any]): pass def to_list(self): pass "} {"text": "import sqlite3 from functools import lru_cache from sqlite3 import Connection from sqlalchemy.dialects import sqlite from sqlalchemy.schema import CreateTable from taipy.config.config import Config from ...exceptions import MissingRequiredProperty def dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d class _SQLConnection: _connection = None @classmethod def init_db(cls): if cls._connection: return cls._connection cls._connection = _build_connection() cls._connection.row_factory = dict_factory from ..._version._version_model import _VersionModel from ...cycle._cycle_model import _CycleModel from ...data._data_model import _DataNodeModel from ...job._job_model import _JobModel from ...scenario._scenario_model import _ScenarioModel from ...submission._submission_model import _SubmissionModel from ...task._task_model import _TaskModel cls._connection.execute( str(CreateTable(_CycleModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_DataNodeModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_JobModel.__table__, 
if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_ScenarioModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_TaskModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_VersionModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) cls._connection.execute( str(CreateTable(_SubmissionModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())) ) return cls._connection def _build_connection() -> Connection: # Set SQLite threading mode to Serialized, means that threads may share the module, connections and cursors sqlite3.threadsafety = 3 properties = Config.core.repository_properties try: db_location = properties[\"db_location\"] except KeyError: raise MissingRequiredProperty(\"Missing property db_location.\") return __build_connection(db_location) @lru_cache def __build_connection(db_location: str): return sqlite3.connect(db_location, check_same_thread=False) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from sqlalchemy.orm import declarative_base, registry _SQLBaseModel = declarative_base() mapper_registry = registry() "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": " import os from taipy.config import Config __BACKUP_FILE_PATH_ENVIRONMENT_VARIABLE_NAME = \"TAIPY_BACKUP_FILE_PATH\" def _init_backup_file_with_storage_folder(): if preserve_file_path := os.getenv(__BACKUP_FILE_PATH_ENVIRONMENT_VARIABLE_NAME): with open(preserve_file_path, \"a\") as f: f.write(f\"{Config.core.storage_folder}\\n\") def _append_to_backup_file(new_file_path: str): if preserve_file_path := os.getenv(__BACKUP_FILE_PATH_ENVIRONMENT_VARIABLE_NAME): storage_folder = os.path.abspath(Config.core.storage_folder) + os.sep if not os.path.abspath(new_file_path).startswith(storage_folder): with open(preserve_file_path, \"a\") as f: f.write(f\"{new_file_path}\\n\") def _remove_from_backup_file(to_remove_file_path: str): if preserve_file_path := os.getenv(__BACKUP_FILE_PATH_ENVIRONMENT_VARIABLE_NAME, None): storage_folder = os.path.abspath(Config.core.storage_folder) + os.sep if not os.path.abspath(to_remove_file_path).startswith(storage_folder): try: with open(preserve_file_path, \"r+\") as f: old_backup = f.read() to_remove_file_path = to_remove_file_path + \"\\n\" # To avoid removing the file path of different data nodes that are pointing # to the same file. We will only replace the file path only once. 
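# Note (illustrative sketch; the path value is hypothetical): when the environment variable
# TAIPY_BACKUP_FILE_PATH is set, e.g. to /tmp/taipy_backup.txt, _append_to_backup_file adds every
# data node file created outside Config.core.storage_folder to that file (one path per line),
# this function removes only the first line matching the deleted file, and _replace_in_backup_file
# combines both to repoint a data node to a new file.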
if old_backup.startswith(to_remove_file_path): new_backup = old_backup.replace(to_remove_file_path, \"\", 1) else: new_backup = old_backup.replace(\"\\n\" + to_remove_file_path, \"\\n\", 1) if new_backup is not old_backup: f.seek(0) f.write(new_backup) f.truncate() except Exception: pass def _replace_in_backup_file(old_file_path: str, new_file_path: str): _remove_from_backup_file(old_file_path) _append_to_backup_file(new_file_path) "} {"text": "import re from copy import copy from typing import Any, Dict, Optional, Union from taipy.config import Config, UniqueSection from taipy.config._config import _Config from taipy.config.common._config_blocker import _ConfigBlocker from taipy.config.common._template_handler import _TemplateHandler as _tpl from .._init_version import _read_version from ..exceptions.exceptions import ConfigCoreVersionMismatched class CoreSection(UniqueSection): \"\"\" Configuration parameters for running the `Core^` service. Attributes: root_folder (str): Path of the base folder for the taipy application. The default value is \"./taipy/\". storage_folder (str): Folder name used to store Taipy data. The default value is \".data/\". It is used in conjunction with the *root_folder* attribute. That means the storage path is <root_folder><storage_folder> (The default path is \"./taipy/.data/\"). repository_type (str): Type of the repository to be used to store Taipy data. The default value is \"filesystem\". repository_properties (Dict[str, Union[str, int]]): A dictionary of additional properties to be used by the repository. read_entity_retry (int): Number of retries to read an entity from the repository before returning a failure. The default value is 3. mode (str): The Taipy operating mode. By default, the `Core^` service runs in \"development\" mode. An \"experiment\" and a \"production\" mode are also available. Please refer to the [Versioning management](../core/versioning/index.md) documentation page for more details. version_number (str): The identifier of the user application version. Please refer to the [Versioning management](../core/versioning/index.md) documentation page for more details. force (bool): If True, force the application to run even if there are some conflicts in the configuration. core_version (str): The Taipy Core package version. **properties (dict[str, any]): A dictionary of additional properties. 
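Example (a minimal sketch, assuming the standard TOML round-trip provided by `Config.load()` and `Config.backup()`): this section maps to a [CORE] block whose keys are the attribute names listed above, for instance
    [CORE]
    mode = \"experiment\"
    version_number = \"1.0\"
    repository_type = \"filesystem\"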
\"\"\" name = \"CORE\" _ROOT_FOLDER_KEY = \"root_folder\" _DEFAULT_ROOT_FOLDER = \"./taipy/\" _STORAGE_FOLDER_KEY = \"storage_folder\" _DEFAULT_STORAGE_FOLDER = \".data/\" _REPOSITORY_TYPE_KEY = \"repository_type\" _DEFAULT_REPOSITORY_TYPE = \"filesystem\" _REPOSITORY_PROPERTIES_KEY = \"repository_properties\" _DEFAULT_REPOSITORY_PROPERTIES: Dict = dict() _READ_ENTITY_RETRY_KEY = \"read_entity_retry\" _DEFAULT_READ_ENTITY_RETRY = 1 _MODE_KEY = \"mode\" _DEVELOPMENT_MODE = \"development\" _EXPERIMENT_MODE = \"experiment\" _PRODUCTION_MODE = \"production\" _DEFAULT_MODE = _DEVELOPMENT_MODE _VERSION_NUMBER_KEY = \"version_number\" _DEFAULT_VERSION_NUMBER = \"\" _FORCE_KEY = \"force\" _DEFAULT_FORCE = False _CORE_VERSION_KEY = \"core_version\" _CURRENT_CORE_VERSION = _read_version() def __init__( self, root_folder: Optional[str] = None, storage_folder: Optional[str] = None, repository_type: Optional[str] = None, repository_properties: Optional[Dict[str, Union[str, int]]] = None, read_entity_retry: Optional[int] = None, mode: Optional[str] = None, version_number: Optional[str] = None, force: Optional[bool] = None, core_version: Optional[str] = None, **properties, ): self._root_folder = root_folder self._storage_folder = storage_folder self._repository_type = repository_type self._repository_properties = repository_properties or {} self._read_entity_retry = ( read_entity_retry if read_entity_retry is not None else self._DEFAULT_READ_ENTITY_RETRY ) self.mode = mode or self._DEFAULT_MODE self.version_number = version_number or self._DEFAULT_VERSION_NUMBER self.force = force or self._DEFAULT_FORCE self._check_compatibility(core_version) self._core_version = core_version super().__init__(**properties) def __copy__(self): return CoreSection( self.root_folder, self.storage_folder, self.repository_type, self.repository_properties, self.read_entity_retry, self.mode, self.version_number, self.force, self._core_version, **copy(self._properties), ) @property def storage_folder(self): return _tpl._replace_templates(self._storage_folder) @storage_folder.setter # type: ignore @_ConfigBlocker._check() def storage_folder(self, val): self._storage_folder = val @property def root_folder(self): return _tpl._replace_templates(self._root_folder) @root_folder.setter # type: ignore @_ConfigBlocker._check() def root_folder(self, val): self._root_folder = val @property def repository_type(self): return _tpl._replace_templates(self._repository_type) @repository_type.setter # type: ignore @_ConfigBlocker._check() def repository_type(self, val): self._repository_type = val @property def repository_properties(self): return ( {k: _tpl._replace_templates(v) for k, v in self._repository_properties.items()} if self._repository_properties else self._DEFAULT_REPOSITORY_PROPERTIES.copy() ) @repository_properties.setter # type: ignore @_ConfigBlocker._check() def repository_properties(self, val): self._repository_properties = val @property def read_entity_retry(self): return _tpl._replace_templates(self._read_entity_retry) @read_entity_retry.setter # type: ignore @_ConfigBlocker._check() def read_entity_retry(self, val): self._read_entity_retry = val @classmethod def default_config(cls): return CoreSection( cls._DEFAULT_ROOT_FOLDER, cls._DEFAULT_STORAGE_FOLDER, cls._DEFAULT_REPOSITORY_TYPE, cls._DEFAULT_REPOSITORY_PROPERTIES, cls._DEFAULT_READ_ENTITY_RETRY, cls._DEFAULT_MODE, cls._DEFAULT_VERSION_NUMBER, cls._DEFAULT_FORCE, cls._CURRENT_CORE_VERSION, ) def _clean(self): self._root_folder = self._DEFAULT_ROOT_FOLDER 
self._storage_folder = self._DEFAULT_STORAGE_FOLDER self._repository_type = self._DEFAULT_REPOSITORY_TYPE self._repository_properties = self._DEFAULT_REPOSITORY_PROPERTIES.copy() self._read_entity_retry = self._DEFAULT_READ_ENTITY_RETRY self.mode = self._DEFAULT_MODE self.version_number = self._DEFAULT_VERSION_NUMBER self.force = self._DEFAULT_FORCE self._core_version = self._CURRENT_CORE_VERSION self._properties.clear() def _to_dict(self): as_dict = {} if self._root_folder: as_dict[self._ROOT_FOLDER_KEY] = self._root_folder if self._storage_folder: as_dict[self._STORAGE_FOLDER_KEY] = self._storage_folder if self._repository_type: as_dict[self._REPOSITORY_TYPE_KEY] = self._repository_type if self._repository_properties: as_dict[self._REPOSITORY_PROPERTIES_KEY] = self._repository_properties if self._read_entity_retry is not None: as_dict[self._READ_ENTITY_RETRY_KEY] = self._read_entity_retry if self.mode is not None: as_dict[self._MODE_KEY] = self.mode if self.version_number is not None: as_dict[self._VERSION_NUMBER_KEY] = self.version_number if self.force is not None: as_dict[self._FORCE_KEY] = self.force if self._core_version is not None: as_dict[self._CORE_VERSION_KEY] = self._core_version as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id=None, config: Optional[_Config] = None): root_folder = as_dict.pop(cls._ROOT_FOLDER_KEY, None) storage_folder = as_dict.pop(cls._STORAGE_FOLDER_KEY, None) repository_type = as_dict.pop(cls._REPOSITORY_TYPE_KEY, None) repository_properties = as_dict.pop(cls._REPOSITORY_PROPERTIES_KEY, None) read_entity_retry = as_dict.pop(cls._READ_ENTITY_RETRY_KEY, None) mode = as_dict.pop(cls._MODE_KEY, None) version_nb = as_dict.pop(cls._VERSION_NUMBER_KEY, None) force = as_dict.pop(cls._FORCE_KEY, None) core_version = as_dict.pop(cls._CORE_VERSION_KEY, None) return CoreSection( root_folder, storage_folder, repository_type, repository_properties, read_entity_retry, mode, version_nb, force, core_version, **as_dict, ) def _update(self, as_dict: Dict[str, Any]): root_folder = _tpl._replace_templates(as_dict.pop(self._ROOT_FOLDER_KEY, self._root_folder)) if self._root_folder != root_folder: self._root_folder = root_folder storage_folder = _tpl._replace_templates(as_dict.pop(self._STORAGE_FOLDER_KEY, self._storage_folder)) if self._storage_folder != storage_folder: self._storage_folder = storage_folder repository_type = _tpl._replace_templates(as_dict.pop(self._REPOSITORY_TYPE_KEY, self._repository_type)) if self._repository_type != repository_type: self._repository_type = repository_type repository_properties = _tpl._replace_templates( as_dict.pop(self._REPOSITORY_PROPERTIES_KEY, self._repository_properties) ) self._repository_properties.update(repository_properties) read_entity_retry = _tpl._replace_templates(as_dict.pop(self._READ_ENTITY_RETRY_KEY, self._read_entity_retry)) if self._read_entity_retry != read_entity_retry: self._read_entity_retry = read_entity_retry mode = _tpl._replace_templates(as_dict.pop(self._MODE_KEY, self.mode)) if self.mode != mode: self.mode = mode version_number = _tpl._replace_templates(as_dict.pop(self._VERSION_NUMBER_KEY, self.version_number)) if self.version_number != version_number: self.version_number = version_number force = _tpl._replace_templates(as_dict.pop(self._FORCE_KEY, self.force)) if self.force != force: self.force = force core_version = as_dict.pop(self._CORE_VERSION_KEY, None) self._check_compatibility(core_version) self._properties.update(as_dict) @classmethod def 
_check_compatibility(cls, core_version): if not core_version: return version_pattern = r\"^(\\d+)\\.(\\d+)\\.(\\d+)$\" dev_version_pattern = r\"^(\\d+)\\.(\\d+)\\.(\\d+).(\\w*)$\" installed_match = re.match(version_pattern, cls._CURRENT_CORE_VERSION) or re.match( dev_version_pattern, cls._CURRENT_CORE_VERSION ) required_match = re.match(version_pattern, core_version) or re.match(dev_version_pattern, core_version) if required_match and installed_match: installed_group = installed_match.groups() required_group = required_match.groups() installed_major, installed_minor = installed_group[0], installed_group[1] required_major, required_minor = required_group[0], required_group[1] if required_major != installed_major or required_minor != installed_minor: raise ConfigCoreVersionMismatched(core_version, cls._CURRENT_CORE_VERSION) @staticmethod def _configure( root_folder: Optional[str] = None, storage_folder: Optional[str] = None, repository_type: Optional[str] = None, repository_properties: Optional[Dict[str, Union[str, int]]] = None, read_entity_retry: Optional[int] = None, mode: Optional[str] = None, version_number: Optional[str] = None, force: Optional[bool] = None, **properties, ) -> \"CoreSection\": \"\"\"Configure the Core service. Parameters: root_folder (Optional[str]): Path of the base folder for the taipy application. The default value is \"./taipy/\". storage_folder (Optional[str]): Folder name used to store Taipy data. The default value is \".data/\". It is used in conjunction with the `root_folder` field. That means the storage path is <root_folder><storage_folder> (The default path is \"./taipy/.data/\"). repository_type (Optional[str]): The type of the repository to be used to store Taipy data. The default value is \"filesystem\". repository_properties (Optional[Dict[str, Union[str, int]]]): A dictionary of additional properties to be used by the repository. read_entity_retry (Optional[int]): Number of retries to read an entity from the repository before returning a failure. The default value is 3. mode (Optional[str]): Indicates the mode of the version management system. Possible values are *\"development\"*, *\"experiment\"*, or *\"production\"*. version_number (Optional[str]): The string identifier of the version. In development mode, the version number is ignored. force (Optional[bool]): If True, Taipy will override a version even if the configuration has changed, and run the application. **properties (Dict[str, Any]): A keyworded variable length list of additional arguments that configure the behavior of the `Core^` service. Returns: The Core configuration. 
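Example (a minimal sketch, assuming the public alias `Config.configure_core()` delegates to this method):
    from taipy import Config

    Config.configure_core(mode=\"experiment\", version_number=\"1.0\", force=True)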
\"\"\" section = CoreSection( root_folder=root_folder, storage_folder=storage_folder, repository_type=repository_type, repository_properties=repository_properties, read_entity_retry=read_entity_retry, mode=mode, version_number=version_number, force=force, core_version=_read_version(), **properties, ) Config._register(section) return Config.unique_sections[CoreSection.name] "} {"text": "from collections import defaultdict from copy import copy from typing import Any, Callable, Dict, List, Optional, Union from taipy.config._config import _Config from taipy.config.common._template_handler import _TemplateHandler as _tpl from taipy.config.common._validate_id import _validate_id from taipy.config.common.frequency import Frequency from taipy.config.config import Config from taipy.config.section import Section from .data_node_config import DataNodeConfig from .task_config import TaskConfig class ScenarioConfig(Section): \"\"\" Configuration fields needed to instantiate an actual `Scenario^`. Attributes: id (str): Identifier of the scenario config. It must be a valid Python variable name. tasks (Optional[Union[TaskConfig, List[TaskConfig]]]): List of task configs.
The default value is None. additional_data_nodes (Optional[Union[DataNodeConfig, List[DataNodeConfig]]]):
List of additional data node configs. The default value is None. frequency (Optional[Frequency]): The frequency of the scenario's cycle. The default value is None. comparators (Optional[Dict[str, Union[List[Callable], Callable]]]): Dictionary of the data node
config id as key and a list of Callable used to compare the data nodes as value. sequences (Optional[Dict[str, List[TaskConfig]]]): Dictionary of sequence descriptions. The default value is None. **properties (dict[str, any]): A dictionary of additional properties. \"\"\" name = \"SCENARIO\" _SEQUENCES_KEY = \"sequences\" _TASKS_KEY = \"tasks\" _ADDITIONAL_DATA_NODES_KEY = \"additional_data_nodes\" _FREQUENCY_KEY = \"frequency\" _SEQUENCES_KEY = \"sequences\" _COMPARATOR_KEY = \"comparators\" def __init__( self, id: str, tasks: Optional[Union[TaskConfig, List[TaskConfig]]] = None, additional_data_nodes: Optional[Union[DataNodeConfig, List[DataNodeConfig]]] = None, frequency: Optional[Frequency] = None, comparators: Optional[Dict[str, Union[List[Callable], Callable]]] = None, sequences: Optional[Dict[str, List[TaskConfig]]] = None, **properties, ): if tasks: self._tasks = list(tasks) if isinstance(tasks, TaskConfig) else copy(tasks) else: self._tasks = [] if additional_data_nodes: self._additional_data_nodes = ( list(additional_data_nodes) if isinstance(additional_data_nodes, DataNodeConfig) else copy(additional_data_nodes) ) else: self._additional_data_nodes = [] self.sequences = sequences if sequences else {} self.frequency = frequency self.comparators = defaultdict(list) if comparators: for k, v in comparators.items(): if isinstance(v, list): self.comparators[_validate_id(k)].extend(v) else: self.comparators[_validate_id(k)].append(v) super().__init__(id, **properties) def __copy__(self): comp = None if self.comparators is None else self.comparators scenario_config = ScenarioConfig( self.id, copy(self._tasks), copy(self._additional_data_nodes), self.frequency, copy(comp), copy(self.sequences), **copy(self._properties), ) return scenario_config def __getattr__(self, item: str) -> Optional[Any]: return _tpl._replace_templates(self._properties.get(item)) @property def task_configs(self) -> List[TaskConfig]: return self._tasks @property def tasks(self) -> List[TaskConfig]: return self._tasks @property def additional_data_node_configs(self) -> List[DataNodeConfig]: return self._additional_data_nodes @property def additional_data_nodes(self) -> List[DataNodeConfig]: return self._additional_data_nodes @property def data_node_configs(self) -> List[DataNodeConfig]: return self.__get_all_unique_data_nodes() @property def data_nodes(self) -> List[DataNodeConfig]: return self.__get_all_unique_data_nodes() def __get_all_unique_data_nodes(self) -> List[DataNodeConfig]: data_node_configs = set(self._additional_data_nodes) for task in self._tasks: data_node_configs.update(task.inputs) data_node_configs.update(task.outputs) return list(data_node_configs) @classmethod def default_config(cls): return ScenarioConfig(cls._DEFAULT_KEY, list(), list(), None, dict()) def _clean(self): self._tasks = list() self._additional_data_nodes = list() self.frequency = None self.comparators = dict() self.sequences = dict() self._properties = dict() def _to_dict(self) -> Dict[str, Any]: return { self._COMPARATOR_KEY: self.comparators, self._TASKS_KEY: self._tasks, self._ADDITIONAL_DATA_NODES_KEY: self._additional_data_nodes, self._FREQUENCY_KEY: self.frequency, self._SEQUENCES_KEY: self.sequences, **self._properties, } @classmethod def _from_dict( cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None ) -> \"ScenarioConfig\": # type: ignore as_dict.pop(cls._ID_KEY, id) tasks = cls.__get_task_configs(as_dict.pop(cls._TASKS_KEY, list()), config) additional_data_node_ids = 
as_dict.pop(cls._ADDITIONAL_DATA_NODES_KEY, list()) additional_data_nodes = cls.__get_additional_data_node_configs(additional_data_node_ids, config) frequency = as_dict.pop(cls._FREQUENCY_KEY, None) comparators = as_dict.pop(cls._COMPARATOR_KEY, dict()) sequences = as_dict.pop(cls._SEQUENCES_KEY, {}) for sequence_name, sequence_tasks in sequences.items(): sequences[sequence_name] = cls.__get_task_configs(sequence_tasks, config) scenario_config = ScenarioConfig( id=id, tasks=tasks, additional_data_nodes=additional_data_nodes, frequency=frequency, comparators=comparators, sequences=sequences, **as_dict, ) return scenario_config @staticmethod def __get_task_configs(task_config_ids: List[str], config: Optional[_Config]): task_configs = set() if config: if task_config_section := config._sections.get(TaskConfig.name): for task_config_id in task_config_ids: if task_config := task_config_section.get(task_config_id, None): task_configs.add(task_config) return list(task_configs) @staticmethod def __get_additional_data_node_configs(additional_data_node_ids: List[str], config: Optional[_Config]): additional_data_node_configs = set() if config: if data_node_config_section := config._sections.get(DataNodeConfig.name): for additional_data_node_id in additional_data_node_ids: if additional_data_node_config := data_node_config_section.get(additional_data_node_id): additional_data_node_configs.add(additional_data_node_config) return list(additional_data_node_configs) def _update(self, as_dict: Dict[str, Any], default_section=None): self._tasks = as_dict.pop(self._TASKS_KEY, self._tasks) if self._tasks is None and default_section: self._tasks = default_section._tasks self._additional_data_nodes = as_dict.pop(self._ADDITIONAL_DATA_NODES_KEY, self._additional_data_nodes) if self._additional_data_nodes is None and default_section: self._additional_data_nodes = default_section._additional_data_nodes self.frequency = as_dict.pop(self._FREQUENCY_KEY, self.frequency) if self.frequency is None and default_section: self.frequency = default_section.frequency self.comparators = as_dict.pop(self._COMPARATOR_KEY, self.comparators) if self.comparators is None and default_section: self.comparators = default_section.comparators self.sequences = as_dict.pop(self._SEQUENCES_KEY, self.sequences) if self.sequences is None and default_section: self.sequences = default_section.sequences self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} def add_comparator(self, dn_config_id: str, comparator: Callable): self.comparators[dn_config_id].append(comparator) def delete_comparator(self, dn_config_id: str): if dn_config_id in self.comparators: del self.comparators[dn_config_id] @staticmethod def _configure( id: str, task_configs: Optional[List[TaskConfig]] = None, additional_data_node_configs: Optional[List[DataNodeConfig]] = None, frequency: Optional[Frequency] = None, comparators: Optional[Dict[str, Union[List[Callable], Callable]]] = None, sequences: Optional[Dict[str, List[TaskConfig]]] = None, **properties, ) -> \"ScenarioConfig\": \"\"\"Configure a new scenario configuration. Parameters: id (str): The unique identifier of the new scenario configuration. task_configs (Optional[List[TaskConfig^]]): The list of task configurations used by this scenario configuration. The default value is None. additional_data_node_configs (Optional[List[DataNodeConfig^]]): The list of additional data nodes related to this scenario configuration. The default value is None. 
frequency (Optional[Frequency^]): The scenario frequency.
It corresponds to the recurrence of the scenarios instantiated from this configuration. Based on this frequency, each scenario will be attached to the relevant cycle. comparators (Optional[Dict[str, Union[List[Callable], Callable]]]): The list of functions used to compare scenarios. A comparator function is attached to a scenario's data node configuration. The key of the dictionary parameter corresponds to the data node configuration id. During the scenarios' comparison, each comparator is applied to all the data nodes instantiated from the data node configuration attached to the comparator. See `(taipy.)compare_scenarios()^` for more details. sequences (Optional[Dict[str, List[TaskConfig]]]): Dictionary of sequence descriptions. The default value is None. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new scenario configuration. \"\"\" section = ScenarioConfig( id, task_configs, additional_data_node_configs, frequency=frequency, comparators=comparators, sequences=sequences, **properties, ) Config._register(section) return Config.sections[ScenarioConfig.name][id] @staticmethod def _set_default_configuration( task_configs: Optional[List[TaskConfig]] = None, additional_data_node_configs: Optional[List[DataNodeConfig]] = None, frequency: Optional[Frequency] = None, comparators: Optional[Dict[str, Union[List[Callable], Callable]]] = None, sequences: Optional[Dict[str, List[TaskConfig]]] = None, **properties, ) -> \"ScenarioConfig\": \"\"\"Set the default values for scenario configurations. This function creates the *default scenario configuration* object, where all scenario configuration objects will find their default values when needed. Parameters: task_configs (Optional[List[TaskConfig^]]): The list of task configurations used by this scenario configuration. additional_data_node_configs (Optional[List[DataNodeConfig^]]): The list of additional data nodes related to this scenario configuration. frequency (Optional[Frequency^]): The scenario frequency. It corresponds to the recurrence of the scenarios instantiated from this configuration. Based on this frequency, each scenario will be attached to the relevant cycle. comparators (Optional[Dict[str, Union[List[Callable], Callable]]]): The list of functions used to compare scenarios. A comparator function is attached to a scenario's data node configuration. The key of the dictionary parameter corresponds to the data node configuration id. During the scenarios' comparison, each comparator is applied to all the data nodes instantiated from the data node configuration attached to the comparator. See `taipy.compare_scenarios()^` for more details. sequences (Optional[Dict[str, List[TaskConfig]]]): Dictionary of sequences. The default value is None. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new default scenario configuration. 
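Example (a minimal sketch, assuming the public alias `Config.set_default_scenario_configuration()` delegates to this method):
    from taipy import Config
    from taipy.config.common.frequency import Frequency

    Config.set_default_scenario_configuration(frequency=Frequency.DAILY)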
\"\"\" section = ScenarioConfig( _Config.DEFAULT_KEY, task_configs, additional_data_node_configs, frequency=frequency, comparators=comparators, sequences=sequences, **properties, ) Config._register(section) return Config.sections[ScenarioConfig.name][_Config.DEFAULT_KEY] def add_sequences(self, sequences: Dict[str, List[TaskConfig]]): self.sequences.update(sequences) def remove_sequences(self, sequence_names: Union[str, List[str]]): if isinstance(sequence_names, List): for sequence_name in sequence_names: self.sequences.pop(sequence_name) else: self.sequences.pop(sequence_names) "} {"text": "import json from copy import copy from datetime import timedelta from typing import Any, Callable, Dict, List, Optional, Union from taipy.config._config import _Config from taipy.config.common._config_blocker import _ConfigBlocker from taipy.config.common._template_handler import _TemplateHandler as _tpl from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.section import Section from ..common._warnings import _warn_deprecated from ..common.mongo_default_document import MongoDefaultDocument class DataNodeConfig(Section): \"\"\" Configuration fields needed to instantiate a `DataNode^`. A Data Node config is made to be used as a generator for actual data nodes. It holds configuration information needed to create an actual data node. Attributes: id (str): Unique identifier of the data node config. It must be a valid Python variable name. storage_type (str): Storage type of the data nodes created from the data node config. The possible values are : \"csv\", \"excel\", \"pickle\", \"sql_table\", \"sql\", \"mongo_collection\", \"generic\", \"json\", \"parquet\" and \"in_memory\". The default value is \"pickle\". Note that the \"in_memory\" value can only be used when `JobConfig^`.mode is \"standalone\". scope (Optional[Scope^]): The optional `Scope^` of the data nodes instantiated from the data node config. The default value is SCENARIO. **properties (dict[str, any]): A dictionary of additional properties. 
\"\"\" name = \"DATA_NODE\" _STORAGE_TYPE_KEY = \"storage_type\" _STORAGE_TYPE_VALUE_PICKLE = \"pickle\" _STORAGE_TYPE_VALUE_SQL_TABLE = \"sql_table\" _STORAGE_TYPE_VALUE_SQL = \"sql\" _STORAGE_TYPE_VALUE_MONGO_COLLECTION = \"mongo_collection\" _STORAGE_TYPE_VALUE_CSV = \"csv\" _STORAGE_TYPE_VALUE_EXCEL = \"excel\" _STORAGE_TYPE_VALUE_IN_MEMORY = \"in_memory\" _STORAGE_TYPE_VALUE_GENERIC = \"generic\" _STORAGE_TYPE_VALUE_JSON = \"json\" _STORAGE_TYPE_VALUE_PARQUET = \"parquet\" _DEFAULT_STORAGE_TYPE = _STORAGE_TYPE_VALUE_PICKLE _ALL_STORAGE_TYPES = [ _STORAGE_TYPE_VALUE_PICKLE, _STORAGE_TYPE_VALUE_SQL_TABLE, _STORAGE_TYPE_VALUE_SQL, _STORAGE_TYPE_VALUE_MONGO_COLLECTION, _STORAGE_TYPE_VALUE_CSV, _STORAGE_TYPE_VALUE_EXCEL, _STORAGE_TYPE_VALUE_IN_MEMORY, _STORAGE_TYPE_VALUE_GENERIC, _STORAGE_TYPE_VALUE_JSON, _STORAGE_TYPE_VALUE_PARQUET, ] _EXPOSED_TYPE_KEY = \"exposed_type\" _EXPOSED_TYPE_PANDAS = \"pandas\" _EXPOSED_TYPE_MODIN = \"modin\" _EXPOSED_TYPE_NUMPY = \"numpy\" _DEFAULT_EXPOSED_TYPE = _EXPOSED_TYPE_PANDAS _ALL_EXPOSED_TYPES = [ _EXPOSED_TYPE_PANDAS, _EXPOSED_TYPE_MODIN, _EXPOSED_TYPE_NUMPY, ] _OPTIONAL_ENCODING_PROPERTY = \"encoding\" _DEFAULT_ENCODING_VALUE = \"utf-8\" # Generic _OPTIONAL_READ_FUNCTION_GENERIC_PROPERTY = \"read_fct\" _OPTIONAL_READ_FUNCTION_ARGS_GENERIC_PROPERTY = \"read_fct_args\" _OPTIONAL_WRITE_FUNCTION_GENERIC_PROPERTY = \"write_fct\" _OPTIONAL_WRITE_FUNCTION_ARGS_GENERIC_PROPERTY = \"write_fct_args\" # CSV _OPTIONAL_EXPOSED_TYPE_CSV_PROPERTY = \"exposed_type\" _OPTIONAL_DEFAULT_PATH_CSV_PROPERTY = \"default_path\" _OPTIONAL_HAS_HEADER_CSV_PROPERTY = \"has_header\" # Excel _OPTIONAL_EXPOSED_TYPE_EXCEL_PROPERTY = \"exposed_type\" _OPTIONAL_DEFAULT_PATH_EXCEL_PROPERTY = \"default_path\" _OPTIONAL_HAS_HEADER_EXCEL_PROPERTY = \"has_header\" _OPTIONAL_SHEET_NAME_EXCEL_PROPERTY = \"sheet_name\" # In memory _OPTIONAL_DEFAULT_DATA_IN_MEMORY_PROPERTY = \"default_data\" # SQL _REQUIRED_DB_NAME_SQL_PROPERTY = \"db_name\" _REQUIRED_DB_ENGINE_SQL_PROPERTY = \"db_engine\" _DB_ENGINE_SQLITE = \"sqlite\" _OPTIONAL_FOLDER_PATH_SQLITE_PROPERTY = \"sqlite_folder_path\" _OPTIONAL_FILE_EXTENSION_SQLITE_PROPERTY = \"sqlite_file_extension\" _OPTIONAL_DB_PASSWORD_SQL_PROPERTY = \"db_password\" _OPTIONAL_DB_USERNAME_SQL_PROPERTY = \"db_username\" _OPTIONAL_PORT_SQL_PROPERTY = \"db_port\" _OPTIONAL_HOST_SQL_PROPERTY = \"db_host\" _OPTIONAL_DRIVER_SQL_PROPERTY = \"db_driver\" _OPTIONAL_DB_EXTRA_ARGS_SQL_PROPERTY = \"db_extra_args\" _OPTIONAL_EXPOSED_TYPE_SQL_PROPERTY = \"exposed_type\" # SQL_TABLE _REQUIRED_TABLE_NAME_SQL_TABLE_PROPERTY = \"table_name\" # SQL _REQUIRED_READ_QUERY_SQL_PROPERTY = \"read_query\" _REQUIRED_WRITE_QUERY_BUILDER_SQL_PROPERTY = \"write_query_builder\" _OPTIONAL_APPEND_QUERY_BUILDER_SQL_PROPERTY = \"append_query_builder\" # MONGO _REQUIRED_DB_NAME_MONGO_PROPERTY = \"db_name\" _REQUIRED_COLLECTION_NAME_MONGO_PROPERTY = \"collection_name\" _OPTIONAL_CUSTOM_DOCUMENT_MONGO_PROPERTY = \"custom_document\" _OPTIONAL_USERNAME_MONGO_PROPERTY = \"db_username\" _OPTIONAL_PASSWORD_MONGO_PROPERTY = \"db_password\" _OPTIONAL_HOST_MONGO_PROPERTY = \"db_host\" _OPTIONAL_PORT_MONGO_PROPERTY = \"db_port\" _OPTIONAL_DRIVER_MONGO_PROPERTY = \"db_driver\" _OPTIONAL_DB_EXTRA_ARGS_MONGO_PROPERTY = \"db_extra_args\" # Pickle _OPTIONAL_DEFAULT_PATH_PICKLE_PROPERTY = \"default_path\" _OPTIONAL_DEFAULT_DATA_PICKLE_PROPERTY = \"default_data\" # JSON _OPTIONAL_ENCODER_JSON_PROPERTY = \"encoder\" _OPTIONAL_DECODER_JSON_PROPERTY = \"decoder\" _OPTIONAL_DEFAULT_PATH_JSON_PROPERTY = 
\"default_path\" # Parquet _OPTIONAL_EXPOSED_TYPE_PARQUET_PROPERTY = \"exposed_type\" _OPTIONAL_DEFAULT_PATH_PARQUET_PROPERTY = \"default_path\" _OPTIONAL_ENGINE_PARQUET_PROPERTY = \"engine\" _OPTIONAL_COMPRESSION_PARQUET_PROPERTY = \"compression\" _OPTIONAL_READ_KWARGS_PARQUET_PROPERTY = \"read_kwargs\" _OPTIONAL_WRITE_KWARGS_PARQUET_PROPERTY = \"write_kwargs\" _REQUIRED_PROPERTIES: Dict[str, List] = { _STORAGE_TYPE_VALUE_PICKLE: [], _STORAGE_TYPE_VALUE_SQL_TABLE: [ _REQUIRED_DB_NAME_SQL_PROPERTY, _REQUIRED_DB_ENGINE_SQL_PROPERTY, _REQUIRED_TABLE_NAME_SQL_TABLE_PROPERTY, ], _STORAGE_TYPE_VALUE_SQL: [ _REQUIRED_DB_NAME_SQL_PROPERTY, _REQUIRED_DB_ENGINE_SQL_PROPERTY, _REQUIRED_READ_QUERY_SQL_PROPERTY, _REQUIRED_WRITE_QUERY_BUILDER_SQL_PROPERTY, ], _STORAGE_TYPE_VALUE_MONGO_COLLECTION: [ _REQUIRED_DB_NAME_MONGO_PROPERTY, _REQUIRED_COLLECTION_NAME_MONGO_PROPERTY, ], _STORAGE_TYPE_VALUE_CSV: [], _STORAGE_TYPE_VALUE_EXCEL: [], _STORAGE_TYPE_VALUE_IN_MEMORY: [], _STORAGE_TYPE_VALUE_GENERIC: [], _STORAGE_TYPE_VALUE_JSON: [], _STORAGE_TYPE_VALUE_PARQUET: [], } _OPTIONAL_PROPERTIES = { _STORAGE_TYPE_VALUE_GENERIC: { _OPTIONAL_READ_FUNCTION_GENERIC_PROPERTY: None, _OPTIONAL_WRITE_FUNCTION_GENERIC_PROPERTY: None, _OPTIONAL_READ_FUNCTION_ARGS_GENERIC_PROPERTY: None, _OPTIONAL_WRITE_FUNCTION_ARGS_GENERIC_PROPERTY: None, }, _STORAGE_TYPE_VALUE_CSV: { _OPTIONAL_DEFAULT_PATH_CSV_PROPERTY: None, _OPTIONAL_ENCODING_PROPERTY: _DEFAULT_ENCODING_VALUE, _OPTIONAL_HAS_HEADER_CSV_PROPERTY: True, _OPTIONAL_EXPOSED_TYPE_CSV_PROPERTY: _DEFAULT_EXPOSED_TYPE, }, _STORAGE_TYPE_VALUE_EXCEL: { _OPTIONAL_DEFAULT_PATH_EXCEL_PROPERTY: None, _OPTIONAL_HAS_HEADER_EXCEL_PROPERTY: True, _OPTIONAL_SHEET_NAME_EXCEL_PROPERTY: None, _OPTIONAL_EXPOSED_TYPE_EXCEL_PROPERTY: _DEFAULT_EXPOSED_TYPE, }, _STORAGE_TYPE_VALUE_IN_MEMORY: {_OPTIONAL_DEFAULT_DATA_IN_MEMORY_PROPERTY: None}, _STORAGE_TYPE_VALUE_SQL_TABLE: { _OPTIONAL_DB_USERNAME_SQL_PROPERTY: None, _OPTIONAL_DB_PASSWORD_SQL_PROPERTY: None, _OPTIONAL_HOST_SQL_PROPERTY: \"localhost\", _OPTIONAL_PORT_SQL_PROPERTY: 1433, _OPTIONAL_DRIVER_SQL_PROPERTY: \"\", _OPTIONAL_FOLDER_PATH_SQLITE_PROPERTY: None, _OPTIONAL_FILE_EXTENSION_SQLITE_PROPERTY: \".db\", _OPTIONAL_DB_EXTRA_ARGS_SQL_PROPERTY: None, _OPTIONAL_EXPOSED_TYPE_SQL_PROPERTY: _DEFAULT_EXPOSED_TYPE, }, _STORAGE_TYPE_VALUE_SQL: { _OPTIONAL_DB_USERNAME_SQL_PROPERTY: None, _OPTIONAL_DB_PASSWORD_SQL_PROPERTY: None, _OPTIONAL_HOST_SQL_PROPERTY: \"localhost\", _OPTIONAL_PORT_SQL_PROPERTY: 1433, _OPTIONAL_DRIVER_SQL_PROPERTY: \"\", _OPTIONAL_APPEND_QUERY_BUILDER_SQL_PROPERTY: None, _OPTIONAL_FOLDER_PATH_SQLITE_PROPERTY: None, _OPTIONAL_FILE_EXTENSION_SQLITE_PROPERTY: \".db\", _OPTIONAL_DB_EXTRA_ARGS_SQL_PROPERTY: None, _OPTIONAL_EXPOSED_TYPE_SQL_PROPERTY: _DEFAULT_EXPOSED_TYPE, }, _STORAGE_TYPE_VALUE_MONGO_COLLECTION: { _OPTIONAL_CUSTOM_DOCUMENT_MONGO_PROPERTY: MongoDefaultDocument, _OPTIONAL_USERNAME_MONGO_PROPERTY: \"\", _OPTIONAL_PASSWORD_MONGO_PROPERTY: \"\", _OPTIONAL_HOST_MONGO_PROPERTY: \"localhost\", _OPTIONAL_PORT_MONGO_PROPERTY: 27017, _OPTIONAL_DRIVER_MONGO_PROPERTY: \"\", _OPTIONAL_DB_EXTRA_ARGS_MONGO_PROPERTY: None, }, _STORAGE_TYPE_VALUE_PICKLE: { _OPTIONAL_DEFAULT_PATH_PICKLE_PROPERTY: None, _OPTIONAL_DEFAULT_DATA_PICKLE_PROPERTY: None, }, _STORAGE_TYPE_VALUE_JSON: { _OPTIONAL_DEFAULT_PATH_PICKLE_PROPERTY: None, _OPTIONAL_ENCODING_PROPERTY: _DEFAULT_ENCODING_VALUE, _OPTIONAL_ENCODER_JSON_PROPERTY: None, _OPTIONAL_DECODER_JSON_PROPERTY: None, }, _STORAGE_TYPE_VALUE_PARQUET: { _OPTIONAL_DEFAULT_PATH_PARQUET_PROPERTY: None, 
_OPTIONAL_ENGINE_PARQUET_PROPERTY: \"pyarrow\", _OPTIONAL_COMPRESSION_PARQUET_PROPERTY: \"snappy\", _OPTIONAL_READ_KWARGS_PARQUET_PROPERTY: None, _OPTIONAL_WRITE_KWARGS_PARQUET_PROPERTY: None, _OPTIONAL_EXPOSED_TYPE_PARQUET_PROPERTY: _DEFAULT_EXPOSED_TYPE, }, } _SCOPE_KEY = \"scope\" _DEFAULT_SCOPE = Scope.SCENARIO _VALIDITY_PERIOD_KEY = \"validity_period\" _DEFAULT_VALIDITY_PERIOD = None def __init__( self, id: str, storage_type: Optional[str] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ): self._storage_type = storage_type self._scope = scope self._validity_period = validity_period super().__init__(id, **properties) def __copy__(self): return DataNodeConfig(self.id, self._storage_type, self._scope, self._validity_period, **copy(self._properties)) def __getattr__(self, item: str) -> Optional[Any]: return _tpl._replace_templates(self._properties.get(item)) @property def storage_type(self): return _tpl._replace_templates(self._storage_type) @storage_type.setter # type: ignore @_ConfigBlocker._check() def storage_type(self, val): self._storage_type = val @property def scope(self): return _tpl._replace_templates(self._scope) @scope.setter # type: ignore @_ConfigBlocker._check() def scope(self, val): self._scope = val @property def validity_period(self): return _tpl._replace_templates(self._validity_period) @validity_period.setter # type: ignore @_ConfigBlocker._check() def validity_period(self, val): self._validity_period = val @property def cacheable(self): _warn_deprecated(\"cacheable\", suggest=\"the skippable feature\") cacheable = self._properties.get(\"cacheable\") if cacheable is not None: return _tpl._replace_templates(cacheable) else: return False @cacheable.setter # type: ignore @_ConfigBlocker._check() def cacheable(self, val): _warn_deprecated(\"cacheable\", suggest=\"the skippable feature\") self._properties[\"cacheable\"] = val @classmethod def default_config(cls): return DataNodeConfig( cls._DEFAULT_KEY, cls._DEFAULT_STORAGE_TYPE, cls._DEFAULT_SCOPE, cls._DEFAULT_VALIDITY_PERIOD ) def _clean(self): self._storage_type = self._DEFAULT_STORAGE_TYPE self._scope = self._DEFAULT_SCOPE self._validity_period = self._DEFAULT_VALIDITY_PERIOD self._properties.clear() def _to_dict(self): as_dict = {} if self._storage_type is not None: as_dict[self._STORAGE_TYPE_KEY] = self._storage_type if self._scope is not None: as_dict[self._SCOPE_KEY] = self._scope if self._validity_period is not None: as_dict[self._VALIDITY_PERIOD_KEY] = self._validity_period as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, id) storage_type = as_dict.pop(cls._STORAGE_TYPE_KEY, None) scope = as_dict.pop(cls._SCOPE_KEY, None) validity_perid = as_dict.pop(cls._VALIDITY_PERIOD_KEY, None) return DataNodeConfig(id=id, storage_type=storage_type, scope=scope, validity_period=validity_perid, **as_dict) def _update(self, as_dict, default_section=None): self._storage_type = as_dict.pop(self._STORAGE_TYPE_KEY, self._storage_type) if self._storage_type is None and default_section: self._storage_type = default_section.storage_type self._scope = as_dict.pop(self._SCOPE_KEY, self._scope) if self._scope is None and default_section: if default_section.scope and self._storage_type == default_section.storage_type: self._scope = default_section.scope else: self._scope = self._DEFAULT_SCOPE self._validity_period = as_dict.pop(self._VALIDITY_PERIOD_KEY, 
self._validity_period) if self._validity_period is None and default_section: self._validity_period = default_section.validity_period self._properties.update(as_dict) if default_section and self._storage_type == default_section.storage_type: self._properties = {**default_section.properties, **self._properties} # Assign default value to optional properties if not defined by user if self._OPTIONAL_PROPERTIES.get(self._storage_type): for optional_property, default_value in self._OPTIONAL_PROPERTIES[self._storage_type].items(): if default_value is not None and self._properties.get(optional_property) is None: self._properties[optional_property] = default_value @staticmethod def _set_default_configuration( storage_type: str, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties ) -> \"DataNodeConfig\": \"\"\"Set the default values for data node configurations. This function creates the _default data node configuration_ object, where all data node configuration objects will find their default values when needed. Parameters: storage_type (str): The default storage type for all data node configurations. The possible values are *\"pickle\"* (the default value), *\"csv\"*, *\"excel\"*, *\"sql\"*, *\"mongo_collection\"*, *\"in_memory\"*, *\"json\"*, *\"parquet\"* or *\"generic\"*. scope (Optional[Scope^]): The default scope for all data node configurations.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The default data node configuration. \"\"\" section = DataNodeConfig(_Config.DEFAULT_KEY, storage_type, scope, validity_period, **properties) Config._register_default(section) return Config.sections[DataNodeConfig.name][_Config.DEFAULT_KEY] @classmethod def _configure_from( cls, source_configuration: \"DataNodeConfig\", id: str, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new data node configuration from an existing one. Parameters: source_configuration (DataNodeConfig): The source data node configuration. id (str): The unique identifier of the new data node configuration. **properties (dict[str, any]): A keyworded variable length list of additional arguments.
The default properties are the properties of the source data node configuration. Returns: The new data node configuration. \"\"\" scope = properties.pop(\"scope\", None) or source_configuration.scope validity_period = properties.pop(\"validity_period\", None) or source_configuration.validity_period properties = {**source_configuration.properties, **properties} # type: ignore return cls.__configure(id, source_configuration.storage_type, scope, validity_period, **properties) @classmethod def _configure( cls, id: str, storage_type: Optional[str] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new data node configuration. Parameters: id (str): The unique identifier of the new data node configuration. storage_type (Optional[str]): The data node configuration storage type. The possible values are None (which is the default value of *\"pickle\"*, unless it has been overloaded by the *storage_type* value set in the default data node configuration (see `(Config.)set_default_data_node_configuration()^`)), *\"pickle\"*, *\"csv\"*, *\"excel\"*, *\"sql_table\"*, *\"sql\"*, *\"json\"*, *\"parquet\"*, *\"mongo_collection\"*, *\"in_memory\"*, or *\"generic\"*. scope (Optional[Scope^]): The scope of the data node configuration.
The default value is `Scope.SCENARIO` (or the one specified in `(Config.)set_default_data_node_configuration()^`). validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new data node configuration. \"\"\" configuration_map: Dict[str, Callable] = { cls._STORAGE_TYPE_VALUE_PICKLE: cls._configure_pickle, cls._STORAGE_TYPE_VALUE_SQL_TABLE: cls._configure_sql_table, cls._STORAGE_TYPE_VALUE_SQL: cls._configure_sql, cls._STORAGE_TYPE_VALUE_MONGO_COLLECTION: cls._configure_mongo_collection, cls._STORAGE_TYPE_VALUE_CSV: cls._configure_csv, cls._STORAGE_TYPE_VALUE_EXCEL: cls._configure_excel, cls._STORAGE_TYPE_VALUE_IN_MEMORY: cls._configure_in_memory, cls._STORAGE_TYPE_VALUE_GENERIC: cls._configure_generic, cls._STORAGE_TYPE_VALUE_JSON: cls._configure_json, cls._STORAGE_TYPE_VALUE_PARQUET: cls._configure_parquet, } if storage_type in cls._ALL_STORAGE_TYPES: return configuration_map[storage_type](id=id, scope=scope, validity_period=validity_period, **properties) return cls.__configure(id, storage_type, scope, validity_period, **properties) @classmethod def _configure_csv( cls, id: str, default_path: Optional[str] = None, encoding: Optional[str] = None, has_header: Optional[bool] = None, exposed_type: Optional[str] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new CSV data node configuration. Parameters: id (str): The unique identifier of the new CSV data node configuration. default_path (Optional[str]): The default path of the CSV file. encoding (Optional[str]): The encoding of the CSV file. has_header (Optional[bool]): If True, indicates that the CSV file has a header. exposed_type (Optional[str]): The exposed type of the data read from CSV file.
The default value is `pandas`. scope (Optional[Scope^]): The scope of the CSV data node configuration.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new CSV data node configuration. \"\"\" if default_path is not None: properties[cls._OPTIONAL_DEFAULT_PATH_CSV_PROPERTY] = default_path if encoding is not None: properties[cls._OPTIONAL_ENCODING_PROPERTY] = encoding if has_header is not None: properties[cls._OPTIONAL_HAS_HEADER_CSV_PROPERTY] = has_header if exposed_type is not None: properties[cls._OPTIONAL_EXPOSED_TYPE_CSV_PROPERTY] = exposed_type return cls.__configure(id, DataNodeConfig._STORAGE_TYPE_VALUE_CSV, scope, validity_period, **properties) @classmethod def _configure_json( cls, id: str, default_path: Optional[str] = None, encoding: Optional[str] = None, encoder: Optional[json.JSONEncoder] = None, decoder: Optional[json.JSONDecoder] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new JSON data node configuration. Parameters: id (str): The unique identifier of the new JSON data node configuration. default_path (Optional[str]): The default path of the JSON file. encoding (Optional[str]): The encoding of the JSON file. encoder (Optional[json.JSONEncoder]): The JSON encoder used to write data into the JSON file. decoder (Optional[json.JSONDecoder]): The JSON decoder used to read data from the JSON file. scope (Optional[Scope^]): The scope of the JSON data node configuration.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new JSON data node configuration. \"\"\" if default_path is not None: properties[cls._OPTIONAL_DEFAULT_PATH_JSON_PROPERTY] = default_path if encoding is not None: properties[cls._OPTIONAL_ENCODING_PROPERTY] = encoding if encoder is not None: properties[cls._OPTIONAL_ENCODER_JSON_PROPERTY] = encoder if decoder is not None: properties[cls._OPTIONAL_DECODER_JSON_PROPERTY] = decoder return cls.__configure(id, DataNodeConfig._STORAGE_TYPE_VALUE_JSON, scope, validity_period, **properties) @classmethod def _configure_parquet( cls, id: str, default_path: Optional[str] = None, engine: Optional[str] = None, compression: Optional[str] = None, read_kwargs: Optional[Dict] = None, write_kwargs: Optional[Dict] = None, exposed_type: Optional[str] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new Parquet data node configuration. Parameters: id (str): The unique identifier of the new Parquet data node configuration. default_path (Optional[str]): The default path of the Parquet file. engine (Optional[str]): Parquet library to use. Possible values are *\"fastparquet\"* or *\"pyarrow\"*.
The default value is *\"pyarrow\"*. compression (Optional[str]): Name of the compression to use. Possible values are *\"snappy\"*, *\"gzip\"*, *\"brotli\"*, or *\"none\"* (no compression). The default value is *\"snappy\"*. read_kwargs (Optional[dict]): Additional parameters passed to the `pandas.read_parquet()` function. write_kwargs (Optional[dict]): Additional parameters passed to the `pandas.DataFrame.write_parquet()` function.
The parameters in *read_kwargs* and *write_kwargs* take precedence over the top-level parameters, which are also passed to pandas. exposed_type (Optional[str]): The exposed type of the data read from the Parquet file.
The default value is `pandas`. scope (Optional[Scope^]): The scope of the Parquet data node configuration.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new Parquet data node configuration. \"\"\" if default_path is not None: properties[cls._OPTIONAL_DEFAULT_PATH_PARQUET_PROPERTY] = default_path if engine is not None: properties[cls._OPTIONAL_ENGINE_PARQUET_PROPERTY] = engine if compression is not None: properties[cls._OPTIONAL_COMPRESSION_PARQUET_PROPERTY] = compression if read_kwargs is not None: properties[cls._OPTIONAL_READ_KWARGS_PARQUET_PROPERTY] = read_kwargs if write_kwargs is not None: properties[cls._OPTIONAL_WRITE_KWARGS_PARQUET_PROPERTY] = write_kwargs if exposed_type is not None: properties[cls._OPTIONAL_EXPOSED_TYPE_PARQUET_PROPERTY] = exposed_type return cls.__configure(id, DataNodeConfig._STORAGE_TYPE_VALUE_PARQUET, scope, validity_period, **properties) @classmethod def _configure_excel( cls, id: str, default_path: Optional[str] = None, has_header: Optional[bool] = None, sheet_name: Optional[Union[List[str], str]] = None, exposed_type: Optional[str] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new Excel data node configuration. Parameters: id (str): The unique identifier of the new Excel data node configuration. default_path (Optional[str]): The path of the Excel file. has_header (Optional[bool]): If True, indicates that the Excel file has a header. sheet_name (Optional[Union[List[str], str]]): The list of sheet names to be used. This can be a unique name. exposed_type (Optional[str]): The exposed type of the data read from Excel file.
The default value is `pandas`. scope (Optional[Scope^]): The scope of the Excel data node configuration.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new Excel data node configuration. \"\"\" if default_path is not None: properties[cls._OPTIONAL_DEFAULT_PATH_EXCEL_PROPERTY] = default_path if has_header is not None: properties[cls._OPTIONAL_HAS_HEADER_EXCEL_PROPERTY] = has_header if sheet_name is not None: properties[cls._OPTIONAL_SHEET_NAME_EXCEL_PROPERTY] = sheet_name if exposed_type is not None: properties[cls._OPTIONAL_EXPOSED_TYPE_EXCEL_PROPERTY] = exposed_type return cls.__configure(id, DataNodeConfig._STORAGE_TYPE_VALUE_EXCEL, scope, validity_period, **properties) @classmethod def _configure_generic( cls, id: str, read_fct: Optional[Callable] = None, write_fct: Optional[Callable] = None, read_fct_args: Optional[List] = None, write_fct_args: Optional[List] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new generic data node configuration. Parameters: id (str): The unique identifier of the new generic data node configuration. read_fct (Optional[Callable]): The Python function called to read the data. write_fct (Optional[Callable]): The Python function called to write the data. The provided function must have at least one parameter that receives the data to be written. read_fct_args (Optional[List]): The list of arguments that are passed to the function *read_fct* to read data. write_fct_args (Optional[List]): The list of arguments that are passed to the function *write_fct* to write the data. scope (Optional[Scope^]): The scope of the Generic data node configuration.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new Generic data node configuration. \"\"\" if read_fct is not None: properties[cls._OPTIONAL_READ_FUNCTION_GENERIC_PROPERTY] = read_fct if write_fct is not None: properties[cls._OPTIONAL_WRITE_FUNCTION_GENERIC_PROPERTY] = write_fct if read_fct_args is not None: properties[cls._OPTIONAL_READ_FUNCTION_ARGS_GENERIC_PROPERTY] = read_fct_args if write_fct_args is not None: properties[cls._OPTIONAL_WRITE_FUNCTION_ARGS_GENERIC_PROPERTY] = write_fct_args return cls.__configure(id, DataNodeConfig._STORAGE_TYPE_VALUE_GENERIC, scope, validity_period, **properties) @classmethod def _configure_in_memory( cls, id: str, default_data: Optional[Any] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new *in-memory* data node configuration. Parameters: id (str): The unique identifier of the new in_memory data node configuration. default_data (Optional[any]): The default data of the data nodes instantiated from this in_memory data node configuration. scope (Optional[Scope^]): The scope of the in_memory data node configuration.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new *in-memory* data node configuration. \"\"\" if default_data is not None: properties[cls._OPTIONAL_DEFAULT_DATA_IN_MEMORY_PROPERTY] = default_data return cls.__configure(id, DataNodeConfig._STORAGE_TYPE_VALUE_IN_MEMORY, scope, validity_period, **properties) @classmethod def _configure_pickle( cls, id: str, default_path: Optional[str] = None, default_data: Optional[Any] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new pickle data node configuration. Parameters: id (str): The unique identifier of the new pickle data node configuration. default_path (Optional[str]): The path of the pickle file. default_data (Optional[any]): The default data of the data nodes instantiated from this pickle data node configuration. scope (Optional[Scope^]): The scope of the pickle data node configuration.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new pickle data node configuration. \"\"\" if default_path is not None: properties[cls._OPTIONAL_DEFAULT_PATH_PICKLE_PROPERTY] = default_path if default_data is not None: properties[cls._OPTIONAL_DEFAULT_DATA_PICKLE_PROPERTY] = default_data return cls.__configure(id, DataNodeConfig._STORAGE_TYPE_VALUE_PICKLE, scope, validity_period, **properties) @classmethod def _configure_sql_table( cls, id: str, db_name: str, db_engine: str, table_name: str, db_username: Optional[str] = None, db_password: Optional[str] = None, db_host: Optional[str] = None, db_port: Optional[int] = None, db_driver: Optional[str] = None, sqlite_folder_path: Optional[str] = None, sqlite_file_extension: Optional[str] = None, db_extra_args: Optional[Dict[str, Any]] = None, exposed_type: Optional[str] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new SQL table data node configuration. Parameters: id (str): The unique identifier of the new SQL data node configuration. db_name (str): The database name, or the name of the SQLite database file. db_engine (str): The database engine. Possible values are *\"sqlite\"*, *\"mssql\"*, *\"mysql\"*, or *\"postgresql\"*. table_name (str): The name of the SQL table. db_username (Optional[str]): The database username. Required by the *\"mssql\"*, *\"mysql\"*, and *\"postgresql\"* engines. db_password (Optional[str]): The database password. Required by the *\"mssql\"*, *\"mysql\"*, and *\"postgresql\"* engines. db_host (Optional[str]): The database host.
The default value is \"localhost\". db_port (Optional[int]): The database port.
The default value is 1433. db_driver (Optional[str]): The database driver. sqlite_folder_path (Optional[str]): The path to the folder that contains the SQLite file.
The default value is the current working folder. sqlite_file_extension (Optional[str]): The file extension of the SQLite file.
The default value is \".db\". db_extra_args (Optional[dict[str, any]]): A dictionary of additional arguments to be passed into database connection string. exposed_type (Optional[str]): The exposed type of the data read from SQL table.
The default value is \"pandas\". scope (Optional[Scope^]): The scope of the SQL data node configuration.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new SQL data node configuration. \"\"\" properties.update( { cls._REQUIRED_DB_NAME_SQL_PROPERTY: db_name, cls._REQUIRED_DB_ENGINE_SQL_PROPERTY: db_engine, cls._REQUIRED_TABLE_NAME_SQL_TABLE_PROPERTY: table_name, } ) if db_username is not None: properties[cls._OPTIONAL_DB_USERNAME_SQL_PROPERTY] = db_username if db_password is not None: properties[cls._OPTIONAL_DB_PASSWORD_SQL_PROPERTY] = db_password if db_host is not None: properties[cls._OPTIONAL_HOST_SQL_PROPERTY] = db_host if db_port is not None: properties[cls._OPTIONAL_PORT_SQL_PROPERTY] = db_port if db_driver is not None: properties[cls._OPTIONAL_DRIVER_SQL_PROPERTY] = db_driver if sqlite_folder_path is not None: properties[cls._OPTIONAL_FOLDER_PATH_SQLITE_PROPERTY] = sqlite_folder_path if sqlite_file_extension is not None: properties[cls._OPTIONAL_FILE_EXTENSION_SQLITE_PROPERTY] = sqlite_file_extension if db_extra_args is not None: properties[cls._OPTIONAL_DB_EXTRA_ARGS_SQL_PROPERTY] = db_extra_args if exposed_type is not None: properties[cls._OPTIONAL_EXPOSED_TYPE_SQL_PROPERTY] = exposed_type return cls.__configure(id, DataNodeConfig._STORAGE_TYPE_VALUE_SQL_TABLE, scope, validity_period, **properties) @classmethod def _configure_sql( cls, id: str, db_name: str, db_engine: str, read_query: str, write_query_builder: Callable, append_query_builder: Optional[Callable] = None, db_username: Optional[str] = None, db_password: Optional[str] = None, db_host: Optional[str] = None, db_port: Optional[int] = None, db_driver: Optional[str] = None, sqlite_folder_path: Optional[str] = None, sqlite_file_extension: Optional[str] = None, db_extra_args: Optional[Dict[str, Any]] = None, exposed_type: Optional[str] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new SQL data node configuration. Parameters: id (str): The unique identifier of the new SQL data node configuration. db_name (str): The database name, or the name of the SQLite database file. db_engine (str): The database engine. Possible values are *\"sqlite\"*, *\"mssql\"*, *\"mysql\"*, or *\"postgresql\"*. read_query (str): The SQL query string used to read the data from the database. write_query_builder (Callable): A callback function that takes the data as an input parameter and returns a list of SQL queries to be executed when writing data to the data node. append_query_builder (Optional[Callable]): A callback function that takes the data as an input parameter and returns a list of SQL queries to be executed when appending data to the data node. db_username (Optional[str]): The database username. Required by the *\"mssql\"*, *\"mysql\"*, and *\"postgresql\"* engines. db_password (Optional[str]): The database password. Required by the *\"mssql\"*, *\"mysql\"*, and *\"postgresql\"* engines. db_host (Optional[str]): The database host.
The default value is \"localhost\". db_port (Optional[int]): The database port.
The default value is 1433. db_driver (Optional[str]): The database driver. sqlite_folder_path (Optional[str]): The path to the folder that contains the SQLite file.
The default value is the current working folder. sqlite_file_extension (Optional[str]): The file extension of the SQLite file.
The default value is \".db\". db_extra_args (Optional[dict[str, any]]): A dictionary of additional arguments to be passed into database connection string. exposed_type (Optional[str]): The exposed type of the data read from SQL query.
The default value is \"pandas\". scope (Optional[Scope^]): The scope of the SQL data node configuration.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new SQL data node configuration. \"\"\" properties.update( { cls._REQUIRED_DB_NAME_SQL_PROPERTY: db_name, cls._REQUIRED_DB_ENGINE_SQL_PROPERTY: db_engine, cls._REQUIRED_READ_QUERY_SQL_PROPERTY: read_query, cls._REQUIRED_WRITE_QUERY_BUILDER_SQL_PROPERTY: write_query_builder, } ) if append_query_builder is not None: properties[cls._OPTIONAL_APPEND_QUERY_BUILDER_SQL_PROPERTY] = append_query_builder if db_username is not None: properties[cls._OPTIONAL_DB_USERNAME_SQL_PROPERTY] = db_username if db_password is not None: properties[cls._OPTIONAL_DB_PASSWORD_SQL_PROPERTY] = db_password if db_host is not None: properties[cls._OPTIONAL_HOST_SQL_PROPERTY] = db_host if db_port is not None: properties[cls._OPTIONAL_PORT_SQL_PROPERTY] = db_port if db_driver is not None: properties[cls._OPTIONAL_DRIVER_SQL_PROPERTY] = db_driver if sqlite_folder_path is not None: properties[cls._OPTIONAL_FOLDER_PATH_SQLITE_PROPERTY] = sqlite_folder_path if sqlite_file_extension is not None: properties[cls._OPTIONAL_FILE_EXTENSION_SQLITE_PROPERTY] = sqlite_file_extension if db_extra_args is not None: properties[cls._OPTIONAL_DB_EXTRA_ARGS_SQL_PROPERTY] = db_extra_args if exposed_type is not None: properties[cls._OPTIONAL_EXPOSED_TYPE_SQL_PROPERTY] = exposed_type return cls.__configure(id, DataNodeConfig._STORAGE_TYPE_VALUE_SQL, scope, validity_period, **properties) @classmethod def _configure_mongo_collection( cls, id: str, db_name: str, collection_name: str, custom_document: Optional[Any] = None, db_username: Optional[str] = None, db_password: Optional[str] = None, db_host: Optional[str] = None, db_port: Optional[int] = None, db_driver: Optional[str] = None, db_extra_args: Optional[Dict[str, Any]] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ) -> \"DataNodeConfig\": \"\"\"Configure a new Mongo collection data node configuration. Parameters: id (str): The unique identifier of the new Mongo collection data node configuration. db_name (str): The database name. collection_name (str): The collection in the database to read from and to write the data to. custom_document (Optional[any]): The custom document class to store, encode, and decode data when reading and writing to a Mongo collection. The custom_document can have an optional *decode()* method to decode data in the Mongo collection to a custom object, and an optional *encode()*) method to encode the object's properties to the Mongo collection when writing. db_username (Optional[str]): The database username. db_password (Optional[str]): The database password. db_host (Optional[str]): The database host.
The default value is \"localhost\". db_port (Optional[int]): The database port.
The default value is 27017. db_driver (Optional[str]): The database driver. db_extra_args (Optional[dict[str, any]]): A dictionary of additional arguments to be passed into the database connection string. scope (Optional[Scope^]): The scope of the Mongo collection data node configuration.
The default value is `Scope.SCENARIO`. validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task configs page](../core/config/task-config.md) for more details). If *validity_period* is set to None, the data node is always up-to-date. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new Mongo collection data node configuration. \"\"\" properties.update( { cls._REQUIRED_DB_NAME_MONGO_PROPERTY: db_name, cls._REQUIRED_COLLECTION_NAME_MONGO_PROPERTY: collection_name, } ) if custom_document is not None: properties[cls._OPTIONAL_CUSTOM_DOCUMENT_MONGO_PROPERTY] = custom_document if db_username is not None: properties[cls._OPTIONAL_USERNAME_MONGO_PROPERTY] = db_username if db_password is not None: properties[cls._OPTIONAL_PASSWORD_MONGO_PROPERTY] = db_password if db_host is not None: properties[cls._OPTIONAL_HOST_MONGO_PROPERTY] = db_host if db_port is not None: properties[cls._OPTIONAL_PORT_MONGO_PROPERTY] = db_port if db_driver is not None: properties[cls._OPTIONAL_DRIVER_MONGO_PROPERTY] = db_driver if db_extra_args is not None: properties[cls._OPTIONAL_DB_EXTRA_ARGS_MONGO_PROPERTY] = db_extra_args return cls.__configure( id, DataNodeConfig._STORAGE_TYPE_VALUE_MONGO_COLLECTION, scope, validity_period, **properties ) @staticmethod def __configure( id: str, storage_type: Optional[str] = None, scope: Optional[Scope] = None, validity_period: Optional[timedelta] = None, **properties, ): section = DataNodeConfig(id, storage_type, scope, validity_period, **properties) Config._register(section) return Config.sections[DataNodeConfig.name][id] "} {"text": "from copy import copy from typing import Any, Dict, List, Optional, Union from taipy.config._config import _Config from taipy.config.common._template_handler import _TemplateHandler as _tpl from taipy.config.config import Config from taipy.config.section import Section from ..common._warnings import _warn_deprecated from .data_node_config import DataNodeConfig class TaskConfig(Section): \"\"\" Configuration fields needed to instantiate an actual `Task^`. Attributes: id (str): Identifier of the task config. Must be a valid Python variable name. inputs (Union[DataNodeConfig^, List[DataNodeConfig^]]): The optional list of `DataNodeConfig^` inputs.
The default value is []. outputs (Union[DataNodeConfig^, List[DataNodeConfig^]]): The optional list of `DataNodeConfig^` outputs.
The default value is []. skippable (bool): If True, indicates that the task can be skipped if no change has been made on inputs.
The default value is False. function (Callable): The user function that takes parameters compatible with the exposed types (*exposed_type* field) of the input data nodes and returns results compatible with the exposed types (*exposed_type* field) of the output data nodes.
The default value is None. **properties (dict[str, any]): A dictionary of additional properties. \"\"\" name = \"TASK\" _INPUT_KEY = \"inputs\" _FUNCTION = \"function\" _OUTPUT_KEY = \"outputs\" _IS_SKIPPABLE_KEY = \"skippable\" def __init__( self, id: str, function, inputs: Optional[Union[DataNodeConfig, List[DataNodeConfig]]] = None, outputs: Optional[Union[DataNodeConfig, List[DataNodeConfig]]] = None, skippable: Optional[bool] = False, **properties, ): if inputs: self._inputs = [inputs] if isinstance(inputs, DataNodeConfig) else copy(inputs) else: self._inputs = [] if outputs: self._outputs = [outputs] if isinstance(outputs, DataNodeConfig) else copy(outputs) outputs_all_cacheable = all(output.cacheable for output in self._outputs) if not skippable and outputs_all_cacheable: _warn_deprecated(\"cacheable\", suggest=\"the skippable feature\") skippable = True else: self._outputs = [] self._skippable = skippable self.function = function super().__init__(id, **properties) def __copy__(self): return TaskConfig( self.id, self.function, copy(self._inputs), copy(self._outputs), self.skippable, **copy(self._properties) ) def __getattr__(self, item: str) -> Optional[Any]: return _tpl._replace_templates(self._properties.get(item)) @property def input_configs(self) -> List[DataNodeConfig]: return list(self._inputs) @property def inputs(self) -> List[DataNodeConfig]: return list(self._inputs) @property def output_configs(self) -> List[DataNodeConfig]: return list(self._outputs) @property def outputs(self) -> List[DataNodeConfig]: return list(self._outputs) @property def skippable(self): return _tpl._replace_templates(self._skippable) @classmethod def default_config(cls): return TaskConfig(cls._DEFAULT_KEY, None, [], [], False) def _clean(self): self.function = None self._inputs = [] self._outputs = [] self._skippable = False self._properties.clear() def _to_dict(self): return { self._FUNCTION: self.function, self._INPUT_KEY: self._inputs, self._OUTPUT_KEY: self._outputs, self._IS_SKIPPABLE_KEY: self._skippable, **self._properties, } @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config]): as_dict.pop(cls._ID_KEY, id) funct = as_dict.pop(cls._FUNCTION, None) dn_configs = config._sections.get(DataNodeConfig.name, None) or [] # type: ignore inputs = [] if inputs_as_str := as_dict.pop(cls._INPUT_KEY, None): inputs = [dn_configs[dn_id] for dn_id in inputs_as_str if dn_id in dn_configs] outputs = [] if outputs_as_str := as_dict.pop(cls._OUTPUT_KEY, None): outputs = [dn_configs[ds_id] for ds_id in outputs_as_str if ds_id in dn_configs] skippable = as_dict.pop(cls._IS_SKIPPABLE_KEY, False) return TaskConfig(id=id, function=funct, inputs=inputs, outputs=outputs, skippable=skippable, **as_dict) def _update(self, as_dict, default_section=None): function = as_dict.pop(self._FUNCTION, None) if function is not None and type(function) is not str: self.function = function self._inputs = as_dict.pop(self._INPUT_KEY, self._inputs) if self._inputs is None and default_section: self._inputs = default_section._inputs self._outputs = as_dict.pop(self._OUTPUT_KEY, self._outputs) if self._outputs is None and default_section: self._outputs = default_section._outputs self._skippable = as_dict.pop(self._IS_SKIPPABLE_KEY, self._skippable) self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure( id: str, function, input: Optional[Union[DataNodeConfig, List[DataNodeConfig]]] = None, output: 
Optional[Union[DataNodeConfig, List[DataNodeConfig]]] = None, skippable: Optional[bool] = False, **properties, ) -> \"TaskConfig\": \"\"\"Configure a new task configuration. Parameters: id (str): The unique identifier of this task configuration. function (Callable): The python function called by Taipy to run the task. input (Optional[Union[DataNodeConfig^, List[DataNodeConfig^]]]): The list of the function input data node configurations. This can be a unique data node configuration if there is a single input data node, or None if there are none. output (Optional[Union[DataNodeConfig^, List[DataNodeConfig^]]]): The list of the function output data node configurations. This can be a unique data node configuration if there is a single output data node, or None if there are none. skippable (bool): If True, indicates that the task can be skipped if no change has been made on inputs.
The default value is False. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new task configuration. \"\"\" section = TaskConfig(id, function, input, output, skippable, **properties) Config._register(section) return Config.sections[TaskConfig.name][id] @staticmethod def _set_default_configuration( function, input: Optional[Union[DataNodeConfig, List[DataNodeConfig]]] = None, output: Optional[Union[DataNodeConfig, List[DataNodeConfig]]] = None, skippable: Optional[bool] = False, **properties, ) -> \"TaskConfig\": \"\"\"Set the default values for task configurations. This function creates the *default task configuration* object, where all task configuration objects will find their default values when needed. Parameters: function (Callable): The python function called by Taipy to run the task. input (Optional[Union[DataNodeConfig^, List[DataNodeConfig^]]]): The list of the input data node configurations. This can be a unique data node configuration if there is a single input data node, or None if there are none. output (Optional[Union[DataNodeConfig^, List[DataNodeConfig^]]]): The list of the output data node configurations. This can be a unique data node configuration if there is a single output data node, or None if there are none. skippable (bool): If True, indicates that the task can be skipped if no change has been made on inputs.
The default value is False. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The default task configuration. \"\"\" section = TaskConfig(_Config.DEFAULT_KEY, function, input, output, skippable, **properties) Config._register(section) return Config.sections[TaskConfig.name][_Config.DEFAULT_KEY] "} {"text": "from taipy.config import _inject_section from taipy.config.checker._checker import _Checker from taipy.config.common.frequency import Frequency # type: ignore from taipy.config.common.scope import Scope # type: ignore from taipy.config.config import Config # type: ignore from taipy.config.global_app.global_app_config import GlobalAppConfig # type: ignore from .checkers._config_id_checker import _ConfigIdChecker from .checkers._core_section_checker import _CoreSectionChecker from .checkers._data_node_config_checker import _DataNodeConfigChecker from .checkers._job_config_checker import _JobConfigChecker from .checkers._scenario_config_checker import _ScenarioConfigChecker from .checkers._task_config_checker import _TaskConfigChecker from .core_section import CoreSection from .data_node_config import DataNodeConfig from .job_config import JobConfig from .migration_config import MigrationConfig from .scenario_config import ScenarioConfig from .task_config import TaskConfig _inject_section( JobConfig, \"job_config\", JobConfig.default_config(), [(\"configure_job_executions\", JobConfig._configure)], add_to_unconflicted_sections=True, ) _inject_section( DataNodeConfig, \"data_nodes\", DataNodeConfig.default_config(), [ (\"configure_data_node\", DataNodeConfig._configure), (\"configure_data_node_from\", DataNodeConfig._configure_from), (\"set_default_data_node_configuration\", DataNodeConfig._set_default_configuration), (\"configure_csv_data_node\", DataNodeConfig._configure_csv), (\"configure_json_data_node\", DataNodeConfig._configure_json), (\"configure_parquet_data_node\", DataNodeConfig._configure_parquet), (\"configure_sql_table_data_node\", DataNodeConfig._configure_sql_table), (\"configure_sql_data_node\", DataNodeConfig._configure_sql), (\"configure_mongo_collection_data_node\", DataNodeConfig._configure_mongo_collection), (\"configure_in_memory_data_node\", DataNodeConfig._configure_in_memory), (\"configure_pickle_data_node\", DataNodeConfig._configure_pickle), (\"configure_excel_data_node\", DataNodeConfig._configure_excel), (\"configure_generic_data_node\", DataNodeConfig._configure_generic), ], ) _inject_section( TaskConfig, \"tasks\", TaskConfig.default_config(), [ (\"configure_task\", TaskConfig._configure), (\"set_default_task_configuration\", TaskConfig._set_default_configuration), ], ) _inject_section( ScenarioConfig, \"scenarios\", ScenarioConfig.default_config(), [ (\"configure_scenario\", ScenarioConfig._configure), (\"set_default_scenario_configuration\", ScenarioConfig._set_default_configuration), ], ) _inject_section( MigrationConfig, \"migration_functions\", MigrationConfig.default_config(), [(\"add_migration_function\", MigrationConfig._add_migration_function)], add_to_unconflicted_sections=True, ) _inject_section( CoreSection, \"core\", CoreSection.default_config(), [(\"configure_core\", CoreSection._configure)], add_to_unconflicted_sections=True, ) _Checker.add_checker(_ConfigIdChecker) _Checker.add_checker(_CoreSectionChecker) _Checker.add_checker(_DataNodeConfigChecker) _Checker.add_checker(_JobConfigChecker) # We don't need to add _MigrationConfigChecker because it is run only when the Core service is run. 
_Checker.add_checker(_TaskConfigChecker) _Checker.add_checker(_ScenarioConfigChecker) "} {"text": "import collections.abc from copy import deepcopy from typing import Any, Callable, Dict, Optional, Union from taipy.config._config import _Config from taipy.config.common._template_handler import _TemplateHandler as _tpl from taipy.config.config import Config from taipy.config.section import Section from taipy.config.unique_section import UniqueSection class MigrationConfig(UniqueSection): \"\"\" Configuration fields needed to register migration functions from an old version to newer one. Attributes: migration_fcts (Dict[str, Dict[str, Callable]]): A dictionary that maps the version that entities are migrated from to the migration functions. **properties (dict[str, Any]): A dictionary of additional properties. \"\"\" name = \"VERSION_MIGRATION\" _MIGRATION_FCTS_KEY = \"migration_fcts\" def __init__( self, migration_fcts: Dict[str, Dict[str, Callable]], **properties, ): self.migration_fcts = migration_fcts super().__init__(**properties) def __copy__(self): return MigrationConfig( deepcopy(self.migration_fcts), **deepcopy(self._properties), ) def _clean(self): self.migration_fcts.clear() self._properties.clear() def __getattr__(self, item: str) -> Optional[Any]: return _tpl._replace_templates(self._properties.get(item)) # type: ignore @classmethod def default_config(cls): return MigrationConfig({}) def _to_dict(self): return { self._MIGRATION_FCTS_KEY: self.migration_fcts, **self._properties, } @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config]): return MigrationConfig(**as_dict) def _update(self, as_dict, default_section=None): def deep_update(d, u): for k, v in u.items(): if isinstance(v, collections.abc.Mapping): d[k] = deep_update(d.get(k, {}), v) else: d[k] = v return d migration_fcts = as_dict.pop(self._MIGRATION_FCTS_KEY) deep_update(self.migration_fcts, migration_fcts) self._properties.update(as_dict) @staticmethod def _add_migration_function( target_version: str, config: Union[Section, str], migration_fct: Callable, **properties, ): \"\"\"Add a migration function for a Configuration to migrate entities to the target version. Parameters: target_version (str): The production version that entities are migrated to. config (Union[Section, str]): The configuration or the `id` of the config that needs to migrate. migration_fct (Callable): Migration function that takes an entity as input and returns a new entity that is compatible with the target production version. **properties (Dict[str, Any]): A keyworded variable length list of additional arguments. Returns: `MigrationConfig^`: The Migration configuration. \"\"\" config_id = config if isinstance(config, str) else config.id migration_fcts = {target_version: {config_id: migration_fct}} section = MigrationConfig( migration_fcts, **properties, ) Config._register(section) return Config.unique_sections[MigrationConfig.name] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
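# --- Illustrative usage sketch (added for clarity; not part of the original sources) ---
# The `_inject_section` calls in the module above expose public entry points such as
# `Config.configure_csv_data_node`, `Config.configure_data_node`, `Config.configure_task`,
# and `Config.configure_scenario`. A minimal sketch, assuming a hypothetical `sales.csv`
# file and a hypothetical `clean` function; the `task_configs` parameter name of
# `Config.configure_scenario` is assumed here, since its signature is not shown above.
from taipy import Config
from taipy.config.common.scope import Scope


def clean(df):
    # Drop incomplete rows; the default exposed type of a CSV data node is a pandas DataFrame.
    return df.dropna()


raw_sales_cfg = Config.configure_csv_data_node("raw_sales", default_path="sales.csv", scope=Scope.GLOBAL)
clean_sales_cfg = Config.configure_data_node("clean_sales", storage_type="parquet")
clean_task_cfg = Config.configure_task("clean_sales_task", function=clean, input=raw_sales_cfg, output=clean_sales_cfg, skippable=True)
scenario_cfg = Config.configure_scenario("sales_scenario", task_configs=[clean_task_cfg])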
from copy import copy from typing import Any, Dict, Optional, Union from taipy.config import Config from taipy.config._config import _Config from taipy.config.common._template_handler import _TemplateHandler as _tpl from taipy.config.unique_section import UniqueSection from ..exceptions.exceptions import ModeNotAvailable class JobConfig(UniqueSection): \"\"\" Configuration fields related to the jobs' executions. Parameters: mode (str): The Taipy operating mode. By default, the \"development\" mode is set for testing and debugging the executions of jobs. A \"standalone\" mode is also available. **properties (dict[str, any]): A dictionary of additional properties. \"\"\" name = \"JOB\" _MODE_KEY = \"mode\" _STANDALONE_MODE = \"standalone\" _DEVELOPMENT_MODE = \"development\" _DEFAULT_MODE = _DEVELOPMENT_MODE _MODES = [_STANDALONE_MODE, _DEVELOPMENT_MODE] def __init__(self, mode: Optional[str] = None, **properties): self.mode = mode or self._DEFAULT_MODE self._config = self._create_config(self.mode, **properties) super().__init__(**properties) def __copy__(self): return JobConfig(self.mode, **copy(self._properties)) def __getattr__(self, key: str) -> Optional[Any]: return self._config.get(key, None) @classmethod def default_config(cls): return JobConfig(cls._DEFAULT_MODE) def _clean(self): self.mode = self._DEFAULT_MODE self._config = self._create_config(self.mode) def _to_dict(self): as_dict = {} if self.mode is not None: as_dict[self._MODE_KEY] = self.mode as_dict.update(self._config) return as_dict @classmethod def _from_dict(cls, config_as_dict: Dict[str, Any], id=None, config: Optional[_Config] = None): mode = config_as_dict.pop(cls._MODE_KEY, None) job_config = JobConfig(mode, **config_as_dict) return job_config def _update(self, as_dict: Dict[str, Any], default_section=None): mode = _tpl._replace_templates(as_dict.pop(self._MODE_KEY, self.mode)) if self.mode != mode: self.mode = mode self._config = self._create_config(self.mode, **as_dict) if self._config is not None: self._update_config(as_dict) @staticmethod def _configure( mode: Optional[str] = None, max_nb_of_workers: Optional[Union[int, str]] = None, **properties ) -> \"JobConfig\": \"\"\"Configure job execution. Parameters: mode (Optional[str]): The job execution mode. Possible values are: *\"standalone\"* (the default value) or *\"development\"*. max_nb_of_workers (Optional[int, str]): Parameter used only in default *\"standalone\"* mode. This indicates the maximum number of jobs able to run in parallel.
The default value is 1.
A string can be provided to dynamically set the value using an environment variable. The string must follow the pattern: `ENV[<env_var>]` where `<env_var>` is the name of an environment variable. **properties (dict[str, any]): A keyworded variable length list of additional arguments. Returns: The new job execution configuration. \"\"\" section = JobConfig(mode, max_nb_of_workers=max_nb_of_workers, **properties) Config._register(section) return Config.unique_sections[JobConfig.name] def _update_config(self, config_as_dict: Dict[str, Any]): for k, v in config_as_dict.items(): type_to_convert = type(self.get_default_config(self.mode).get(k, None)) or str value = _tpl._replace_templates(v, type_to_convert) if value is not None: self._config[k] = value @property def is_standalone(self) -> bool: \"\"\"True if the config is set to standalone mode\"\"\" return self.mode == self._STANDALONE_MODE @property def is_development(self) -> bool: \"\"\"True if the config is set to development mode\"\"\" return self.mode == self._DEVELOPMENT_MODE @classmethod def get_default_config(cls, mode: str) -> Dict[str, Any]: if cls.is_standalone: # type: ignore return {\"max_nb_of_workers\": 1} if cls.is_development: return {} raise ModeNotAvailable(mode) @classmethod def _create_config(cls, mode, **properties): return {**cls.get_default_config(mode), **properties} "} {"text": "from typing import Set from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import IssueCollector from ..core_section import CoreSection class _CoreSectionChecker(_ConfigChecker): _ACCEPTED_REPOSITORY_TYPES: Set[str] = {\"filesystem\", \"sql\"} def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: if core_section := self._config._unique_sections.get(CoreSection.name): self._check_repository_type(core_section) return self._collector def _check_repository_type(self, core_section: CoreSection): value = core_section.repository_type if value not in self._ACCEPTED_REPOSITORY_TYPES: self._warning( core_section._REPOSITORY_TYPE_KEY, value, f'Value \"{value}\" for field {core_section._REPOSITORY_TYPE_KEY} of the CoreSection is not supported. ' f'Default value \"filesystem\" is applied.', ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
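# --- Illustrative usage sketch (added for clarity; not part of the original sources) ---
# The JobConfig section above is exposed as `Config.configure_job_executions` by the
# `_inject_section` call shown earlier. A minimal sketch of switching to standalone mode
# with two parallel workers; `TAIPY_MAX_WORKERS` below is a hypothetical environment
# variable name following the `ENV[<env_var>]` pattern documented in the docstring.
from taipy import Config

Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)
# Equivalently, read the worker count from an environment variable at runtime:
# Config.configure_job_executions(mode="standalone", max_nb_of_workers="ENV[TAIPY_MAX_WORKERS]")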
"} {"text": "from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import IssueCollector from ..._version._version_manager_factory import _VersionManagerFactory from ..migration_config import MigrationConfig class _MigrationConfigChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: if migration_config := self._config._unique_sections.get(MigrationConfig.name): self._check_if_entity_property_key_used_is_predefined(migration_config) migration_fcts = migration_config.migration_fcts for target_version, migration_functions in migration_config.migration_fcts.items(): for config_id, migration_function in migration_functions.items(): self._check_callable(target_version, config_id, migration_function) self._check_valid_production_version(migration_fcts) self._check_migration_from_productions_to_productions_exist(migration_fcts) return self._collector def _check_callable(self, target_version, config_id, migration_function): if not callable(migration_function): self._error( MigrationConfig._MIGRATION_FCTS_KEY, migration_function, f\"The migration function of config `{config_id}` from version {target_version}\" f\" must be populated with Callable value.\", ) def _check_valid_production_version(self, migration_fcts): for target_version in migration_fcts.keys(): if target_version not in _VersionManagerFactory._build_manager()._get_production_versions(): self._error( MigrationConfig._MIGRATION_FCTS_KEY, target_version, \"The target version for a migration function must be a production version.\", ) def _check_migration_from_productions_to_productions_exist(self, migration_fcts): production_versions = _VersionManagerFactory._build_manager()._get_production_versions() for source_version, target_version in zip(production_versions[:-1], production_versions[1:]): if not migration_fcts.get(target_version): self._info( \"target_version\", None, f'There is no migration function from production version \"{source_version}\"' f' to version \"{target_version}\".', ) "} {"text": "from typing import Dict from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import IssueCollector from ..data_node_config import DataNodeConfig from ..job_config import JobConfig class _JobConfigChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: if job_config := self._config._unique_sections.get(JobConfig.name): data_node_configs = self._config._sections[DataNodeConfig.name] self._check_multiprocess_mode(job_config, data_node_configs) return self._collector def _check_multiprocess_mode(self, job_config: JobConfig, data_node_configs: Dict[str, DataNodeConfig]): if job_config.is_standalone: for cfg_id, data_node_config in data_node_configs.items(): if data_node_config.storage_type == DataNodeConfig._STORAGE_TYPE_VALUE_IN_MEMORY: self._error( DataNodeConfig._STORAGE_TYPE_KEY, data_node_config.storage_type, f\"DataNode `{cfg_id}`: In-memory storage type can ONLY be used in \" f\"{JobConfig._DEVELOPMENT_MODE} mode.\", ) "} {"text": "from typing import Dict, List from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import 
IssueCollector class _ConfigIdChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: existing_config_ids: Dict[str, List[str]] = dict() for entity_type, section_dictionary in self._config._sections.items(): for config_id in section_dictionary.keys(): if config_id in existing_config_ids.keys(): existing_config_ids[config_id].append(entity_type) else: existing_config_ids[config_id] = [entity_type] for config_id, entity_types in existing_config_ids.items(): if config_id != \"default\" and len(entity_types) > 1: self._error( \"config_id\", config_id, f\"`{config_id}` is used as the config_id of multiple configurations {str(entity_types)}\", ) "} {"text": "from taipy.config import Config from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import IssueCollector from taipy.config.common.frequency import Frequency from ..data_node_config import DataNodeConfig from ..scenario_config import ScenarioConfig from ..task_config import TaskConfig class _ScenarioConfigChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: scenario_configs = self._config._sections[ScenarioConfig.name] for scenario_config_id, scenario_config in scenario_configs.items(): if scenario_config_id != _Config.DEFAULT_KEY: self._check_if_entity_property_key_used_is_predefined(scenario_config) self._check_existing_config_id(scenario_config) self._check_frequency(scenario_config_id, scenario_config) self._check_task_configs(scenario_config_id, scenario_config) self._check_addition_data_node_configs(scenario_config_id, scenario_config) self._check_additional_dns_not_overlapping_tasks_dns(scenario_config_id, scenario_config) self._check_tasks_in_sequences_exist_in_scenario_tasks(scenario_config_id, scenario_config) self._check_comparators(scenario_config_id, scenario_config) return self._collector def _check_task_configs(self, scenario_config_id: str, scenario_config: ScenarioConfig): self._check_children( ScenarioConfig, scenario_config_id, scenario_config._TASKS_KEY, scenario_config.tasks, TaskConfig, ) def _check_addition_data_node_configs(self, scenario_config_id: str, scenario_config: ScenarioConfig): self._check_children( ScenarioConfig, scenario_config_id, scenario_config._ADDITIONAL_DATA_NODES_KEY, scenario_config.additional_data_nodes, DataNodeConfig, can_be_empty=True, ) def _check_frequency(self, scenario_config_id: str, scenario_config: ScenarioConfig): if scenario_config.frequency and not isinstance(scenario_config.frequency, Frequency): self._error( scenario_config._FREQUENCY_KEY, scenario_config.frequency, f\"{scenario_config._FREQUENCY_KEY} field of ScenarioConfig `{scenario_config_id}` must be\" f\" populated with a Frequency value.\", ) def _check_comparators(self, scenario_config_id: str, scenario_config: ScenarioConfig): if scenario_config.comparators is not None and not isinstance(scenario_config.comparators, dict): self._error( ScenarioConfig._COMPARATOR_KEY, scenario_config.comparators, f\"{ScenarioConfig._COMPARATOR_KEY} field of ScenarioConfig\" f\" `{scenario_config_id}` must be populated with a dictionary value.\", ) else: for data_node_id, comparator in scenario_config.comparators.items(): if data_node_id not in Config.data_nodes: self._error( ScenarioConfig._COMPARATOR_KEY, 
scenario_config.comparators, f\"The key `{data_node_id}` in {ScenarioConfig._COMPARATOR_KEY} field of ScenarioConfig\" f\" `{scenario_config_id}` must be populated with a valid data node configuration id.\", ) if not callable(comparator): if not isinstance(comparator, list) or not all(callable(comp) for comp in comparator): self._error( ScenarioConfig._COMPARATOR_KEY, scenario_config.comparators, f\"The value of `{data_node_id}` in {ScenarioConfig._COMPARATOR_KEY} field of ScenarioConfig\" f\" `{scenario_config_id}` must be populated with a list of Callable values.\", ) def _check_additional_dns_not_overlapping_tasks_dns(self, scenario_config_id: str, scenario_config: ScenarioConfig): data_node_configs = set() for task_config in scenario_config.task_configs: if isinstance(task_config, TaskConfig): input_dn_configs = task_config.input_configs if task_config.input_configs else [] output_dn_configs = task_config.output_configs if task_config.output_configs else [] data_node_configs.update({*input_dn_configs, *output_dn_configs}) for additional_data_node_config in scenario_config.additional_data_node_configs: if additional_data_node_config in data_node_configs: self._warning( ScenarioConfig._ADDITIONAL_DATA_NODES_KEY, scenario_config.additional_data_node_configs, f\"The additional data node `{additional_data_node_config.id}` in\" f\" {ScenarioConfig._ADDITIONAL_DATA_NODES_KEY} field of ScenarioConfig\" f\" `{scenario_config_id}` already exists as an input or output data node of\" f\" ScenarioConfig `{scenario_config_id}` tasks.\", ) def _check_tasks_in_sequences_exist_in_scenario_tasks( self, scenario_config_id: str, scenario_config: ScenarioConfig ): scenario_task_ids = set() for task_config in scenario_config.tasks: if isinstance(task_config, TaskConfig): scenario_task_ids.add(task_config.id) for sequence_tasks in scenario_config.sequences.values(): self._check_children( ScenarioConfig, scenario_config_id, scenario_config._SEQUENCES_KEY, sequence_tasks, TaskConfig, can_be_empty=True, ) for task in sequence_tasks: if isinstance(task, TaskConfig) and task.id not in scenario_task_ids: self._error( ScenarioConfig._SEQUENCES_KEY, scenario_config.sequences, f\"The task `{task.id}` in {ScenarioConfig._SEQUENCES_KEY} field of ScenarioConfig\" f\" `{scenario_config_id}` must exist in {ScenarioConfig._TASKS_KEY} field of ScenarioConfig\" f\" `{scenario_config_id}`.\", ) "} {"text": "from datetime import timedelta from typing import Dict from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import IssueCollector from taipy.config.common.scope import Scope from ..data_node_config import DataNodeConfig class _DataNodeConfigChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: data_node_configs: Dict[str, DataNodeConfig] = self._config._sections[DataNodeConfig.name] for data_node_config_id, data_node_config in data_node_configs.items(): self._check_existing_config_id(data_node_config) self._check_if_entity_property_key_used_is_predefined(data_node_config) self._check_storage_type(data_node_config_id, data_node_config) self._check_scope(data_node_config_id, data_node_config) self._check_validity_period(data_node_config_id, data_node_config) self._check_required_properties(data_node_config_id, data_node_config) self._check_callable(data_node_config_id, data_node_config) 
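# The remaining checks below validate the generic read/write functions (and their argument lists) and the exposed_type value.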
self._check_generic_read_write_fct_and_args(data_node_config_id, data_node_config) self._check_exposed_type(data_node_config_id, data_node_config) return self._collector def _check_storage_type(self, data_node_config_id: str, data_node_config: DataNodeConfig): if data_node_config.storage_type not in DataNodeConfig._ALL_STORAGE_TYPES: self._error( data_node_config._STORAGE_TYPE_KEY, data_node_config.storage_type, f\"`{data_node_config._STORAGE_TYPE_KEY}` field of DataNodeConfig `{data_node_config_id}` must be\" f\" either csv, sql_table, sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory.\", ) def _check_scope(self, data_node_config_id: str, data_node_config: DataNodeConfig): if not isinstance(data_node_config.scope, Scope): self._error( data_node_config._SCOPE_KEY, data_node_config.scope, f\"`{data_node_config._SCOPE_KEY}` field of DataNodeConfig `{data_node_config_id}` must be\" f\" populated with a Scope value.\", ) def _check_validity_period(self, data_node_config_id: str, data_node_config: DataNodeConfig): if data_node_config.validity_period and not isinstance(data_node_config.validity_period, timedelta): self._error( data_node_config._VALIDITY_PERIOD_KEY, data_node_config.validity_period, f\"`{data_node_config._VALIDITY_PERIOD_KEY}` field of DataNodeConfig `{data_node_config_id}` must be\" f\" None or populated with a timedelta value.\", ) def _check_required_properties(self, data_node_config_id: str, data_node_config: DataNodeConfig): if storage_type := data_node_config.storage_type: if storage_type in DataNodeConfig._REQUIRED_PROPERTIES: required_properties = DataNodeConfig._REQUIRED_PROPERTIES[storage_type] if storage_type == DataNodeConfig._STORAGE_TYPE_VALUE_SQL: if data_node_config.properties: if engine := data_node_config.properties.get(DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY): if engine == DataNodeConfig._DB_ENGINE_SQLITE: required_properties = [ DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, DataNodeConfig._REQUIRED_READ_QUERY_SQL_PROPERTY, DataNodeConfig._REQUIRED_WRITE_QUERY_BUILDER_SQL_PROPERTY, ] else: required_properties = [ DataNodeConfig._OPTIONAL_DB_USERNAME_SQL_PROPERTY, DataNodeConfig._OPTIONAL_DB_PASSWORD_SQL_PROPERTY, DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, DataNodeConfig._REQUIRED_READ_QUERY_SQL_PROPERTY, DataNodeConfig._REQUIRED_WRITE_QUERY_BUILDER_SQL_PROPERTY, ] if storage_type == DataNodeConfig._STORAGE_TYPE_VALUE_SQL_TABLE: if data_node_config.properties: if engine := data_node_config.properties.get(DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY): if engine == DataNodeConfig._DB_ENGINE_SQLITE: required_properties = [ DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, DataNodeConfig._REQUIRED_TABLE_NAME_SQL_TABLE_PROPERTY, ] else: required_properties = [ DataNodeConfig._OPTIONAL_DB_USERNAME_SQL_PROPERTY, DataNodeConfig._OPTIONAL_DB_PASSWORD_SQL_PROPERTY, DataNodeConfig._REQUIRED_DB_NAME_SQL_PROPERTY, DataNodeConfig._REQUIRED_DB_ENGINE_SQL_PROPERTY, DataNodeConfig._REQUIRED_TABLE_NAME_SQL_TABLE_PROPERTY, ] for required_property in required_properties: if not data_node_config.properties or required_property not in data_node_config.properties: if data_node_config_id == DataNodeConfig._DEFAULT_KEY: self._warning( required_property, None, f\"DataNodeConfig `{data_node_config_id}` is missing the required \" f\"property `{required_property}` for type `{storage_type}`.\", ) else: 
self._error( required_property, None, f\"DataNodeConfig `{data_node_config_id}` is missing the required \" f\"property `{required_property}` for type `{storage_type}`.\", ) def _check_generic_read_write_fct_and_args(self, data_node_config_id: str, data_node_config: DataNodeConfig): if data_node_config.storage_type == DataNodeConfig._STORAGE_TYPE_VALUE_GENERIC: properties_to_check = [ DataNodeConfig._OPTIONAL_READ_FUNCTION_ARGS_GENERIC_PROPERTY, DataNodeConfig._OPTIONAL_WRITE_FUNCTION_ARGS_GENERIC_PROPERTY, ] for prop_key in properties_to_check: if data_node_config.properties and prop_key in data_node_config.properties: prop_value = data_node_config.properties[prop_key] if not isinstance(prop_value, list): self._error( prop_key, prop_value, f\"`{prop_key}` field of DataNodeConfig\" f\" `{data_node_config_id}` must be populated with a List value.\", ) if data_node_config_id != DataNodeConfig._DEFAULT_KEY: properties_to_check_at_least_one = [ DataNodeConfig._OPTIONAL_READ_FUNCTION_GENERIC_PROPERTY, DataNodeConfig._OPTIONAL_WRITE_FUNCTION_GENERIC_PROPERTY, ] has_at_least_one = False for prop_key in properties_to_check_at_least_one: if data_node_config.properties and prop_key in data_node_config.properties: has_at_least_one = True if not has_at_least_one: self._error( \", \".join(properties_to_check_at_least_one), None, f\"Either `{DataNodeConfig._OPTIONAL_READ_FUNCTION_GENERIC_PROPERTY}` field or \" f\"`{DataNodeConfig._OPTIONAL_WRITE_FUNCTION_GENERIC_PROPERTY}` field of \" f\"DataNodeConfig `{data_node_config_id}` must be populated with a Callable function.\", ) def _check_callable(self, data_node_config_id: str, data_node_config: DataNodeConfig): properties_to_check = { DataNodeConfig._STORAGE_TYPE_VALUE_GENERIC: [ DataNodeConfig._OPTIONAL_READ_FUNCTION_GENERIC_PROPERTY, DataNodeConfig._OPTIONAL_WRITE_FUNCTION_GENERIC_PROPERTY, ], DataNodeConfig._STORAGE_TYPE_VALUE_SQL: [ DataNodeConfig._REQUIRED_WRITE_QUERY_BUILDER_SQL_PROPERTY, DataNodeConfig._OPTIONAL_APPEND_QUERY_BUILDER_SQL_PROPERTY, ], } if data_node_config.storage_type in properties_to_check.keys(): for prop_key in properties_to_check[data_node_config.storage_type]: prop_value = data_node_config.properties.get(prop_key) if data_node_config.properties else None if prop_value and not callable(prop_value): self._error( prop_key, prop_value, f\"`{prop_key}` of DataNodeConfig `{data_node_config_id}` must be\" f\" populated with a Callable function.\", ) def _check_exposed_type(self, data_node_config_id: str, data_node_config: DataNodeConfig): if not isinstance(data_node_config.exposed_type, str): return if data_node_config.exposed_type not in DataNodeConfig._ALL_EXPOSED_TYPES: self._error( data_node_config._EXPOSED_TYPE_KEY, data_node_config.exposed_type, f\"The `{data_node_config._EXPOSED_TYPE_KEY}` of DataNodeConfig `{data_node_config_id}` \" f'must be either \"pandas\", \"modin\", \"numpy\", or a custom type.', ) "} {"text": "from taipy.config._config import _Config from taipy.config.checker._checkers._config_checker import _ConfigChecker from taipy.config.checker.issue_collector import IssueCollector from ..data_node_config import DataNodeConfig from ..task_config import TaskConfig class _TaskConfigChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: task_configs = self._config._sections[TaskConfig.name] for task_config_id, task_config in task_configs.items(): if task_config_id != _Config.DEFAULT_KEY: 
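# The default task config is only a template, so the checks below apply to user-defined task configs only.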
self._check_existing_config_id(task_config) self._check_if_entity_property_key_used_is_predefined(task_config) self._check_existing_function(task_config_id, task_config) self._check_inputs(task_config_id, task_config) self._check_outputs(task_config_id, task_config) return self._collector def _check_inputs(self, task_config_id: str, task_config: TaskConfig): self._check_children( TaskConfig, task_config_id, task_config._INPUT_KEY, task_config.input_configs, DataNodeConfig ) def _check_outputs(self, task_config_id: str, task_config: TaskConfig): self._check_children( TaskConfig, task_config_id, task_config._OUTPUT_KEY, task_config.output_configs, DataNodeConfig ) def _check_existing_function(self, task_config_id: str, task_config: TaskConfig): if not task_config.function: self._error( task_config._FUNCTION, task_config.function, f\"{task_config._FUNCTION} field of TaskConfig `{task_config_id}` is empty.\", ) else: if not callable(task_config.function): self._error( task_config._FUNCTION, task_config.function, f\"{task_config._FUNCTION} field of TaskConfig `{task_config_id}` must be\" f\" populated with Callable value.\", ) "} {"text": "from dataclasses import dataclass, field from datetime import datetime from functools import singledispatch from typing import Any, Optional from ..common._repr_enum import _ReprEnum from ..exceptions.exceptions import InvalidEventAttributeName, InvalidEventOperation class EventOperation(_ReprEnum): \"\"\"Enum representing a type of operation performed on a Core entity. `EventOperation` is used as an attribute of the `Event^` object to describe the operation performed on an entity.
The possible operations are `CREATION`, `UPDATE`, `DELETION`, or `SUBMISSION`. \"\"\" CREATION = 1 UPDATE = 2 DELETION = 3 SUBMISSION = 4 class EventEntityType(_ReprEnum): \"\"\"Enum representing an entity type. `EventEntityType` is used as an attribute of the `Event^` object to describe an entity that was changed.
The possible entity types are `CYCLE`, `SCENARIO`, `SEQUENCE`, `TASK`, `DATA_NODE`, `JOB`, or `SUBMISSION`. \"\"\" CYCLE = 1 SCENARIO = 2 SEQUENCE = 3 TASK = 4 DATA_NODE = 5 JOB = 6 SUBMISSION = 7 _NO_ATTRIBUTE_NAME_OPERATIONS = set([EventOperation.CREATION, EventOperation.DELETION, EventOperation.SUBMISSION]) _UNSUBMITTABLE_ENTITY_TYPES = (EventEntityType.CYCLE, EventEntityType.DATA_NODE, EventEntityType.JOB) _ENTITY_TO_EVENT_ENTITY_TYPE = { \"scenario\": EventEntityType.SCENARIO, \"sequence\": EventEntityType.SEQUENCE, \"task\": EventEntityType.TASK, \"data\": EventEntityType.DATA_NODE, \"job\": EventEntityType.JOB, \"cycle\": EventEntityType.CYCLE, \"submission\": EventEntityType.SUBMISSION, } @dataclass(frozen=True) class Event: \"\"\"Event object used to notify any change in the Core service. An event holds the necessary attributes to identify the change. Attributes: entity_type (EventEntityType^): Type of the entity that was changed (`DataNode^`, `Scenario^`, `Cycle^`, etc.). entity_id (Optional[str]): Unique identifier of the entity that was changed. operation (EventOperation^): Enum describing the operation (among `CREATION`, `UPDATE`, `DELETION`, and `SUBMISSION`) that was performed on the entity. attribute_name (Optional[str]): Name of the entity's attribute changed. Only relevant for `UPDATE` operations. attribute_value (Optional[Any]): Value of the entity's attribute changed. Only relevant for `UPDATE` operations. metadata (dict): A dict of additional metadata about the source of this event. creation_date (datetime): Date and time of the event creation. \"\"\" entity_type: EventEntityType operation: EventOperation entity_id: Optional[str] = None attribute_name: Optional[str] = None attribute_value: Optional[Any] = None metadata: dict = field(default_factory=dict) creation_date: datetime = field(init=False) def __post_init__(self): # Creation date super().__setattr__(\"creation_date\", datetime.now()) # Check operation: if self.entity_type in _UNSUBMITTABLE_ENTITY_TYPES and self.operation == EventOperation.SUBMISSION: raise InvalidEventOperation # Check attribute name: if self.operation in _NO_ATTRIBUTE_NAME_OPERATIONS and self.attribute_name is not None: raise InvalidEventAttributeName @singledispatch def _make_event( entity: Any, operation: EventOperation, /, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ) -> Event: \"\"\"Helper function to make an event for this entity with the given `EventOperation^` type. In case of `EventOperation.UPDATE^` events, an attribute name and value must be given. Parameters: entity (Any): The entity object to generate an event for. operation (EventOperation^): The operation of the event. The possible values are:
  • CREATION
  • UPDATE
  • DELETION
  • SUBMISSION
attribute_name (Optional[str]): The name of the updated attribute for an `EventOperation.UPDATE`. This argument is always given in case of an UPDATE. attribute_value (Optional[Any]): The value of the updated attribute for an `EventOperation.UPDATE`. This argument is always given in case of an UPDATE. **kwargs (dict[str, any]): Any extra information that would be passed to the event metadata. Note: you should pass only simple types: str, float, double as values.\"\"\" raise Exception(f\"Unexpected entity type: {type(entity)}\") "} {"text": "from queue import SimpleQueue from typing import Any, Dict, Optional, Set, Tuple from ._registration import _Registration from ._topic import _Topic from .event import Event, EventEntityType, EventOperation def _publish_event( entity_type: EventEntityType, operation: EventOperation, /, entity_id: Optional[str] = None, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ): \"\"\"Internal helper function to send events. It basically creates an event corresponding to the given arguments and sends it using `Notifier.publish(event)`. Parameters: entity_type (EventEntityType^) operation (EventOperation^) entity_id (Optional[str]) attribute_name (Optional[str]) attribute_value (Optional[Any]) **kwargs \"\"\" event = Event( entity_id=entity_id, entity_type=entity_type, operation=operation, attribute_name=attribute_name, attribute_value=attribute_value, metadata=kwargs, ) Notifier.publish(event) class Notifier: \"\"\"A class for managing event registrations and publishing `Core^` service events.\"\"\" _topics_registrations_list: Dict[_Topic, Set[_Registration]] = {} @classmethod def register( cls, entity_type: Optional[EventEntityType] = None, entity_id: Optional[str] = None, operation: Optional[EventOperation] = None, attribute_name: Optional[str] = None, ) -> Tuple[str, SimpleQueue]: \"\"\"Register a listener for a specific event topic. The topic is defined by the combination of the entity type, the entity id, the operation and the attribute name. Parameters: entity_type (Optional[EventEntityType^]): If provided, the listener will be notified for all events related to this entity type. Otherwise, the listener will be notified for events related to all entity types.
The possible entity type values are defined in the `EventEntityType^` enum. The possible values are:
  • CYCLE
  • SCENARIO
  • SEQUENCE
  • TASK
  • DATA_NODE
  • JOB
entity_id (Optional[str]): If provided, the listener will be notified for all events related to this entity. Otherwise, the listener will be notified for events related to all entities. operation (Optional[EventOperation^]): If provided, the listener will be notified for all events related to this operation. Otherwise, the listener will be notified for events related to all operations.
The possible operation values are defined in the `EventOperation^` enum. The possible values are:
  • CREATION
  • UPDATE
  • DELETION
  • SUBMISSION
attribute_name (Optional[str]): If provided, the listener will be notified for all events related to this entity's attribute. Otherwise, the listener will be notified for events related to all attributes. Returns: A tuple containing the registration id and the event queue. \"\"\" registration = _Registration(entity_type, entity_id, operation, attribute_name) if registrations := cls._topics_registrations_list.get(registration.topic, None): registrations.add(registration) else: cls._topics_registrations_list[registration.topic] = {registration} return registration.registration_id, registration.queue @classmethod def unregister(cls, registration_id: str): \"\"\"Unregister a listener. Parameters: registration_id (RegistrationId^): The registration id returned by the `register` method. \"\"\" to_remove_registration: Optional[_Registration] = None for _, registrations in cls._topics_registrations_list.items(): for registration in registrations: if registration.registration_id == registration_id: to_remove_registration = registration break if to_remove_registration: registrations = cls._topics_registrations_list[to_remove_registration.topic] registrations.remove(to_remove_registration) if len(registrations) == 0: del cls._topics_registrations_list[to_remove_registration.topic] @classmethod def publish(cls, event): \"\"\"Publish a `Core^` service event to all registered listeners whose topic matches the event. Parameters: event (Event^): The event to publish. \"\"\" for topic, registrations in cls._topics_registrations_list.items(): if Notifier._is_matching(event, topic): for registration in registrations: registration.queue.put(event) @staticmethod def _is_matching(event: Event, topic: _Topic) -> bool: \"\"\"Check if an event matches a topic.\"\"\" if topic.entity_type is not None and event.entity_type != topic.entity_type: return False if topic.entity_id is not None and event.entity_id != topic.entity_id: return False if topic.operation is not None and event.operation != topic.operation: return False if topic.attribute_name is not None and event.attribute_name and event.attribute_name != topic.attribute_name: return False return True "} {"text": "\"\"\" Package for notifications about changes on `Core^` service entities. The Core service generates `Event^` objects to track changes on entities. These events are then relayed to a `Notifier^`, which handles the dispatch to consumers interested in specific event topics. To subscribe, a consumer needs to invoke the `Notifier.register()^` method. This call will yield a `RegistrationId^` and a dedicated event queue for receiving notifications. To handle notifications, an event consumer (e.g., the `CoreEventConsumerBase^` object) must be instantiated with an associated event queue. \"\"\" from ._registration import _Registration from ._topic import _Topic from .core_event_consumer import CoreEventConsumerBase from .event import _ENTITY_TO_EVENT_ENTITY_TYPE, Event, EventEntityType, EventOperation, _make_event from .notifier import Notifier, _publish_event from .registration_id import RegistrationId "} {"text": "from typing import NewType RegistrationId = NewType(\"RegistrationId\", str) RegistrationId.__doc__ = \"\"\"Registration identifier. 
It can be used to instantiate a `CoreEventConsumerBase^`.\"\"\" "} {"text": "from queue import SimpleQueue from typing import Optional from uuid import uuid4 from ._topic import _Topic from .event import EventEntityType, EventOperation from .registration_id import RegistrationId class _Registration: _ID_PREFIX = \"REGISTRATION\" __SEPARATOR = \"_\" def __init__( self, entity_type: Optional[EventEntityType] = None, entity_id: Optional[str] = None, operation: Optional[EventOperation] = None, attribute_name: Optional[str] = None, ): self.registration_id: str = self._new_id() self.topic: _Topic = _Topic(entity_type, entity_id, operation, attribute_name) self.queue: SimpleQueue = SimpleQueue() @staticmethod def _new_id() -> RegistrationId: \"\"\"Generate a unique registration identifier.\"\"\" return RegistrationId(_Registration.__SEPARATOR.join([_Registration._ID_PREFIX, str(uuid4())])) def __hash__(self) -> int: return hash(self.registration_id) "} {"text": "from typing import Optional from ..exceptions.exceptions import InvalidEventOperation from .event import _UNSUBMITTABLE_ENTITY_TYPES, EventEntityType, EventOperation class _Topic: def __init__( self, entity_type: Optional[EventEntityType] = None, entity_id: Optional[str] = None, operation: Optional[EventOperation] = None, attribute_name: Optional[str] = None, ): self.entity_type = entity_type self.entity_id = entity_id self.operation = self.__preprocess_operation(operation, self.entity_type) self.attribute_name = self.__preprocess_attribute_name(attribute_name, self.operation) @classmethod def __preprocess_attribute_name( cls, attribute_name: Optional[str] = None, operation: Optional[EventOperation] = None ) -> Optional[str]: # if operation in _NO_ATTRIBUTE_NAME_OPERATIONS and attribute_name is not None: # raise InvalidEventAttributeName return attribute_name @classmethod def __preprocess_operation( cls, operation: Optional[EventOperation] = None, entity_type: Optional[EventEntityType] = None ) -> Optional[EventOperation]: if ( entity_type and operation and entity_type in _UNSUBMITTABLE_ENTITY_TYPES and operation == EventOperation.SUBMISSION ): raise InvalidEventOperation return operation def __hash__(self): return hash((self.entity_type, self.entity_id, self.operation, self.attribute_name)) def __eq__(self, __value) -> bool: if ( self.entity_type == __value.entity_type and self.entity_id == __value.entity_id and self.operation == __value.operation and self.attribute_name == __value.attribute_name ): return True return False "} {"text": "import abc import threading from queue import Empty, SimpleQueue from .event import Event class CoreEventConsumerBase(threading.Thread): \"\"\"Abstract base class for implementing a Core event consumer. This class provides a framework for consuming events from a queue in a separate thread. It should be subclassed, and the `process_event` method should be implemented to define the custom logic for handling incoming events. Example usage: ```python class MyEventConsumer(CoreEventConsumerBase): def process_event(self, event: Event): # Custom event processing logic here print(f\"Received event created at : {event.creation_date}\") pass consumer = MyEventConsumer(\"consumer_1\", event_queue) consumer.start() # ... consumer.stop() ``` Subclasses should implement the `process_event` method to define their specific event handling behavior. Attributes: queue (SimpleQueue): The queue from which events will be consumed. 
\"\"\" def __init__(self, registration_id: str, queue: SimpleQueue): \"\"\"Initialize a CoreEventConsumerBase instance. Parameters: registration_id (str): A unique identifier of the registration. You can get a registration id invoking `Notifier.register()^` method. queue (SimpleQueue): The queue from which events will be consumed. You can get a queue invoking `Notifier.register()^` method. \"\"\" threading.Thread.__init__(self, name=f\"Thread-Taipy-Core-Consumer-{registration_id}\") self.daemon = True self.queue = queue self.__STOP_FLAG = False self._TIMEOUT = 0.1 def start(self): \"\"\"Start the event consumer thread.\"\"\" self.__STOP_FLAG = False threading.Thread.start(self) def stop(self): \"\"\"Stop the event consumer thread.\"\"\" self.__STOP_FLAG = True def run(self): while not self.__STOP_FLAG: try: event: Event = self.queue.get(block=True, timeout=self._TIMEOUT) self.process_event(event) except Empty: pass @abc.abstractmethod def process_event(self, event: Event): \"\"\"This method should be overridden in subclasses to define how events are processed.\"\"\" raise NotImplementedError "} {"text": "import re import uuid from datetime import datetime from typing import Any, Dict, Optional from taipy.config.common.frequency import Frequency from .._entity._entity import _Entity from .._entity._labeled import _Labeled from .._entity._properties import _Properties from .._entity._reload import _Reloader, _self_reload, _self_setter from ..exceptions.exceptions import _SuspiciousFileOperation from ..notification.event import Event, EventEntityType, EventOperation, _make_event from .cycle_id import CycleId class Cycle(_Entity, _Labeled): \"\"\"An iteration of a recurrent work pattern. Attributes: id (str): The unique identifier of the cycle. frequency (Frequency^): The frequency of this cycle. creation_date (datetime): The date and time of the creation of this cycle. start_date (datetime): The date and time of the start of this cycle. end_date (datetime): The date and time of the end of this cycle. name (str): The name of this cycle. properties (dict[str, Any]): A dictionary of additional properties. \"\"\" _ID_PREFIX = \"CYCLE\" __SEPARATOR = \"_\" _MANAGER_NAME = \"cycle\" def __init__( self, frequency: Frequency, properties: Dict[str, Any], creation_date: datetime, start_date: datetime, end_date: datetime, name: Optional[str] = None, id: Optional[CycleId] = None, ): self._frequency = frequency self._creation_date = creation_date self._start_date = start_date self._end_date = end_date self._name = self._new_name(name) self.id = id or self._new_id(self._name) self._properties = _Properties(self, **properties) def _new_name(self, name: Optional[str] = None) -> str: if name: return name if self._frequency == Frequency.DAILY: # Example \"Monday, 2. January 2023\" return self._start_date.strftime(\"%A, %d. %B %Y\") if self._frequency == Frequency.WEEKLY: # Example \"Week 01 2023, from 2. January\" return self._start_date.strftime(\"Week %W %Y, from %d. 
%B\") if self._frequency == Frequency.MONTHLY: # Example \"January 2023\" return self._start_date.strftime(\"%B %Y\") if self._frequency == Frequency.QUARTERLY: # Example \"2023 Q1\" return f\"{self._start_date.strftime('%Y')} Q{(self._start_date.month-1)//3+1}\" if self._frequency == Frequency.YEARLY: # Example \"2023\" return self._start_date.strftime(\"%Y\") return Cycle.__SEPARATOR.join([str(self._frequency.value), self._start_date.ctime()]) @property # type: ignore @_self_reload(_MANAGER_NAME) def frequency(self): return self._frequency @frequency.setter # type: ignore @_self_setter(_MANAGER_NAME) def frequency(self, val): self._frequency = val @property # type: ignore @_self_reload(_MANAGER_NAME) def creation_date(self): return self._creation_date @creation_date.setter # type: ignore @_self_setter(_MANAGER_NAME) def creation_date(self, val): self._creation_date = val @property # type: ignore @_self_reload(_MANAGER_NAME) def start_date(self): return self._start_date @start_date.setter # type: ignore @_self_setter(_MANAGER_NAME) def start_date(self, val): self._start_date = val @property # type: ignore @_self_reload(_MANAGER_NAME) def end_date(self): return self._end_date @end_date.setter # type: ignore @_self_setter(_MANAGER_NAME) def end_date(self, val): self._end_date = val @property # type: ignore @_self_reload(_MANAGER_NAME) def name(self): return self._name @name.setter # type: ignore @_self_setter(_MANAGER_NAME) def name(self, val): self._name = val @property def properties(self): self._properties = _Reloader()._reload(self._MANAGER_NAME, self)._properties return self._properties @staticmethod def _new_id(name: str) -> CycleId: def _get_valid_filename(name: str) -> str: \"\"\" Source: https://github.com/django/django/blob/main/django/utils/text.py \"\"\" s = str(name).strip().replace(\" \", \"_\") s = re.sub(r\"(?u)[^-\\w.]\", \"\", s) if s in {\"\", \".\", \"..\"}: raise _SuspiciousFileOperation(\"Could not derive file name from '%s'\" % name) s = str(s).strip().replace(\" \", \"_\") return re.sub(r\"(?u)[^-\\w.]\", \"\", s) return CycleId(_get_valid_filename(Cycle.__SEPARATOR.join([Cycle._ID_PREFIX, name, str(uuid.uuid4())]))) def __getattr__(self, attribute_name): protected_attribute_name = attribute_name if protected_attribute_name in self._properties: return self._properties[protected_attribute_name] raise AttributeError(f\"{attribute_name} is not an attribute of cycle {self.id}\") def __eq__(self, other): return self.id == other.id def __hash__(self): return hash(self.id) def get_label(self) -> str: \"\"\"Returns the cycle label. Returns: The label of the cycle as a string. \"\"\" return self._get_label() def get_simple_label(self) -> str: \"\"\"Returns the cycle simple label. Returns: The simple label of the cycle as a string. 
\"\"\" return self._get_simple_label() @_make_event.register(Cycle) def _make_event_for_cycle( cycle: Cycle, operation: EventOperation, /, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ) -> Event: metadata = {**kwargs} return Event( entity_type=EventEntityType.CYCLE, entity_id=cycle.id, operation=operation, attribute_name=attribute_name, attribute_value=attribute_value, metadata=metadata, ) "} {"text": "import calendar from datetime import datetime, time, timedelta from typing import Callable, Dict, List, Optional from taipy.config.common.frequency import Frequency from .._entity._entity_ids import _EntityIds from .._manager._manager import _Manager from .._repository._abstract_repository import _AbstractRepository from ..job._job_manager_factory import _JobManagerFactory from ..notification import EventEntityType, EventOperation, _publish_event from ..submission._submission_manager_factory import _SubmissionManagerFactory from .cycle import Cycle from .cycle_id import CycleId class _CycleManager(_Manager[Cycle]): _ENTITY_NAME = Cycle.__name__ _repository: _AbstractRepository _EVENT_ENTITY_TYPE = EventEntityType.CYCLE @classmethod def _create( cls, frequency: Frequency, name: Optional[str] = None, creation_date: Optional[datetime] = None, **properties ): creation_date = creation_date if creation_date else datetime.now() start_date = _CycleManager._get_start_date_of_cycle(frequency, creation_date) end_date = _CycleManager._get_end_date_of_cycle(frequency, start_date) cycle = Cycle( frequency, properties, creation_date=creation_date, start_date=start_date, end_date=end_date, name=name ) cls._set(cycle) _publish_event( cls._EVENT_ENTITY_TYPE, EventOperation.CREATION, entity_id=cycle.id, ) return cycle @classmethod def _get_or_create( cls, frequency: Frequency, creation_date: Optional[datetime] = None, name: Optional[str] = None ) -> Cycle: creation_date = creation_date if creation_date else datetime.now() start_date = _CycleManager._get_start_date_of_cycle(frequency, creation_date) cycles = cls._get_cycles_by_frequency_and_start_date( frequency=frequency, start_date=start_date, cycles=cls._get_all() ) if len(cycles) > 0: return cycles[0] else: return cls._create(frequency=frequency, creation_date=creation_date, name=name) @staticmethod def _get_start_date_of_cycle(frequency: Frequency, creation_date: datetime): start_date = creation_date.date() start_time = time() if frequency == Frequency.DAILY: start_date = start_date if frequency == Frequency.WEEKLY: start_date = start_date - timedelta(days=start_date.weekday()) if frequency == Frequency.MONTHLY: start_date = start_date.replace(day=1) if frequency == Frequency.YEARLY: start_date = start_date.replace(day=1, month=1) return datetime.combine(start_date, start_time) @staticmethod def _get_end_date_of_cycle(frequency: Frequency, start_date: datetime): end_date = start_date if frequency == Frequency.DAILY: end_date = end_date + timedelta(days=1) if frequency == Frequency.WEEKLY: end_date = end_date + timedelta(7 - end_date.weekday()) if frequency == Frequency.MONTHLY: last_day_of_month = calendar.monthrange(start_date.year, start_date.month)[1] end_date = end_date.replace(day=last_day_of_month) + timedelta(days=1) if frequency == Frequency.YEARLY: end_date = end_date.replace(month=12, day=31) + timedelta(days=1) return end_date - timedelta(microseconds=1) @classmethod def _hard_delete(cls, cycle_id: CycleId): cycle = cls._get(cycle_id) entity_ids_to_delete = cls._get_children_entity_ids(cycle) 
entity_ids_to_delete.cycle_ids.add(cycle.id) cls._delete_entities_of_multiple_types(entity_ids_to_delete) @classmethod def _get_children_entity_ids(cls, cycle: Cycle) -> _EntityIds: from ..scenario._scenario_manager_factory import _ScenarioManagerFactory entity_ids = _EntityIds() scenarios = _ScenarioManagerFactory._build_manager()._get_all_by_cycle(cycle) for scenario in scenarios: entity_ids.scenario_ids.add(scenario.id) owner_ids = {scenario.id, cycle.id} for sequence in scenario.sequences.values(): if sequence.owner_id in owner_ids: entity_ids.sequence_ids.add(sequence.id) for task in scenario.tasks.values(): if task.owner_id in owner_ids: entity_ids.task_ids.add(task.id) for data_node in scenario.data_nodes.values(): if data_node.owner_id in owner_ids: entity_ids.data_node_ids.add(data_node.id) jobs = _JobManagerFactory._build_manager()._get_all() for job in jobs: if job.task.id in entity_ids.task_ids: entity_ids.job_ids.add(job.id) submissions = _SubmissionManagerFactory._build_manager()._get_all() submitted_entity_ids = list(entity_ids.scenario_ids.union(entity_ids.sequence_ids, entity_ids.task_ids)) for submission in submissions: if submission.entity_id in submitted_entity_ids: entity_ids.submission_ids.add(submission.id) return entity_ids @classmethod def _get_cycles_by_frequency_and_start_date( cls, frequency: Frequency, start_date: datetime, cycles: List[Cycle] ) -> List[Cycle]: return cls._get_cycles_cdt( lambda cycle: cycle.frequency == frequency and cycle.start_date == start_date, cycles ) @classmethod def _get_cycles_by_frequency_and_overlapping_date( cls, frequency: Frequency, date: datetime, cycles: List[Cycle] ) -> List[Cycle]: return cls._get_cycles_cdt( lambda cycle: cycle.frequency == frequency and cycle.start_date <= date <= cycle.end_date, cycles ) @classmethod def _get_cycles_cdt(cls, cdt: Callable[[Cycle], bool], cycles: List[Cycle]) -> List[Cycle]: return [cycle for cycle in cycles if cdt(cycle)] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._filesystem_repository import _FileSystemRepository from ._cycle_converter import _CycleConverter from ._cycle_model import _CycleModel class _CycleFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_CycleModel, converter=_CycleConverter, dir_name=\"cycles\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._sql_repository import _SQLRepository from ._cycle_converter import _CycleConverter from ._cycle_model import _CycleModel class _CycleSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_CycleModel, converter=_CycleConverter) "} {"text": "from typing import NewType CycleId = NewType(\"CycleId\", str) CycleId.__doc__ = \"\"\"Type that holds a `Cycle^` identifier.\"\"\" "} {"text": "from dataclasses import dataclass from typing import Any, Dict from sqlalchemy import JSON, Column, Enum, String, Table from taipy.config.common.frequency import Frequency from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry from .cycle_id import CycleId @mapper_registry.mapped @dataclass class _CycleModel(_BaseModel): __table__ = Table( \"cycle\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"name\", String), Column(\"frequency\", Enum(Frequency)), Column(\"properties\", JSON), Column(\"creation_date\", String), Column(\"start_date\", String), Column(\"end_date\", String), ) id: CycleId name: str frequency: Frequency properties: Dict[str, Any] creation_date: str start_date: str end_date: str @staticmethod def from_dict(data: Dict[str, Any]): return _CycleModel( id=data[\"id\"], name=data[\"name\"], frequency=Frequency._from_repr(data[\"frequency\"]), properties=_BaseModel._deserialize_attribute(data[\"properties\"]), creation_date=data[\"creation_date\"], start_date=data[\"start_date\"], end_date=data[\"end_date\"], ) def to_list(self): return [ self.id, self.name, repr(self.frequency), _BaseModel._serialize_attribute(self.properties), self.creation_date, self.start_date, self.end_date, ] "} {"text": "from datetime import datetime from .._repository._abstract_converter import _AbstractConverter from ..cycle._cycle_model import _CycleModel from ..cycle.cycle import Cycle class _CycleConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, cycle: Cycle) -> _CycleModel: return _CycleModel( id=cycle.id, name=cycle._name, frequency=cycle._frequency, creation_date=cycle._creation_date.isoformat(), start_date=cycle._start_date.isoformat(), end_date=cycle._end_date.isoformat(), properties=cycle._properties.data, ) @classmethod def _model_to_entity(cls, model: _CycleModel) -> Cycle: return Cycle( id=model.id, name=model.name, frequency=model.frequency, properties=model.properties, creation_date=datetime.fromisoformat(model.creation_date), start_date=datetime.fromisoformat(model.start_date), end_date=datetime.fromisoformat(model.end_date), ) "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ..cycle._cycle_manager import _CycleManager from ._cycle_fs_repository import _CycleFSRepository from ._cycle_sql_repository import _CycleSQLRepository class _CycleManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _CycleFSRepository, \"sql\": _CycleSQLRepository} @classmethod def _build_manager(cls) -> Type[_CycleManager]: # type: ignore if cls._using_enterprise(): cycle_manager = 
_load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".cycle._cycle_manager\", \"_CycleManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".cycle._cycle_manager_factory\", \"_CycleManagerFactory\" )._build_repository # type: ignore else: cycle_manager = _CycleManager build_repository = cls._build_repository cycle_manager._repository = build_repository() # type: ignore return cycle_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "from abc import abstractmethod from importlib import util from typing import Type from taipy.config import Config from ._manager import _Manager class _ManagerFactory: _TAIPY_ENTERPRISE_MODULE = \"taipy.enterprise\" _TAIPY_ENTERPRISE_CORE_MODULE = _TAIPY_ENTERPRISE_MODULE + \".core\" @classmethod @abstractmethod def _build_manager(cls) -> Type[_Manager]: # type: ignore raise NotImplementedError @classmethod def _build_repository(cls): raise NotImplementedError @classmethod def _using_enterprise(cls) -> bool: return util.find_spec(cls._TAIPY_ENTERPRISE_MODULE) is not None @staticmethod def _get_repository_with_repo_map(repository_map: dict): return repository_map.get(Config.core.repository_type, repository_map.get(\"default\")) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pathlib from importlib import metadata from typing import Dict, Generic, Iterable, List, Optional, TypeVar, Union from taipy.logger._taipy_logger import _TaipyLogger from .._entity._entity_ids import _EntityIds from .._repository._abstract_repository import _AbstractRepository from ..exceptions.exceptions import ModelNotFound from ..notification import Event, EventOperation, Notifier EntityType = TypeVar(\"EntityType\") class _Manager(Generic[EntityType]): _repository: _AbstractRepository _logger = _TaipyLogger._get_logger() _ENTITY_NAME: str = \"Entity\" @classmethod def _delete_all(cls): \"\"\" Deletes all entities. \"\"\" cls._repository._delete_all() if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): Notifier.publish( Event( cls._EVENT_ENTITY_TYPE, EventOperation.DELETION, metadata={\"delete_all\": True}, ) ) @classmethod def _delete_many(cls, ids: Iterable): \"\"\" Deletes entities by a list of ids. \"\"\" cls._repository._delete_many(ids) if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): for entity_id in ids: Notifier.publish( Event( cls._EVENT_ENTITY_TYPE, # type: ignore EventOperation.DELETION, entity_id=entity_id, metadata={\"delete_all\": True}, ) ) @classmethod def _delete_by_version(cls, version_number: str): \"\"\" Deletes entities by version number. \"\"\" cls._repository._delete_by(attribute=\"version\", value=version_number) if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): Notifier.publish( Event( cls._EVENT_ENTITY_TYPE, # type: ignore EventOperation.DELETION, metadata={\"delete_by_version\": version_number}, ) ) @classmethod def _delete(cls, id): \"\"\" Deletes an entity by id. 
\"\"\" cls._repository._delete(id) if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): Notifier.publish( Event( cls._EVENT_ENTITY_TYPE, EventOperation.DELETION, entity_id=id, ) ) @classmethod def _set(cls, entity: EntityType): \"\"\" Save or update an entity. \"\"\" cls._repository._save(entity) @classmethod def _get_all(cls, version_number: Optional[str] = \"all\") -> List[EntityType]: \"\"\" Returns all entities. \"\"\" filters: List[Dict] = [] return cls._repository._load_all(filters) @classmethod def _get_all_by(cls, filters: Optional[List[Dict]] = None) -> List[EntityType]: \"\"\" Returns all entities based on a criteria. \"\"\" if not filters: filters = [] return cls._repository._load_all(filters) @classmethod def _get(cls, entity: Union[str, EntityType], default=None) -> EntityType: \"\"\" Returns an entity by id or reference. \"\"\" entity_id = entity if isinstance(entity, str) else entity.id # type: ignore try: return cls._repository._load(entity_id) except ModelNotFound: cls._logger.error(f\"{cls._ENTITY_NAME} not found: {entity_id}\") return default @classmethod def _exists(cls, entity_id: str) -> bool: \"\"\" Returns True if the entity id exists. \"\"\" return cls._repository._exists(entity_id) @classmethod def _delete_entities_of_multiple_types(cls, _entity_ids: _EntityIds): \"\"\" Deletes entities of multiple types. \"\"\" from ..cycle._cycle_manager_factory import _CycleManagerFactory from ..data._data_manager_factory import _DataManagerFactory from ..job._job_manager_factory import _JobManagerFactory from ..scenario._scenario_manager_factory import _ScenarioManagerFactory from ..sequence._sequence_manager_factory import _SequenceManagerFactory from ..submission._submission_manager_factory import _SubmissionManagerFactory from ..task._task_manager_factory import _TaskManagerFactory _CycleManagerFactory._build_manager()._delete_many(_entity_ids.cycle_ids) _SequenceManagerFactory._build_manager()._delete_many(_entity_ids.sequence_ids) _ScenarioManagerFactory._build_manager()._delete_many(_entity_ids.scenario_ids) _TaskManagerFactory._build_manager()._delete_many(_entity_ids.task_ids) _JobManagerFactory._build_manager()._delete_many(_entity_ids.job_ids) _DataManagerFactory._build_manager()._delete_many(_entity_ids.data_node_ids) _SubmissionManagerFactory._build_manager()._delete_many(_entity_ids.submission_ids) @classmethod def _export(cls, id: str, folder_path: Union[str, pathlib.Path]): \"\"\" Export an entity. 
\"\"\" return cls._repository._export(id, folder_path) @classmethod def _is_editable(cls, entity: Union[EntityType, _EntityIds]) -> bool: return True @classmethod def _is_readable(cls, entity: Union[EntityType, _EntityIds]) -> bool: return True "} {"text": "import uuid from typing import List, Optional, Union from taipy.config import Config from taipy.config._config_comparator._comparator_result import _ComparatorResult from taipy.config.checker.issue_collector import IssueCollector from taipy.config.exceptions.exceptions import InconsistentEnvVariableError from taipy.logger._taipy_logger import _TaipyLogger from .._manager._manager import _Manager from ..exceptions.exceptions import ConflictedConfigurationError, ModelNotFound, NonExistingVersion from ._version import _Version from ._version_fs_repository import _VersionFSRepository class _VersionManager(_Manager[_Version]): _ENTITY_NAME = _Version.__name__ __logger = _TaipyLogger._get_logger() __DEVELOPMENT_VERSION = [\"development\", \"dev\"] __LATEST_VERSION = \"latest\" __PRODUCTION_VERSION = \"production\" __ALL_VERSION = [\"all\", \"\"] _DEFAULT_VERSION = __LATEST_VERSION _repository: _VersionFSRepository @classmethod def _get(cls, entity: Union[str, _Version], default=None) -> _Version: \"\"\" Returns the version entity by id or reference. \"\"\" entity_id = entity if isinstance(entity, str) else entity.id try: return cls._repository._load(entity_id) except ModelNotFound: return default @classmethod def _get_or_create(cls, id: str, force: bool) -> _Version: if version := cls._get(id): comparator_result = Config._comparator._find_conflict_config(version.config, Config._applied_config, id) if comparator_result.get(_ComparatorResult.CONFLICTED_SECTION_KEY): if force: cls.__logger.warning( f\"Option --force is detected, overriding the configuration of version {id} ...\" ) version.config = Config._applied_config else: raise ConflictedConfigurationError() else: version = _Version(id=id, config=Config._applied_config) cls._set(version) return version @classmethod def _get_all(cls, version_number: Optional[Union[str, List]] = \"all\") -> List[_Version]: \"\"\" Returns all entities. \"\"\" version_number = cls._replace_version_number(version_number) # type: ignore if not isinstance(version_number, List): version_number = [version_number] if version_number else [] filters = [{\"version\": version} for version in version_number] return cls._repository._load_all(filters) @classmethod def _set_development_version(cls, version_number: str) -> str: cls._get_or_create(version_number, force=True) cls._repository._set_development_version(version_number) return version_number @classmethod def _get_development_version(cls) -> str: try: return cls._repository._get_development_version() except (FileNotFoundError, ModelNotFound): return cls._set_development_version(str(uuid.uuid4())) @classmethod def _set_experiment_version(cls, version_number: str, force: bool = False) -> str: if version_number == cls._get_development_version(): raise SystemExit( f\"Version number {version_number} is the development version. Please choose a different name\" f\" for this experiment.\" ) if version_number in cls._get_production_versions(): raise SystemExit( f\"Version number {version_number} is already a production version. 
Please choose a different name\" f\" for this experiment.\" ) try: cls._get_or_create(version_number, force) except ConflictedConfigurationError: raise SystemExit( f\"Please add a new experiment version or run your application with --force option to\" f\" override the Config of experiment {version_number}.\" ) cls._repository._set_latest_version(version_number) return version_number @classmethod def _get_latest_version(cls) -> str: try: return cls._repository._get_latest_version() except (FileNotFoundError, ModelNotFound): # If there is no version in the system yet, create a new version as development version # This set the default versioning behavior on Jupyter notebook to Development mode return cls._set_development_version(str(uuid.uuid4())) @classmethod def _set_production_version(cls, version_number: str, force: bool = False) -> str: if version_number == cls._get_development_version(): cls._set_development_version(str(uuid.uuid4())) try: cls._get_or_create(version_number, force) except ConflictedConfigurationError: raise SystemExit( f\"Please add a new production version with migration functions.\\n\" f\"If old entities remain compatible with the new configuration, you can also run your application with\" f\" --force option to override the production configuration of version {version_number}.\" ) cls._repository._set_production_version(version_number) return version_number @classmethod def _get_production_versions(cls) -> List[str]: try: return cls._repository._get_production_versions() except (FileNotFoundError, ModelNotFound): return [] @classmethod def _delete_production_version(cls, version_number) -> str: return cls._repository._delete_production_version(version_number) @classmethod def _replace_version_number(cls, version_number: Optional[str] = None): if version_number is None: version_number = cls._replace_version_number(cls._DEFAULT_VERSION) production_versions = cls._get_production_versions() if version_number in production_versions: return production_versions return version_number if version_number == cls.__LATEST_VERSION: return cls._get_latest_version() if version_number in cls.__DEVELOPMENT_VERSION: return cls._get_development_version() if version_number == cls.__PRODUCTION_VERSION: return cls._get_production_versions() if version_number in cls.__ALL_VERSION: return \"\" try: if version := cls._get(version_number): return version.id except InconsistentEnvVariableError: # The version exist but the Config is alternated return version_number raise NonExistingVersion(version_number) @classmethod def _manage_version(cls): from ..taipy import clean_all_entities_by_version if Config.core.mode == \"development\": current_version_number = cls._get_development_version() cls.__logger.info(f\"Development mode: Clean all entities of version {current_version_number}\") clean_all_entities_by_version(current_version_number) cls._set_development_version(current_version_number) elif Config.core.mode in [\"experiment\", \"production\"]: default_version_number = { \"experiment\": str(uuid.uuid4()), \"production\": cls._get_latest_version(), } version_setter = { \"experiment\": cls._set_experiment_version, \"production\": cls._set_production_version, } if Config.core.version_number: current_version_number = Config.core.version_number else: current_version_number = default_version_number[Config.core.mode] version_setter[Config.core.mode](current_version_number, Config.core.force) if Config.core.mode == \"production\": cls.__check_production_migration_config() else: raise 
SystemExit(f\"Undefined execution mode: {Config.core.mode}.\") @classmethod def __check_production_migration_config(self): from ..config.checkers._migration_config_checker import _MigrationConfigChecker collector = _MigrationConfigChecker(Config._applied_config, IssueCollector())._check() for issue in collector._warnings: self.__logger.warning(str(issue)) for issue in collector._infos: self.__logger.info(str(issue)) for issue in collector._errors: self.__logger.error(str(issue)) if len(collector._errors) != 0: raise SystemExit(\"Configuration errors found. Please check the error log for more information.\") @classmethod def _delete_entities_of_multiple_types(cls, _entity_ids): raise NotImplementedError "} {"text": "from datetime import datetime from typing import Any from taipy.config import Config from taipy.config._config import _Config from .._entity._entity import _Entity class _Version(_Entity): def __init__(self, id: str, config: Any) -> None: self.id: str = id self.config: _Config = config self.creation_date: datetime = datetime.now() def __eq__(self, other): return self.id == other.id and self.__is_config_eq(other) def __is_config_eq(self, other): return Config._serializer._str(self.config) == Config._serializer._str(other.config) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from typing import Dict, List from .._version._version_manager_factory import _VersionManagerFactory class _VersionMixin: _version_manager = _VersionManagerFactory._build_manager() @classmethod def __fetch_version_number(cls, version_number): version_number = _VersionManagerFactory._build_manager()._replace_version_number(version_number) if not isinstance(version_number, List): version_number = [version_number] if version_number else [] return version_number @classmethod def _build_filters_with_version(cls, version_number) -> List[Dict]: filters = [] if versions := cls.__fetch_version_number(version_number): filters = [{\"version\": version} for version in versions] return filters @classmethod def _get_latest_version(cls): return cls._version_manager._get_latest_version() "} {"text": "from .._manager._manager_factory import _ManagerFactory from ..common import _utils from ._version_fs_repository import _VersionFSRepository from ._version_manager import _VersionManager from ._version_sql_repository import _VersionSQLRepository class _VersionManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _VersionFSRepository, \"sql\": _VersionSQLRepository} @classmethod def _build_manager(cls) -> _VersionManager: # type: ignore if cls._using_enterprise(): version_manager = _utils._load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \"._version._version_manager\", \"_VersionManager\" ) # type: ignore build_repository = _utils._load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \"._version._version_manager_factory\", \"_VersionManagerFactory\" )._build_repository # type: ignore else: version_manager = _VersionManager build_repository = cls._build_repository version_manager._repository = build_repository() # type: ignore return version_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "from abc import ABC, abstractmethod class _VersionRepositoryInterface(ABC): _LATEST_VERSION_KEY = \"latest_version\" _DEVELOPMENT_VERSION_KEY = \"development_version\" _PRODUCTION_VERSION_KEY = \"production_version\" @abstractmethod def _set_latest_version(self, version_number): raise NotImplementedError @abstractmethod def _get_latest_version(self): raise NotImplementedError @abstractmethod def _set_development_version(self, version_number): raise NotImplementedError @abstractmethod def _get_development_version(self): raise NotImplementedError @abstractmethod def _set_production_version(self, version_number): raise NotImplementedError @abstractmethod def _get_production_versions(self): raise NotImplementedError @abstractmethod def _delete_production_version(self, version_number): raise NotImplementedError "} {"text": "import json from typing import List from taipy.logger._taipy_logger import _TaipyLogger from .._repository._filesystem_repository import _FileSystemRepository from ..exceptions.exceptions import VersionIsNotProductionVersion from ._version_converter import _VersionConverter from ._version_model import _VersionModel from ._version_repository_interface import _VersionRepositoryInterface class _VersionFSRepository(_FileSystemRepository, _VersionRepositoryInterface): def __init__(self): super().__init__(model_type=_VersionModel, converter=_VersionConverter, dir_name=\"version\") @property def _version_file_path(self): return super()._storage_folder / \"version.json\" def _delete_all(self): super()._delete_all() if self._version_file_path.exists(): self._version_file_path.unlink() def _set_latest_version(self, 
version_number): if self._version_file_path.exists(): with open(self._version_file_path, \"r\") as f: file_content = json.load(f) file_content[self._LATEST_VERSION_KEY] = version_number else: self.dir_path.mkdir(parents=True, exist_ok=True) file_content = { self._LATEST_VERSION_KEY: version_number, self._DEVELOPMENT_VERSION_KEY: \"\", self._PRODUCTION_VERSION_KEY: [], } self._version_file_path.write_text( json.dumps( file_content, ensure_ascii=False, indent=0, ) ) def _get_latest_version(self) -> str: with open(self._version_file_path, \"r\") as f: file_content = json.load(f) return file_content[self._LATEST_VERSION_KEY] def _set_development_version(self, version_number): if self._version_file_path.exists(): with open(self._version_file_path, \"r\") as f: file_content = json.load(f) file_content[self._DEVELOPMENT_VERSION_KEY] = version_number file_content[self._LATEST_VERSION_KEY] = version_number else: self.dir_path.mkdir(parents=True, exist_ok=True) file_content = { self._LATEST_VERSION_KEY: version_number, self._DEVELOPMENT_VERSION_KEY: version_number, self._PRODUCTION_VERSION_KEY: [], } self._version_file_path.write_text( json.dumps( file_content, ensure_ascii=False, indent=0, ) ) def _get_development_version(self) -> str: with open(self._version_file_path, \"r\") as f: file_content = json.load(f) return file_content[self._DEVELOPMENT_VERSION_KEY] def _set_production_version(self, version_number): if self._version_file_path.exists(): with open(self._version_file_path, \"r\") as f: file_content = json.load(f) file_content[self._LATEST_VERSION_KEY] = version_number if version_number not in file_content[self._PRODUCTION_VERSION_KEY]: file_content[self._PRODUCTION_VERSION_KEY].append(version_number) else: _TaipyLogger._get_logger().info(f\"Version {version_number} is already a production version.\") else: self.dir_path.mkdir(parents=True, exist_ok=True) file_content = { self._LATEST_VERSION_KEY: version_number, self._DEVELOPMENT_VERSION_KEY: \"\", self._PRODUCTION_VERSION_KEY: [version_number], } self._version_file_path.write_text( json.dumps( file_content, ensure_ascii=False, indent=0, ) ) def _get_production_versions(self) -> List[str]: with open(self._version_file_path, \"r\") as f: file_content = json.load(f) return file_content[self._PRODUCTION_VERSION_KEY] def _delete_production_version(self, version_number): try: with open(self._version_file_path, \"r\") as f: file_content = json.load(f) if version_number not in file_content[self._PRODUCTION_VERSION_KEY]: raise VersionIsNotProductionVersion(f\"Version '{version_number}' is not a production version.\") file_content[self._PRODUCTION_VERSION_KEY].remove(version_number) self._version_file_path.write_text( json.dumps( file_content, ensure_ascii=False, indent=0, ) ) except FileNotFoundError: raise VersionIsNotProductionVersion(f\"Version '{version_number}' is not a production version.\") "} {"text": "from sqlalchemy.dialects import sqlite from .._repository._sql_repository import _SQLRepository from ..exceptions.exceptions import ModelNotFound, VersionIsNotProductionVersion from ._version_converter import _VersionConverter from ._version_model import _VersionModel from ._version_repository_interface import _VersionRepositoryInterface class _VersionSQLRepository(_SQLRepository, _VersionRepositoryInterface): def __init__(self): super().__init__(model_type=_VersionModel, converter=_VersionConverter) def _set_latest_version(self, version_number): if old_latest := self.db.execute(str(self.table.select().filter_by(is_latest=True))).fetchone(): 
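# Hedged, standalone illustration (not part of the repository code around it): the shape of the
# version.json file maintained by the filesystem repository above, with hypothetical version ids.
# The three keys come from _VersionRepositoryInterface, and the dumps arguments mirror the code above.
import json
file_content = {
    'latest_version': '2.0',
    'development_version': '',
    'production_version': ['1.0', '2.0'],
}
print(json.dumps(file_content, ensure_ascii=False, indent=0))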
old_latest = self.model_type.from_dict(old_latest) old_latest.is_latest = False self._update_entry(old_latest) version = self.__get_by_id(version_number) version.is_latest = True self._update_entry(version) def _get_latest_version(self): if latest := self.db.execute( str(self.table.select().filter_by(is_latest=True).compile(dialect=sqlite.dialect())) ).fetchone(): return latest[\"id\"] raise ModelNotFound(self.model_type, \"\") def _set_development_version(self, version_number): if old_development := self.db.execute(str(self.table.select().filter_by(is_development=True))).fetchone(): old_development = self.model_type.from_dict(old_development) old_development.is_development = False self._update_entry(old_development) version = self.__get_by_id(version_number) version.is_development = True self._update_entry(version) self._set_latest_version(version_number) def _get_development_version(self): if development := self.db.execute(str(self.table.select().filter_by(is_development=True))).fetchone(): return development[\"id\"] raise ModelNotFound(self.model_type, \"\") def _set_production_version(self, version_number): version = self.__get_by_id(version_number) version.is_production = True self._update_entry(version) self._set_latest_version(version_number) def _get_production_versions(self): if productions := self.db.execute( str(self.table.select().filter_by(is_production=True).compile(dialect=sqlite.dialect())), ).fetchall(): return [p[\"id\"] for p in productions] return [] def _delete_production_version(self, version_number): version = self.__get_by_id(version_number) if not version or not version.is_production: raise VersionIsNotProductionVersion(f\"Version '{version_number}' is not a production version.\") version.is_production = False self._update_entry(version) def __get_by_id(self, version_id): query = str(self.table.select().filter_by(id=version_id).compile(dialect=sqlite.dialect())) entry = self.db.execute(query, [version_id]).fetchone() return self.model_type.from_dict(entry) if entry else None "} {"text": "from datetime import datetime from taipy.config import Config from .._repository._abstract_converter import _AbstractConverter from .._version._version import _Version from .._version._version_model import _VersionModel class _VersionConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, version: _Version) -> _VersionModel: return _VersionModel( id=version.id, config=Config._to_json(version.config), creation_date=version.creation_date.isoformat() ) @classmethod def _model_to_entity(cls, model: _VersionModel) -> _Version: version = _Version(id=model.id, config=Config._from_json(model.config)) version.creation_date = datetime.fromisoformat(model.creation_date) return version "} {"text": "from dataclasses import dataclass from typing import Any, Dict from sqlalchemy import Boolean, Column, String, Table from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry @mapper_registry.mapped @dataclass class _VersionModel(_BaseModel): __table__ = Table( \"version\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"config\", String), # config is store as a json string Column(\"creation_date\", String), Column(\"is_production\", Boolean), Column(\"is_development\", Boolean), Column(\"is_latest\", Boolean), ) id: str config: Dict[str, Any] creation_date: str @staticmethod def from_dict(data: Dict[str, Any]): model = _VersionModel( id=data[\"id\"], config=data[\"config\"], 
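# Standalone sketch (separate from the model code around it): the converter above stores
# creation_date as an ISO-8601 string on the model and parses it back with
# datetime.fromisoformat when rebuilding the entity. The date value is hypothetical.
from datetime import datetime
creation_date = datetime(2023, 5, 1, 12, 30)
stored = creation_date.isoformat()                 # '2023-05-01T12:30:00' kept on the model
assert datetime.fromisoformat(stored) == creation_date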
creation_date=data[\"creation_date\"], ) model.is_production = data.get(\"is_production\") # type: ignore model.is_development = data.get(\"is_development\") # type: ignore model.is_latest = data.get(\"is_latest\") # type: ignore return model def to_list(self): return [ self.id, self.config, self.creation_date, self.is_production, self.is_development, self.is_latest, ] "} {"text": "from typing import Callable, List from taipy.config.config import Config from .._entity._reload import _Reloader from ..config import MigrationConfig from ._version_manager_factory import _VersionManagerFactory def _migrate_entity(entity): if ( latest_version := _VersionManagerFactory._build_manager()._get_latest_version() ) in _VersionManagerFactory._build_manager()._get_production_versions(): if migration_fcts := __get_migration_fcts_to_latest(entity._version, entity.config_id): with _Reloader(): for fct in migration_fcts: entity = fct(entity) entity._version = latest_version return entity def __get_migration_fcts_to_latest(source_version: str, config_id: str) -> List[Callable]: migration_fcts_to_latest: List[Callable] = [] production_versions = _VersionManagerFactory._build_manager()._get_production_versions() try: start_index = production_versions.index(source_version) + 1 except ValueError: return migration_fcts_to_latest versions_to_migrate = production_versions[start_index:] for version in versions_to_migrate: migration_fct = Config.unique_sections[MigrationConfig.name].migration_fcts.get(version, {}).get(config_id) if migration_fct: migration_fcts_to_latest.append(migration_fct) return migration_fcts_to_latest "} {"text": "import sys from taipy._cli._base_cli import _CLI from taipy.config import Config from taipy.config.exceptions.exceptions import InconsistentEnvVariableError from taipy.logger._taipy_logger import _TaipyLogger from ...data._data_manager_factory import _DataManagerFactory from ...exceptions.exceptions import VersionIsNotProductionVersion from ...job._job_manager_factory import _JobManagerFactory from ...scenario._scenario_manager_factory import _ScenarioManagerFactory from ...sequence._sequence_manager_factory import _SequenceManagerFactory from ...taipy import clean_all_entities_by_version from ...task._task_manager_factory import _TaskManagerFactory from .._version_manager_factory import _VersionManagerFactory from ._bcolor import _Bcolors class _VersionCLI: \"\"\"Command-line interface of the versioning system.\"\"\" __logger = _TaipyLogger._get_logger() @classmethod def create_parser(cls): version_parser = _CLI._add_subparser(\"manage-versions\", help=\"Taipy version control system.\") version_parser.add_argument( \"-l\", \"--list\", action=\"store_true\", help=\"List all existing versions of the Taipy application.\" ) version_parser.add_argument( \"--rename\", nargs=2, metavar=(\"OLD_VERSION\", \"NEW_VERSION\"), help=\"Rename a Taipy version.\" ) version_parser.add_argument( \"--compare-config\", nargs=2, metavar=(\"VERSION_1\", \"VERSION_2\"), help=\"Compare the Configuration of 2 Taipy versions.\", ) version_parser.add_argument( \"-d\", \"--delete\", metavar=\"VERSION\", help=\"Delete a Taipy version by version number.\" ) version_parser.add_argument( \"-dp\", \"--delete-production\", metavar=\"VERSION\", help=\"Delete a Taipy version from production by version number. 
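# Pure-Python sketch of the selection step in __get_migration_fcts_to_latest above, using
# hypothetical production versions and a hypothetical config id; the real code reads both
# from the Config migration section and the version manager.
production_versions = ['1.0', '1.1', '2.0']
registered = {'1.1': {'sales_dn': 'migrate_to_1_1'}, '2.0': {'sales_dn': 'migrate_to_2_0'}}
source_version = '1.0'
start = production_versions.index(source_version) + 1
to_apply = [registered.get(v, {}).get('sales_dn') for v in production_versions[start:]]
print([f for f in to_apply if f])   # ['migrate_to_1_1', 'migrate_to_2_0'], applied in order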
The version is still kept as an experiment \" \"version.\", ) @classmethod def parse_arguments(cls): args = _CLI._parse() if getattr(args, \"which\", None) != \"manage-versions\": return if args.list: print(cls.__list_versions()) sys.exit(0) if args.rename: try: cls.__rename_version(args.rename[0], args.rename[1]) except InconsistentEnvVariableError as error: cls.__logger.error( f\"Fail to rename version {args.rename[0]} to {args.rename[1]} due to outdated Configuration.\" f\"Detail: {str(error)}\" ) sys.exit(1) cls.__logger.info(f\"Successfully renamed version '{args.rename[0]}' to '{args.rename[1]}'.\") sys.exit(0) if args.compare_config: cls.__compare_version_config(args.compare_config[0], args.compare_config[1]) sys.exit(0) if args.delete_production: try: _VersionManagerFactory._build_manager()._delete_production_version(args.delete_production) cls.__logger.info( f\"Successfully delete version {args.delete_production} from the production version list.\" ) sys.exit(0) except VersionIsNotProductionVersion as e: raise SystemExit(e) if args.delete: if clean_all_entities_by_version(args.delete): cls.__logger.info(f\"Successfully delete version {args.delete}.\") else: sys.exit(1) sys.exit(0) @classmethod def __list_versions(cls): list_version_message = f\"\\n{'Version number':<36} {'Mode':<20} {'Creation date':<20}\\n\" latest_version_number = _VersionManagerFactory._build_manager()._get_latest_version() development_version_number = _VersionManagerFactory._build_manager()._get_development_version() production_version_numbers = _VersionManagerFactory._build_manager()._get_production_versions() versions = _VersionManagerFactory._build_manager()._get_all() versions.sort(key=lambda x: x.creation_date, reverse=True) bold = False for version in versions: if version.id == development_version_number: list_version_message += _Bcolors.GREEN mode = \"Development\" elif version.id in production_version_numbers: list_version_message += _Bcolors.PURPLE mode = \"Production\" else: list_version_message += _Bcolors.BLUE mode = \"Experiment\" if version.id == latest_version_number: list_version_message += _Bcolors.BOLD bold = True mode += \" (latest)\" list_version_message += ( f\"{(version.id):<36} {mode:<20} {version.creation_date.strftime('%Y-%m-%d %H:%M:%S'):<20}\" ) list_version_message += _Bcolors.END if bold: list_version_message += _Bcolors.END list_version_message += \"\\n\" return list_version_message @classmethod def __rename_version(cls, old_version: str, new_version: str): _version_manager = _VersionManagerFactory._build_manager() # Check if the new version already exists, return an error if _version_manager._get(new_version): cls.__logger.error(f\"Version name '{new_version}' is already used.\") sys.exit(1) # Make sure that all entities of the old version are exists and loadable version_entity = _version_manager._get(old_version) if version_entity is None: cls.__logger.error(f\"Version '{old_version}' does not exist.\") sys.exit(1) jobs = _JobManagerFactory._build_manager()._get_all(version_number=old_version) scenarios = _ScenarioManagerFactory._build_manager()._get_all(version_number=old_version) sequences = _SequenceManagerFactory._build_manager()._get_all(version_number=old_version) tasks = _TaskManagerFactory._build_manager()._get_all(version_number=old_version) datanodes = _DataManagerFactory._build_manager()._get_all(version_number=old_version) # Update the version of all entities for job in jobs: job._version = new_version _JobManagerFactory._build_manager()._set(job) for scenario in 
scenarios: scenario._version = new_version _ScenarioManagerFactory._build_manager()._set(scenario) for sequence in sequences: sequence._version = new_version _SequenceManagerFactory._build_manager()._set(sequence) for task in tasks: task._version = new_version _TaskManagerFactory._build_manager()._set(task) for datanode in datanodes: datanode._version = new_version _DataManagerFactory._build_manager()._set(datanode) # Update the version entity if old_version in _version_manager._get_production_versions(): _version_manager._set_production_version(new_version) if old_version == _version_manager._get_latest_version(): _version_manager._set_experiment_version(new_version) if old_version == _version_manager._get_development_version(): _version_manager._set_development_version(new_version) _version_manager._delete(old_version) try: _version_manager._delete_production_version(old_version) except VersionIsNotProductionVersion: pass if not _version_manager._get(new_version): version_entity.id = new_version _version_manager._set(version_entity) @classmethod def __compare_version_config(cls, version_1: str, version_2: str): version_entity_1 = _VersionManagerFactory._build_manager()._get(version_1) if version_entity_1 is None: cls.__logger.error(f\"Version '{version_1}' does not exist.\") sys.exit(1) version_entity_2 = _VersionManagerFactory._build_manager()._get(version_2) if version_entity_2 is None: cls.__logger.error(f\"Version '{version_2}' does not exist.\") sys.exit(1) Config._comparator._compare( version_entity_1.config, version_entity_2.config, version_1, version_2, ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import sys def _vt_codes_enabled_in_windows_registry(): \"\"\" Check the Windows Registry to see if VT code handling has been enabled by default, see https://superuser.com/a/1300251/447564. \"\"\" try: # winreg is only available on Windows. import winreg except ImportError: return False else: try: reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, \"Console\") reg_key_value, _ = winreg.QueryValueEx(reg_key, \"VirtualTerminalLevel\") except FileNotFoundError: return False else: return reg_key_value == 1 def _is_color_supported(): \"\"\" Return True if the running system's terminal supports color, and False otherwise. \"\"\" is_a_tty = hasattr(sys.stdout, \"isatty\") and sys.stdout.isatty() return is_a_tty and ( sys.platform != \"win32\" or \"ANSICON\" in os.environ or \"WT_SESSION\" in os.environ # Windows Terminal supports VT codes. or os.environ.get(\"TERM_PROGRAM\") == \"vscode\" # VSCode's built-in terminal supports colors. 
or _vt_codes_enabled_in_windows_registry() ) class _Bcolors: PURPLE = \"\\033[95m\" if _is_color_supported() else \"\" BLUE = \"\\033[94m\" if _is_color_supported() else \"\" CYAN = \"\\033[96m\" if _is_color_supported() else \"\" GREEN = \"\\033[92m\" if _is_color_supported() else \"\" BOLD = \"\\033[1m\" if _is_color_supported() else \"\" UNDERLINE = \"\\033[4m\" if _is_color_supported() else \"\" END = \"\\033[0m\" if _is_color_supported() else \"\" "} {"text": "from .exceptions import * "} {"text": " from typing import List, Optional class ConfigCoreVersionMismatched(Exception): \"\"\"Raised if core version in Config does not match with the version of Taipy Core.\"\"\" def __init__(self, config_core_version: str, core_version: str) -> None: self.message = ( f\"Core version {config_core_version} in Config does not match with version of Taipy Core {core_version}.\" ) class CoreServiceIsAlreadyRunning(Exception): \"\"\"Raised if the Core service is already running.\"\"\" class CycleAlreadyExists(Exception): \"\"\"Raised if it is trying to create a Cycle that has already exists.\"\"\" class NonExistingCycle(Exception): \"\"\"Raised if a requested cycle is not known by the Cycle manager.\"\"\" def __init__(self, cycle_id: str): self.message = f\"Cycle: {cycle_id} does not exist.\" class MissingRequiredProperty(Exception): \"\"\"Raised if a required property is missing when creating a Data Node.\"\"\" class InvalidDataNodeType(Exception): \"\"\"Raised if a data node storage type does not exist.\"\"\" class MultipleDataNodeFromSameConfigWithSameOwner(Exception): \"\"\" Raised if there are multiple data nodes from the same data node configuration and the same owner identifier. \"\"\" class NoData(Exception): \"\"\"Raised if a data node is read before it has been written. This exception can be raised by `DataNode.read_or_raise()^`. 
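# Hedged, standalone sketch of how ANSI constants like those in _Bcolors above are combined
# and reset when a colored status line is printed; chr(27) is the ESC character that the
# escape codes start with, and the id and mode values are made up.
GREEN = chr(27) + '[92m'
BOLD = chr(27) + '[1m'
END = chr(27) + '[0m'
version_id = 'my-version-id'
mode = 'Development (latest)'
print(BOLD + GREEN + f'{version_id:<36} {mode:<20}' + END)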
\"\"\" class UnknownDatabaseEngine(Exception): \"\"\"Raised if the database engine is not known when creating a connection with a SQLDataNode.\"\"\" class MissingAppendQueryBuilder(Exception): \"\"\"Raised if no append query build is provided when appending data to a SQLDataNode.\"\"\" class UnknownParquetEngine(Exception): \"\"\"Raised if the parquet engine is not known or not supported when create a ParquetDataNode.\"\"\" class UnknownCompressionAlgorithm(Exception): \"\"\"Raised if the compression algorithm is not supported by ParquetDataNode.\"\"\" class NonExistingDataNode(Exception): \"\"\"Raised if a requested DataNode is not known by the DataNode Manager.\"\"\" def __init__(self, data_node_id: str): self.message = f\"DataNode: {data_node_id} does not exist.\" class DataNodeIsBeingEdited(Exception): \"\"\"Raised if a DataNode is being edited.\"\"\" def __init__(self, data_node_id: str, editor_id: Optional[str] = None): self.message = f\"DataNode {data_node_id} is being edited{ ' by ' + editor_id if editor_id else ''}.\" class NonExistingDataNodeConfig(Exception): \"\"\"Raised if a requested DataNodeConfig is not known by the DataNode Manager.\"\"\" def __init__(self, data_node_config_id: str): self.message = f\"Data node config: {data_node_config_id} does not exist.\" class NonExistingExcelSheet(Exception): \"\"\"Raised if a requested Sheet name does not exist in the provided Excel file.\"\"\" def __init__(self, sheet_name: str, excel_file_name: str): self.message = f\"{sheet_name} does not exist in {excel_file_name}.\" class ExposedTypeLengthMismatch(Exception): \"\"\"Raised if length of exposed type list does not match with number of sheets in the provided Excel file.\"\"\" class SheetNameLengthMismatch(Exception): \"\"\"Raised if length of sheet_name list does not match with number of sheets in the data to be written to Excel file.\"\"\" class InvalidExposedType(Exception): \"\"\"Raised if an invalid exposed type is provided.\"\"\" class InvalidCustomDocument(Exception): \"\"\"Raised if an invalid custom document class is provided to a `MongoCollectionDataNode`.\"\"\" class DataNodeConfigIsNotGlobal(Exception): \"\"\"Raised if a DataNode is not global.\"\"\" def __init__(self, data_node_config_id: str): self.message = f\"Data node config `{data_node_config_id}` does not have GLOBAL scope.\" class MissingReadFunction(Exception): \"\"\"Raised if no read function is provided for the GenericDataNode.\"\"\" class MissingWriteFunction(Exception): \"\"\"Raised if no write function is provided for the GenericDataNode.\"\"\" class JobNotDeletedException(RuntimeError): \"\"\"Raised if there is an attempt to delete a job that cannot be deleted. This exception can be raised by `taipy.delete_job()^`. 
\"\"\" def __init__(self, job_id: str): self.message = f\"Job: {job_id} cannot be deleted.\" class NonExistingJob(RuntimeError): \"\"\"Raised if a requested job is not known by the Job manager.\"\"\" def __init__(self, job_id: str): self.message = f\"Job: {job_id} does not exist.\" class DataNodeWritingError(RuntimeError): \"\"\"Raised if an error happens during the writing in a data node.\"\"\" class InvalidSubscriber(RuntimeError): \"\"\"Raised if the loaded function is not valid.\"\"\" class InvalidSequenceId(Exception): \"\"\"Raised if a Sequence id can not be broken down.\"\"\" def __init__(self, sequence_id: str): self.message = f\"Sequence: {sequence_id} is invalid.\" class InvalidSequence(Exception): \"\"\"Raised if a Sequence is not a connected Directed Acyclic Graph.\"\"\" def __init__(self, sequence_id: str): self.message = f\"Sequence: {sequence_id} is not a connected Directed Acyclic Graph.\" class NonExistingSequence(Exception): \"\"\"Raised if a requested Sequence is not known by the Sequence Manager.\"\"\" def __init__(self, sequence_id: str): self.message = f\"Sequence: {sequence_id} does not exist.\" class SequenceBelongsToNonExistingScenario(Exception): \"\"\"Raised if a Sequence does not belong to an existing Scenario.\"\"\" def __init__(self, sequence_id: str, scenario_id: str): self.message = f\"Sequence: {sequence_id} belongs to a non-existing Scenario: {scenario_id}.\" class SequenceTaskDoesNotExistInScenario(Exception): \"\"\"Raised if Tasks of a Sequence do not exist in the same Scenario that the Sequence belongs to.\"\"\" def __init__(self, task_ids: List[Optional[str]], sequence_name: str, scenario_id: str): self.message = f\"Tasks {task_ids} of Sequence {sequence_name} does not exist in Scenario {scenario_id}.\" class SequenceTaskConfigDoesNotExistInSameScenarioConfig(Exception): \"\"\"Raised if TaskConfigs of a Sequence do not exist in the same ScenarioConfig that the Sequence belongs to.\"\"\" def __init__(self, task_config_ids: List[Optional[str]], sequence_name: str, scenario_config_id: str): self.message = f\"TaskConfig {task_config_ids} of Sequence name {sequence_name} \" self.message += f\"does not exist in ScenarioConfig {scenario_config_id}.\" class NonExistingSequenceConfig(Exception): \"\"\"Raised if a requested Sequence configuration is not known by the Sequence Manager.\"\"\" def __init__(self, sequence_config_id: str): self.message = f\"Sequence config: {sequence_config_id} does not exist.\" class MultipleSequenceFromSameConfigWithSameOwner(Exception): \"\"\"Raised if it exists multiple sequences from the same sequence config and with the same _owner_id_.\"\"\" class ModelNotFound(Exception): \"\"\"Raised when trying to fetch a non-existent model. This exception can be raised by `taipy.get()^` and `taipy.delete()^`. \"\"\" def __init__(self, model_name: str, model_id: str): self.message = f\"A {model_name} model with id {model_id} could not be found.\" class NonExistingScenario(Exception): \"\"\"Raised if a requested scenario is not known by the Scenario Manager.\"\"\" def __init__(self, scenario_id: str): self.message = f\"Scenario: {scenario_id} does not exist.\" class NonExistingScenarioConfig(Exception): \"\"\"Raised if a requested scenario configuration is not known by the Scenario Manager. This exception can be raised by `taipy.compare_scenarios()^`. 
\"\"\" def __init__(self, scenario_config_id: str): self.message = f\"Scenario config: {scenario_config_id} does not exist.\" class InvalidSscenario(Exception): \"\"\"Raised if a Scenario is not a Directed Acyclic Graph.\"\"\" def __init__(self, scenario_id: str): self.message = f\"Scenario: {scenario_id} is not a Directed Acyclic Graph.\" class DoesNotBelongToACycle(Exception): \"\"\"Raised if a scenario without any cycle is promoted as primary scenario.\"\"\" class DeletingPrimaryScenario(Exception): \"\"\"Raised if a primary scenario is deleted.\"\"\" class DifferentScenarioConfigs(Exception): \"\"\"Raised if scenario comparison is requested on scenarios with different scenario configs. This exception can be raised by `taipy.compare_scenarios()^`. \"\"\" class InsufficientScenarioToCompare(Exception): \"\"\"Raised if too few scenarios are requested to be compared. Scenario comparison need at least two scenarios to compare. This exception can be raised by `taipy.compare_scenarios()^`. \"\"\" class NonExistingComparator(Exception): \"\"\"Raised if a scenario comparator does not exist. This exception can be raised by `taipy.compare_scenarios()^`. \"\"\" class UnauthorizedTagError(Exception): \"\"\"Must provide an authorized tag.\"\"\" class DependencyNotInstalled(Exception): \"\"\"Raised if a package is missing.\"\"\" def __init__(self, package_name: str): self.message = f\"\"\" Package '{package_name}' should be installed. Run 'pip install taipy[{package_name}]' to installed it. \"\"\" class NonExistingTask(Exception): \"\"\"Raised if a requested task is not known by the Task Manager.\"\"\" def __init__(self, task_id: str): self.message = f\"Task: {task_id} does not exist.\" class NonExistingTaskConfig(Exception): \"\"\"Raised if a requested task configuration is not known by the Task Manager.\"\"\" def __init__(self, id: str): self.message = f\"Task config: {id} does not exist.\" class MultipleTaskFromSameConfigWithSameOwner(Exception): \"\"\"Raised if there are multiple tasks from the same task configuration and the same owner identifier.\"\"\" class OrchestratorNotBuilt(Exception): \"\"\"Raised if the orchestrator was not built in the OrchestratorFactory\"\"\" class ModeNotAvailable(Exception): \"\"\"Raised if the mode in JobConfig is not supported.\"\"\" class InvalidExportPath(Exception): \"\"\"Raised if the export path is not valid.\"\"\" class NonExistingVersion(Exception): \"\"\"Raised if request a Version that is not known by the Version Manager.\"\"\" def __init__(self, version_number: str): self.message = f\"Version '{version_number}' does not exist.\" class VersionIsNotProductionVersion(Exception): \"\"\"Raised if the version is not a production version.\"\"\" class ConflictedConfigurationError(Exception): \"\"\"Conflicts have been detected between the current and previous Configurations.\"\"\" class InvalidEventAttributeName(Exception): \"\"\" Raised if the attribute doesn't exist or an attribute name is provided when operation is either creation, deletion or submission \"\"\" class InvalidEventOperation(Exception): \"\"\"Raised when operation doesn't belong to the entity\"\"\" class FileCannotBeRead(Exception): \"\"\"Raised when a file cannot be read.\"\"\" class _SuspiciousFileOperation(Exception): pass "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from __future__ import annotations import abc from typing import Any, Callable, List, Optional, Set, Union import networkx as nx from ..common._listattributes import _ListAttributes from ..common._utils import _Subscriber from ..data.data_node import DataNode from ..job.job import Job from ..task.task import Task from ._dag import _DAG class Submittable: \"\"\"Instance of an entity that can be submitted for execution. A submittable holds functions that can be used to build the execution directed acyclic graph. Attributes: subscribers (List[Callable]): The list of callbacks to be called on `Job^`'s status change. \"\"\" def __init__(self, subscribers: Optional[List[_Subscriber]] = None): self._subscribers = _ListAttributes(self, subscribers or list()) @abc.abstractmethod def submit( self, callbacks: Optional[List[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ): raise NotImplementedError def get_inputs(self) -> Set[DataNode]: \"\"\"Return the set of input data nodes of the submittable entity. Returns: The set of input data nodes. \"\"\" dag = self._build_dag() return self.__get_inputs(dag) def __get_inputs(self, dag: nx.DiGraph) -> Set[DataNode]: return {node for node, degree in dict(dag.in_degree).items() if degree == 0 and isinstance(node, DataNode)} def get_outputs(self) -> Set[DataNode]: \"\"\"Return the set of output data nodes of the submittable entity. Returns: The set of output data nodes. \"\"\" dag = self._build_dag() return self.__get_outputs(dag) def __get_outputs(self, dag: nx.DiGraph) -> set[DataNode]: return {node for node, degree in dict(dag.out_degree).items() if degree == 0 and isinstance(node, DataNode)} def get_intermediate(self) -> Set[DataNode]: \"\"\"Return the set of intermediate data nodes of the submittable entity. Returns: The set of intermediate data nodes. \"\"\" dag = self._build_dag() all_data_nodes_in_dag = {node for node in dag.nodes if isinstance(node, DataNode)} return all_data_nodes_in_dag - self.__get_inputs(dag) - self.__get_outputs(dag) def is_ready_to_run(self) -> bool: \"\"\"Indicate if the entity is ready to be run. Returns: True if the given entity is ready to be run. False otherwise. \"\"\" return all(dn.is_ready_for_reading for dn in self.get_inputs()) def data_nodes_being_edited(self) -> Set[DataNode]: \"\"\"Return the set of data nodes of the submittable entity that are being edited. Returns: The set of data nodes that are being edited. 
\"\"\" dag = self._build_dag() return {node for node in dag.nodes if isinstance(node, DataNode) and node.edit_in_progress} @abc.abstractmethod def subscribe(self, callback: Callable[[Submittable, Job], None], params: Optional[List[Any]] = None): raise NotImplementedError @abc.abstractmethod def unsubscribe(self, callback: Callable[[Submittable, Job], None], params: Optional[List[Any]] = None): raise NotImplementedError @abc.abstractmethod def _get_set_of_tasks(self) -> Set[Task]: raise NotImplementedError def _get_dag(self) -> _DAG: return _DAG(self._build_dag()) def _build_dag(self) -> nx.DiGraph: graph = nx.DiGraph() tasks = self._get_set_of_tasks() for task in tasks: if has_input := task.input: for predecessor in task.input.values(): graph.add_edges_from([(predecessor, task)]) if has_output := task.output: for successor in task.output.values(): graph.add_edges_from([(task, successor)]) if not has_input and not has_output: graph.add_node(task) return graph def _get_sorted_tasks(self) -> List[List[Task]]: dag = self._build_dag() remove = [node for node, degree in dict(dag.in_degree).items() if degree == 0 and isinstance(node, DataNode)] dag.remove_nodes_from(remove) return list(nodes for nodes in nx.topological_generations(dag) if (Task in (type(node) for node in nodes))) def _add_subscriber(self, callback: Callable, params: Optional[List[Any]] = None): params = [] if params is None else params self._subscribers.append(_Subscriber(callback=callback, params=params)) def _remove_subscriber(self, callback: Callable, params: Optional[List[Any]] = None): if params is not None: self._subscribers.remove(_Subscriber(callback, params)) else: elem = [x for x in self._subscribers if x.callback == callback] if not elem: raise ValueError self._subscribers.remove(elem[0]) "} {"text": "from typing import List from .._entity._reload import _get_manager from ..notification import Notifier class _Entity: _MANAGER_NAME: str _is_in_context = False _in_context_attributes_changed_collector: List def __enter__(self): self._is_in_context = True self._in_context_attributes_changed_collector = list() return self def __exit__(self, exc_type, exc_value, exc_traceback): # If multiple entities is in context, the last to enter will be the first to exit self._is_in_context = False if hasattr(self, \"_properties\"): for to_delete_key in self._properties._pending_deletions: self._properties.data.pop(to_delete_key, None) self._properties.data.update(self._properties._pending_changes) _get_manager(self._MANAGER_NAME)._set(self) for event in self._in_context_attributes_changed_collector: Notifier.publish(event) _get_manager(self._MANAGER_NAME)._set(self) "} {"text": "from collections import UserDict from ..notification import _ENTITY_TO_EVENT_ENTITY_TYPE, EventOperation, Notifier, _make_event class _Properties(UserDict): __PROPERTIES_ATTRIBUTE_NAME = \"properties\" def __init__(self, entity_owner, **kwargs): super().__init__(**kwargs) self._entity_owner = entity_owner self._pending_changes = {} self._pending_deletions = set() def __setitem__(self, key, value): super(_Properties, self).__setitem__(key, value) from ... 
import core as tp if hasattr(self, \"_entity_owner\"): event = _make_event( self._entity_owner, EventOperation.UPDATE, attribute_name=self.__PROPERTIES_ATTRIBUTE_NAME, attribute_value=value, ) if not self._entity_owner._is_in_context: tp.set(self._entity_owner) Notifier.publish(event) else: if key in self._pending_deletions: self._pending_deletions.remove(key) self._pending_changes[key] = value self._entity_owner._in_context_attributes_changed_collector.append(event) def __getitem__(self, key): from taipy.config.common._template_handler import _TemplateHandler as _tpl return _tpl._replace_templates(super(_Properties, self).__getitem__(key)) def __delitem__(self, key): super(_Properties, self).__delitem__(key) from ... import core as tp if hasattr(self, \"_entity_owner\"): event = _make_event( self._entity_owner, EventOperation.UPDATE, attribute_name=self.__PROPERTIES_ATTRIBUTE_NAME, attribute_value=None, ) if not self._entity_owner._is_in_context: tp.set(self._entity_owner) Notifier.publish(event) else: self._pending_changes.pop(key, None) self._pending_deletions.add(key) self._entity_owner._in_context_attributes_changed_collector.append(event) "} {"text": "from __future__ import annotations class _EntityIds: def __init__(self): self.data_node_ids = set() self.task_ids = set() self.scenario_ids = set() self.sequence_ids = set() self.job_ids = set() self.cycle_ids = set() self.submission_ids = set() def __add__(self, other: _EntityIds): self.data_node_ids.update(other.data_node_ids) self.task_ids.update(other.task_ids) self.scenario_ids.update(other.scenario_ids) self.sequence_ids.update(other.sequence_ids) self.job_ids.update(other.job_ids) self.cycle_ids.update(other.cycle_ids) self.submission_ids.update(other.submission_ids) return self def __iadd__(self, other: _EntityIds): self.__add__(other) return self "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
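# Standalone sketch of the networkx call that both Submittable._get_sorted_tasks above and the
# _DAG layout class below rely on: data nodes and tasks become graph nodes, and topological
# generations give the execution layers. Node names here are hypothetical.
import networkx as nx
g = nx.DiGraph()
g.add_edges_from([('dn_in', 'task_a'), ('task_a', 'dn_mid'), ('dn_mid', 'task_b'), ('task_b', 'dn_out')])
print(list(nx.topological_generations(g)))
# [['dn_in'], ['task_a'], ['dn_mid'], ['task_b'], ['dn_out']]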
import math from functools import reduce from typing import Any, Dict, List, Tuple import networkx as nx class _Node: def __init__(self, entity: Any, x, y): self.type = entity.__class__.__name__ self.entity = entity self.x = x self.y = y class _Edge: def __init__(self, src: _Node, dest: _Node): self.src = src self.dest = dest class _DAG: def __init__(self, dag: nx.DiGraph): self._sorted_nodes = list(nodes for nodes in nx.topological_generations(dag)) self._length, self._width = self.__compute_size() self._grid_length, self._grid_width = self.__compute_grid_size() self._nodes = self.__compute_nodes() self._edges = self.__compute_edges(dag) @property def width(self) -> int: return self._width @property def length(self) -> int: return self._length @property def nodes(self) -> Dict[str, _Node]: return self._nodes @property def edges(self) -> List[_Edge]: return self._edges def __compute_size(self) -> Tuple[int, int]: return len(self._sorted_nodes), max([len(i) for i in self._sorted_nodes]) def __compute_grid_size(self) -> Tuple[int, int]: if self._width == 1: grd_wdt = 1 else: grd_wdt = self.__lcm(*[len(i) + 1 if len(i) != self._width else len(i) - 1 for i in self._sorted_nodes]) + 1 return len(self._sorted_nodes), grd_wdt def __compute_nodes(self) -> Dict[str, _Node]: nodes = {} x = 0 for same_lvl_nodes in self._sorted_nodes: lcl_wdt = len(same_lvl_nodes) is_max = lcl_wdt != self.width if self.width != 1: y_incr = (self._grid_width - 1) / (lcl_wdt + 1) if is_max else (self._grid_width - 1) / (lcl_wdt - 1) else: y_incr = 1 y = 0 if is_max else -y_incr for node in same_lvl_nodes: y += y_incr nodes[node.id] = _Node(node, x, y) x += 1 return nodes def __compute_edges(self, dag) -> List[_Edge]: edges = [] for edge in dag.edges(): edges.append(_Edge(self.nodes[edge[0].id], self.nodes[edge[1].id])) return edges @staticmethod def __lcm(*integers) -> int: # Function math.lcm is only implemented for Python 3.9+ # For compatibility with Python 3.8 it has been re implemented. if 0 in integers: return 0 return reduce(lambda x, y: (x * y) // math.gcd(x, y), integers) "} {"text": "import sys from typing import List from taipy._cli._base_cli import _CLI from taipy.logger._taipy_logger import _TaipyLogger from ._migrate import ( _migrate_fs_entities, _migrate_mongo_entities, _migrate_sql_entities, _remove_backup_file_entities, _remove_backup_mongo_entities, _remove_backup_sql_entities, _restore_migrate_file_entities, _restore_migrate_mongo_entities, _restore_migrate_sql_entities, ) class _MigrateCLI: __logger = _TaipyLogger._get_logger() @classmethod def create_parser(cls): migrate_parser = _CLI._add_subparser( \"migrate\", help=\"Migrate entities created from old taipy versions to be compatible with the current taipy version. \" \" The entity migration should be performed only after updating taipy code to the current version.\", ) migrate_parser.add_argument( \"--repository-type\", required=True, nargs=\"+\", help=\"The type of repository to migrate. If filesystem or sql, a path to the database folder/.sqlite file \" \"should be informed. 
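# Standalone sketch of the Python 3.8-compatible least-common-multiple helper used by
# _DAG.__lcm above (math.lcm itself only exists on Python 3.9+):
import math
from functools import reduce

def lcm(*integers):
    if 0 in integers:
        return 0
    return reduce(lambda x, y: (x * y) // math.gcd(x, y), integers)

print(lcm(3, 4, 6))   # 12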
In case of mongo host, port, user and password must be informed, if left empty it \" \"is assumed default values\", ) migrate_parser.add_argument( \"--skip-backup\", action=\"store_true\", help=\"Skip the backup of entities before migration.\", ) migrate_parser.add_argument( \"--restore\", action=\"store_true\", help=\"Restore the migration of entities from backup folder.\", ) migrate_parser.add_argument( \"--remove-backup\", action=\"store_true\", help=\"Remove the backup of entities. Only use this option if the migration was successful.\", ) @classmethod def parse_arguments(cls): args = _CLI._parse() if getattr(args, \"which\", None) != \"migrate\": return repository_type = args.repository_type[0] repository_args = args.repository_type[1:] if len(args.repository_type) > 1 else [None] if args.restore: cls.__handle_restore_backup(repository_type, repository_args) if args.remove_backup: cls.__handle_remove_backup(repository_type, repository_args) do_backup = False if args.skip_backup else True cls.__migrate_entities(repository_type, repository_args, do_backup) sys.exit(0) @classmethod def __handle_remove_backup(cls, repository_type: str, repository_args: List): if repository_type == \"filesystem\": path = repository_args[0] or \".data\" if not _remove_backup_file_entities(path): sys.exit(1) elif repository_type == \"sql\": if not _remove_backup_sql_entities(repository_args[0]): sys.exit(1) elif repository_type == \"mongo\": if not _remove_backup_mongo_entities(): sys.exit(1) else: cls.__logger.error(f\"Unknown repository type {repository_type}\") sys.exit(1) sys.exit(0) @classmethod def __handle_restore_backup(cls, repository_type: str, repository_args: List): if repository_type == \"filesystem\": path = repository_args[0] or \".data\" if not _restore_migrate_file_entities(path): sys.exit(1) elif repository_type == \"sql\": if not _restore_migrate_sql_entities(repository_args[0]): sys.exit(1) elif repository_type == \"mongo\": mongo_args = repository_args[1:5] if repository_args[0] else [] if not _restore_migrate_mongo_entities(*mongo_args): sys.exit(1) else: cls.__logger.error(f\"Unknown repository type {repository_type}\") sys.exit(1) sys.exit(0) @classmethod def __migrate_entities(cls, repository_type: str, repository_args: List, do_backup: bool): if repository_type == \"filesystem\": path = repository_args[0] or \".data\" if not _migrate_fs_entities(path, do_backup): sys.exit(1) elif repository_type == \"sql\": if not _migrate_sql_entities(repository_args[0], do_backup): sys.exit(1) elif repository_type == \"mongo\": mongo_args = repository_args[1:5] if repository_args[0] else [] _migrate_mongo_entities(*mongo_args, backup=do_backup) # type: ignore else: cls.__logger.error(f\"Unknown repository type {repository_type}\") sys.exit(1) "} {"text": "import functools from ..notification import EventOperation, Notifier, _make_event class _Reloader: \"\"\"The _Reloader singleton class\"\"\" _instance = None _no_reload_context = False def __new__(class_, *args, **kwargs): if not isinstance(class_._instance, class_): class_._instance = object.__new__(class_, *args, **kwargs) return class_._instance def _reload(self, manager: str, obj): if self._no_reload_context: return obj entity = _get_manager(manager)._get(obj, obj) if obj._is_in_context and hasattr(entity, \"_properties\"): if obj._properties._pending_changes: entity._properties._pending_changes = obj._properties._pending_changes if obj._properties._pending_deletions: entity._properties._pending_deletions = obj._properties._pending_deletions 
entity._properties._entity_owner = obj return entity def __enter__(self): self._no_reload_context = True return self def __exit__(self, exc_type, exc_value, exc_traceback): self._no_reload_context = False def _self_reload(manager): def __reload(fct): @functools.wraps(fct) def _do_reload(self, *args, **kwargs): self = _Reloader()._reload(manager, self) return fct(self, *args, **kwargs) return _do_reload return __reload def _self_setter(manager): def __set_entity(fct): @functools.wraps(fct) def _do_set_entity(self, *args, **kwargs): fct(self, *args, **kwargs) entity_manager = _get_manager(manager) if len(args) == 1: value = args[0] else: value = args event = _make_event( self, EventOperation.UPDATE, attribute_name=fct.__name__, attribute_value=value, ) if not self._is_in_context: entity = _Reloader()._reload(manager, self) fct(entity, *args, **kwargs) entity_manager._set(entity) Notifier.publish(event) else: self._in_context_attributes_changed_collector.append(event) return _do_set_entity return __set_entity @functools.lru_cache def _get_manager(manager: str): from ..cycle._cycle_manager_factory import _CycleManagerFactory from ..data._data_manager_factory import _DataManagerFactory from ..job._job_manager_factory import _JobManagerFactory from ..scenario._scenario_manager_factory import _ScenarioManagerFactory from ..sequence._sequence_manager_factory import _SequenceManagerFactory from ..submission._submission_manager_factory import _SubmissionManagerFactory from ..task._task_manager_factory import _TaskManagerFactory return { \"scenario\": _ScenarioManagerFactory._build_manager(), \"sequence\": _SequenceManagerFactory._build_manager(), \"data\": _DataManagerFactory._build_manager(), \"cycle\": _CycleManagerFactory._build_manager(), \"job\": _JobManagerFactory._build_manager(), \"task\": _TaskManagerFactory._build_manager(), \"submission\": _SubmissionManagerFactory._build_manager(), }[manager] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. import abc from typing import Optional class _Labeled: __LABEL_SEPARATOR = \" > \" @abc.abstractmethod def get_label(self) -> str: raise NotImplementedError def _get_label(self) -> str: \"\"\"Returns the entity label made of the simple label prefixed by the owner label. Returns: The label of the entity as a string. \"\"\" return self._get_explicit_label() or self._generate_label() @abc.abstractmethod def get_simple_label(self) -> str: raise NotImplementedError def _get_simple_label(self) -> str: \"\"\"Returns the simple label. Returns: The simple label of the entity as a string. \"\"\" return self._get_explicit_label() or self._generate_label(True) def _generate_label(self, simple=False) -> str: ls = [] if not simple: if owner_id := self._get_owner_id(): if getattr(self, \"id\") != owner_id: from ... 
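# Minimal, standalone sketch of the cached lookup pattern behind _get_manager above; stand-in
# objects replace the real Taipy manager factories.
import functools

@functools.lru_cache
def get_manager(name: str):
    print(f'building registry entry for {name}')   # executed once per distinct name
    return {'scenario': object(), 'task': object()}[name]

assert get_manager('task') is get_manager('task')   # second call is served from the cache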
import core as tp owner = tp.get(owner_id) ls.append(owner.get_label()) ls.append(self._generate_entity_label()) return self.__LABEL_SEPARATOR.join(ls) def _get_explicit_label(self) -> Optional[str]: if hasattr(self, \"_properties\"): return getattr(self, \"_properties\").get(\"label\") return None def _get_owner_id(self) -> Optional[str]: if hasattr(self, \"owner_id\"): return getattr(self, \"owner_id\") return None def _get_name(self) -> Optional[str]: if hasattr(self, \"name\"): return getattr(self, \"name\") if hasattr(self, \"_properties\"): return getattr(self, \"_properties\").get(\"name\") return None def _get_config_id(self) -> Optional[str]: if hasattr(self, \"config_id\"): return getattr(self, \"config_id\") return None def _generate_entity_label(self) -> str: if name := self._get_name(): return name if config_id := self._get_config_id(): return config_id return getattr(self, \"id\") "} {"text": "import os import shutil from functools import lru_cache from typing import Dict import bson import pymongo from taipy.logger._taipy_logger import _TaipyLogger from ._utils import _migrate __logger = _TaipyLogger._get_logger() OLD_COLLECTIONS = [ \"cycle\", \"scenario\", \"pipeline\", \"task\", \"data_node\", \"job\", \"version\", ] NEW_COLLECTIONS = [ \"cycle\", \"scenario\", \"task\", \"data_node\", \"job\", \"version\", ] DATABASE_NAME = \"taipy\" MONGO_BACKUP_FOLDER = \".mongo_backup\" @lru_cache def _connect_mongodb(db_host: str, db_port: int, db_username: str, db_password: str) -> pymongo.MongoClient: auth_str = \"\" if db_username and db_password: auth_str = f\"{db_username}:{db_password}@\" connection_string = f\"mongodb://{auth_str}{db_host}:{db_port}\" return pymongo.MongoClient(connection_string) def __load_all_entities_from_mongo( hostname: str, port: int, user: str, password: str, ): client = _connect_mongodb(hostname, port, user, password) entities = {} for collection in OLD_COLLECTIONS: db = client[DATABASE_NAME] cursor = db[collection].find({}) for document in cursor: entities[document[\"id\"]] = {\"data\": document} return entities def __write_entities_to_mongo( _entities: Dict, hostname: str, port: int, user: str, password: str, ): client = _connect_mongodb(hostname, port, user, password) for collection in NEW_COLLECTIONS: db = client[DATABASE_NAME] db[collection].insert_many( [entity[\"data\"] for entity in _entities.values() if collection in entity[\"data\"][\"id\"]] ) def _backup_mongo_entities( hostname: str = \"localhost\", port: int = 27017, user: str = \"\", password: str = \"\", ) -> bool: client = _connect_mongodb(hostname, port, user, password) db = client[DATABASE_NAME] if not os.path.exists(MONGO_BACKUP_FOLDER): os.makedirs(MONGO_BACKUP_FOLDER, exist_ok=True) for collection in OLD_COLLECTIONS: with open(os.path.join(MONGO_BACKUP_FOLDER, f\"{collection}.bson\"), \"wb+\") as f: for doc in db[collection].find(): f.write(bson.BSON.encode(doc)) __logger.info(f\"Backed up entities to folder '{MONGO_BACKUP_FOLDER}' before migration.\") return True def _restore_migrate_mongo_entities( hostname: str = \"localhost\", port: int = 27017, user: str = \"\", password: str = \"\", ) -> bool: client = _connect_mongodb(hostname, port, user, password) db = client[DATABASE_NAME] if not os.path.isdir(MONGO_BACKUP_FOLDER): __logger.info(f\"The backup folder '{MONGO_BACKUP_FOLDER}' does not exist.\") return False for collection in os.listdir(MONGO_BACKUP_FOLDER): if collection.endswith(\".bson\"): with open(os.path.join(MONGO_BACKUP_FOLDER, collection), \"rb+\") as f: if bson_data 
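# Hedged usage sketch of the connection helper above, assuming a local MongoDB is running;
# host and port are hypothetical, and the 'taipy' database name comes from DATABASE_NAME.
import pymongo
client = pymongo.MongoClient('mongodb://localhost:27017')
db = client['taipy']
print(db.list_collection_names())   # e.g. ['cycle', 'scenario', 'task', 'data_node', 'job', 'version']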
:= bson.decode_all(f.read()): # type: ignore db[collection.split(\".\")[0]].insert_many(bson_data) shutil.rmtree(MONGO_BACKUP_FOLDER) __logger.info(f\"Restored entities from the backup folder '{MONGO_BACKUP_FOLDER}'.\") return True def _remove_backup_mongo_entities() -> bool: if not os.path.isdir(MONGO_BACKUP_FOLDER): __logger.info(f\"The backup folder '{MONGO_BACKUP_FOLDER}' does not exist.\") return False shutil.rmtree(MONGO_BACKUP_FOLDER) __logger.info(f\"Removed backup entities from the backup folder '{MONGO_BACKUP_FOLDER}'.\") return True def _migrate_mongo_entities( hostname: str = \"localhost\", port: int = 27017, user: str = \"\", password: str = \"\", backup: bool = True, ) -> bool: \"\"\"Migrate entities from mongodb to the current version. Args: hostname (str, optional): The hostname of the mongodb. Defaults to \"localhost\". port (int, optional): The port of the mongodb. Defaults to 27017. user (str, optional): The username of the mongodb. Defaults to \"\". password (str, optional): The password of the mongodb. Defaults to \"\". backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise. \"\"\" if backup: _backup_mongo_entities(hostname=hostname, port=port, user=user, password=password) __logger.info(f\"Starting entity migration from MongoDB {hostname}:{port}\") entities = __load_all_entities_from_mongo(hostname, port, user, password) entities, _ = _migrate(entities) __write_entities_to_mongo(entities, hostname, port, user, password) __logger.info(\"Migration finished\") return True "} {"text": "from ._migrate_fs import _migrate_fs_entities, _remove_backup_file_entities, _restore_migrate_file_entities from ._migrate_mongo import _migrate_mongo_entities, _remove_backup_mongo_entities, _restore_migrate_mongo_entities from ._migrate_sql import _migrate_sql_entities, _remove_backup_sql_entities, _restore_migrate_sql_entities "} {"text": "import json import os import shutil from typing import Dict from taipy.logger._taipy_logger import _TaipyLogger from ._utils import _migrate __logger = _TaipyLogger._get_logger() def _load_all_entities_from_fs(root: str) -> Dict: # run through all files in the data folder and load them entities = {} for root, dirs, files in os.walk(root): for file in files: if file.endswith(\".json\"): with open(os.path.join(root, file)) as f: _id = file.split(\".\")[0] if \"version\" in root: _id = f\"VERSION_{_id}\" entities[_id] = { \"data\": json.load(f), \"path\": os.path.join(root, file), } return entities def __write_entities_to_fs(_entities: Dict, root: str): if not os.path.exists(root): os.makedirs(root, exist_ok=True) for _id, entity in _entities.items(): # Do not write pipeline entities if \"PIPELINE\" in _id: continue with open(entity[\"path\"], \"w\") as f: json.dump(entity[\"data\"], f, indent=0) # Remove pipelines folder pipelines_path = os.path.join(root, \"pipelines\") if os.path.exists(pipelines_path): shutil.rmtree(pipelines_path) def _restore_migrate_file_entities(path: str) -> bool: backup_path = f\"{path}_backup\" if not os.path.exists(backup_path): __logger.error(f\"The backup folder '{backup_path}' does not exist.\") return False if os.path.exists(path): shutil.rmtree(path) else: __logger.warning(f\"The original entities folder '{path}' does not exist.\") os.rename(backup_path, path) __logger.info(f\"Restored entities from the backup folder '{backup_path}' to '{path}'.\") return True def _remove_backup_file_entities(path: str) -> bool: 
backup_path = f\"{path}_backup\" if not os.path.exists(backup_path): __logger.error(f\"The backup folder '{backup_path}' does not exist.\") return False shutil.rmtree(backup_path) __logger.info(f\"Removed backup entities from the backup folder '{backup_path}'.\") return True def _migrate_fs_entities(path: str, backup: bool = True) -> bool: \"\"\"Migrate entities from filesystem to the current version. Args: path (str): The path to the folder containing the entities. backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise. \"\"\" if not os.path.isdir(path): __logger.error(f\"Folder '{path}' does not exist.\") return False if backup: backup_path = f\"{path}_backup\" try: shutil.copytree(path, backup_path) except FileExistsError: __logger.warning(f\"The backup folder '{backup_path}' already exists. Migration canceled.\") return False else: __logger.info(f\"Backed up entities from '{path}' to '{backup_path}' folder before migration.\") __logger.info(f\"Starting entity migration from '{path}' folder.\") entities = _load_all_entities_from_fs(path) entities, _ = _migrate(entities) __write_entities_to_fs(entities, path) __logger.info(\"Migration finished\") return True "} {"text": "import json from typing import Dict, List, Optional, Tuple from taipy.logger._taipy_logger import _TaipyLogger __logger = _TaipyLogger._get_logger() def __update_parent_ids(entity: Dict, data: Dict) -> Dict: # parent_ids was not present in 2.0, need to be search for in tasks parent_ids = entity.get(\"parent_ids\", []) if not parent_ids: parent_ids = __search_parent_ids(entity[\"id\"], data) entity[\"parent_ids\"] = parent_ids return entity def __update_config_parent_ids(id: str, entity: Dict, entity_type: str, config: Dict) -> Dict: # parent_ids was not present in 2.0, need to be search for in tasks parent_ids = entity.get(\"parent_ids\", []) if not parent_ids: parent_ids = __search_parent_config(id, config, entity_type) entity[\"parent_ids\"] = parent_ids return entity def __search_parent_ids(entity_id: str, data: Dict) -> List: parents = [] entity_type = entity_id.split(\"_\", 1)[0] for _id, entity_data in data.items(): entity_data = entity_data[\"data\"] if entity_type == \"DATANODE\" and \"TASK\" in _id: if entity_id in entity_data[\"input_ids\"] or entity_id in entity_data[\"output_ids\"]: parents.append(_id) if entity_type == \"TASK\" and \"SCENARIO\" in _id: if entity_id in entity_data[\"tasks\"]: parents.append(_id) parents.sort() return parents def __search_parent_config(entity_id: str, config: Dict, entity_type: str) -> List: parents = [] possible_parents = \"TASK\" if entity_type == \"DATA_NODE\" else \"SCENARIO\" data = config[possible_parents] for _id, entity_data in data.items(): section_id = f\"{entity_id}:SECTION\" if entity_type == \"DATANODE\" and possible_parents == \"TASK\": if section_id in entity_data[\"input_ids\"] or section_id in entity_data[\"output_ids\"]: parents.append(section_id) if entity_type == \"TASK\" and possible_parents == \"SCENARIO\": if section_id in entity_data[\"tasks\"]: parents.append(section_id) parents.sort() return parents def __fetch_tasks_from_pipelines(pipelines: List, data: Dict) -> List: tasks = [] for pipeline in pipelines: pipeline_data = data[pipeline][\"data\"] tasks.extend(pipeline_data[\"tasks\"]) return tasks def __migrate_subscriber(fct_module, fct_name): \"\"\"Rename scheduler by orchestrator on old jobs. 
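# Standalone sketch of the parent lookup in __search_parent_ids above, using made-up entity ids
# of the TYPE_suffix form the migration expects:
data = {
    'TASK_t1': {'data': {'input_ids': ['DATANODE_d1'], 'output_ids': ['DATANODE_d2']}},
    'SCENARIO_s1': {'data': {'tasks': ['TASK_t1']}},
}
entity_id = 'DATANODE_d1'
parents = [
    _id for _id, entry in data.items()
    if 'TASK' in _id and (entity_id in entry['data']['input_ids'] or entity_id in entry['data']['output_ids'])
]
print(sorted(parents))   # ['TASK_t1']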
Used to migrate from <=2.2 to >=2.3 version.\"\"\" if fct_module == \"taipy.core._scheduler._scheduler\": fct_module = fct_module.replace(\"_scheduler\", \"_orchestrator\") fct_name = fct_name.replace(\"_Scheduler\", \"_Orchestrator\") return fct_module, fct_name def __migrate_scenario(scenario: Dict, data: Dict) -> Dict: # pipelines were replaced by tasks scenario[\"tasks\"] = __fetch_tasks_from_pipelines(scenario[\"pipelines\"], data) # pipeline attribute not removed in 3.0 scenario[\"pipelines\"] = None # additional_data_nodes attribute added scenario[\"additional_data_nodes\"] = [] return scenario def __is_cacheable(task: Dict, data: Dict) -> bool: output_ids = task.get(\"output_ids\", []) or task.get(\"outputs\", []) # output_ids is on entity, outputs is on config for output_id in output_ids: if output_id.endswith(\":SECTION\"): # Get the config_id if the task is a Config output_id = output_id.split(\":\")[0] dn = data.get(output_id, {}) if \"data\" in dn: dn = dn.get(\"data\", {}) if \"cacheable\" not in dn or not dn[\"cacheable\"] or dn[\"cacheable\"] == \"False:bool\": return False return True def __migrate_task(task: Dict, data: Dict, is_entity: bool = True) -> Dict: if is_entity: # parent_id has been renamed to owner_id try: task[\"owner_id\"] = task[\"parent_id\"] del task[\"parent_id\"] except KeyError: pass # properties was not present in 2.0 task[\"properties\"] = task.get(\"properties\", {}) # skippable was not present in 2.0 task[\"skippable\"] = task.get(\"skippable\", False) or __is_cacheable(task, data) return task def __migrate_task_entity(task: Dict, data: Dict) -> Dict: task = __update_parent_ids(task, data) return __migrate_task(task, data) def __migrate_task_config(task: Dict, config: Dict) -> Dict: task = __migrate_task(task, config[\"DATA_NODE\"], False) # Convert the skippable boolean to a string if needed if isinstance(task.get(\"skippable\"), bool): task[\"skippable\"] = str(task[\"skippable\"]) + \":bool\" return task def __update_scope(scope: str): if scope in \"\": return \"\" elif scope == \"\": return \"\" elif scope == \"\": return \"\" return scope def __migrate_datanode(datanode: Dict) -> Dict: # cacheable was removed in after 2.0 _ = datanode.pop(\"cacheable\", False) # job_ids was replaced by edits if \"job_ids\" in datanode: datanode[\"edits\"] = [{\"job_id\": job, \"timestamp\": datanode[\"last_edit_date\"]} for job in datanode[\"job_ids\"]] elif \"edits\" in datanode: # make sure timestamp inside edits is a string edits = [] for edit in datanode[\"edits\"]: timestamp = edit.get(\"timestamp\") if isinstance(timestamp, dict): timestamp = timestamp.get(\"__value__\") new_edit = {\"timestamp\": timestamp} if \"job_id\" in edit: new_edit[\"job_id\"] = edit[\"job_id\"] edits.append(new_edit) datanode[\"edits\"] = edits # parent_id has been renamed to owner_id try: datanode[\"owner_id\"] = datanode[\"parent_id\"] del datanode[\"parent_id\"] except KeyError: pass # Update Scope enum after Pipeline removal datanode[\"scope\"] = __update_scope(datanode[\"scope\"]) # Update move name attribute to properties dictionary datanode[\"data_node_properties\"][\"name\"] = datanode.pop(\"name\", None) if \"last_edit_date\" not in datanode: datanode[\"last_edit_date\"] = datanode.get(\"last_edition_date\") if \"last_edition_date\" in datanode: del datanode[\"last_edition_date\"] if \"edit_in_progress\" not in datanode: datanode[\"edit_in_progress\"] = datanode.get(\"edition_in_progress\") if \"edition_in_progress\" in datanode: del datanode[\"edition_in_progress\"] 
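# Illustrative field mapping applied by __migrate_datanode above (a hedged sketch;
# the identifiers and values are invented, not taken from a real repository):
#
#   2.x document                           3.0 document
#   -------------------------------------  -------------------------------------
#   'parent_id': 'SCENARIO_sc_1'           'owner_id': 'SCENARIO_sc_1'
#   'cacheable': True                      (dropped; folded into the task 'skippable')
#   'job_ids': ['JOB_j_1']                 'edits': [{'job_id': 'JOB_j_1', 'timestamp': ...}]
#   'last_edition_date': '...'             'last_edit_date': '...'
#   'edition_in_progress': False           'edit_in_progress': False
#   'name': 'my_dn'                        'data_node_properties': {'name': 'my_dn'}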
return datanode def __migrate_datanode_entity(datanode: Dict, data: Dict) -> Dict: datanode = __update_parent_ids(datanode, data) return __migrate_datanode(datanode) def __migrate_datanode_config(datanode: Dict) -> Dict: if datanode[\"storage_type\"] in [\"csv\", \"json\"]: datanode[\"encoding\"] = \"utf-8\" return datanode def __migrate_job(job: Dict) -> Dict: # submit_entity_id was not present before 3.0 job[\"submit_entity_id\"] = job.get(\"submit_entity_id\", None) if \"subscribers\" in job: for sub in job[\"subscribers\"]: sub[\"fct_module\"], sub[\"fct_name\"] = __migrate_subscriber(sub[\"fct_module\"], sub[\"fct_name\"]) return job def __migrate_global_config(config: Dict): fields_to_remove = [\"clean_entities_enabled\"] fields_to_move = [\"root_folder\", \"storage_folder\", \"repository_type\", \"read_entity_retry\"] for field in fields_to_remove: if field in config[\"TAIPY\"]: del config[\"TAIPY\"][field] try: for field in fields_to_move: if field not in config[\"CORE\"]: config[\"CORE\"][field] = config[\"TAIPY\"][field] del config[\"TAIPY\"][field] except KeyError: pass return config def __migrate_version(version: Dict) -> Dict: config_str = version[\"config\"] # Remove PIPELINE scope config_str = config_str.replace(\"PIPELINE:SCOPE\", \"SCENARIO:SCOPE\") config = json.loads(config_str) # remove unused fields and move others from TAIPY to CORE section config = __migrate_global_config(config) # replace pipelines for tasks pipelines_section = config[\"PIPELINE\"] for id, content in config[\"SCENARIO\"].items(): tasks = [] for _pipeline in content[\"pipelines\"]: pipeline_id = _pipeline.split(\":\")[0] tasks = pipelines_section[pipeline_id][\"tasks\"] config[\"SCENARIO\"][id][\"tasks\"] = tasks del config[\"SCENARIO\"][id][\"pipelines\"] for id, content in config[\"TASK\"].items(): config[\"TASK\"][id] = __migrate_task_config(content, config) for id, content in config[\"DATA_NODE\"].items(): config[\"DATA_NODE\"][id] = __migrate_datanode_config(content) del config[\"PIPELINE\"] version[\"config\"] = json.dumps(config, ensure_ascii=False, indent=0) return version def __migrate_entities(entity_type: str, data: Dict) -> Dict: migration_fct = FCT_MIGRATION_MAP.get(entity_type) _entities = {k: data[k] for k in data if entity_type in k} for k, v in _entities.items(): if entity_type in [\"JOB\", \"VERSION\"]: v[\"data\"] = migration_fct(v[\"data\"]) # type: ignore else: v[\"data\"] = migration_fct(v[\"data\"], data) # type: ignore data[k] = v return data FCT_MIGRATION_MAP = { \"SCENARIO\": __migrate_scenario, \"TASK\": __migrate_task_entity, \"DATANODE\": __migrate_datanode_entity, \"JOB\": __migrate_job, \"VERSION\": __migrate_version, } def _migrate(entities: Dict, versions: Optional[Dict] = None) -> Tuple[Dict, Dict]: __logger.info(\"Migrating SCENARIOS\") entities = __migrate_entities(\"SCENARIO\", entities) __logger.info(\"Migrating TASKS\") entities = __migrate_entities(\"TASK\", entities) __logger.info(\"Migrating DATANODES\") entities = __migrate_entities(\"DATANODE\", entities) __logger.info(\"Migrating JOBS\") entities = __migrate_entities(\"JOB\", entities) __logger.info(\"Migrating VERSION\") if versions: versions = __migrate_entities(\"VERSION\", versions) else: entities = __migrate_entities(\"VERSION\", entities) versions = {} return entities, versions "} {"text": "import json import os import shutil import sqlite3 from typing import Dict, Tuple from taipy.logger._taipy_logger import _TaipyLogger from ._utils import _migrate __logger = _TaipyLogger._get_logger() def 
_load_all_entities_from_sql(db_file: str) -> Tuple[Dict, Dict]: conn = sqlite3.connect(db_file) query = \"SELECT model_id, document FROM taipy_model\" query_version = \"SELECT * FROM taipy_version\" cursor = conn.execute(query) entities = {} versions = {} for row in cursor: _id = row[0] document = row[1] entities[_id] = {\"data\": json.loads(document)} cursor = conn.execute(query_version) for row in cursor: id = row[0] config_id = row[1] creation_date = row[2] is_production = row[3] is_development = row[4] is_latest = row[5] versions[id] = { \"config_id\": config_id, \"creation_date\": creation_date, \"is_production\": is_production, \"is_development\": is_development, \"is_latest\": is_latest, } return entities, versions def __insert_scenario(scenario: dict, conn): query = f\"\"\" INSERT INTO scenario (id, config_id, tasks, additional_data_nodes, creation_date, primary_scenario, subscribers, tags, version, pipelines, cycle) VALUES ({scenario['id']}, {scenario['config_id']}, {scenario['tasks']}, {scenario['additional_data_nodes']}, {scenario['creation_date']}, {scenario['primary_scenario']}, {scenario['subscribers']}, {scenario['tags']}, {scenario['version']}, {scenario['pipelines']}, {scenario['cycle']}) \"\"\" conn.execute(query) conn.commit() def __insert_task(task: dict, conn): query = f\"\"\" INSERT INTO task (id, owner_id, parent_ids, config_id, input_ids, function_name, function_module, output_ids, version, skippable, properties) VALUES ({task['id']}, {task['owner_id']}, {task['parent_ids']}, {task['config_id']}, {task['input_ids']}, {task['function_name']}, {task['function_module']}, {task['output_ids']}, {task['version']}, {task['skippable']}, {task['properties']}) \"\"\" conn.execute(query) conn.commit() def __insert_datanode(datanode: dict, conn): query = f\"\"\" INSERT INTO data_node (id, config_id, scope, storage_type, name, owner_id, parent_ids, last_edit_date, edits, version, validity_days, validity_seconds, edit_in_progress, data_node_properties) VALUES ({datanode['id']}, {datanode['config_id']}, {datanode['scope']}, {datanode['storage_type']}, {datanode['name']}, {datanode['owner_id']}, {datanode['parent_ids']}, {datanode['last_edit_date']}, {datanode['edits']}, {datanode['version']}, {datanode['validity_days']}, {datanode['validity_seconds']}, {datanode['edit_in_progress']}, {datanode['data_node_properties']}) \"\"\" conn.execute(query) conn.commit() def __insert_job(job: dict, conn): query = f\"\"\" INSERT INTO job (id, task_id, status, force, submit_id, submit_entity_id, creation_date, subscribers, stacktrace, version) VALUES ({job['id']}, {job['task_id']}, {job['status']}, {job['force']}, {job['submit_id']}, {job['submit_entity_id']}, {job['creation_date']}, {job['subscribers']}, {job['stacktrace']}, {job['version']}) \"\"\" conn.execute(query) conn.commit() def __insert_cycle(cycle: dict, conn): query = f\"\"\" INSERT INTO scenario (id, name, frequency, properties, creation_date, start_date, end_date) VALUES ({cycle['id']}, {cycle['name']}, {cycle['frequency']}, {cycle['properties']}, {cycle['creation_date']}, {cycle['start_date']}, {cycle['end_date']}) \"\"\" conn.execute(query) conn.commit() def __insert_version(version: dict, conn): query = f\"\"\" INSERT INTO version (id, config_id, creation_date, is_production, is_development, is_latest) VALUES ({version['id']}, {version['config_id']}, {version['creation_date']}, {version['is_production']}, {version['is_development']}, {version['is_latest']}) \"\"\" conn.execute(query) conn.commit() def 
__write_entities_to_sql(_entities: Dict, _versions: Dict, db_file: str): conn = sqlite3.connect(db_file) for k, entity in _entities.items(): if \"SCENARIO\" in k: __insert_scenario(entity[\"data\"], conn) elif \"TASK\" in k: __insert_task(entity[\"data\"], conn) elif \"DATANODE\" in k: __insert_datanode(entity[\"data\"], conn) elif \"JOB\" in k: __insert_job(entity[\"data\"], conn) elif \"CYCLE\" in k: __insert_cycle(entity[\"data\"], conn) for k, version in _versions.items(): __insert_version(version, conn) def _restore_migrate_sql_entities(path: str) -> bool: file_name, file_extension = path.rsplit(\".\", 1) backup_path = f\"{file_name}_backup.{file_extension}\" if not os.path.exists(backup_path): __logger.error(f\"The backup database '{backup_path}' does not exist.\") return False if os.path.exists(path): os.remove(path) else: __logger.warning(f\"The original entities database '{path}' does not exist.\") os.rename(backup_path, path) __logger.info(f\"Restored entities from the backup database '{backup_path}' to '{path}'.\") return True def _remove_backup_sql_entities(path: str) -> bool: file_name, file_extension = path.rsplit(\".\", 1) backup_path = f\"{file_name}_backup.{file_extension}\" if not os.path.exists(backup_path): __logger.error(f\"The backup database '{backup_path}' does not exist.\") return False os.remove(backup_path) __logger.info(f\"Removed backup entities from the backup database '{backup_path}'.\") return True def _migrate_sql_entities(path: str, backup: bool = True) -> bool: \"\"\"Migrate entities from sqlite database to the current version. Args: path (str): The path to the sqlite database. backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise. \"\"\" if not path: __logger.error(\"Missing the required sqlite path.\") return False if not os.path.exists(path): __logger.error(f\"File '{path}' does not exist.\") return False if backup: file_name, file_extension = path.rsplit(\".\", 1) shutil.copyfile(path, f\"{file_name}_backup.{file_extension}\") __logger.info(f\"Backed up entities from '{path}' to '{file_name}_backup.{file_extension}' before migration.\") __logger.info(f\"Starting entity migration from sqlite database '{path}'\") entities, versions = _load_all_entities_from_sql(path) entities, versions = _migrate(entities, versions) __write_entities_to_sql(entities, versions, path) __logger.info(\"Migration finished\") return True "} {"text": "from functools import lru_cache import pymongo @lru_cache def _connect_mongodb( db_host: str, db_port: int, db_username: str, db_password: str, db_extra_args: frozenset, db_driver: str ) -> pymongo.MongoClient: \"\"\"Create a connection to a Mongo database. The `\"mongodb_extra_args\"` passed by the user is originally a dictionary, but since `@lru_cache` wrapper only accepts hashable parameters, the `\"mongodb_extra_args\"` should be converted into a frozenset beforehand. Parameters: db_host (str): the database host. db_port (int): the database port. db_username (str): the database username. db_password (str): the database password. db_extra_args (frozenset): A frozenset converted from a dictionary of additional arguments to be passed into database connection string. 
Returns: pymongo.MongoClient \"\"\" auth_str = \"\" if db_username and db_password: auth_str = f\"{db_username}:{db_password}@\" extra_args_str = \"&\".join(f\"{k}={str(v)}\" for k, v in db_extra_args) if extra_args_str: extra_args_str = \"/?\" + extra_args_str driver = \"mongodb\" if db_driver: driver = f\"{driver}+{db_driver}\" connection_string = f\"{driver}://{auth_str}{db_host}\" connection_string = connection_string if db_driver else f\"{connection_string}:{db_port}\" connection_string += extra_args_str return pymongo.MongoClient(connection_string) "} {"text": "from taipy.config.common._validate_id import _validate_id class MongoDefaultDocument: \"\"\"The default class for \\\"custom_document\\\" property to configure a `MongoCollectionDataNode^`. Attributes: **kwargs: Attributes of the MongoDefaultDocument object. Example: - `document = MongoDefaultDocument(name=\"example\", age=30})` will return a MongoDefaultDocument object so that `document.name` returns `\"example\"`, and `document.age` returns `30`. - `document = MongoDefaultDocument(date=\"12/24/2018\", temperature=20})` will return a MongoDefaultDocument object so that `document.date` returns `\"12/24/2018\"`, and `document.temperature` returns `20`. \"\"\" def __init__(self, **kwargs): for attribute_name, value in kwargs.items(): setattr(self, _validate_id(attribute_name), value) "} {"text": "from .mongo_default_document import MongoDefaultDocument "} {"text": "from collections import UserList class _ListAttributes(UserList): def __init__(self, parent, *args, **kwargs): super().__init__(*args, **kwargs) self._parent = parent def __add_iterable(self, iterable): for i in iterable: super(_ListAttributes, self).append(i) def __set_self(self): from ... import core as tp if hasattr(self, \"_parent\"): tp.set(self._parent) def __add__(self, value): if hasattr(value, \"__iter__\"): self.__add_iterable(value) else: self.append(value) return self def extend(self, value) -> None: super(_ListAttributes, self).extend(value) self.__set_self() def append(self, value) -> None: super(_ListAttributes, self).append(value) self.__set_self() def remove(self, value): super(_ListAttributes, self).remove(value) self.__set_self() def clear(self) -> None: super(_ListAttributes, self).clear() self.__set_self() "} {"text": "import functools import warnings from typing import Optional warnings.simplefilter(\"once\", ResourceWarning) def _warn_deprecated(deprecated: str, suggest: Optional[str] = None, stacklevel: int = 3) -> None: category = DeprecationWarning message = f\"{deprecated} is deprecated.\" if suggest: message += f\" Use {suggest} instead.\" warnings.warn(message=message, category=category, stacklevel=stacklevel) def _warn_no_core_service(stacklevel: int = 3): def inner(f): @functools.wraps(f) def _check_if_core_service_is_running(*args, **kwargs): from .._orchestrator._orchestrator_factory import _OrchestratorFactory if not _OrchestratorFactory._dispatcher: message = \"The Core service is NOT running\" warnings.warn(message=message, category=ResourceWarning, stacklevel=stacklevel) return f(*args, **kwargs) return _check_if_core_service_is_running return inner "} {"text": "import functools from enum import Enum class _ReprEnum(Enum): @classmethod @functools.lru_cache def _from_repr(cls, repr_: str): return next(filter(lambda e: repr(e) == repr_, cls)) # type: ignore "} {"text": "from typing import Iterable from taipy.logger._taipy_logger import _TaipyLogger from ..data import DataNode def _warn_if_inputs_not_ready(inputs: Iterable[DataNode]): 
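# Hedged sketch of the connection strings built by _connect_mongodb above
# (host names and credentials are invented):
#
#   _connect_mongodb('localhost', 27017, 'user', 'pwd',
#                    frozenset({'retryWrites': 'true'}.items()), '')
#   # -> mongodb://user:pwd@localhost:27017/?retryWrites=true
#
#   _connect_mongodb('cluster.example.net', 27017, 'user', 'pwd',
#                    frozenset({'retryWrites': 'true'}.items()), 'srv')
#   # -> mongodb+srv://user:pwd@cluster.example.net/?retryWrites=true
#   #    (the port is omitted because a driver suffix is given)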
from ..data import CSVDataNode, ExcelDataNode, JSONDataNode, ParquetDataNode, PickleDataNode from ..data._data_manager_factory import _DataManagerFactory logger = _TaipyLogger._get_logger() data_manager = _DataManagerFactory._build_manager() for dn in inputs: dn = data_manager._get(dn.id) if dn.is_ready_for_reading is False and not dn._last_edit_date: if dn.storage_type() in [ CSVDataNode.storage_type(), ExcelDataNode.storage_type(), JSONDataNode.storage_type(), PickleDataNode.storage_type(), ParquetDataNode.storage_type(), ]: logger.warning( f\"{dn.id} cannot be read because it has never been written. \" f\"Hint: The data node may refer to a wrong path : {dn.path} \" ) else: logger.warning(f\"{dn.id} cannot be read because it has never been written.\") "} {"text": "from typing import TypeVar, Union from .._repository._abstract_converter import _AbstractConverter from .._repository._base_taipy_model import _BaseModel ModelType = TypeVar(\"ModelType\", bound=_BaseModel) Entity = TypeVar(\"Entity\") Converter = TypeVar(\"Converter\", bound=_AbstractConverter) Json = Union[dict, list, str, int, float, bool] "} {"text": "import functools import time from collections import namedtuple from importlib import import_module from operator import attrgetter from typing import Callable, Optional, Tuple from taipy.config import Config @functools.lru_cache def _load_fct(module_name: str, fct_name: str) -> Callable: module = import_module(module_name) return attrgetter(fct_name)(module) def _retry_read_entity(exceptions: Tuple, sleep_time: float = 0.2): \"\"\" Retries the wrapped function/method if the exceptions listed in ``exceptions`` are thrown. The number of retries is defined by Config.core.read_entity_retry. Parameters: exceptions (tuple): Tuple of exceptions that trigger a retry attempt. sleep_time (float): Time to sleep between retries. 
\"\"\" def decorator(func): def newfn(*args, **kwargs): for _ in range(Config.core.read_entity_retry): try: return func(*args, **kwargs) except exceptions: time.sleep(sleep_time) return func(*args, **kwargs) return newfn return decorator @functools.lru_cache def _get_fct_name(f) -> Optional[str]: # Mock function does not have __qualname__ attribute -> return __name__ # Partial or anonymous function does not have __name__ or __qualname__ attribute -> return None name = getattr(f, \"__qualname__\", getattr(f, \"__name__\", None)) return name def _fct_to_dict(obj): params = [] callback = obj if isinstance(obj, _Subscriber): callback = obj.callback params = obj.params fct_name = _get_fct_name(callback) if not fct_name: return None return { \"fct_name\": fct_name, \"fct_params\": params, \"fct_module\": callback.__module__, } def _fcts_to_dict(objs): return [d for obj in objs if (d := _fct_to_dict(obj)) is not None] _Subscriber = namedtuple(\"_Subscriber\", \"callback params\") "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._scenario_fs_repository import _ScenarioFSRepository from ._scenario_manager import _ScenarioManager from ._scenario_sql_repository import _ScenarioSQLRepository class _ScenarioManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _ScenarioFSRepository, \"sql\": _ScenarioSQLRepository} @classmethod def _build_manager(cls) -> Type[_ScenarioManager]: # type: ignore if cls._using_enterprise(): scenario_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".scenario._scenario_manager\", \"_ScenarioManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".scenario._scenario_manager_factory\", \"_ScenarioManagerFactory\" )._build_repository # type: ignore else: scenario_manager = _ScenarioManager build_repository = cls._build_repository scenario_manager._repository = build_repository() # type: ignore return scenario_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from dataclasses import dataclass from typing import Any, Dict, List, Optional from sqlalchemy import JSON, Boolean, Column, String, Table from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry from ..cycle.cycle_id import CycleId from ..data.data_node_id import DataNodeId from ..task.task_id import TaskId from .scenario_id import ScenarioId @mapper_registry.mapped @dataclass class _ScenarioModel(_BaseModel): __table__ = Table( \"scenario\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"config_id\", String), Column(\"tasks\", JSON), Column(\"additional_data_nodes\", JSON), Column(\"properties\", JSON), Column(\"creation_date\", String), Column(\"primary_scenario\", Boolean), Column(\"subscribers\", JSON), Column(\"tags\", JSON), Column(\"version\", String), Column(\"sequences\", JSON), Column(\"cycle\", String), ) id: ScenarioId config_id: str tasks: List[TaskId] additional_data_nodes: List[DataNodeId] properties: Dict[str, Any] creation_date: str primary_scenario: bool subscribers: List[Dict] tags: List[str] version: str sequences: Optional[Dict[str, Dict]] = None cycle: Optional[CycleId] = None @staticmethod def from_dict(data: Dict[str, Any]): return _ScenarioModel( id=data[\"id\"], config_id=data[\"config_id\"], tasks=_BaseModel._deserialize_attribute(data[\"tasks\"]), additional_data_nodes=_BaseModel._deserialize_attribute(data[\"additional_data_nodes\"]), properties=_BaseModel._deserialize_attribute(data[\"properties\"]), creation_date=data[\"creation_date\"], primary_scenario=data[\"primary_scenario\"], subscribers=_BaseModel._deserialize_attribute(data[\"subscribers\"]), tags=_BaseModel._deserialize_attribute(data[\"tags\"]), version=data[\"version\"], sequences=_BaseModel._deserialize_attribute(data[\"sequences\"]), cycle=CycleId(data[\"cycle\"]) if \"cycle\" in data else None, ) def to_list(self): return [ self.id, self.config_id, _BaseModel._serialize_attribute(self.tasks), _BaseModel._serialize_attribute(self.additional_data_nodes), _BaseModel._serialize_attribute(self.properties), self.creation_date, self.primary_scenario, _BaseModel._serialize_attribute(self.subscribers), _BaseModel._serialize_attribute(self.tags), self.version, _BaseModel._serialize_attribute(self.sequences), self.cycle, ] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
from .._repository._filesystem_repository import _FileSystemRepository from ._scenario_converter import _ScenarioConverter from ._scenario_model import _ScenarioModel class _ScenarioFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_ScenarioModel, converter=_ScenarioConverter, dir_name=\"scenarios\") "} {"text": "from __future__ import annotations import pathlib import uuid from datetime import datetime from typing import Any, Callable, Dict, List, Optional, Set, Union import networkx as nx from taipy.config.common._template_handler import _TemplateHandler as _tpl from taipy.config.common._validate_id import _validate_id from .._entity._entity import _Entity from .._entity._labeled import _Labeled from .._entity._properties import _Properties from .._entity._reload import _Reloader, _self_reload, _self_setter from .._entity.submittable import Submittable from .._version._version_manager_factory import _VersionManagerFactory from ..common._listattributes import _ListAttributes from ..common._utils import _Subscriber from ..cycle.cycle import Cycle from ..data._data_manager_factory import _DataManagerFactory from ..data.data_node import DataNode from ..data.data_node_id import DataNodeId from ..exceptions.exceptions import ( InvalidSequence, NonExistingDataNode, NonExistingSequence, NonExistingTask, SequenceTaskDoesNotExistInScenario, ) from ..job.job import Job from ..notification import Event, EventEntityType, EventOperation, Notifier, _make_event from ..sequence.sequence import Sequence from ..task._task_manager_factory import _TaskManagerFactory from ..task.task import Task from ..task.task_id import TaskId from .scenario_id import ScenarioId class Scenario(_Entity, Submittable, _Labeled): \"\"\"Instance of a Business case to solve. A scenario holds a set of tasks (instances of `Task^` class) to submit for execution in order to solve the Business case. It also holds a set of additional data nodes (instances of `DataNode` class) for extra data related to the scenario. Attributes: config_id (str): The identifier of the `ScenarioConfig^`. tasks (Set[Task^]): The set of tasks. additional_data_nodes (Set[DataNode^]): The set of additional data nodes. sequences (Dict[str, Sequence^]): The dictionary of sequences: subsets of tasks that can be submitted together independently of the rest of the scenario's tasks. properties (dict[str, Any]): A dictionary of additional properties. scenario_id (str): The unique identifier of this scenario. creation_date (datetime): The date and time of the scenario's creation. is_primary (bool): True if the scenario is the primary of its cycle. False otherwise. cycle (Cycle^): The cycle of the scenario. subscribers (List[Callable]): The list of callbacks to be called on `Job^`'s status change. tags (Set[str]): The list of scenario's tags. version (str): The string indicates the application version of the scenario to instantiate. If not provided, the latest version is used. 
\"\"\" _ID_PREFIX = \"SCENARIO\" _MANAGER_NAME = \"scenario\" _MIGRATED_SEQUENCES_KEY = \"sequences\" __SEPARATOR = \"_\" _SEQUENCE_TASKS_KEY = \"tasks\" _SEQUENCE_PROPERTIES_KEY = \"properties\" _SEQUENCE_SUBSCRIBERS_KEY = \"subscribers\" def __init__( self, config_id: str, tasks: Optional[Union[Set[TaskId], Set[Task]]], properties: Dict[str, Any], additional_data_nodes: Optional[Union[Set[DataNodeId], Set[DataNode]]] = None, scenario_id: Optional[ScenarioId] = None, creation_date: Optional[datetime] = None, is_primary: bool = False, cycle: Optional[Cycle] = None, subscribers: Optional[List[_Subscriber]] = None, tags: Optional[Set[str]] = None, version: str = None, sequences: Optional[Dict[str, Dict]] = None, ): super().__init__(subscribers or []) self.config_id = _validate_id(config_id) self.id: ScenarioId = scenario_id or self._new_id(self.config_id) self._tasks: Union[Set[TaskId], Set[Task], Set] = tasks or set() self._additional_data_nodes: Union[Set[DataNodeId], Set[DataNode], Set] = additional_data_nodes or set() self._creation_date = creation_date or datetime.now() self._cycle = cycle self._primary_scenario = is_primary self._tags = tags or set() self._properties = _Properties(self, **properties) self._sequences: Dict[str, Dict] = sequences or {} _scenario_task_ids = set([task.id if isinstance(task, Task) else task for task in self._tasks]) for sequence_name, sequence_data in self._sequences.items(): sequence_task_ids = set( [task.id if isinstance(task, Task) else task for task in sequence_data.get(\"tasks\", [])] ) self.__check_sequence_tasks_exist_in_scenario_tasks( sequence_name, sequence_task_ids, self.id, _scenario_task_ids ) self._version = version or _VersionManagerFactory._build_manager()._get_latest_version() @staticmethod def _new_id(config_id: str) -> ScenarioId: \"\"\"Generate a unique scenario identifier.\"\"\" return ScenarioId(Scenario.__SEPARATOR.join([Scenario._ID_PREFIX, _validate_id(config_id), str(uuid.uuid4())])) def __getstate__(self): return self.id def __setstate__(self, id): from ... 
import core as tp sc = tp.get(id) self.__dict__ = sc.__dict__ def __hash__(self): return hash(self.id) def __eq__(self, other): return self.id == other.id def __getattr__(self, attribute_name): protected_attribute_name = _validate_id(attribute_name) if protected_attribute_name in self._properties: return _tpl._replace_templates(self._properties[protected_attribute_name]) sequences = self._get_sequences() if protected_attribute_name in sequences: return sequences[protected_attribute_name] tasks = self.tasks if protected_attribute_name in tasks: return tasks[protected_attribute_name] data_nodes = self.data_nodes if protected_attribute_name in data_nodes: return data_nodes[protected_attribute_name] raise AttributeError(f\"{attribute_name} is not an attribute of scenario {self.id}\") @property # type: ignore @_self_reload(_MANAGER_NAME) def sequences(self) -> Dict[str, Sequence]: return self._get_sequences() @sequences.setter # type: ignore @_self_setter(_MANAGER_NAME) def sequences( self, sequences: Dict[str, Dict[str, Union[List[Task], List[TaskId], _ListAttributes, List[_Subscriber], Dict]]] ): self._sequences = sequences actual_sequences = self._get_sequences() for sequence_name in sequences.keys(): if not actual_sequences[sequence_name]._is_consistent(): raise InvalidSequence(actual_sequences[sequence_name].id) def add_sequence( self, name: str, tasks: Union[List[Task], List[TaskId]], properties: Optional[Dict] = None, subscribers: Optional[List[_Subscriber]] = None, ): \"\"\"Add a sequence to the scenario. Parameters: name (str): The name of the sequence. tasks (Union[List[Task], List[TaskId]]): The list of scenario's tasks to add to the sequence. properties (Optional[Dict]): The optional properties of the sequence. subscribers (Optional[List[_Subscriber]]): The optional list of callbacks to be called on `Job^`'s status change. Raises: SequenceTaskDoesNotExistInScenario^: If a task in the sequence does not exist in the scenario. \"\"\" _scenario = _Reloader()._reload(self._MANAGER_NAME, self) _scenario_task_ids = set([task.id if isinstance(task, Task) else task for task in _scenario._tasks]) _sequence_task_ids: Set[TaskId] = set([task.id if isinstance(task, Task) else task for task in tasks]) self.__check_sequence_tasks_exist_in_scenario_tasks(name, _sequence_task_ids, self.id, _scenario_task_ids) _sequences = _Reloader()._reload(self._MANAGER_NAME, self)._sequences _sequences.update( { name: { self._SEQUENCE_TASKS_KEY: tasks, self._SEQUENCE_PROPERTIES_KEY: properties or {}, self._SEQUENCE_SUBSCRIBERS_KEY: subscribers or [], } } ) self.sequences = _sequences # type: ignore if not self.sequences[name]._is_consistent(): raise InvalidSequence(name) Notifier.publish(_make_event(self.sequences[name], EventOperation.CREATION)) def add_sequences(self, sequences: Dict[str, Union[List[Task], List[TaskId]]]): \"\"\"Add multiple sequences to the scenario. Note: To provide properties and subscribers for the sequences, use `Scenario.add_sequence^` instead. Parameters: sequences (Dict[str, Union[List[Task], List[TaskId]]]): A dictionary containing sequences to add. Each key is a sequence name, and the value must be a list of the scenario tasks. Raises: SequenceTaskDoesNotExistInScenario^: If a task in the sequence does not exist in the scenario. 
\"\"\" _scenario = _Reloader()._reload(self._MANAGER_NAME, self) _sc_task_ids = set([task.id if isinstance(task, Task) else task for task in _scenario._tasks]) for name, tasks in sequences.items(): _seq_task_ids: Set[TaskId] = set([task.id if isinstance(task, Task) else task for task in tasks]) self.__check_sequence_tasks_exist_in_scenario_tasks(name, _seq_task_ids, self.id, _sc_task_ids) # Need to parse twice the sequences to avoid adding some sequences and not others in case of exception for name, tasks in sequences.items(): self.add_sequence(name, tasks) def remove_sequence(self, name: str): \"\"\"Remove a sequence from the scenario. Parameters: name (str): The name of the sequence to remove. \"\"\" seq_id = self.sequences[name].id _sequences = _Reloader()._reload(self._MANAGER_NAME, self)._sequences _sequences.pop(name) self.sequences = _sequences # type: ignore Notifier.publish(Event(EventEntityType.SEQUENCE, EventOperation.DELETION, entity_id=seq_id)) def remove_sequences(self, sequence_names: List[str]): \"\"\" Remove multiple sequences from the scenario. Parameters: sequence_names (List[str]): A list of sequence names to remove. \"\"\" _sequences = _Reloader()._reload(self._MANAGER_NAME, self)._sequences for sequence_name in sequence_names: seq_id = self.sequences[sequence_name].id _sequences.pop(sequence_name) Notifier.publish( Event( EventEntityType.SEQUENCE, EventOperation.DELETION, entity_id=seq_id, ) ) self.sequences = _sequences # type: ignore @staticmethod def __check_sequence_tasks_exist_in_scenario_tasks( sequence_name: str, sequence_task_ids: Set[TaskId], scenario_id: ScenarioId, scenario_task_ids: Set[TaskId] ): non_existing_sequence_task_ids_in_scenario = set() for sequence_task_id in sequence_task_ids: if sequence_task_id not in scenario_task_ids: non_existing_sequence_task_ids_in_scenario.add(sequence_task_id) if len(non_existing_sequence_task_ids_in_scenario) > 0: raise SequenceTaskDoesNotExistInScenario( list(non_existing_sequence_task_ids_in_scenario), sequence_name, scenario_id ) def _get_sequences(self) -> Dict[str, Sequence]: _sequences = {} from ..sequence._sequence_manager_factory import _SequenceManagerFactory sequence_manager = _SequenceManagerFactory._build_manager() for sequence_name, sequence_data in self._sequences.items(): p = sequence_manager._create( sequence_name, sequence_data.get(self._SEQUENCE_TASKS_KEY, []), sequence_data.get(self._SEQUENCE_SUBSCRIBERS_KEY, []), sequence_data.get(self._SEQUENCE_PROPERTIES_KEY, {}), self.id, self.version, ) if not isinstance(p, Sequence): raise NonExistingSequence(sequence_name) _sequences[sequence_name] = p return _sequences @property # type: ignore @_self_reload(_MANAGER_NAME) def tasks(self) -> Dict[str, Task]: return self.__get_tasks() def __get_tasks(self) -> Dict[str, Task]: _tasks = {} task_manager = _TaskManagerFactory._build_manager() for task_or_id in self._tasks: t = task_manager._get(task_or_id, task_or_id) if not isinstance(t, Task): raise NonExistingTask(task_or_id) _tasks[t.config_id] = t return _tasks @tasks.setter # type: ignore @_self_setter(_MANAGER_NAME) def tasks(self, val: Union[Set[TaskId], Set[Task]]): self._tasks = set(val) @property # type: ignore @_self_reload(_MANAGER_NAME) def additional_data_nodes(self) -> Dict[str, DataNode]: return self.__get_additional_data_nodes() def __get_additional_data_nodes(self): additional_data_nodes = {} data_manager = _DataManagerFactory._build_manager() for dn_or_id in self._additional_data_nodes: dn = data_manager._get(dn_or_id, dn_or_id) if not 
isinstance(dn, DataNode): raise NonExistingDataNode(dn_or_id) additional_data_nodes[dn.config_id] = dn return additional_data_nodes @additional_data_nodes.setter # type: ignore @_self_setter(_MANAGER_NAME) def additional_data_nodes(self, val: Union[Set[TaskId], Set[DataNode]]): self._additional_data_nodes = set(val) def _get_set_of_tasks(self) -> Set[Task]: return set(self.tasks.values()) @property # type: ignore @_self_reload(_MANAGER_NAME) def data_nodes(self) -> Dict[str, DataNode]: data_nodes_dict = self.__get_additional_data_nodes() for _, task in self.__get_tasks().items(): data_nodes_dict.update(task.data_nodes) return data_nodes_dict @property # type: ignore @_self_reload(_MANAGER_NAME) def creation_date(self): return self._creation_date @creation_date.setter # type: ignore @_self_setter(_MANAGER_NAME) def creation_date(self, val): self._creation_date = val @property # type: ignore @_self_reload(_MANAGER_NAME) def cycle(self): return self._cycle @cycle.setter # type: ignore @_self_setter(_MANAGER_NAME) def cycle(self, val): self._cycle = val @property # type: ignore @_self_reload(_MANAGER_NAME) def is_primary(self): return self._primary_scenario @is_primary.setter # type: ignore @_self_setter(_MANAGER_NAME) def is_primary(self, val): self._primary_scenario = val @property # type: ignore @_self_reload(_MANAGER_NAME) def subscribers(self): return self._subscribers @subscribers.setter # type: ignore @_self_setter(_MANAGER_NAME) def subscribers(self, val): self._subscribers = _ListAttributes(self, val) @property # type: ignore @_self_reload(_MANAGER_NAME) def tags(self): return self._tags @tags.setter # type: ignore @_self_setter(_MANAGER_NAME) def tags(self, val): self._tags = val or set() @property def version(self): return self._version @property def owner_id(self): return self._cycle.id @property def properties(self): self._properties = _Reloader()._reload(self._MANAGER_NAME, self)._properties return self._properties @property # type: ignore def name(self) -> Optional[str]: return self.properties.get(\"name\") @name.setter # type: ignore def name(self, val): self.properties[\"name\"] = val def has_tag(self, tag: str) -> bool: \"\"\"Indicate if the scenario has a given tag. Parameters: tag (str): The tag to search among the set of scenario's tags. Returns: True if the scenario has the tag given as parameter. False otherwise. \"\"\" return tag in self.tags def _add_tag(self, tag: str): self._tags = _Reloader()._reload(\"scenario\", self)._tags self._tags.add(tag) def _remove_tag(self, tag: str): self._tags = _Reloader()._reload(\"scenario\", self)._tags if self.has_tag(tag): self._tags.remove(tag) def subscribe( self, callback: Callable[[Scenario, Job], None], params: Optional[List[Any]] = None, ): \"\"\"Subscribe a function to be called on `Job^` status change. The subscription is applied to all jobs created from the scenario's execution. Parameters: callback (Callable[[Scenario^, Job^], None]): The callable function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. Note: Notification will be available only for jobs created after this subscription. \"\"\" from ... import core as tp return tp.subscribe_scenario(callback, params, self) def unsubscribe(self, callback: Callable[[Scenario, Job], None], params: Optional[List[Any]] = None): \"\"\"Unsubscribe a function that is called when the status of a `Job^` changes. Parameters: callback (Callable[[Scenario^, Job^], None]): The callable function to unsubscribe. 
params (Optional[List[Any]]): The parameters to be passed to the _callback_. Note: The function will continue to be called for ongoing jobs. \"\"\" from ... import core as tp return tp.unsubscribe_scenario(callback, params, self) def submit( self, callbacks: Optional[List[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ) -> List[Job]: \"\"\"Submit this scenario for execution. All the `Task^`s of the scenario will be submitted for execution. Parameters: callbacks (List[Callable]): The list of callable functions to be called on status change. force (bool): Force execution even if the data nodes are in cache. wait (bool): Wait for the orchestrated jobs created from the scenario submission to be finished in asynchronous mode. timeout (Union[float, int]): The optional maximum number of seconds to wait for the jobs to be finished before returning. Returns: A list of created `Job^`s. \"\"\" from ._scenario_manager_factory import _ScenarioManagerFactory return _ScenarioManagerFactory._build_manager()._submit(self, callbacks, force, wait, timeout) def export( self, folder_path: Union[str, pathlib.Path], ): \"\"\"Export all related entities of this scenario to a folder. Parameters: folder_path (Union[str, pathlib.Path]): The folder path to export the scenario to. \"\"\" from ... import core as tp return tp.export_scenario(self.id, folder_path) def set_primary(self): \"\"\"Promote the scenario as the primary scenario of its cycle. If the cycle already has a primary scenario, it will be demoted, and it will no longer be primary for the cycle. \"\"\" from ... import core as tp return tp.set_primary(self) def add_tag(self, tag: str): \"\"\"Add a tag to this scenario. If the scenario's cycle already have another scenario tagged with _tag_ the other scenario will be untagged. Parameters: tag (str): The tag to add to this scenario. \"\"\" from ... import core as tp return tp.tag(self, tag) def remove_tag(self, tag: str): \"\"\"Remove a tag from this scenario. Parameters: tag (str): The tag to remove from the set of the scenario's tags. \"\"\" from ... import core as tp return tp.untag(self, tag) def is_deletable(self) -> bool: \"\"\"Indicate if the scenario can be deleted. Returns: True if the scenario can be deleted. False otherwise. \"\"\" from ... import core as tp return tp.is_deletable(self) def get_label(self) -> str: \"\"\"Returns the scenario simple label prefixed by its owner label. Returns: The label of the scenario as a string. \"\"\" return self._get_label() def get_simple_label(self) -> str: \"\"\"Returns the scenario simple label. Returns: The simple label of the scenario as a string. 
\"\"\" return self._get_simple_label() def _is_consistent(self) -> bool: dag = self._build_dag() if dag.number_of_nodes() == 0: return True if not nx.is_directed_acyclic_graph(dag): return False for left_node, right_node in dag.edges: if (isinstance(left_node, DataNode) and isinstance(right_node, Task)) or ( isinstance(left_node, Task) and isinstance(right_node, DataNode) ): continue return False return True @_make_event.register(Scenario) def _make_event_for_scenario( scenario: Scenario, operation: EventOperation, /, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ) -> Event: metadata = {\"config_id\": scenario.config_id, \"version\": scenario.version, **kwargs} return Event( entity_type=EventEntityType.SCENARIO, entity_id=scenario.id, operation=operation, attribute_name=attribute_name, attribute_value=attribute_value, metadata=metadata, ) "} {"text": "import datetime from functools import partial from typing import Any, Callable, List, Optional, Union from taipy.config import Config from .._entity._entity_ids import _EntityIds from .._manager._manager import _Manager from .._repository._abstract_repository import _AbstractRepository from .._version._version_mixin import _VersionMixin from ..common.warn_if_inputs_not_ready import _warn_if_inputs_not_ready from ..config.scenario_config import ScenarioConfig from ..cycle._cycle_manager_factory import _CycleManagerFactory from ..cycle.cycle import Cycle from ..data._data_manager_factory import _DataManagerFactory from ..exceptions.exceptions import ( DeletingPrimaryScenario, DifferentScenarioConfigs, DoesNotBelongToACycle, InsufficientScenarioToCompare, InvalidSequence, InvalidSscenario, NonExistingComparator, NonExistingScenario, NonExistingScenarioConfig, SequenceTaskConfigDoesNotExistInSameScenarioConfig, UnauthorizedTagError, ) from ..job._job_manager_factory import _JobManagerFactory from ..job.job import Job from ..notification import EventEntityType, EventOperation, Notifier, _make_event from ..submission._submission_manager_factory import _SubmissionManagerFactory from ..task._task_manager_factory import _TaskManagerFactory from .scenario import Scenario from .scenario_id import ScenarioId class _ScenarioManager(_Manager[Scenario], _VersionMixin): _AUTHORIZED_TAGS_KEY = \"authorized_tags\" _ENTITY_NAME = Scenario.__name__ _EVENT_ENTITY_TYPE = EventEntityType.SCENARIO _repository: _AbstractRepository @classmethod def _get_all(cls, version_number: Optional[str] = None) -> List[Scenario]: \"\"\" Returns all entities. 
\"\"\" filters = cls._build_filters_with_version(version_number) return cls._repository._load_all(filters) @classmethod def _subscribe( cls, callback: Callable[[Scenario, Job], None], params: Optional[List[Any]] = None, scenario: Optional[Scenario] = None, ): if scenario is None: scenarios = cls._get_all() for scn in scenarios: cls.__add_subscriber(callback, params, scn) return cls.__add_subscriber(callback, params, scenario) @classmethod def _unsubscribe( cls, callback: Callable[[Scenario, Job], None], params: Optional[List[Any]] = None, scenario: Optional[Scenario] = None, ): if scenario is None: scenarios = cls._get_all() for scn in scenarios: cls.__remove_subscriber(callback, params, scn) return cls.__remove_subscriber(callback, params, scenario) @classmethod def __add_subscriber(cls, callback, params, scenario: Scenario): scenario._add_subscriber(callback, params) Notifier.publish( _make_event(scenario, EventOperation.UPDATE, attribute_name=\"subscribers\", attribute_value=params) ) @classmethod def __remove_subscriber(cls, callback, params, scenario: Scenario): scenario._remove_subscriber(callback, params) Notifier.publish( _make_event(scenario, EventOperation.UPDATE, attribute_name=\"subscribers\", attribute_value=params) ) @classmethod def _create( cls, config: ScenarioConfig, creation_date: Optional[datetime.datetime] = None, name: Optional[str] = None, ) -> Scenario: _task_manager = _TaskManagerFactory._build_manager() _data_manager = _DataManagerFactory._build_manager() scenario_id = Scenario._new_id(str(config.id)) cycle = ( _CycleManagerFactory._build_manager()._get_or_create(config.frequency, creation_date) if config.frequency else None ) cycle_id = cycle.id if cycle else None tasks = ( _task_manager._bulk_get_or_create(config.task_configs, cycle_id, scenario_id) if config.task_configs else [] ) additional_data_nodes = ( _data_manager._bulk_get_or_create(config.additional_data_node_configs, cycle_id, scenario_id) if config.additional_data_node_configs else {} ) sequences = {} tasks_and_config_id_maps = {task.config_id: task for task in tasks} for sequence_name, sequence_task_configs in config.sequences.items(): sequence_tasks = [] non_existing_sequence_task_config_in_scenario_config = set() for sequence_task_config in sequence_task_configs: if task := tasks_and_config_id_maps.get(sequence_task_config.id): sequence_tasks.append(task) else: non_existing_sequence_task_config_in_scenario_config.add(sequence_task_config.id) if len(non_existing_sequence_task_config_in_scenario_config) > 0: raise SequenceTaskConfigDoesNotExistInSameScenarioConfig( list(non_existing_sequence_task_config_in_scenario_config), sequence_name, str(config.id) ) sequences[sequence_name] = {Scenario._SEQUENCE_TASKS_KEY: sequence_tasks} is_primary_scenario = len(cls._get_all_by_cycle(cycle)) == 0 if cycle else False props = config._properties.copy() if name: props[\"name\"] = name version = cls._get_latest_version() scenario = Scenario( config_id=str(config.id), tasks=set(tasks), properties=props, additional_data_nodes=set(additional_data_nodes.values()), scenario_id=scenario_id, creation_date=creation_date, is_primary=is_primary_scenario, cycle=cycle, version=version, sequences=sequences, ) for task in tasks: if scenario_id not in task._parent_ids: task._parent_ids.update([scenario_id]) _task_manager._set(task) for dn in additional_data_nodes.values(): if scenario_id not in dn._parent_ids: dn._parent_ids.update([scenario_id]) _data_manager._set(dn) cls._set(scenario) if not scenario._is_consistent(): raise 
InvalidSscenario(scenario.id) actual_sequences = scenario._get_sequences() for sequence_name in sequences.keys(): if not actual_sequences[sequence_name]._is_consistent(): raise InvalidSequence(actual_sequences[sequence_name].id) Notifier.publish(_make_event(actual_sequences[sequence_name], EventOperation.CREATION)) Notifier.publish(_make_event(scenario, EventOperation.CREATION)) return scenario @classmethod def _is_submittable(cls, scenario: Union[Scenario, ScenarioId]) -> bool: if isinstance(scenario, str): scenario = cls._get(scenario) return isinstance(scenario, Scenario) and scenario.is_ready_to_run() @classmethod def _submit( cls, scenario: Union[Scenario, ScenarioId], callbacks: Optional[List[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, check_inputs_are_ready: bool = True, ) -> List[Job]: scenario_id = scenario.id if isinstance(scenario, Scenario) else scenario scenario = cls._get(scenario_id) if scenario is None: raise NonExistingScenario(scenario_id) callbacks = callbacks or [] scenario_subscription_callback = cls.__get_status_notifier_callbacks(scenario) + callbacks if check_inputs_are_ready: _warn_if_inputs_not_ready(scenario.get_inputs()) jobs = ( _TaskManagerFactory._build_manager() ._orchestrator() .submit(scenario, callbacks=scenario_subscription_callback, force=force, wait=wait, timeout=timeout) ) Notifier.publish(_make_event(scenario, EventOperation.SUBMISSION)) return jobs @classmethod def __get_status_notifier_callbacks(cls, scenario: Scenario) -> List: return [partial(c.callback, *c.params, scenario) for c in scenario.subscribers] @classmethod def _get_primary(cls, cycle: Cycle) -> Optional[Scenario]: scenarios = cls._get_all_by_cycle(cycle) for scenario in scenarios: if scenario.is_primary: return scenario return None @classmethod def _get_by_tag(cls, cycle: Cycle, tag: str) -> Optional[Scenario]: scenarios = cls._get_all_by_cycle(cycle) for scenario in scenarios: if scenario.has_tag(tag): return scenario return None @classmethod def _get_all_by_tag(cls, tag: str) -> List[Scenario]: scenarios = [] for scenario in cls._get_all(): if scenario.has_tag(tag): scenarios.append(scenario) return scenarios @classmethod def _get_all_by_cycle(cls, cycle: Cycle) -> List[Scenario]: filters = cls._build_filters_with_version(\"all\") if not filters: filters = [{}] for fil in filters: fil.update({\"cycle\": cycle.id}) return cls._get_all_by(filters) @classmethod def _get_primary_scenarios(cls) -> List[Scenario]: primary_scenarios = [] for scenario in cls._get_all(): if scenario.is_primary: primary_scenarios.append(scenario) return primary_scenarios @classmethod def _is_promotable_to_primary(cls, scenario: Union[Scenario, ScenarioId]) -> bool: if isinstance(scenario, str): scenario = cls._get(scenario) if scenario and not scenario.is_primary and scenario.cycle: return True return False @classmethod def _set_primary(cls, scenario: Scenario): if scenario.cycle: primary_scenario = cls._get_primary(scenario.cycle) # To prevent SAME scenario updating out of Context Manager if primary_scenario and primary_scenario != scenario: primary_scenario.is_primary = False # type: ignore scenario.is_primary = True # type: ignore else: raise DoesNotBelongToACycle( f\"Can't set scenario {scenario.id} to primary because it doesn't belong to a cycle.\" ) @classmethod def _tag(cls, scenario: Scenario, tag: str): tags = scenario.properties.get(cls._AUTHORIZED_TAGS_KEY, set()) if len(tags) > 0 and tag not in tags: raise UnauthorizedTagError(f\"Tag 
`{tag}` not authorized by scenario configuration `{scenario.config_id}`\") if scenario.cycle: old_tagged_scenario = cls._get_by_tag(scenario.cycle, tag) if old_tagged_scenario: old_tagged_scenario.remove_tag(tag) cls._set(old_tagged_scenario) scenario._add_tag(tag) cls._set(scenario) Notifier.publish( _make_event(scenario, EventOperation.UPDATE, attribute_name=\"tags\", attribute_value=scenario.tags) ) @classmethod def _untag(cls, scenario: Scenario, tag: str): scenario._remove_tag(tag) cls._set(scenario) Notifier.publish( _make_event(scenario, EventOperation.UPDATE, attribute_name=\"tags\", attribute_value=scenario.tags) ) @classmethod def _compare(cls, *scenarios: Scenario, data_node_config_id: Optional[str] = None): if len(scenarios) < 2: raise InsufficientScenarioToCompare(\"At least two scenarios are required to compare.\") if not all(scenarios[0].config_id == scenario.config_id for scenario in scenarios): raise DifferentScenarioConfigs(\"Scenarios to compare must have the same configuration.\") if scenario_config := _ScenarioManager.__get_config(scenarios[0]): results = {} if data_node_config_id: if data_node_config_id in scenario_config.comparators.keys(): dn_comparators = {data_node_config_id: scenario_config.comparators[data_node_config_id]} else: raise NonExistingComparator(f\"Data node config {data_node_config_id} has no comparator.\") else: dn_comparators = scenario_config.comparators for data_node_config_id, comparators in dn_comparators.items(): data_nodes = [scenario.__getattr__(data_node_config_id).read() for scenario in scenarios] results[data_node_config_id] = { comparator.__name__: comparator(*data_nodes) for comparator in comparators } return results else: raise NonExistingScenarioConfig(scenarios[0].config_id) @staticmethod def __get_config(scenario: Scenario): return Config.scenarios.get(scenario.config_id, None) @classmethod def _is_deletable(cls, scenario: Union[Scenario, ScenarioId]) -> bool: if isinstance(scenario, str): scenario = cls._get(scenario) if scenario.is_primary: if len(cls._get_all_by_cycle(scenario.cycle)) > 1: return False return True @classmethod def _delete(cls, scenario_id: ScenarioId): scenario = cls._get(scenario_id) if not cls._is_deletable(scenario): raise DeletingPrimaryScenario( f\"Scenario {scenario.id}, which has config id {scenario.config_id}, is primary and there are \" f\"other scenarios in the same cycle. \" ) if scenario.is_primary: _CycleManagerFactory._build_manager()._delete(scenario.cycle.id) super()._delete(scenario_id) @classmethod def _hard_delete(cls, scenario_id: ScenarioId): scenario = cls._get(scenario_id) if not cls._is_deletable(scenario): raise DeletingPrimaryScenario( f\"Scenario {scenario.id}, which has config id {scenario.config_id}, is primary and there are \" f\"other scenarios in the same cycle. \" ) if scenario.is_primary: _CycleManagerFactory._build_manager()._hard_delete(scenario.cycle.id) else: entity_ids_to_delete = cls._get_children_entity_ids(scenario) entity_ids_to_delete.scenario_ids.add(scenario.id) cls._delete_entities_of_multiple_types(entity_ids_to_delete) @classmethod def _delete_by_version(cls, version_number: str): \"\"\" Deletes scenario by the version number. Check if the cycle is only attached to this scenario, then delete it. 
\"\"\" for scenario in cls._repository._search(\"version\", version_number): if scenario.cycle and len(cls._get_all_by_cycle(scenario.cycle)) == 1: _CycleManagerFactory._build_manager()._delete(scenario.cycle.id) super()._delete(scenario.id) @classmethod def _get_children_entity_ids(cls, scenario: Scenario) -> _EntityIds: entity_ids = _EntityIds() for sequence in scenario.sequences.values(): if sequence.owner_id == scenario.id: entity_ids.sequence_ids.add(sequence.id) for task in scenario.tasks.values(): if task.owner_id == scenario.id: entity_ids.task_ids.add(task.id) for data_node in scenario.data_nodes.values(): if data_node.owner_id == scenario.id: entity_ids.data_node_ids.add(data_node.id) jobs = _JobManagerFactory._build_manager()._get_all() for job in jobs: if job.task.id in entity_ids.task_ids: entity_ids.job_ids.add(job.id) submissions = _SubmissionManagerFactory._build_manager()._get_all() submitted_entity_ids = list(entity_ids.scenario_ids.union(entity_ids.sequence_ids, entity_ids.task_ids)) for submission in submissions: if submission.entity_id in submitted_entity_ids: entity_ids.submission_ids.add(submission.id) return entity_ids @classmethod def _get_by_config_id(cls, config_id: str, version_number: Optional[str] = None) -> List[Scenario]: \"\"\" Get all scenarios by its config id. \"\"\" filters = cls._build_filters_with_version(version_number) if not filters: filters = [{}] for fil in filters: fil.update({\"config_id\": config_id}) return cls._repository._load_all(filters) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
from .._repository._sql_repository import _SQLRepository from ._scenario_converter import _ScenarioConverter from ._scenario_model import _ScenarioModel class _ScenarioSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_ScenarioModel, converter=_ScenarioConverter) "} {"text": "from datetime import datetime from typing import Dict, List, Optional, Set, Union from .._repository._abstract_converter import _AbstractConverter from .._version._utils import _migrate_entity from ..common import _utils from ..cycle._cycle_manager_factory import _CycleManagerFactory from ..cycle.cycle import Cycle, CycleId from ..data.data_node import DataNode, DataNodeId from ..scenario._scenario_model import _ScenarioModel from ..scenario.scenario import Scenario from ..task.task import Task, TaskId class _ScenarioConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, scenario: Scenario) -> _ScenarioModel: sequences: Dict[str, Dict[str, Union[List[TaskId], Dict, List]]] = {} for p_name, sequence_data in scenario._sequences.items(): sequences[p_name] = { Scenario._SEQUENCE_TASKS_KEY: [ t.id if isinstance(t, Task) else t for t in sequence_data.get(\"tasks\", []) ], Scenario._SEQUENCE_PROPERTIES_KEY: sequence_data.get(\"properties\", {}), Scenario._SEQUENCE_SUBSCRIBERS_KEY: _utils._fcts_to_dict(sequence_data.get(\"subscribers\", [])), } return _ScenarioModel( id=scenario.id, config_id=scenario.config_id, tasks=[task.id if isinstance(task, Task) else TaskId(str(task)) for task in list(scenario._tasks)], additional_data_nodes=[ dn.id if isinstance(dn, DataNode) else DataNodeId(str(dn)) for dn in list(scenario._additional_data_nodes) ], properties=scenario._properties.data, creation_date=scenario._creation_date.isoformat(), primary_scenario=scenario._primary_scenario, subscribers=_utils._fcts_to_dict(scenario._subscribers), tags=list(scenario._tags), version=scenario._version, cycle=scenario._cycle.id if scenario._cycle else None, sequences=sequences if sequences else None, ) @classmethod def _model_to_entity(cls, model: _ScenarioModel) -> Scenario: tasks: Union[Set[TaskId], Set[Task], Set] = set() if model.tasks: tasks = set(model.tasks) if model.sequences: for sequence_name, sequence_data in model.sequences.items(): if subscribers := sequence_data.get(Scenario._SEQUENCE_SUBSCRIBERS_KEY): model.sequences[sequence_name][Scenario._SEQUENCE_SUBSCRIBERS_KEY] = [ _utils._Subscriber(_utils._load_fct(it[\"fct_module\"], it[\"fct_name\"]), it[\"fct_params\"]) for it in subscribers ] scenario = Scenario( scenario_id=model.id, config_id=model.config_id, tasks=tasks, additional_data_nodes=set(model.additional_data_nodes), properties=model.properties, creation_date=datetime.fromisoformat(model.creation_date), is_primary=model.primary_scenario, tags=set(model.tags), cycle=cls.__to_cycle(model.cycle), subscribers=[ _utils._Subscriber(_utils._load_fct(it[\"fct_module\"], it[\"fct_name\"]), it[\"fct_params\"]) for it in model.subscribers ], version=model.version, sequences=model.sequences, ) return _migrate_entity(scenario) @staticmethod def __to_cycle(cycle_id: Optional[CycleId] = None) -> Optional[Cycle]: return _CycleManagerFactory._build_manager()._get(cycle_id) if cycle_id else None "} {"text": "from typing import NewType ScenarioId = NewType(\"ScenarioId\", str) ScenarioId.__doc__ = \"\"\"Type that holds a `Scenario^` identifier.\"\"\" "} {"text": "import itertools import uuid from datetime import datetime from multiprocessing import Lock from queue import Queue from time import sleep 
from typing import Callable, Iterable, List, Optional, Set, Union from taipy.config.config import Config from taipy.logger._taipy_logger import _TaipyLogger from .._entity.submittable import Submittable from ..data._data_manager_factory import _DataManagerFactory from ..job._job_manager_factory import _JobManagerFactory from ..job.job import Job from ..job.job_id import JobId from ..submission._submission_manager_factory import _SubmissionManagerFactory from ..task.task import Task from ._abstract_orchestrator import _AbstractOrchestrator class _Orchestrator(_AbstractOrchestrator): \"\"\" Handles the functional orchestrating. \"\"\" jobs_to_run: Queue = Queue() blocked_jobs: List = [] lock = Lock() __logger = _TaipyLogger._get_logger() @classmethod def initialize(cls): pass @classmethod def submit( cls, submittable: Submittable, callbacks: Optional[Iterable[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ) -> List[Job]: \"\"\"Submit the given `Scenario^` or `Sequence^` for an execution. Parameters: submittable (Union[SCenario^, Sequence^]): The scenario or sequence to submit for execution. callbacks: The optional list of functions that should be executed on jobs status change. force (bool) : Enforce execution of the scenario's or sequence's tasks even if their output data nodes are cached. wait (bool): Wait for the orchestrated jobs created from the scenario or sequence submission to be finished in asynchronous mode. timeout (Union[float, int]): The optional maximum number of seconds to wait for the jobs to be finished before returning. Returns: The created Jobs. \"\"\" submission = _SubmissionManagerFactory._build_manager()._create(submittable.id) # type: ignore jobs = [] tasks = submittable._get_sorted_tasks() with cls.lock: for ts in tasks: for task in ts: jobs.append( cls._lock_dn_output_and_create_job( task, submission.id, submission.entity_id, callbacks=itertools.chain([submission._update_submission_status], callbacks or []), force=force, # type: ignore ) ) submission.jobs = jobs # type: ignore cls._orchestrate_job_to_run_or_block(jobs) if Config.job_config.is_development: cls._check_and_execute_jobs_if_development_mode() else: if wait: cls.__wait_until_job_finished(jobs, timeout=timeout) return jobs @classmethod def submit_task( cls, task: Task, callbacks: Optional[Iterable[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ) -> Job: \"\"\"Submit the given `Task^` for an execution. Parameters: task (Task^): The task to submit for execution. submit_id (str): The optional id to differentiate each submission. callbacks: The optional list of functions that should be executed on job status change. force (bool): Enforce execution of the task even if its output data nodes are cached. wait (bool): Wait for the orchestrated job created from the task submission to be finished in asynchronous mode. timeout (Union[float, int]): The optional maximum number of seconds to wait for the job to be finished before returning. Returns: The created `Job^`. 
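# Hedged usage sketch (`scenario` is an assumed, already-created entity): a scenario or
# sequence submission is expected to route to _Orchestrator.submit above. One Job is
# created per task; jobs whose inputs are not ready yet become BLOCKED, the others are
# queued as PENDING.
jobs = scenario.submit(wait=True, timeout=60)  # wait up to 60s in asynchronous modes
for job in jobs:
    print(job.id, job.status)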
\"\"\" submission = _SubmissionManagerFactory._build_manager()._create(task.id) submit_id = submission.id with cls.lock: job = cls._lock_dn_output_and_create_job( task, submit_id, submission.entity_id, itertools.chain([submission._update_submission_status], callbacks or []), force, ) jobs = [job] submission.jobs = jobs # type: ignore cls._orchestrate_job_to_run_or_block(jobs) if Config.job_config.is_development: cls._check_and_execute_jobs_if_development_mode() else: if wait: cls.__wait_until_job_finished(job, timeout=timeout) return job @classmethod def _lock_dn_output_and_create_job( cls, task: Task, submit_id: str, submit_entity_id: str, callbacks: Optional[Iterable[Callable]] = None, force: bool = False, ) -> Job: for dn in task.output.values(): dn.lock_edit() job = _JobManagerFactory._build_manager()._create( task, itertools.chain([cls._on_status_change], callbacks or []), submit_id, submit_entity_id, force=force ) return job @classmethod def _orchestrate_job_to_run_or_block(cls, jobs: List[Job]): blocked_jobs = [] pending_jobs = [] for job in jobs: if cls._is_blocked(job): job.blocked() blocked_jobs.append(job) else: job.pending() pending_jobs.append(job) cls.blocked_jobs.extend(blocked_jobs) for job in pending_jobs: cls.jobs_to_run.put(job) @classmethod def __wait_until_job_finished(cls, jobs: Union[List[Job], Job], timeout: Optional[Union[float, int]] = None): def __check_if_timeout(start, timeout): if timeout: return (datetime.now() - start).seconds < timeout return True start = datetime.now() jobs = jobs if isinstance(jobs, Iterable) else [jobs] index = 0 while __check_if_timeout(start, timeout) and index < len(jobs): try: if jobs[index]._is_finished(): index = index + 1 else: sleep(0.5) # Limit CPU usage except Exception: pass @classmethod def _is_blocked(cls, obj: Union[Task, Job]) -> bool: \"\"\"Returns True if the execution of the `Job^` or the `Task^` is blocked by the execution of another `Job^`. Parameters: obj (Union[Task^, Job^]): The job or task entity to run. Returns: True if one of its input data nodes is blocked. \"\"\" input_data_nodes = obj.task.input.values() if isinstance(obj, Job) else obj.input.values() data_manager = _DataManagerFactory._build_manager() return any(not data_manager._get(dn.id).is_ready_for_reading for dn in input_data_nodes) @staticmethod def _unlock_edit_on_jobs_outputs(jobs: Union[Job, List[Job], Set[Job]]): jobs = [jobs] if isinstance(jobs, Job) else jobs for job in jobs: job._unlock_edit_on_outputs() @classmethod def _on_status_change(cls, job: Job): if job.is_completed() or job.is_skipped(): cls.__unblock_jobs() elif job.is_failed(): print(f\"\\nJob {job.id} failed, abandoning subsequent jobs.\\n\") cls._fail_subsequent_jobs(job) @classmethod def __unblock_jobs(cls): for job in cls.blocked_jobs: if not cls._is_blocked(job): with cls.lock: job.pending() cls.__remove_blocked_job(job) cls.jobs_to_run.put(job) @classmethod def __remove_blocked_job(cls, job): try: # In case the job has been removed from the list of blocked_jobs. 
cls.blocked_jobs.remove(job) except Exception: cls.__logger.warning(f\"{job.id} is not in the blocked list anymore.\") @classmethod def cancel_job(cls, job: Job): if job.is_canceled(): cls.__logger.info(f\"{job.id} has already been canceled.\") elif job.is_abandoned(): cls.__logger.info(f\"{job.id} has already been abandoned and cannot be canceled.\") elif job.is_failed(): cls.__logger.info(f\"{job.id} has already failed and cannot be canceled.\") else: with cls.lock: to_cancel_or_abandon_jobs = set([job]) to_cancel_or_abandon_jobs.update(cls.__find_subsequent_jobs(job.submit_id, set(job.task.output.keys()))) cls.__remove_blocked_jobs(to_cancel_or_abandon_jobs) cls.__remove_jobs_to_run(to_cancel_or_abandon_jobs) cls._cancel_jobs(job.id, to_cancel_or_abandon_jobs) cls._unlock_edit_on_jobs_outputs(to_cancel_or_abandon_jobs) @classmethod def __find_subsequent_jobs(cls, submit_id, output_dn_config_ids: Set) -> Set[Job]: next_output_dn_config_ids = set() subsequent_jobs = set() for job in cls.blocked_jobs: job_input_dn_config_ids = job.task.input.keys() if job.submit_id == submit_id and len(output_dn_config_ids.intersection(job_input_dn_config_ids)) > 0: next_output_dn_config_ids.update(job.task.output.keys()) subsequent_jobs.update([job]) if len(next_output_dn_config_ids) > 0: subsequent_jobs.update( cls.__find_subsequent_jobs(submit_id, output_dn_config_ids=next_output_dn_config_ids) ) return subsequent_jobs @classmethod def __remove_blocked_jobs(cls, jobs): for job in jobs: cls.__remove_blocked_job(job) @classmethod def __remove_jobs_to_run(cls, jobs): new_jobs_to_run: Queue = Queue() while not cls.jobs_to_run.empty(): current_job = cls.jobs_to_run.get() if current_job not in jobs: new_jobs_to_run.put(current_job) cls.jobs_to_run = new_jobs_to_run @classmethod def _fail_subsequent_jobs(cls, failed_job: Job): with cls.lock: to_fail_or_abandon_jobs = set() to_fail_or_abandon_jobs.update( cls.__find_subsequent_jobs(failed_job.submit_id, set(failed_job.task.output.keys())) ) for job in to_fail_or_abandon_jobs: print(f\"Abandoning job: {job.id}\") job.abandoned() to_fail_or_abandon_jobs.update([failed_job]) cls.__remove_blocked_jobs(to_fail_or_abandon_jobs) cls.__remove_jobs_to_run(to_fail_or_abandon_jobs) cls._unlock_edit_on_jobs_outputs(to_fail_or_abandon_jobs) @classmethod def _cancel_jobs(cls, job_id_to_cancel: JobId, jobs: Set[Job]): from ._orchestrator_factory import _OrchestratorFactory for job in jobs: if job.id in _OrchestratorFactory._dispatcher._dispatched_processes.keys(): # type: ignore cls.__logger.info(f\"{job.id} is running and cannot be canceled.\") elif job.is_completed() or job.is_skipped(): cls.__logger.info(f\"{job.id} has already been completed and cannot be canceled.\") elif job.is_skipped(): cls.__logger.info(f\"{job.id} has already been skipped and cannot be canceled.\") else: if job_id_to_cancel == job.id: job.canceled() else: job.abandoned() @staticmethod def _check_and_execute_jobs_if_development_mode(): from ._orchestrator_factory import _OrchestratorFactory if dispatcher := _OrchestratorFactory._dispatcher: dispatcher._execute_jobs_synchronously() "} {"text": "from abc import abstractmethod from typing import Callable, Iterable, List, Optional, Union from ..job.job import Job from ..task.task import Task class _AbstractOrchestrator: \"\"\"Creates, enqueues, and orchestrates jobs as instances of `Job^` class.\"\"\" @classmethod @abstractmethod def initialize(cls): raise NotImplementedError @classmethod @abstractmethod def submit( cls, sequence, callbacks: 
Optional[Iterable[Callable]], force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ) -> List[Job]: raise NotImplementedError @classmethod @abstractmethod def submit_task( cls, task: Task, callbacks: Optional[Iterable[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ) -> Job: raise NotImplementedError @classmethod @abstractmethod def cancel_job(cls, job): raise NotImplementedError "} {"text": "from importlib import util from typing import Optional, Type from taipy.config.config import Config from ..common._utils import _load_fct from ..exceptions.exceptions import ModeNotAvailable, OrchestratorNotBuilt from ._abstract_orchestrator import _AbstractOrchestrator from ._dispatcher import _DevelopmentJobDispatcher, _JobDispatcher, _StandaloneJobDispatcher from ._orchestrator import _Orchestrator class _OrchestratorFactory: _TAIPY_ENTERPRISE_MODULE = \"taipy.enterprise\" _TAIPY_ENTERPRISE_CORE_ORCHESTRATOR_MODULE = _TAIPY_ENTERPRISE_MODULE + \".core._orchestrator._orchestrator\" _TAIPY_ENTERPRISE_CORE_DISPATCHER_MODULE = _TAIPY_ENTERPRISE_MODULE + \".core._orchestrator._dispatcher\" __TAIPY_ENTERPRISE_BUILD_DISPATCHER_METHOD = \"_build_dispatcher\" _orchestrator: Optional[_Orchestrator] = None _dispatcher: Optional[_JobDispatcher] = None @classmethod def _build_orchestrator(cls) -> Type[_AbstractOrchestrator]: if util.find_spec(cls._TAIPY_ENTERPRISE_MODULE) is not None: cls._orchestrator = _load_fct( cls._TAIPY_ENTERPRISE_CORE_ORCHESTRATOR_MODULE, \"Orchestrator\", ) # type: ignore else: cls._orchestrator = _Orchestrator # type: ignore cls._orchestrator.initialize() # type: ignore return cls._orchestrator # type: ignore @classmethod def _build_dispatcher(cls, force_restart=False) -> Optional[_JobDispatcher]: if not cls._orchestrator: raise OrchestratorNotBuilt if Config.job_config.is_standalone: cls.__build_standalone_job_dispatcher(force_restart=force_restart) elif Config.job_config.is_development: cls.__build_development_job_dispatcher() elif util.find_spec(cls._TAIPY_ENTERPRISE_MODULE): cls.__build_enterprise_job_dispatcher(force_restart=force_restart) else: raise ModeNotAvailable(f\"Job mode {Config.job_config.mode} is not available.\") return cls._dispatcher @classmethod def _remove_dispatcher(cls) -> Optional[_JobDispatcher]: if cls._dispatcher is not None and not isinstance(cls._dispatcher, _DevelopmentJobDispatcher): cls._dispatcher.stop() cls._dispatcher = None return cls._dispatcher @classmethod def __build_standalone_job_dispatcher(cls, force_restart=False): if isinstance(cls._dispatcher, _StandaloneJobDispatcher): if force_restart: cls._dispatcher.stop() else: return if util.find_spec(cls._TAIPY_ENTERPRISE_MODULE) is not None: cls._dispatcher = _load_fct( cls._TAIPY_ENTERPRISE_CORE_DISPATCHER_MODULE, cls.__TAIPY_ENTERPRISE_BUILD_DISPATCHER_METHOD )(cls._orchestrator) else: cls._dispatcher = _StandaloneJobDispatcher(cls._orchestrator) # type: ignore cls._dispatcher.start() # type: ignore @classmethod def __build_development_job_dispatcher(cls): if isinstance(cls._dispatcher, _StandaloneJobDispatcher): cls._dispatcher.stop() cls._dispatcher = _DevelopmentJobDispatcher(cls._orchestrator) # type: ignore @classmethod def __build_enterprise_job_dispatcher(cls, force_restart=False): cls._dispatcher = _load_fct( cls._TAIPY_ENTERPRISE_CORE_DISPATCHER_MODULE, cls.__TAIPY_ENTERPRISE_BUILD_DISPATCHER_METHOD )(cls._orchestrator, force_restart) if cls._dispatcher: cls._dispatcher.start() else: raise 
ModeNotAvailable(f\"Job mode {Config.job_config.mode} is not available.\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from ._development_job_dispatcher import _DevelopmentJobDispatcher from ._job_dispatcher import _JobDispatcher from ._standalone_job_dispatcher import _StandaloneJobDispatcher "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from typing import Optional from ...job.job import Job from .._abstract_orchestrator import _AbstractOrchestrator from ._job_dispatcher import _JobDispatcher class _DevelopmentJobDispatcher(_JobDispatcher): \"\"\"Manages job dispatching (instances of `Job^` class) in a synchronous way.\"\"\" def __init__(self, orchestrator: Optional[_AbstractOrchestrator]): super().__init__(orchestrator) def start(self): raise NotImplementedError def is_running(self) -> bool: return True def stop(self): raise NotImplementedError def run(self): raise NotImplementedError def _dispatch(self, job: Job): \"\"\"Dispatches the given `Job^` on an available worker for execution. Parameters: job (Job^): The job to submit on an executor with an available worker. \"\"\" rs = self._wrapped_function(job.id, job.task) self._update_job_status(job, rs) "} {"text": "from concurrent.futures import ProcessPoolExecutor from functools import partial from typing import Optional from taipy.config._serializer._toml_serializer import _TomlSerializer from taipy.config.config import Config from ...job.job import Job from .._abstract_orchestrator import _AbstractOrchestrator from ._job_dispatcher import _JobDispatcher class _StandaloneJobDispatcher(_JobDispatcher): \"\"\"Manages job dispatching (instances of `Job^` class) in an asynchronous way using a ProcessPoolExecutor.\"\"\" def __init__(self, orchestrator: Optional[_AbstractOrchestrator]): super().__init__(orchestrator) self._executor = ProcessPoolExecutor(Config.job_config.max_nb_of_workers or 1) # type: ignore self._nb_available_workers = self._executor._max_workers # type: ignore def _dispatch(self, job: Job): \"\"\"Dispatches the given `Job^` on an available worker for execution. Parameters: job (Job^): The job to submit on an executor with an available worker. 
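# Hedged configuration sketch: the dispatcher in use depends on the job execution mode.
# "development" executes jobs synchronously in the calling process
# (_DevelopmentJobDispatcher); "standalone" dispatches them to a ProcessPoolExecutor
# (_StandaloneJobDispatcher) sized by max_nb_of_workers.
from taipy import Config

Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)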
\"\"\" self._nb_available_workers -= 1 config_as_string = _TomlSerializer()._serialize(Config._applied_config) future = self._executor.submit(self._wrapped_function_with_config_load, config_as_string, job.id, job.task) self._set_dispatched_processes(job.id, future) # type: ignore future.add_done_callback(self._release_worker) future.add_done_callback(partial(self._update_job_status_from_future, job)) def _release_worker(self, _): self._nb_available_workers += 1 def _update_job_status_from_future(self, job: Job, ft): self._pop_dispatched_process(job.id) # type: ignore self._update_job_status(job, ft.result()) "} {"text": "import threading from abc import abstractmethod from typing import Dict, Optional from taipy.config.config import Config from taipy.logger._taipy_logger import _TaipyLogger from ...data._data_manager_factory import _DataManagerFactory from ...job._job_manager_factory import _JobManagerFactory from ...job.job import Job from ...task.task import Task from .._abstract_orchestrator import _AbstractOrchestrator from ._task_function_wrapper import _TaskFunctionWrapper class _JobDispatcher(threading.Thread, _TaskFunctionWrapper): \"\"\"Manages job dispatching (instances of `Job^` class) on executors.\"\"\" _STOP_FLAG = False _dispatched_processes: Dict = {} __logger = _TaipyLogger._get_logger() _nb_available_workers: int = 1 def __init__(self, orchestrator: Optional[_AbstractOrchestrator]): threading.Thread.__init__(self, name=\"Thread-Taipy-JobDispatcher\") self.daemon = True self.orchestrator = orchestrator self.lock = self.orchestrator.lock # type: ignore Config.block_update() def start(self): \"\"\"Start the dispatcher\"\"\" threading.Thread.start(self) def is_running(self) -> bool: \"\"\"Return True if the dispatcher is running\"\"\" return self.is_alive() def stop(self): \"\"\"Stop the dispatcher\"\"\" self._STOP_FLAG = True def run(self): _TaipyLogger._get_logger().info(\"Start job dispatcher...\") while not self._STOP_FLAG: try: if self._can_execute(): with self.lock: job = self.orchestrator.jobs_to_run.get(block=True, timeout=0.1) self._execute_job(job) except Exception: # In case the last job of the queue has been removed. pass def _can_execute(self) -> bool: \"\"\"Returns True if the dispatcher have resources to execute a new job.\"\"\" return self._nb_available_workers > 0 def _execute_job(self, job: Job): if job.force or self._needs_to_run(job.task): if job.force: self.__logger.info(f\"job {job.id} is forced to be executed.\") job.running() self._dispatch(job) else: job._unlock_edit_on_outputs() job.skipped() self.__logger.info(f\"job {job.id} is skipped.\") def _execute_jobs_synchronously(self): while not self.orchestrator.jobs_to_run.empty(): with self.lock: try: job = self.orchestrator.jobs_to_run.get() except Exception: # In case the last job of the queue has been removed. self.__logger.warning(f\"{job.id} is no longer in the list of jobs to run.\") self._execute_job(job) @staticmethod def _needs_to_run(task: Task) -> bool: \"\"\" Returns True if the task has no output or if at least one input was modified since the latest run. Parameters: task (Task^): The task to run. Returns: True if the task needs to run. False otherwise. 
\"\"\" if not task.skippable: return True data_manager = _DataManagerFactory._build_manager() if len(task.output) == 0: return True are_outputs_in_cache = all(data_manager._get(dn.id).is_valid for dn in task.output.values()) if not are_outputs_in_cache: return True if len(task.input) == 0: return False input_last_edit = max(data_manager._get(dn.id).last_edit_date for dn in task.input.values()) output_last_edit = min(data_manager._get(dn.id).last_edit_date for dn in task.output.values()) return input_last_edit > output_last_edit @abstractmethod def _dispatch(self, job: Job): \"\"\" Dispatches the given `Job^` on an available worker for execution. Parameters: job (Job^): The job to submit on an executor with an available worker. \"\"\" raise NotImplementedError @staticmethod def _update_job_status(job: Job, exceptions): job.update_status(exceptions) _JobManagerFactory._build_manager()._set(job) @classmethod def _set_dispatched_processes(cls, job_id, process): cls._dispatched_processes[job_id] = process @classmethod def _pop_dispatched_process(cls, job_id, default=None): return cls._dispatched_processes.pop(job_id, default) # type: ignore "} {"text": "from typing import Any, List from taipy.config._serializer._toml_serializer import _TomlSerializer from taipy.config.config import Config from ...data._data_manager_factory import _DataManagerFactory from ...data.data_node import DataNode from ...exceptions import DataNodeWritingError from ...job.job_id import JobId from ...task.task import Task class _TaskFunctionWrapper: @classmethod def _wrapped_function_with_config_load(cls, config_as_string, job_id: JobId, task: Task): Config._applied_config._update(_TomlSerializer()._deserialize(config_as_string)) Config.block_update() return cls._wrapped_function(job_id, task) @classmethod def _wrapped_function(cls, job_id: JobId, task: Task): try: inputs: List[DataNode] = list(task.input.values()) outputs: List[DataNode] = list(task.output.values()) fct = task.function results = fct(*cls.__read_inputs(inputs)) return cls.__write_data(outputs, results, job_id) except Exception as e: return [e] @classmethod def __read_inputs(cls, inputs: List[DataNode]) -> List[Any]: data_manager = _DataManagerFactory._build_manager() return [data_manager._get(dn.id).read_or_raise() for dn in inputs] @classmethod def __write_data(cls, outputs: List[DataNode], results, job_id: JobId): data_manager = _DataManagerFactory._build_manager() try: if outputs: _results = cls.__extract_results(outputs, results) exceptions = [] for res, dn in zip(_results, outputs): try: data_node = data_manager._get(dn.id) data_node.write(res, job_id=job_id) data_manager._set(data_node) except Exception as e: exceptions.append(DataNodeWritingError(f\"Error writing in datanode id {dn.id}: {e}\")) return exceptions except Exception as e: return [e] @classmethod def __extract_results(cls, outputs: List[DataNode], results: Any) -> List[Any]: _results: List[Any] = [results] if len(outputs) == 1 else results if len(_results) != len(outputs): raise DataNodeWritingError(\"Error: wrong number of result or task output\") return _results "} {"text": "import uuid from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Set, Union from taipy.config.common._template_handler import _TemplateHandler as _tpl from taipy.config.common._validate_id import _validate_id from taipy.config.common.scope import Scope from .._entity._entity import _Entity from .._entity._labeled import _Labeled from .._entity._properties import _Properties from 
.._entity._reload import _Reloader, _self_reload, _self_setter from .._version._version_manager_factory import _VersionManagerFactory from ..data._data_manager_factory import _DataManagerFactory from ..data.data_node import DataNode from ..exceptions.exceptions import NonExistingDataNode from ..notification.event import Event, EventEntityType, EventOperation, _make_event from .task_id import TaskId if TYPE_CHECKING: from ..job.job import Job class Task(_Entity, _Labeled): \"\"\"Hold a user function that will be executed, its parameters and the results. A `Task` brings together the user code as function, the inputs and the outputs as data nodes (instances of the `DataNode^` class). Attributes: config_id (str): The identifier of the `TaskConfig^`. properties (dict[str, Any]): A dictionary of additional properties. function (callable): The python function to execute. The _function_ must take as parameter the data referenced by inputs data nodes, and must return the data referenced by outputs data nodes. input (Union[DataNode^, List[DataNode^]]): The list of inputs. output (Union[DataNode^, List[DataNode^]]): The list of outputs. id (str): The unique identifier of the task. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or None. parent_ids (Optional[Set[str]]): The set of identifiers of the parent sequences. version (str): The string indicates the application version of the task to instantiate. If not provided, the latest version is used. skippable (bool): If True, indicates that the task can be skipped if no change has been made on inputs. The default value is _False_. \"\"\" _ID_PREFIX = \"TASK\" __ID_SEPARATOR = \"_\" _MANAGER_NAME = \"task\" def __init__( self, config_id: str, properties: Dict[str, Any], function, input: Optional[Iterable[DataNode]] = None, output: Optional[Iterable[DataNode]] = None, id: Optional[TaskId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, version: Optional[str] = None, skippable: bool = False, ): self.config_id = _validate_id(config_id) self.id = id or TaskId(self.__ID_SEPARATOR.join([self._ID_PREFIX, self.config_id, str(uuid.uuid4())])) self.owner_id = owner_id self._parent_ids = parent_ids or set() self.__input = {dn.config_id: dn for dn in input or []} self.__output = {dn.config_id: dn for dn in output or []} self._function = function self._version = version or _VersionManagerFactory._build_manager()._get_latest_version() self._skippable = skippable self._properties = _Properties(self, **properties) def __hash__(self): return hash(self.id) def __eq__(self, other): return self.id == other.id def __getstate__(self): return vars(self) def __setstate__(self, state): vars(self).update(state) def __getattr__(self, attribute_name): protected_attribute_name = _validate_id(attribute_name) if protected_attribute_name in self._properties: return _tpl._replace_templates(self._properties[protected_attribute_name]) if protected_attribute_name in self.input: return self.input[protected_attribute_name] if protected_attribute_name in self.output: return self.output[protected_attribute_name] raise AttributeError(f\"{attribute_name} is not an attribute of task {self.id}\") @property def properties(self): self._properties = _Reloader()._reload(self._MANAGER_NAME, self)._properties return self._properties def get_parents(self): \"\"\"Get parents of the task.\"\"\" from ... 
import core as tp return tp.get_parents(self) @property # type: ignore @_self_reload(_MANAGER_NAME) def parent_ids(self): return self._parent_ids @property def input(self) -> Dict[str, DataNode]: return self.__input @property def output(self) -> Dict[str, DataNode]: return self.__output @property def data_nodes(self) -> Dict[str, DataNode]: return {**self.input, **self.output} @property # type: ignore @_self_reload(_MANAGER_NAME) def function(self): return self._function @function.setter # type: ignore @_self_setter(_MANAGER_NAME) def function(self, val): self._function = val @property # type: ignore @_self_reload(_MANAGER_NAME) def skippable(self): return self._skippable @skippable.setter # type: ignore @_self_setter(_MANAGER_NAME) def skippable(self, val): self._skippable = val @property def scope(self) -> Scope: \"\"\"Retrieve the lowest scope of the task based on its data nodes. Returns: The lowest scope present in input and output data nodes or GLOBAL if there are either no input or no output. \"\"\" data_nodes = list(self.__input.values()) + list(self.__output.values()) scope = Scope(min(dn.scope for dn in data_nodes)) if len(data_nodes) != 0 else Scope.GLOBAL return scope @property def version(self): return self._version def submit( self, callbacks: Optional[List[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ) -> \"Job\": # noqa \"\"\"Submit the task for execution. Parameters: callbacks (List[Callable]): The list of callable functions to be called on status change. force (bool): Force execution even if the data nodes are in cache. wait (bool): Wait for the orchestrated job created from the task submission to be finished in asynchronous mode. timeout (Union[float, int]): The maximum number of seconds to wait for the job to be finished before returning. Returns: The created `Job^`. \"\"\" from ._task_manager_factory import _TaskManagerFactory return _TaskManagerFactory._build_manager()._submit(self, callbacks, force, wait, timeout) def get_label(self) -> str: \"\"\"Returns the task simple label prefixed by its owner label. Returns: The label of the task as a string. \"\"\" return self._get_label() def get_simple_label(self) -> str: \"\"\"Returns the task simple label. Returns: The simple label of the task as a string. 
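# Hedged end-to-end sketch (function and config ids are illustrative): tasks are normally
# built from a TaskConfig through a scenario rather than instantiated directly, and
# Task.submit above returns the created Job.
import taipy as tp
from taipy import Config

def double(nb: int) -> int:
    return nb * 2

nb_cfg = Config.configure_data_node("nb", default_data=21)
doubled_cfg = Config.configure_data_node("doubled")
double_task_cfg = Config.configure_task("double_task", double, input=[nb_cfg], output=[doubled_cfg])
demo_cfg = Config.configure_scenario("demo", task_configs=[double_task_cfg])

if __name__ == "__main__":
    tp.Core().run()                               # start the orchestrating services
    scenario = tp.create_scenario(demo_cfg)
    job = scenario.double_task.submit(wait=True)  # the task is reachable by its config id
    print(job.status, scenario.doubled.read())    # expected: COMPLETED 42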
\"\"\" return self._get_simple_label() @_make_event.register(Task) def _make_event_for_task( task: Task, operation: EventOperation, /, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ) -> Event: metadata = {\"version\": task.version, \"config_id\": task.config_id, **kwargs} return Event( entity_type=EventEntityType.TASK, entity_id=task.id, operation=operation, attribute_name=attribute_name, attribute_value=attribute_value, metadata=metadata, ) "} {"text": "from typing import Callable, List, Optional, Type, Union from taipy.config import Config from taipy.config.common.scope import Scope from .._entity._entity_ids import _EntityIds from .._manager._manager import _Manager from .._orchestrator._abstract_orchestrator import _AbstractOrchestrator from .._repository._abstract_repository import _AbstractRepository from .._version._version_manager_factory import _VersionManagerFactory from .._version._version_mixin import _VersionMixin from ..common.warn_if_inputs_not_ready import _warn_if_inputs_not_ready from ..config.task_config import TaskConfig from ..cycle.cycle_id import CycleId from ..data._data_manager_factory import _DataManagerFactory from ..exceptions.exceptions import NonExistingTask from ..notification import EventEntityType, EventOperation, Notifier, _make_event from ..scenario.scenario_id import ScenarioId from ..sequence.sequence_id import SequenceId from ..task.task import Task from .task_id import TaskId class _TaskManager(_Manager[Task], _VersionMixin): _ENTITY_NAME = Task.__name__ _repository: _AbstractRepository _EVENT_ENTITY_TYPE = EventEntityType.TASK @classmethod def _orchestrator(cls) -> Type[_AbstractOrchestrator]: from .._orchestrator._orchestrator_factory import _OrchestratorFactory return _OrchestratorFactory._build_orchestrator() @classmethod def _set(cls, task: Task): cls.__save_data_nodes(task.input.values()) cls.__save_data_nodes(task.output.values()) super()._set(task) @classmethod def _bulk_get_or_create( cls, task_configs: List[TaskConfig], cycle_id: Optional[CycleId] = None, scenario_id: Optional[ScenarioId] = None, ) -> List[Task]: data_node_configs = set() for task_config in task_configs: data_node_configs.update([Config.data_nodes[dnc.id] for dnc in task_config.input_configs]) data_node_configs.update([Config.data_nodes[dnc.id] for dnc in task_config.output_configs]) data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create( list(data_node_configs), cycle_id, scenario_id ) tasks_configs_and_owner_id = [] for task_config in task_configs: task_dn_configs = [Config.data_nodes[dnc.id] for dnc in task_config.output_configs] + [ Config.data_nodes[dnc.id] for dnc in task_config.input_configs ] task_config_data_nodes = [data_nodes[dn_config] for dn_config in task_dn_configs] scope = min(dn.scope for dn in task_config_data_nodes) if len(task_config_data_nodes) != 0 else Scope.GLOBAL owner_id: Union[Optional[SequenceId], Optional[ScenarioId], Optional[CycleId]] if scope == Scope.SCENARIO: owner_id = scenario_id elif scope == Scope.CYCLE: owner_id = cycle_id else: owner_id = None tasks_configs_and_owner_id.append((task_config, owner_id)) tasks_by_config = cls._repository._get_by_configs_and_owner_ids( # type: ignore tasks_configs_and_owner_id, cls._build_filters_with_version(None) ) tasks = [] for task_config, owner_id in tasks_configs_and_owner_id: if task := tasks_by_config.get((task_config, owner_id)): tasks.append(task) else: version = _VersionManagerFactory._build_manager()._get_latest_version() inputs = [ 
data_nodes[input_config] for input_config in [Config.data_nodes[dnc.id] for dnc in task_config.input_configs] ] outputs = [ data_nodes[output_config] for output_config in [Config.data_nodes[dnc.id] for dnc in task_config.output_configs] ] skippable = task_config.skippable task = Task( str(task_config.id), dict(**task_config._properties), task_config.function, inputs, outputs, owner_id=owner_id, parent_ids=set(), version=version, skippable=skippable, ) for dn in set(inputs + outputs): dn._parent_ids.update([task.id]) cls._set(task) Notifier.publish(_make_event(task, EventOperation.CREATION)) tasks.append(task) return tasks @classmethod def _get_all(cls, version_number: Optional[str] = None) -> List[Task]: \"\"\" Returns all entities. \"\"\" filters = cls._build_filters_with_version(version_number) return cls._repository._load_all(filters) @classmethod def __save_data_nodes(cls, data_nodes): data_manager = _DataManagerFactory._build_manager() for i in data_nodes: data_manager._set(i) @classmethod def _hard_delete(cls, task_id: TaskId): task = cls._get(task_id) entity_ids_to_delete = cls._get_children_entity_ids(task) entity_ids_to_delete.task_ids.add(task.id) cls._delete_entities_of_multiple_types(entity_ids_to_delete) @classmethod def _get_children_entity_ids(cls, task: Task): entity_ids = _EntityIds() from ..job._job_manager_factory import _JobManagerFactory from ..submission._submission_manager_factory import _SubmissionManagerFactory jobs = _JobManagerFactory._build_manager()._get_all() for job in jobs: if job.task.id == task.id: entity_ids.job_ids.add(job.id) submissions = _SubmissionManagerFactory._build_manager()._get_all() submitted_entity_ids = list(entity_ids.task_ids) for submission in submissions: if submission.entity_id in submitted_entity_ids: entity_ids.submission_ids.add(submission.id) return entity_ids @classmethod def _is_submittable(cls, task: Union[Task, TaskId]) -> bool: if isinstance(task, str): task = cls._get(task) return isinstance(task, Task) and all(input_dn.is_ready_for_reading for input_dn in task.input.values()) @classmethod def _submit( cls, task: Union[TaskId, Task], callbacks: Optional[List[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, check_inputs_are_ready: bool = True, ): task_id = task.id if isinstance(task, Task) else task task = cls._get(task_id) if task is None: raise NonExistingTask(task_id) if check_inputs_are_ready: _warn_if_inputs_not_ready(task.input.values()) job = cls._orchestrator().submit_task(task, callbacks=callbacks, force=force, wait=wait, timeout=timeout) Notifier.publish(_make_event(task, EventOperation.SUBMISSION)) return job @classmethod def _get_by_config_id(cls, config_id: str, version_number: Optional[str] = None) -> List[Task]: \"\"\" Get all tasks by its config id. \"\"\" filters = cls._build_filters_with_version(version_number) if not filters: filters = [{}] for fil in filters: fil.update({\"config_id\": config_id}) return cls._repository._load_all(filters) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._task_fs_repository import _TaskFSRepository from ._task_manager import _TaskManager from ._task_sql_repository import _TaskSQLRepository class _TaskManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _TaskFSRepository, \"sql\": _TaskSQLRepository} @classmethod def _build_manager(cls) -> Type[_TaskManager]: # type: ignore if cls._using_enterprise(): task_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".task._task_manager\", \"_TaskManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".task._task_manager_factory\", \"_TaskManagerFactory\" )._build_repository # type: ignore else: task_manager = _TaskManager build_repository = cls._build_repository task_manager._repository = build_repository() # type: ignore return task_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": " from .._repository._abstract_converter import _AbstractConverter from .._version._utils import _migrate_entity from ..common._utils import _load_fct from ..data._data_manager_factory import _DataManagerFactory from ..exceptions import NonExistingDataNode from ..task._task_model import _TaskModel from ..task.task import Task from .task import TaskId class _TaskConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, task: Task) -> _TaskModel: return _TaskModel( id=task.id, owner_id=task.owner_id, parent_ids=list(task._parent_ids), config_id=task.config_id, input_ids=cls.__to_ids(task.input.values()), function_name=task._function.__name__, function_module=task._function.__module__, output_ids=cls.__to_ids(task.output.values()), version=task._version, skippable=task._skippable, properties=task._properties.data.copy(), ) @classmethod def _model_to_entity(cls, model: _TaskModel) -> Task: task = Task( id=TaskId(model.id), owner_id=model.owner_id, parent_ids=set(model.parent_ids), config_id=model.config_id, function=_load_fct(model.function_module, model.function_name), input=cls.__to_data_nodes(model.input_ids), output=cls.__to_data_nodes(model.output_ids), version=model.version, skippable=model.skippable, properties=model.properties, ) return _migrate_entity(task) @staticmethod def __to_ids(data_nodes): return [i.id for i in data_nodes] @staticmethod def __to_data_nodes(data_nodes_ids): data_nodes = [] data_manager = _DataManagerFactory._build_manager() for _id in data_nodes_ids: if data_node := data_manager._get(_id): data_nodes.append(data_node) else: raise NonExistingDataNode(_id) return data_nodes "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
from .._repository._sql_repository import _SQLRepository from ._task_converter import _TaskConverter from ._task_model import _TaskModel class _TaskSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_TaskModel, converter=_TaskConverter) "} {"text": "from dataclasses import dataclass from typing import Any, Dict, List, Optional from sqlalchemy import JSON, Boolean, Column, String, Table from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry @mapper_registry.mapped @dataclass class _TaskModel(_BaseModel): __table__ = Table( \"task\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"owner_id\", String), Column(\"parent_ids\", JSON), Column(\"config_id\", String), Column(\"input_ids\", JSON), Column(\"function_name\", String), Column(\"function_module\", String), Column(\"output_ids\", JSON), Column(\"version\", String), Column(\"skippable\", Boolean), Column(\"properties\", JSON), ) id: str owner_id: Optional[str] parent_ids: List[str] config_id: str input_ids: List[str] function_name: str function_module: str output_ids: List[str] version: str skippable: bool properties: Dict[str, Any] @staticmethod def from_dict(data: Dict[str, Any]): return _TaskModel( id=data[\"id\"], owner_id=data.get(\"owner_id\"), parent_ids=_BaseModel._deserialize_attribute(data.get(\"parent_ids\", [])), config_id=data[\"config_id\"], input_ids=_BaseModel._deserialize_attribute(data[\"input_ids\"]), function_name=data[\"function_name\"], function_module=data[\"function_module\"], output_ids=_BaseModel._deserialize_attribute(data[\"output_ids\"]), version=data[\"version\"], skippable=data[\"skippable\"], properties=_BaseModel._deserialize_attribute(data[\"properties\"] if \"properties\" in data.keys() else {}), ) def to_list(self): return [ self.id, self.owner_id, _BaseModel._serialize_attribute(self.parent_ids), self.config_id, _BaseModel._serialize_attribute(self.input_ids), self.function_name, self.function_module, _BaseModel._serialize_attribute(self.output_ids), self.version, self.skippable, _BaseModel._serialize_attribute(self.properties), ] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
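# Hedged configuration sketch: each manager factory above resolves its repository from a
# map such as {"default": <filesystem repository>, "sql": <SQL repository>}. The backend
# is assumed to be selected through the Core section of the configuration; the exact
# keyword may differ between Taipy versions.
from taipy import Config

Config.configure_core(repository_type="sql")  # assumed keyword; otherwise the filesystem repositories are used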
from .._repository._filesystem_repository import _FileSystemRepository from ._task_converter import _TaskConverter from ._task_model import _TaskModel class _TaskFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_TaskModel, converter=_TaskConverter, dir_name=\"tasks\") "} {"text": "from typing import NewType TaskId = NewType(\"TaskId\", str) TaskId.__doc__ = \"\"\"Type that holds a `Task^` identifier.\"\"\" "} {"text": "from dataclasses import dataclass from typing import Any, Dict, List from sqlalchemy import JSON, Boolean, Column, Enum, String, Table from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry from .job_id import JobId from .status import Status @mapper_registry.mapped @dataclass class _JobModel(_BaseModel): __table__ = Table( \"job\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"task_id\", String), Column(\"status\", Enum(Status)), Column(\"force\", Boolean), Column(\"submit_id\", String), Column(\"submit_entity_id\", String), Column(\"creation_date\", String), Column(\"subscribers\", JSON), Column(\"stacktrace\", JSON), Column(\"version\", String), ) id: JobId task_id: str status: Status force: bool submit_id: str submit_entity_id: str creation_date: str subscribers: List[Dict] stacktrace: List[str] version: str @staticmethod def from_dict(data: Dict[str, Any]): return _JobModel( id=data[\"id\"], task_id=data[\"task_id\"], status=Status._from_repr(data[\"status\"]), force=data[\"force\"], submit_id=data[\"submit_id\"], submit_entity_id=data[\"submit_entity_id\"], creation_date=data[\"creation_date\"], subscribers=_BaseModel._deserialize_attribute(data[\"subscribers\"]), stacktrace=_BaseModel._deserialize_attribute(data[\"stacktrace\"]), version=data[\"version\"], ) def to_list(self): return [ self.id, self.task_id, repr(self.status), self.force, self.submit_id, self.submit_entity_id, self.creation_date, _BaseModel._serialize_attribute(self.subscribers), _BaseModel._serialize_attribute(self.stacktrace), self.version, ] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
from .._repository._filesystem_repository import _FileSystemRepository from ._job_converter import _JobConverter from ._job_model import _JobModel class _JobFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_JobModel, converter=_JobConverter, dir_name=\"jobs\") "} {"text": "__all__ = [\"Job\"] import traceback from datetime import datetime from typing import Any, Callable, List, Optional from taipy.logger._taipy_logger import _TaipyLogger from .._entity._entity import _Entity from .._entity._labeled import _Labeled from .._entity._reload import _self_reload, _self_setter from .._version._version_manager_factory import _VersionManagerFactory from ..common._utils import _fcts_to_dict from ..notification.event import Event, EventEntityType, EventOperation, _make_event from ..task.task import Task from .job_id import JobId from .status import Status def _run_callbacks(fn): def __run_callbacks(job): fn(job) for fct in job._subscribers: fct(job) return __run_callbacks class Job(_Entity, _Labeled): \"\"\"Execution of a `Task^`. A job handles the status of the execution, contains the stacktrace of exceptions that were raised during the execution, and notifies subscribers on status change. Attributes: id (str): The identifier of this job. task (Task^): The task of this job. force (bool): Enforce the job's execution whatever the output data nodes are in cache or not. status (Status^): The current status of this job. creation_date (datetime): The date of this job's creation. stacktrace (List[str]): The list of stacktraces of the exceptions raised during the execution. version (str): The string indicates the application version of the job to instantiate. If not provided, the latest version is used. \"\"\" _MANAGER_NAME = \"job\" _ID_PREFIX = \"JOB\" def __init__(self, id: JobId, task: Task, submit_id: str, submit_entity_id: str, force=False, version=None): self.id = id self._task = task self._force = force self._status = Status.SUBMITTED self._creation_date = datetime.now() self._submit_id: str = submit_id self._submit_entity_id: str = submit_entity_id self._subscribers: List[Callable] = [] self._stacktrace: List[str] = [] self.__logger = _TaipyLogger._get_logger() self._version = version or _VersionManagerFactory._build_manager()._get_latest_version() def get_event_context(self): return {\"task_config_id\": self._task.config_id} @property # type: ignore @_self_reload(_MANAGER_NAME) def task(self): return self._task @task.setter # type: ignore @_self_setter(_MANAGER_NAME) def task(self, val): self._task = val @property def owner_id(self) -> str: return self.task.id @property # type: ignore @_self_reload(_MANAGER_NAME) def force(self): return self._force @force.setter # type: ignore @_self_setter(_MANAGER_NAME) def force(self, val): self._force = val @property def submit_id(self): return self._submit_id @property def submit_entity_id(self): return self._submit_entity_id @property # type: ignore def submit_entity(self): from ..taipy import get as tp_get return tp_get(self._submit_entity_id) @property # type: ignore @_self_reload(_MANAGER_NAME) def status(self): return self._status @status.setter # type: ignore @_self_setter(_MANAGER_NAME) def status(self, val): self._status = val @property # type: ignore @_self_reload(_MANAGER_NAME) def creation_date(self): return self._creation_date @creation_date.setter # type: ignore @_self_setter(_MANAGER_NAME) def creation_date(self, val): self._creation_date = val @property def stacktrace(self) -> List[str]: return 
self._stacktrace @property def version(self): return self._version def __contains__(self, task: Task): return self.task.id == task.id def __lt__(self, other): return self.creation_date.timestamp() < other.creation_date.timestamp() def __le__(self, other): return self.creation_date.timestamp() <= other.creation_date.timestamp() def __gt__(self, other): return self.creation_date.timestamp() > other.creation_date.timestamp() def __ge__(self, other): return self.creation_date.timestamp() >= other.creation_date.timestamp() def __eq__(self, other): return self.id == other.id @_run_callbacks def blocked(self): \"\"\"Set the status to _blocked_ and notify subscribers.\"\"\" self.status = Status.BLOCKED @_run_callbacks def pending(self): \"\"\"Set the status to _pending_ and notify subscribers.\"\"\" self.status = Status.PENDING @_run_callbacks def running(self): \"\"\"Set the status to _running_ and notify subscribers.\"\"\" self.status = Status.RUNNING @_run_callbacks def canceled(self): \"\"\"Set the status to _canceled_ and notify subscribers.\"\"\" self.status = Status.CANCELED @_run_callbacks def abandoned(self): \"\"\"Set the status to _abandoned_ and notify subscribers.\"\"\" self.status = Status.ABANDONED @_run_callbacks def failed(self): \"\"\"Set the status to _failed_ and notify subscribers.\"\"\" self.status = Status.FAILED @_run_callbacks def completed(self): \"\"\"Set the status to _completed_ and notify subscribers.\"\"\" self.status = Status.COMPLETED @_run_callbacks def skipped(self): \"\"\"Set the status to _skipped_ and notify subscribers.\"\"\" self.status = Status.SKIPPED def is_failed(self) -> bool: \"\"\"Indicate if the job has failed. Returns: True if the job has failed. \"\"\" return self.status == Status.FAILED def is_blocked(self) -> bool: \"\"\"Indicate if the job is blocked. Returns: True if the job is blocked. \"\"\" return self.status == Status.BLOCKED def is_canceled(self) -> bool: \"\"\"Indicate if the job was canceled. Returns: True if the job was canceled. \"\"\" return self.status == Status.CANCELED def is_abandoned(self) -> bool: \"\"\"Indicate if the job was abandoned. Returns: True if the job was abandoned. \"\"\" return self.status == Status.ABANDONED def is_submitted(self) -> bool: \"\"\"Indicate if the job is submitted. Returns: True if the job is submitted. \"\"\" return self.status == Status.SUBMITTED def is_completed(self) -> bool: \"\"\"Indicate if the job has completed. Returns: True if the job has completed. \"\"\" return self.status == Status.COMPLETED def is_skipped(self) -> bool: \"\"\"Indicate if the job was skipped. Returns: True if the job was skipped. \"\"\" return self.status == Status.SKIPPED def is_running(self) -> bool: \"\"\"Indicate if the job is running. Returns: True if the job is running. \"\"\" return self.status == Status.RUNNING def is_pending(self) -> bool: \"\"\"Indicate if the job is pending. Returns: True if the job is pending. \"\"\" return self.status == Status.PENDING def is_finished(self) -> bool: \"\"\"Indicate if the job is finished. Returns: True if the job is finished. \"\"\" return self.is_completed() or self.is_failed() or self.is_canceled() or self.is_skipped() or self.is_abandoned() def _is_finished(self) -> bool: \"\"\"Indicate if the job is finished. This function will not triggered the persistency feature like is_finished(). Returns: True if the job is finished. 
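# Hedged usage sketch (`scenario` is an assumed entity with at least one task): callbacks
# passed at submission time are registered through Job._on_status_change and invoked on
# every status transition; the is_* helpers above expose the resulting state.
def log_status(job):
    print(f"{job.id} -> {job.status}")

jobs = scenario.submit(callbacks=[log_status])
if jobs and jobs[0].is_failed():
    print("\n".join(jobs[0].stacktrace))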
\"\"\" return self._status in [Status.COMPLETED, Status.FAILED, Status.CANCELED, Status.SKIPPED, Status.ABANDONED] def _on_status_change(self, *functions): \"\"\"Get a notification when the status of the job changes. Job are assigned different statuses (_submitted_, _pending_, etc.) before being finished. You can be triggered on each change through this function except for the _submitted_ status. Parameters: functions: Callables that will be called on each status change. \"\"\" functions = list(functions) function = functions.pop() self._subscribers.append(function) if self.status != Status.SUBMITTED: function(self) if functions: self._on_status_change(*functions) def update_status(self, exceptions): \"\"\"Update the job status based on the success or the failure of its execution.\"\"\" if exceptions: self.failed() self.__logger.error(f\" {len(exceptions)} errors occurred during execution of job {self.id}\") for e in exceptions: st = \"\".join(traceback.format_exception(type(e), value=e, tb=e.__traceback__)) self._stacktrace.append(st) self.__logger.error(st) else: self.completed() self.__logger.info(f\"job {self.id} is completed.\") def __hash__(self): return hash(self.id) def _unlock_edit_on_outputs(self): for dn in self.task.output.values(): dn.unlock_edit() @staticmethod def _serialize_subscribers(subscribers: List) -> List: return _fcts_to_dict(subscribers) def get_label(self) -> str: \"\"\"Returns the job simple label prefixed by its owner label. Returns: The label of the job as a string. \"\"\" return self._get_label() def get_simple_label(self) -> str: \"\"\"Returns the job simple label. Returns: The simple label of the job as a string. \"\"\" return self._get_simple_label() def is_deletable(self) -> bool: \"\"\"Indicate if the job can be deleted. Returns: True if the job can be deleted. False otherwise. \"\"\" from ... import core as tp return tp.is_deletable(self) @_make_event.register(Job) def _make_event_for_job( job: Job, operation: EventOperation, /, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ) -> Event: metadata = {\"creation_date\": job.creation_date, \"task_config_id\": job._task.config_id} return Event( entity_type=EventEntityType.JOB, entity_id=job.id, operation=operation, attribute_name=attribute_name, attribute_value=attribute_value, metadata={**metadata, **kwargs}, ) "} {"text": "import uuid from typing import Callable, Iterable, List, Optional, Union from .._manager._manager import _Manager from .._repository._abstract_repository import _AbstractRepository from .._version._version_manager_factory import _VersionManagerFactory from .._version._version_mixin import _VersionMixin from ..exceptions.exceptions import JobNotDeletedException from ..notification import EventEntityType, EventOperation, Notifier, _make_event from ..task.task import Task from .job import Job from .job_id import JobId class _JobManager(_Manager[Job], _VersionMixin): _ENTITY_NAME = Job.__name__ _ID_PREFIX = \"JOB_\" _repository: _AbstractRepository _EVENT_ENTITY_TYPE = EventEntityType.JOB @classmethod def _get_all(cls, version_number: Optional[str] = None) -> List[Job]: \"\"\" Returns all entities. 
\"\"\" filters = cls._build_filters_with_version(version_number) return cls._repository._load_all(filters) @classmethod def _create( cls, task: Task, callbacks: Iterable[Callable], submit_id: str, submit_entity_id: str, force=False ) -> Job: version = _VersionManagerFactory._build_manager()._get_latest_version() job = Job( id=JobId(f\"{Job._ID_PREFIX}_{task.config_id}_{uuid.uuid4()}\"), task=task, submit_id=submit_id, submit_entity_id=submit_entity_id, force=force, version=version, ) cls._set(job) Notifier.publish(_make_event(job, EventOperation.CREATION)) job._on_status_change(*callbacks) return job @classmethod def _delete(cls, job: Job, force=False): if job.is_finished() or force: super()._delete(job.id) from .._orchestrator._dispatcher._job_dispatcher import _JobDispatcher _JobDispatcher._pop_dispatched_process(job.id) else: err = JobNotDeletedException(job.id) cls._logger.warning(err) raise err @classmethod def _cancel(cls, job: Union[str, Job]): job = cls._get(job) if isinstance(job, str) else job from .._orchestrator._orchestrator_factory import _OrchestratorFactory _OrchestratorFactory._build_orchestrator().cancel_job(job) @classmethod def _get_latest(cls, task: Task) -> Optional[Job]: jobs_of_task = list(filter(lambda job: task in job, cls._get_all())) if len(jobs_of_task) == 0: return None if len(jobs_of_task) == 1: return jobs_of_task[0] else: return max(jobs_of_task) @classmethod def _is_deletable(cls, job: Union[Job, JobId]) -> bool: if isinstance(job, str): job = cls._get(job) if job.is_finished(): return True return False "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from typing import NewType JobId = NewType(\"JobId\", str) JobId.__doc__ = \"\"\"Type that holds a `Job^` identifier.\"\"\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
from .._repository._sql_repository import _SQLRepository from ._job_converter import _JobConverter from ._job_model import _JobModel class _JobSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_JobModel, converter=_JobConverter) "} {"text": "from datetime import datetime from typing import List from .._repository._abstract_converter import _AbstractConverter from ..common._utils import _fcts_to_dict, _load_fct from ..exceptions import InvalidSubscriber from ..job._job_model import _JobModel from ..job.job import Job from ..task._task_manager_factory import _TaskManagerFactory class _JobConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, job: Job) -> _JobModel: return _JobModel( job.id, job._task.id, job._status, job._force, job.submit_id, job.submit_entity_id, job._creation_date.isoformat(), cls.__serialize_subscribers(job._subscribers), job._stacktrace, version=job._version, ) @classmethod def _model_to_entity(cls, model: _JobModel) -> Job: task_manager = _TaskManagerFactory._build_manager() task_repository = task_manager._repository job = Job( id=model.id, task=task_repository._load(model.task_id), submit_id=model.submit_id, submit_entity_id=model.submit_entity_id, version=model.version, ) job._status = model.status # type: ignore job._force = model.force # type: ignore job._creation_date = datetime.fromisoformat(model.creation_date) # type: ignore for it in model.subscribers: try: fct_module, fct_name = it.get(\"fct_module\"), it.get(\"fct_name\") job._subscribers.append(_load_fct(fct_module, fct_name)) # type: ignore except AttributeError: raise InvalidSubscriber(f\"The subscriber function {it.get('fct_name')} cannot be loaded.\") job._stacktrace = model.stacktrace return job @staticmethod def __serialize_subscribers(subscribers: List) -> List: return _fcts_to_dict(subscribers) "} {"text": "from ..common._repr_enum import _ReprEnum class Status(_ReprEnum): \"\"\"Execution status of a `Job^`. It is implemented as an enumeration. The possible values are: - `SUBMITTED`: A `SUBMITTED` job has been submitted for execution but not processed yet by the orchestrator. - `PENDING`: A `PENDING` job has been enqueued by the orchestrator. It is waiting for an executor to be available for its execution. - `BLOCKED`: A `BLOCKED` job has been blocked because its input data nodes are not ready yet. It is waiting for the completion of another `Job^` - `RUNNING`: A `RUNNING` job is currently executed by a dedicated executor. - `CANCELED`: A `CANCELED` job has been submitted but its execution has been canceled. - `FAILED`: A `FAILED` job raised an exception during its execution. - `COMPLETED`: A `COMPLETED` job has successfully been executed. - `SKIPPED`: A `SKIPPED` job has not been executed because its outputs were already computed. - `ABANDONED`: An `ABANDONED` job has not been executed because it depends on a job that could not complete ( cancelled, failed, or abandoned). 
\"\"\" SUBMITTED = 1 BLOCKED = 2 PENDING = 3 RUNNING = 4 CANCELED = 5 FAILED = 6 COMPLETED = 7 SKIPPED = 8 ABANDONED = 9 "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._job_fs_repository import _JobFSRepository from ._job_manager import _JobManager from ._job_sql_repository import _JobSQLRepository class _JobManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _JobFSRepository, \"sql\": _JobSQLRepository} @classmethod def _build_manager(cls) -> Type[_JobManager]: # type: ignore if cls._using_enterprise(): job_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".job._job_manager\", \"_JobManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".job._job_manager_factory\", \"_JobManagerFactory\" )._build_repository # type: ignore else: job_manager = _JobManager build_repository = cls._build_repository job_manager._repository = build_repository() # type: ignore return job_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "import json import pathlib from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union from .._entity._entity_ids import _EntityIds from .._manager._manager import _Manager from .._version._version_mixin import _VersionMixin from ..common._utils import _Subscriber from ..common.warn_if_inputs_not_ready import _warn_if_inputs_not_ready from ..exceptions.exceptions import ( InvalidSequenceId, ModelNotFound, NonExistingSequence, NonExistingTask, SequenceBelongsToNonExistingScenario, ) from ..job._job_manager_factory import _JobManagerFactory from ..job.job import Job from ..notification import Event, EventEntityType, EventOperation, Notifier from ..notification.event import _make_event from ..scenario._scenario_manager_factory import _ScenarioManagerFactory from ..scenario.scenario import Scenario from ..scenario.scenario_id import ScenarioId from ..submission._submission_manager_factory import _SubmissionManagerFactory from ..task._task_manager_factory import _TaskManagerFactory from ..task.task import Task, TaskId from .sequence import Sequence from .sequence_id import SequenceId class _SequenceManager(_Manager[Sequence], _VersionMixin): _ENTITY_NAME = Sequence.__name__ _EVENT_ENTITY_TYPE = EventEntityType.SEQUENCE _model_name = \"sequences\" @classmethod def _delete(cls, sequence_id: SequenceId): \"\"\" Deletes a Sequence by id. \"\"\" sequence_name, scenario_id = cls._breakdown_sequence_id(sequence_id) if scenario := _ScenarioManagerFactory._build_manager()._get(scenario_id): if sequence_name in scenario._sequences.keys(): scenario.remove_sequences([sequence_name]) if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): Notifier.publish(Event(cls._EVENT_ENTITY_TYPE, EventOperation.DELETION, entity_id=sequence_id)) return raise ModelNotFound(cls._model_name, sequence_id) @classmethod def _delete_all(cls): \"\"\" Deletes all Sequences. \"\"\" scenarios = _ScenarioManagerFactory._build_manager()._get_all() for scenario in scenarios: scenario.sequences = {} if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): Notifier.publish(Event(cls._EVENT_ENTITY_TYPE, EventOperation.DELETION, metadata={\"delete_all\": True})) @classmethod def _delete_many(cls, sequence_ids: Iterable[str]): \"\"\" Deletes Sequence entities by a list of Sequence ids. 
\"\"\" scenario_manager = _ScenarioManagerFactory._build_manager() scenario_ids_and_sequence_names_map: Dict[str, List[str]] = {} for sequence in sequence_ids: sequence_id = sequence.id if isinstance(sequence, Sequence) else sequence sequence_name, scenario_id = cls._breakdown_sequence_id(sequence_id) sequences_names = scenario_ids_and_sequence_names_map.get(scenario_id, []) sequences_names.append(sequence_name) scenario_ids_and_sequence_names_map[scenario_id] = sequences_names try: for scenario_id, sequence_names in scenario_ids_and_sequence_names_map.items(): scenario = scenario_manager._get(scenario_id) for sequence_name in sequence_names: del scenario._sequences[sequence_name] scenario_manager._set(scenario) if hasattr(cls, \"_EVENT_ENTITY_TYPE\"): for sequence_id in sequence_ids: Notifier.publish(Event(cls._EVENT_ENTITY_TYPE, EventOperation.DELETION, entity_id=sequence_id)) except (ModelNotFound, KeyError): cls.__log_error_entity_not_found(sequence_id) raise ModelNotFound(cls._model_name, sequence_id) @classmethod def _delete_by_version(cls, version_number: str): \"\"\" Deletes Sequences by version number. \"\"\" for scenario in _ScenarioManagerFactory()._build_manager()._repository._search(\"version\", version_number): cls._delete_many(scenario.sequences.values()) @classmethod def _hard_delete(cls, sequence_id: SequenceId): sequence = cls._get(sequence_id) entity_ids_to_delete = cls._get_children_entity_ids(sequence) entity_ids_to_delete.sequence_ids.add(sequence.id) cls._delete_entities_of_multiple_types(entity_ids_to_delete) @classmethod def _set(cls, sequence: Sequence): \"\"\" Save or update a Sequence. \"\"\" sequence_name, scenario_id = cls._breakdown_sequence_id(sequence.id) scenario_manager = _ScenarioManagerFactory._build_manager() if scenario := scenario_manager._get(scenario_id): sequence_data = { Scenario._SEQUENCE_TASKS_KEY: sequence._tasks, Scenario._SEQUENCE_SUBSCRIBERS_KEY: sequence._subscribers, Scenario._SEQUENCE_PROPERTIES_KEY: sequence._properties.data, } scenario._sequences[sequence_name] = sequence_data scenario_manager._set(scenario) else: cls._logger.error(f\"Sequence {sequence.id} belongs to a non-existing Scenario {scenario_id}.\") raise SequenceBelongsToNonExistingScenario(sequence.id, scenario_id) @classmethod def _create( cls, sequence_name: str, tasks: Union[List[Task], List[TaskId]], subscribers: Optional[List[_Subscriber]] = None, properties: Optional[Dict] = None, scenario_id: Optional[ScenarioId] = None, version: Optional[str] = None, ) -> Sequence: sequence_id = Sequence._new_id(sequence_name, scenario_id) task_manager = _TaskManagerFactory._build_manager() _tasks: List[Task] = [] for task in tasks: if not isinstance(task, Task): if _task := task_manager._get(task): _tasks.append(_task) else: raise NonExistingTask(task) else: _tasks.append(task) properties = properties if properties else {} properties[\"name\"] = sequence_name version = version if version else cls._get_latest_version() sequence = Sequence( properties=properties, tasks=_tasks, sequence_id=sequence_id, owner_id=scenario_id, parent_ids={scenario_id} if scenario_id else None, subscribers=subscribers, version=version, ) for task in _tasks: if sequence_id not in task._parent_ids: task._parent_ids.update([sequence_id]) task_manager._set(task) return sequence @classmethod def _breakdown_sequence_id(cls, sequence_id: str) -> Tuple[str, str]: try: sequence_name, scenario_id = sequence_id.split(Scenario._ID_PREFIX) scenario_id = f\"{Scenario._ID_PREFIX}{scenario_id}\" sequence_name = 
sequence_name.split(Sequence._ID_PREFIX)[1].strip(\"_\") return sequence_name, scenario_id except (ValueError, IndexError): cls._logger.error(f\"SequenceId {sequence_id} is invalid.\") raise InvalidSequenceId(sequence_id) @classmethod def _get(cls, sequence: Union[str, Sequence], default=None) -> Sequence: \"\"\" Returns a Sequence by id or reference. \"\"\" try: sequence_id = sequence.id if isinstance(sequence, Sequence) else sequence sequence_name, scenario_id = cls._breakdown_sequence_id(sequence_id) scenario_manager = _ScenarioManagerFactory._build_manager() if scenario := scenario_manager._get(scenario_id): if sequence_entity := scenario.sequences.get(sequence_name, None): return sequence_entity cls.__log_error_entity_not_found(sequence_id) return default except (ModelNotFound, InvalidSequenceId): cls.__log_error_entity_not_found(sequence_id) return default @classmethod def _get_all(cls, version_number: Optional[str] = None) -> List[Sequence]: \"\"\" Returns all Sequence entities. \"\"\" sequences = [] scenarios = _ScenarioManagerFactory._build_manager()._get_all(version_number) for scenario in scenarios: sequences.extend(list(scenario.sequences.values())) return sequences @classmethod def _get_all_by(cls, filters: Optional[List[Dict]] = None) -> List[Sequence]: sequences = cls._get_all() if not filters: return sequences filtered_sequences = [] for sequence in sequences: for filter in filters: if all([getattr(sequence, key) == item for key, item in filter.items()]): filtered_sequences.append(sequence) return filtered_sequences @classmethod def _get_children_entity_ids(cls, sequence: Sequence) -> _EntityIds: entity_ids = _EntityIds() for task in sequence.tasks.values(): if not isinstance(task, Task): task = _TaskManagerFactory._build_manager()._get(task) if task.owner_id == sequence.id: entity_ids.task_ids.add(task.id) for data_node in task.data_nodes.values(): if data_node.owner_id == sequence.id: entity_ids.data_node_ids.add(data_node.id) jobs = _JobManagerFactory._build_manager()._get_all() for job in jobs: if job.task.id in entity_ids.task_ids: entity_ids.job_ids.add(job.id) submissions = _SubmissionManagerFactory._build_manager()._get_all() submitted_entity_ids = list(entity_ids.sequence_ids.union(entity_ids.task_ids)) for submission in submissions: if submission.entity_id in submitted_entity_ids: entity_ids.submission_ids.add(submission.id) return entity_ids @classmethod def _subscribe( cls, callback: Callable[[Sequence, Job], None], params: Optional[List[Any]] = None, sequence: Optional[Sequence] = None, ): if sequence is None: sequences = cls._get_all() for pln in sequences: cls.__add_subscriber(callback, params, pln) return cls.__add_subscriber(callback, params, sequence) @classmethod def _unsubscribe( cls, callback: Callable[[Sequence, Job], None], params: Optional[List[Any]] = None, sequence: Optional[Sequence] = None, ): if sequence is None: sequences = cls._get_all() for pln in sequences: cls.__remove_subscriber(callback, params, pln) return cls.__remove_subscriber(callback, params, sequence) @classmethod def __add_subscriber(cls, callback, params, sequence): sequence._add_subscriber(callback, params) Notifier.publish(_make_event(sequence, EventOperation.UPDATE, attribute_name=\"subscribers\")) @classmethod def __remove_subscriber(cls, callback, params, sequence): sequence._remove_subscriber(callback, params) Notifier.publish(_make_event(sequence, EventOperation.UPDATE, attribute_name=\"subscribers\")) @classmethod def _is_submittable(cls, sequence: Union[Sequence, 
SequenceId]) -> bool: if isinstance(sequence, str): sequence = cls._get(sequence) return isinstance(sequence, Sequence) and sequence.is_ready_to_run() @classmethod def _submit( cls, sequence: Union[SequenceId, Sequence], callbacks: Optional[List[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, check_inputs_are_ready: bool = True, ) -> List[Job]: sequence_id = sequence.id if isinstance(sequence, Sequence) else sequence sequence = cls._get(sequence_id) if sequence is None: raise NonExistingSequence(sequence_id) callbacks = callbacks or [] sequence_subscription_callback = cls.__get_status_notifier_callbacks(sequence) + callbacks if check_inputs_are_ready: _warn_if_inputs_not_ready(sequence.get_inputs()) jobs = ( _TaskManagerFactory._build_manager() ._orchestrator() .submit(sequence, callbacks=sequence_subscription_callback, force=force, wait=wait, timeout=timeout) ) Notifier.publish(_make_event(sequence, EventOperation.SUBMISSION)) return jobs @classmethod def _exists(cls, entity_id: str) -> bool: \"\"\" Returns True if the entity id exists. \"\"\" return True if cls._get(entity_id) else False @classmethod def _export(cls, id: str, folder_path: Union[str, pathlib.Path]): \"\"\" Export a Sequence entity. \"\"\" if isinstance(folder_path, str): folder: pathlib.Path = pathlib.Path(folder_path) else: folder = folder_path export_dir = folder / cls._model_name if not export_dir.exists(): export_dir.mkdir(parents=True) export_path = export_dir / f\"{id}.json\" sequence_name, scenario_id = cls._breakdown_sequence_id(id) sequence = {\"id\": id, \"owner_id\": scenario_id, \"parent_ids\": [scenario_id], \"name\": sequence_name} scenario = _ScenarioManagerFactory._build_manager()._get(scenario_id) if sequence_data := scenario._sequences.get(sequence_name, None): sequence.update(sequence_data) with open(export_path, \"w\", encoding=\"utf-8\") as export_file: export_file.write(json.dumps(sequence)) else: raise ModelNotFound(cls._model_name, id) @classmethod def __log_error_entity_not_found(cls, sequence_id: Union[SequenceId, str]): cls._logger.error(f\"{cls._ENTITY_NAME} not found: {str(sequence_id)}\") @staticmethod def __get_status_notifier_callbacks(sequence: Sequence) -> List: return [partial(c.callback, *c.params, sequence) for c in sequence.subscribers] "} {"text": "from typing import NewType SequenceId = NewType(\"SequenceId\", str) SequenceId.__doc__ = \"\"\"Type that holds a `Sequence^` identifier.\"\"\" "} {"text": "from __future__ import annotations from typing import Any, Callable, Dict, List, Optional, Set, Union import networkx as nx from taipy.config.common._template_handler import _TemplateHandler as _tpl from taipy.config.common._validate_id import _validate_id from .._entity._entity import _Entity from .._entity._labeled import _Labeled from .._entity._properties import _Properties from .._entity._reload import _Reloader, _self_reload, _self_setter from .._entity.submittable import Submittable from .._version._version_manager_factory import _VersionManagerFactory from ..common._listattributes import _ListAttributes from ..common._utils import _Subscriber from ..data.data_node import DataNode from ..exceptions.exceptions import NonExistingTask from ..job.job import Job from ..notification.event import Event, EventEntityType, EventOperation, _make_event from ..task.task import Task from ..task.task_id import TaskId from .sequence_id import SequenceId class Sequence(_Entity, Submittable, _Labeled): \"\"\"List of `Task^`s and additional 
attributes representing a set of data processing elements connected as a direct acyclic graph. Attributes: properties (dict[str, Any]): A dictionary of additional properties. tasks (List[Task^]): The list of `Task`s. sequence_id (str): The Unique identifier of the sequence. owner_id (str): The identifier of the owner (scenario_id, cycle_id) or None. parent_ids (Optional[Set[str]]): The set of identifiers of the parent scenarios. version (str): The string indicates the application version of the sequence to instantiate. If not provided, the latest version is used. \"\"\" _ID_PREFIX = \"SEQUENCE\" _SEPARATOR = \"_\" _MANAGER_NAME = \"sequence\" def __init__( self, properties: Dict[str, Any], tasks: Union[List[TaskId], List[Task], List[Union[TaskId, Task]]], sequence_id: SequenceId, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, subscribers: Optional[List[_Subscriber]] = None, version: Optional[str] = None, ): super().__init__(subscribers) self.id: SequenceId = sequence_id self._tasks = tasks self.owner_id = owner_id self._parent_ids = parent_ids or set() self._properties = _Properties(self, **properties) self._version = version or _VersionManagerFactory._build_manager()._get_latest_version() @staticmethod def _new_id(sequence_name: str, scenario_id) -> SequenceId: return SequenceId(Sequence._SEPARATOR.join([Sequence._ID_PREFIX, _validate_id(sequence_name), scenario_id])) def __hash__(self): return hash(self.id) def __eq__(self, other): return self.id == other.id def __getattr__(self, attribute_name): protected_attribute_name = _validate_id(attribute_name) if protected_attribute_name in self._properties: return _tpl._replace_templates(self._properties[protected_attribute_name]) tasks = self._get_tasks() if protected_attribute_name in tasks: return tasks[protected_attribute_name] for task in tasks.values(): if protected_attribute_name in task.input: return task.input[protected_attribute_name] if protected_attribute_name in task.output: return task.output[protected_attribute_name] raise AttributeError(f\"{attribute_name} is not an attribute of sequence {self.id}\") @property # type: ignore @_self_reload(_MANAGER_NAME) def tasks(self) -> Dict[str, Task]: return self._get_tasks() @tasks.setter # type: ignore @_self_setter(_MANAGER_NAME) def tasks(self, tasks: Union[List[TaskId], List[Task]]): self._tasks = tasks @property def data_nodes(self) -> Dict[str, DataNode]: data_nodes = {} list_data_nodes = [task.data_nodes for task in self._get_tasks().values()] for data_node in list_data_nodes: for k, v in data_node.items(): data_nodes[k] = v return data_nodes @property def parent_ids(self): return self._parent_ids @property def version(self): return self._version @property def properties(self): self._properties = _Reloader()._reload(\"sequence\", self)._properties return self._properties def _is_consistent(self) -> bool: dag = self._build_dag() if dag.number_of_nodes() == 0: return True if not nx.is_directed_acyclic_graph(dag): return False if not nx.is_weakly_connected(dag): return False for left_node, right_node in dag.edges: if (isinstance(left_node, DataNode) and isinstance(right_node, Task)) or ( isinstance(left_node, Task) and isinstance(right_node, DataNode) ): continue return False return True def _get_tasks(self) -> Dict[str, Task]: from ..task._task_manager_factory import _TaskManagerFactory tasks = {} task_manager = _TaskManagerFactory._build_manager() for task_or_id in self._tasks: t = task_manager._get(task_or_id, task_or_id) if not isinstance(t, Task): raise 
NonExistingTask(task_or_id) tasks[t.config_id] = t return tasks def _get_set_of_tasks(self) -> Set[Task]: from ..task._task_manager_factory import _TaskManagerFactory tasks = set() task_manager = _TaskManagerFactory._build_manager() for task_or_id in self._tasks: task = task_manager._get(task_or_id, task_or_id) if not isinstance(task, Task): raise NonExistingTask(task_or_id) tasks.add(task) return tasks @property # type: ignore @_self_reload(_MANAGER_NAME) def subscribers(self): return self._subscribers @subscribers.setter # type: ignore @_self_setter(_MANAGER_NAME) def subscribers(self, val): self._subscribers = _ListAttributes(self, val) def get_parents(self): \"\"\"Get parents of the sequence entity\"\"\" from ... import core as tp return tp.get_parents(self) def subscribe( self, callback: Callable[[Sequence, Job], None], params: Optional[List[Any]] = None, ): \"\"\"Subscribe a function to be called on `Job^` status change. The subscription is applied to all jobs created from the sequence's execution. Parameters: callback (Callable[[Sequence^, Job^], None]): The callable function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. Note: Notification will be available only for jobs created after this subscription. \"\"\" from ... import core as tp return tp.subscribe_sequence(callback, params, self) def unsubscribe(self, callback: Callable[[Sequence, Job], None], params: Optional[List[Any]] = None): \"\"\"Unsubscribe a function that is called when the status of a `Job^` changes. Parameters: callback (Callable[[Sequence^, Job^], None]): The callable function to unsubscribe. params (Optional[List[Any]]): The parameters to be passed to the _callback_. Note: The function will continue to be called for ongoing jobs. \"\"\" from ... import core as tp return tp.unsubscribe_sequence(callback, params, self) def submit( self, callbacks: Optional[List[Callable]] = None, force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, ) -> List[Job]: \"\"\"Submit the sequence for execution. All the `Task^`s of the sequence will be submitted for execution. Parameters: callbacks (List[Callable]): The list of callable functions to be called on status change. force (bool): Force execution even if the data nodes are in cache. wait (bool): Wait for the orchestrated jobs created from the sequence submission to be finished in asynchronous mode. timeout (Union[float, int]): The maximum number of seconds to wait for the jobs to be finished before returning. Returns: A list of created `Job^`s. \"\"\" from ._sequence_manager_factory import _SequenceManagerFactory return _SequenceManagerFactory._build_manager()._submit(self, callbacks, force, wait, timeout) def get_label(self) -> str: \"\"\"Returns the sequence simple label prefixed by its owner label. Returns: The label of the sequence as a string. \"\"\" return self._get_label() def get_simple_label(self) -> str: \"\"\"Returns the sequence simple label. Returns: The simple label of the sequence as a string. 
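A hedged sketch of the public `Sequence` surface shown above (`subscribe` and `submit`); the scenario handle and the sequence name "preprocess" are assumptions, while `scenario.sequences` is the mapping the manager code relies on.

# Sketch: submitting a sequence and watching its jobs (scenario and sequence name assumed).
def on_job_update(sequence, job):
    print(f"{sequence.get_simple_label()}: job {job.id} is now {job.status}")

sequence = scenario.sequences["preprocess"]   # sequences are exposed on their owning scenario
sequence.subscribe(on_job_update)             # called on each job status change from now on
jobs = sequence.submit(wait=True)             # submits every task of the sequence and waits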
\"\"\" return self._get_simple_label() @_make_event.register(Sequence) def _make_event_for_sequence( sequence: Sequence, operation: EventOperation, /, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ) -> Event: metadata = {**kwargs} return Event( entity_type=EventEntityType.SEQUENCE, entity_id=sequence.id, operation=operation, attribute_name=attribute_name, attribute_value=attribute_value, metadata=metadata, ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from typing import Dict from .._repository._abstract_converter import _AbstractConverter from ..common import _utils from ..task.task import Task from .sequence import Sequence class _SequenceConverter(_AbstractConverter): _SEQUENCE_MODEL_ID_KEY = \"id\" _SEQUENCE_MODEL_OWNER_ID_KEY = \"owner_id\" _SEQUENCE_MODEL_PARENT_IDS_KEY = \"parent_ids\" _SEQUENCE_MODEL_PROPERTIES_KEY = \"properties\" _SEQUENCE_MODEL_TASKS_KEY = \"tasks\" _SEQUENCE_MODEL_SUBSCRIBERS_KEY = \"subscribers\" _SEQUENCE_MODEL_VERSION_KEY = \"version\" @classmethod def _entity_to_model(cls, sequence: Sequence) -> Dict: return { \"id\": sequence.id, \"owner_id\": sequence.owner_id, \"parent_ids\": list(sequence._parent_ids), \"properties\": sequence._properties.data, \"tasks\": cls.__to_task_ids(sequence._tasks), \"subscribers\": _utils._fcts_to_dict(sequence._subscribers), \"version\": sequence._version, } @staticmethod def __to_task_ids(tasks): return [t.id if isinstance(t, Task) else t for t in tasks] "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._sequence_manager import _SequenceManager class _SequenceManagerFactory(_ManagerFactory): @classmethod def _build_manager(cls) -> Type[_SequenceManager]: # type: ignore if cls._using_enterprise(): sequence_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".sequence._sequence_manager\", \"_SequenceManager\" ) # type: ignore else: sequence_manager = _SequenceManager return sequence_manager # type: ignore "} {"text": "from copy import copy from datetime import datetime, timedelta from pydoc import locate from .._repository._abstract_converter import _AbstractConverter from .._version._utils import _migrate_entity from ..common._utils import _load_fct from ..data._data_model import _DataNodeModel from ..data.data_node import DataNode from . 
import GenericDataNode, JSONDataNode, MongoCollectionDataNode, SQLDataNode class _DataNodeConverter(_AbstractConverter): _READ_FCT_NAME_KEY = \"read_fct_name\" _READ_FCT_MODULE_KEY = \"read_fct_module\" _WRITE_FCT_NAME_KEY = \"write_fct_name\" _WRITE_FCT_MODULE_KEY = \"write_fct_module\" _JSON_ENCODER_NAME_KEY = \"encoder_name\" _JSON_ENCODER_MODULE_KEY = \"encoder_module\" _JSON_DECODER_NAME_KEY = \"decoder_name\" _JSON_DECODER_MODULE_KEY = \"decoder_module\" _EXPOSED_TYPE_KEY = \"exposed_type\" __WRITE_QUERY_BUILDER_NAME_KEY = \"write_query_builder_name\" __WRITE_QUERY_BUILDER_MODULE_KEY = \"write_query_builder_module\" __APPEND_QUERY_BUILDER_NAME_KEY = \"append_query_builder_name\" __APPEND_QUERY_BUILDER_MODULE_KEY = \"append_query_builder_module\" # TODO: This limits the valid string to only the ones provided by the Converter. # While in practice, each data nodes might have different exposed type possibilities. # The previous implementation used tabular datanode but it's no longer suitable so # new proposal is needed. _VALID_STRING_EXPOSED_TYPES = [\"numpy\", \"pandas\", \"modin\"] @classmethod def __serialize_generic_dn_properties(cls, datanode_properties: dict): read_fct = datanode_properties.get(GenericDataNode._OPTIONAL_READ_FUNCTION_PROPERTY, None) datanode_properties[cls._READ_FCT_NAME_KEY] = read_fct.__name__ if read_fct else None datanode_properties[cls._READ_FCT_MODULE_KEY] = read_fct.__module__ if read_fct else None write_fct = datanode_properties.get(GenericDataNode._OPTIONAL_WRITE_FUNCTION_PROPERTY, None) datanode_properties[cls._WRITE_FCT_NAME_KEY] = write_fct.__name__ if write_fct else None datanode_properties[cls._WRITE_FCT_MODULE_KEY] = write_fct.__module__ if write_fct else None del ( datanode_properties[GenericDataNode._OPTIONAL_READ_FUNCTION_PROPERTY], datanode_properties[GenericDataNode._OPTIONAL_WRITE_FUNCTION_PROPERTY], ) return datanode_properties @classmethod def __serialize_json_dn_properties(cls, datanode_properties: dict): encoder = datanode_properties.get(JSONDataNode._ENCODER_KEY) datanode_properties[cls._JSON_ENCODER_NAME_KEY] = encoder.__name__ if encoder else None datanode_properties[cls._JSON_ENCODER_MODULE_KEY] = encoder.__module__ if encoder else None datanode_properties.pop(JSONDataNode._ENCODER_KEY, None) decoder = datanode_properties.get(JSONDataNode._DECODER_KEY) datanode_properties[cls._JSON_DECODER_NAME_KEY] = decoder.__name__ if decoder else None datanode_properties[cls._JSON_DECODER_MODULE_KEY] = decoder.__module__ if decoder else None datanode_properties.pop(JSONDataNode._DECODER_KEY, None) return datanode_properties @classmethod def __serialize_sql_dn_properties(cls, datanode_properties: dict) -> dict: write_qb = datanode_properties.get(SQLDataNode._WRITE_QUERY_BUILDER_KEY) datanode_properties[cls.__WRITE_QUERY_BUILDER_NAME_KEY] = write_qb.__name__ if write_qb else None datanode_properties[cls.__WRITE_QUERY_BUILDER_MODULE_KEY] = write_qb.__module__ if write_qb else None datanode_properties.pop(SQLDataNode._WRITE_QUERY_BUILDER_KEY, None) append_qb = datanode_properties.get(SQLDataNode._APPEND_QUERY_BUILDER_KEY) datanode_properties[cls.__APPEND_QUERY_BUILDER_NAME_KEY] = append_qb.__name__ if append_qb else None datanode_properties[cls.__APPEND_QUERY_BUILDER_MODULE_KEY] = append_qb.__module__ if append_qb else None datanode_properties.pop(SQLDataNode._APPEND_QUERY_BUILDER_KEY, None) return datanode_properties @classmethod def __serialize_mongo_collection_dn_model_properties(cls, datanode_properties: dict) -> dict: if 
MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY in datanode_properties.keys(): datanode_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY] = ( f\"{datanode_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY].__module__}.\" f\"{datanode_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY].__qualname__}\" ) return datanode_properties @classmethod def __serialize_edits(cls, edits): new_edits = [] for edit in edits: new_edit = edit.copy() if timestamp := new_edit.get(\"timestamp\", None): new_edit[\"timestamp\"] = timestamp.isoformat() else: new_edit[\"timestamp\"] = datetime.now().isoformat() new_edits.append(new_edit) return new_edits @staticmethod def __serialize_exposed_type(properties: dict, exposed_type_key: str, valid_str_exposed_types) -> dict: if not isinstance(properties[exposed_type_key], str): if isinstance(properties[exposed_type_key], dict): properties[exposed_type_key] = { k: v if v in valid_str_exposed_types else f\"{v.__module__}.{v.__qualname__}\" for k, v in properties[exposed_type_key].items() } elif isinstance(properties[exposed_type_key], list): properties[exposed_type_key] = [ v if v in valid_str_exposed_types else f\"{v.__module__}.{v.__qualname__}\" for v in properties[exposed_type_key] ] else: properties[ exposed_type_key ] = f\"{properties[exposed_type_key].__module__}.{properties[exposed_type_key].__qualname__}\" return properties @classmethod def _entity_to_model(cls, data_node: DataNode) -> _DataNodeModel: properties = data_node._properties.data.copy() if data_node.storage_type() == GenericDataNode.storage_type(): properties = cls.__serialize_generic_dn_properties(properties) if data_node.storage_type() == JSONDataNode.storage_type(): properties = cls.__serialize_json_dn_properties(properties) if data_node.storage_type() == SQLDataNode.storage_type(): properties = cls.__serialize_sql_dn_properties(properties) if data_node.storage_type() == MongoCollectionDataNode.storage_type(): properties = cls.__serialize_mongo_collection_dn_model_properties(properties) if cls._EXPOSED_TYPE_KEY in properties.keys(): properties = cls.__serialize_exposed_type( properties, cls._EXPOSED_TYPE_KEY, cls._VALID_STRING_EXPOSED_TYPES ) return _DataNodeModel( data_node.id, data_node.config_id, data_node._scope, data_node.storage_type(), data_node.owner_id, list(data_node._parent_ids), data_node._last_edit_date.isoformat() if data_node._last_edit_date else None, cls.__serialize_edits(data_node._edits), data_node._version, data_node._validity_period.days if data_node._validity_period else None, data_node._validity_period.seconds if data_node._validity_period else None, data_node._edit_in_progress, data_node._editor_id, data_node._editor_expiration_date.isoformat() if data_node._editor_expiration_date else None, properties, ) @classmethod def __deserialize_generic_dn_properties(cls, datanode_model_properties): if datanode_model_properties[cls._READ_FCT_MODULE_KEY]: datanode_model_properties[GenericDataNode._OPTIONAL_READ_FUNCTION_PROPERTY] = _load_fct( datanode_model_properties[cls._READ_FCT_MODULE_KEY], datanode_model_properties[cls._READ_FCT_NAME_KEY], ) else: datanode_model_properties[GenericDataNode._OPTIONAL_READ_FUNCTION_PROPERTY] = None if datanode_model_properties[cls._WRITE_FCT_MODULE_KEY]: datanode_model_properties[GenericDataNode._OPTIONAL_WRITE_FUNCTION_PROPERTY] = _load_fct( datanode_model_properties[cls._WRITE_FCT_MODULE_KEY], datanode_model_properties[cls._WRITE_FCT_NAME_KEY], ) else: 
datanode_model_properties[GenericDataNode._OPTIONAL_WRITE_FUNCTION_PROPERTY] = None del datanode_model_properties[cls._READ_FCT_NAME_KEY] del datanode_model_properties[cls._READ_FCT_MODULE_KEY] del datanode_model_properties[cls._WRITE_FCT_NAME_KEY] del datanode_model_properties[cls._WRITE_FCT_MODULE_KEY] return datanode_model_properties @classmethod def __deserialize_json_dn_properties(cls, datanode_model_properties: dict) -> dict: if datanode_model_properties[cls._JSON_ENCODER_MODULE_KEY]: datanode_model_properties[JSONDataNode._ENCODER_KEY] = _load_fct( datanode_model_properties[cls._JSON_ENCODER_MODULE_KEY], datanode_model_properties[cls._JSON_ENCODER_NAME_KEY], ) else: datanode_model_properties[JSONDataNode._ENCODER_KEY] = None if datanode_model_properties[cls._JSON_DECODER_MODULE_KEY]: datanode_model_properties[JSONDataNode._DECODER_KEY] = _load_fct( datanode_model_properties[cls._JSON_DECODER_MODULE_KEY], datanode_model_properties[cls._JSON_DECODER_NAME_KEY], ) else: datanode_model_properties[JSONDataNode._DECODER_KEY] = None del datanode_model_properties[cls._JSON_ENCODER_NAME_KEY] del datanode_model_properties[cls._JSON_ENCODER_MODULE_KEY] del datanode_model_properties[cls._JSON_DECODER_NAME_KEY] del datanode_model_properties[cls._JSON_DECODER_MODULE_KEY] return datanode_model_properties @classmethod def __deserialize_sql_dn_model_properties(cls, datanode_model_properties: dict) -> dict: if datanode_model_properties[cls.__WRITE_QUERY_BUILDER_MODULE_KEY]: datanode_model_properties[SQLDataNode._WRITE_QUERY_BUILDER_KEY] = _load_fct( datanode_model_properties[cls.__WRITE_QUERY_BUILDER_MODULE_KEY], datanode_model_properties[cls.__WRITE_QUERY_BUILDER_NAME_KEY], ) else: datanode_model_properties[SQLDataNode._WRITE_QUERY_BUILDER_KEY] = None del datanode_model_properties[cls.__WRITE_QUERY_BUILDER_NAME_KEY] del datanode_model_properties[cls.__WRITE_QUERY_BUILDER_MODULE_KEY] if datanode_model_properties[cls.__APPEND_QUERY_BUILDER_MODULE_KEY]: datanode_model_properties[SQLDataNode._APPEND_QUERY_BUILDER_KEY] = _load_fct( datanode_model_properties[cls.__APPEND_QUERY_BUILDER_MODULE_KEY], datanode_model_properties[cls.__APPEND_QUERY_BUILDER_NAME_KEY], ) else: datanode_model_properties[SQLDataNode._APPEND_QUERY_BUILDER_KEY] = None del datanode_model_properties[cls.__APPEND_QUERY_BUILDER_NAME_KEY] del datanode_model_properties[cls.__APPEND_QUERY_BUILDER_MODULE_KEY] return datanode_model_properties @classmethod def __deserialize_mongo_collection_dn_model_properties(cls, datanode_model_properties: dict) -> dict: if MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY in datanode_model_properties.keys(): if isinstance(datanode_model_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY], str): datanode_model_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY] = locate( datanode_model_properties[MongoCollectionDataNode._CUSTOM_DOCUMENT_PROPERTY] ) return datanode_model_properties @classmethod def __deserialize_edits(cls, edits): for edit in edits: if timestamp := edit.get(\"timestamp\", None): edit[\"timestamp\"] = datetime.fromisoformat(timestamp) else: edit[\"timestamp\"] = datetime.now() return edits @staticmethod def __deserialize_exposed_type(properties: dict, exposed_type_key: str, valid_str_exposed_types) -> dict: if properties[exposed_type_key] not in valid_str_exposed_types: if isinstance(properties[exposed_type_key], str): properties[exposed_type_key] = locate(properties[exposed_type_key]) elif isinstance(properties[exposed_type_key], dict): properties[exposed_type_key] = { k: v 
if v in valid_str_exposed_types else locate(v) for k, v in properties[exposed_type_key].items() } elif isinstance(properties[exposed_type_key], list): properties[exposed_type_key] = [ v if v in valid_str_exposed_types else locate(v) for v in properties[exposed_type_key] ] return properties @classmethod def _model_to_entity(cls, model: _DataNodeModel) -> DataNode: data_node_properties = model.data_node_properties.copy() if model.storage_type == GenericDataNode.storage_type(): data_node_properties = cls.__deserialize_generic_dn_properties(data_node_properties) if model.storage_type == JSONDataNode.storage_type(): data_node_properties = cls.__deserialize_json_dn_properties(data_node_properties) if model.storage_type == SQLDataNode.storage_type(): data_node_properties = cls.__deserialize_sql_dn_model_properties(data_node_properties) if model.storage_type == MongoCollectionDataNode.storage_type(): data_node_properties = cls.__deserialize_mongo_collection_dn_model_properties(data_node_properties) if cls._EXPOSED_TYPE_KEY in data_node_properties.keys(): data_node_properties = cls.__deserialize_exposed_type( data_node_properties, cls._EXPOSED_TYPE_KEY, cls._VALID_STRING_EXPOSED_TYPES ) validity_period = None if model.validity_seconds is not None and model.validity_days is not None: validity_period = timedelta(days=model.validity_days, seconds=model.validity_seconds) exp_date = datetime.fromisoformat(model.editor_expiration_date) if model.editor_expiration_date else None datanode = DataNode._class_map()[model.storage_type]( config_id=model.config_id, scope=model.scope, id=model.id, owner_id=model.owner_id, parent_ids=set(model.parent_ids), last_edit_date=datetime.fromisoformat(model.last_edit_date) if model.last_edit_date else None, edits=cls.__deserialize_edits(copy(model.edits)), version=model.version, validity_period=validity_period, edit_in_progress=model.edit_in_progress, editor_id=model.editor_id, editor_expiration_date=exp_date, properties=data_node_properties, ) return _migrate_entity(datanode) "} {"text": "import os import re import urllib.parse from abc import abstractmethod from datetime import datetime, timedelta from typing import Dict, List, Optional, Set, Tuple, Union import modin.pandas as modin_pd import numpy as np import pandas as pd from sqlalchemy import create_engine, text from taipy.config.common.scope import Scope from .._version._version_manager_factory import _VersionManagerFactory from ..data.operator import JoinOperator, Operator from ..exceptions.exceptions import MissingRequiredProperty, UnknownDatabaseEngine from ._abstract_tabular import _AbstractTabularDataNode from .data_node import DataNode from .data_node_id import DataNodeId, Edit class _AbstractSQLDataNode(DataNode, _AbstractTabularDataNode): \"\"\"Abstract base class for data node implementations (SQLDataNode and SQLTableDataNode) that use SQL.\"\"\" __STORAGE_TYPE = \"NOT_IMPLEMENTED\" __EXPOSED_TYPE_PROPERTY = \"exposed_type\" __EXPOSED_TYPE_NUMPY = \"numpy\" __EXPOSED_TYPE_PANDAS = \"pandas\" __EXPOSED_TYPE_MODIN = \"modin\" __VALID_STRING_EXPOSED_TYPES = [__EXPOSED_TYPE_PANDAS, __EXPOSED_TYPE_NUMPY, __EXPOSED_TYPE_MODIN] __DB_NAME_KEY = \"db_name\" __DB_USERNAME_KEY = \"db_username\" __DB_PASSWORD_KEY = \"db_password\" __DB_HOST_KEY = \"db_host\" __DB_PORT_KEY = \"db_port\" __DB_ENGINE_KEY = \"db_engine\" __DB_DRIVER_KEY = \"db_driver\" __DB_EXTRA_ARGS_KEY = \"db_extra_args\" __SQLITE_FOLDER_PATH = \"sqlite_folder_path\" __SQLITE_FILE_EXTENSION = \"sqlite_file_extension\" __ENGINE_PROPERTIES: List[str] = [ 
__DB_NAME_KEY, __DB_USERNAME_KEY, __DB_PASSWORD_KEY, __DB_HOST_KEY, __DB_PORT_KEY, __DB_DRIVER_KEY, __DB_EXTRA_ARGS_KEY, __SQLITE_FOLDER_PATH, __SQLITE_FILE_EXTENSION, ] __DB_HOST_DEFAULT = \"localhost\" __DB_PORT_DEFAULT = 1433 __DB_DRIVER_DEFAULT = \"\" __SQLITE_FOLDER_PATH_DEFAULT = \"\" __SQLITE_FILE_EXTENSION_DEFAULT = \".db\" __ENGINE_MSSQL = \"mssql\" __ENGINE_SQLITE = \"sqlite\" __ENGINE_MYSQL = \"mysql\" __ENGINE_POSTGRESQL = \"postgresql\" _ENGINE_REQUIRED_PROPERTIES: Dict[str, List[str]] = { __ENGINE_MSSQL: [__DB_USERNAME_KEY, __DB_PASSWORD_KEY, __DB_NAME_KEY], __ENGINE_MYSQL: [__DB_USERNAME_KEY, __DB_PASSWORD_KEY, __DB_NAME_KEY], __ENGINE_POSTGRESQL: [__DB_USERNAME_KEY, __DB_PASSWORD_KEY, __DB_NAME_KEY], __ENGINE_SQLITE: [__DB_NAME_KEY], } def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: Optional[List[Edit]] = None, version: Optional[str] = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties: Optional[Dict] = None, ): if properties is None: properties = {} self._check_required_properties(properties) if self.__EXPOSED_TYPE_PROPERTY not in properties.keys(): properties[self.__EXPOSED_TYPE_PROPERTY] = self.__EXPOSED_TYPE_PANDAS self._check_exposed_type(properties[self.__EXPOSED_TYPE_PROPERTY], self.__VALID_STRING_EXPOSED_TYPES) super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, **properties, ) self._engine = None if not self._last_edit_date: self._last_edit_date = datetime.now() self._TAIPY_PROPERTIES.update( { self.__DB_NAME_KEY, self.__DB_USERNAME_KEY, self.__DB_PASSWORD_KEY, self.__DB_HOST_KEY, self.__DB_PORT_KEY, self.__DB_ENGINE_KEY, self.__DB_DRIVER_KEY, self.__DB_EXTRA_ARGS_KEY, self.__SQLITE_FOLDER_PATH, self.__SQLITE_FILE_EXTENSION, self.__EXPOSED_TYPE_PROPERTY, } ) def _check_required_properties(self, properties: Dict): db_engine = properties.get(self.__DB_ENGINE_KEY) if not db_engine: raise MissingRequiredProperty(f\"{self.__DB_ENGINE_KEY} is required.\") if db_engine not in self._ENGINE_REQUIRED_PROPERTIES.keys(): raise UnknownDatabaseEngine(f\"Unknown engine: {db_engine}\") required = self._ENGINE_REQUIRED_PROPERTIES[db_engine] if missing := set(required) - set(properties.keys()): raise MissingRequiredProperty( f\"The following properties \" f\"{', '.join(x for x in missing)} were not informed and are required.\" ) def _get_engine(self): if self._engine is None: self._engine = create_engine(self._conn_string()) return self._engine def _conn_string(self) -> str: engine = self.properties.get(self.__DB_ENGINE_KEY) if self.__DB_USERNAME_KEY in self._ENGINE_REQUIRED_PROPERTIES[engine]: username = self.properties.get(self.__DB_USERNAME_KEY) username = urllib.parse.quote_plus(username) if self.__DB_PASSWORD_KEY in self._ENGINE_REQUIRED_PROPERTIES[engine]: password = self.properties.get(self.__DB_PASSWORD_KEY) password = urllib.parse.quote_plus(password) if self.__DB_NAME_KEY in self._ENGINE_REQUIRED_PROPERTIES[engine]: db_name = self.properties.get(self.__DB_NAME_KEY) db_name = urllib.parse.quote_plus(db_name) host = self.properties.get(self.__DB_HOST_KEY, self.__DB_HOST_DEFAULT) port = 
self.properties.get(self.__DB_PORT_KEY, self.__DB_PORT_DEFAULT) driver = self.properties.get(self.__DB_DRIVER_KEY, self.__DB_DRIVER_DEFAULT) extra_args = self.properties.get(self.__DB_EXTRA_ARGS_KEY, {}) if driver: extra_args = {**extra_args, \"driver\": driver} for k, v in extra_args.items(): extra_args[k] = re.sub(r\"\\s+\", \"+\", v) extra_args_str = \"&\".join(f\"{k}={str(v)}\" for k, v in extra_args.items()) if engine == self.__ENGINE_MSSQL: return f\"mssql+pyodbc://{username}:{password}@{host}:{port}/{db_name}?{extra_args_str}\" elif engine == self.__ENGINE_MYSQL: return f\"mysql+pymysql://{username}:{password}@{host}:{port}/{db_name}?{extra_args_str}\" elif engine == self.__ENGINE_POSTGRESQL: return f\"postgresql+psycopg2://{username}:{password}@{host}:{port}/{db_name}?{extra_args_str}\" elif engine == self.__ENGINE_SQLITE: folder_path = self.properties.get(self.__SQLITE_FOLDER_PATH, self.__SQLITE_FOLDER_PATH_DEFAULT) file_extension = self.properties.get(self.__SQLITE_FILE_EXTENSION, self.__SQLITE_FILE_EXTENSION_DEFAULT) return \"sqlite:///\" + os.path.join(folder_path, f\"{db_name}{file_extension}\") raise UnknownDatabaseEngine(f\"Unknown engine: {engine}\") def filter(self, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND): if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_PANDAS: return self._read_as_pandas_dataframe(operators=operators, join_operator=join_operator) if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_MODIN: return self._read_as_modin_dataframe(operators=operators, join_operator=join_operator) if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_NUMPY: return self._read_as_numpy(operators=operators, join_operator=join_operator) return self._read_as(operators=operators, join_operator=join_operator) def _read(self): if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_PANDAS: return self._read_as_pandas_dataframe() if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_MODIN: return self._read_as_modin_dataframe() if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_NUMPY: return self._read_as_numpy() return self._read_as() def _read_as(self, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND): custom_class = self.properties[self.__EXPOSED_TYPE_PROPERTY] with self._get_engine().connect() as connection: query_result = connection.execute(text(self._get_read_query(operators, join_operator))) return list(map(lambda row: custom_class(**row), query_result)) def _read_as_numpy( self, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND ) -> np.ndarray: return self._read_as_pandas_dataframe(operators=operators, join_operator=join_operator).to_numpy() def _read_as_pandas_dataframe( self, columns: Optional[List[str]] = None, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND, ): with self._get_engine().connect() as conn: if columns: return pd.DataFrame(conn.execute(text(self._get_read_query(operators, join_operator))))[columns] return pd.DataFrame(conn.execute(text(self._get_read_query(operators, join_operator)))) def _read_as_modin_dataframe( self, columns: Optional[List[str]] = None, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND, ): if columns: return modin_pd.read_sql_query(self._get_read_query(operators, join_operator), con=self._get_engine())[ columns ] return modin_pd.read_sql_query(self._get_read_query(operators, 
join_operator), con=self._get_engine()) @abstractmethod def _get_read_query(self, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND): query = self._get_base_read_query() if not operators: return query if not isinstance(operators, List): operators = [operators] conditions = [] for key, value, operator in operators: if operator == Operator.EQUAL: conditions.append(f\"{key} = '{value}'\") elif operator == Operator.NOT_EQUAL: conditions.append(f\"{key} <> '{value}'\") elif operator == Operator.GREATER_THAN: conditions.append(f\"{key} > '{value}'\") elif operator == Operator.GREATER_OR_EQUAL: conditions.append(f\"{key} >= '{value}'\") elif operator == Operator.LESS_THAN: conditions.append(f\"{key} < '{value}'\") elif operator == Operator.LESS_OR_EQUAL: conditions.append(f\"{key} <= '{value}'\") if join_operator == JoinOperator.AND: query += f\" WHERE {' AND '.join(conditions)}\" elif join_operator == JoinOperator.OR: query += f\" WHERE {' OR '.join(conditions)}\" else: raise NotImplementedError(f\"Join operator {join_operator} not implemented.\") return query @abstractmethod def _get_base_read_query(self) -> str: raise NotImplementedError def _append(self, data) -> None: engine = self._get_engine() with engine.connect() as connection: with connection.begin() as transaction: try: self._do_append(data, engine, connection) except Exception as e: transaction.rollback() raise e else: transaction.commit() @abstractmethod def _do_append(self, data, engine, connection) -> None: raise NotImplementedError def _write(self, data) -> None: \"\"\"Check data against a collection of types to handle insertion on the database.\"\"\" engine = self._get_engine() with engine.connect() as connection: with connection.begin() as transaction: try: self._do_write(data, engine, connection) except Exception as e: transaction.rollback() raise e else: transaction.commit() @abstractmethod def _do_write(self, data, engine, connection) -> None: raise NotImplementedError def __setattr__(self, key: str, value) -> None: if key in self.__ENGINE_PROPERTIES: self._engine = None return super().__setattr__(key, value) "} {"text": "import os from datetime import datetime, timedelta from os.path import isdir, isfile from typing import Any, Dict, List, Optional, Set import modin.pandas as modin_pd import numpy as np import pandas as pd from taipy.config.common.scope import Scope from .._backup._backup import _replace_in_backup_file from .._entity._reload import _self_reload from .._version._version_manager_factory import _VersionManagerFactory from ..exceptions.exceptions import UnknownCompressionAlgorithm, UnknownParquetEngine from ..job.job_id import JobId from ._abstract_file import _AbstractFileDataNode from ._abstract_tabular import _AbstractTabularDataNode from .data_node import DataNode from .data_node_id import DataNodeId, Edit class ParquetDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode): \"\"\"Data Node stored as a Parquet file. Attributes: config_id (str): Identifier of the data node configuration. This string must be a valid Python identifier. scope (Scope^): The scope of this data node. id (str): The unique identifier of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or `None`. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. 
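A hedged sketch of the `filter` entry point defined on `_AbstractSQLDataNode` above; the `sales_dn` handle and its column names are assumptions, and the operator tuples follow the shape consumed by `_get_read_query`.

# Sketch: a filtered read from a SQL-backed data node (handle and columns assumed).
from taipy.core.data.operator import JoinOperator, Operator

shipped_big_orders = sales_dn.filter(
    [("status", "shipped", Operator.EQUAL), ("amount", 1000, Operator.GREATER_THAN)],
    JoinOperator.AND,   # rendered as: WHERE status = 'shipped' AND amount > '1000'
)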
version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. path (str): The path to the Parquet file. properties (dict[str, Any]): A dictionary of additional properties. *properties* must have a *\"default_path\"* or *\"path\"* entry with the path of the Parquet file: - *\"default_path\"* (`str`): The default path of the Parquet file. - *\"exposed_type\"*: The exposed type of the data read from Parquet file. The default value is `pandas`. - *\"engine\"* (`Optional[str]`): Parquet library to use. Possible values are *\"fastparquet\"* or *\"pyarrow\"*.
The default value is *\"pyarrow\"*. - *\"compression\"* (`Optional[str]`): Name of the compression to use. Possible values are *\"snappy\"*, *\"gzip\"*, *\"brotli\"*, or *\"none\"* (no compression).
The default value is *\"snappy\"*. - *\"read_kwargs\"* (`Optional[dict]`): Additional parameters passed to the *pandas.read_parquet()* function. - *\"write_kwargs\"* (`Optional[dict]`): Additional parameters passed to the *pandas.DataFrame.write_parquet()* fucntion. The parameters in *\"read_kwargs\"* and *\"write_kwargs\"* have a **higher precedence** than the top-level parameters which are also passed to Pandas. \"\"\" __STORAGE_TYPE = \"parquet\" __EXPOSED_TYPE_PROPERTY = \"exposed_type\" __EXPOSED_TYPE_NUMPY = \"numpy\" __EXPOSED_TYPE_PANDAS = \"pandas\" __EXPOSED_TYPE_MODIN = \"modin\" __VALID_STRING_EXPOSED_TYPES = [__EXPOSED_TYPE_PANDAS, __EXPOSED_TYPE_MODIN, __EXPOSED_TYPE_NUMPY] __PATH_KEY = \"path\" __DEFAULT_DATA_KEY = \"default_data\" __DEFAULT_PATH_KEY = \"default_path\" __ENGINE_PROPERTY = \"engine\" __VALID_PARQUET_ENGINES = [\"pyarrow\", \"fastparquet\"] __COMPRESSION_PROPERTY = \"compression\" __VALID_COMPRESSION_ALGORITHMS = [\"snappy\", \"gzip\", \"brotli\", \"none\"] __READ_KWARGS_PROPERTY = \"read_kwargs\" __WRITE_KWARGS_PROPERTY = \"write_kwargs\" _REQUIRED_PROPERTIES: List[str] = [] def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: Optional[List[Edit]] = None, version: Optional[str] = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties: Optional[Dict] = None, ): if properties is None: properties = {} default_value = properties.pop(self.__DEFAULT_DATA_KEY, None) if self.__ENGINE_PROPERTY not in properties.keys(): properties[self.__ENGINE_PROPERTY] = \"pyarrow\" if properties[self.__ENGINE_PROPERTY] not in self.__VALID_PARQUET_ENGINES: raise UnknownParquetEngine( f\"Invalid parquet engine: {properties[self.__ENGINE_PROPERTY]}. \" f\"Supported engines are {', '.join(self.__VALID_PARQUET_ENGINES)}\" ) if self.__COMPRESSION_PROPERTY not in properties.keys(): properties[self.__COMPRESSION_PROPERTY] = \"snappy\" if properties[self.__COMPRESSION_PROPERTY] == \"none\": properties[self.__COMPRESSION_PROPERTY] = None if ( properties[self.__COMPRESSION_PROPERTY] and properties[self.__COMPRESSION_PROPERTY] not in self.__VALID_COMPRESSION_ALGORITHMS ): raise UnknownCompressionAlgorithm( f\"Unsupported compression algorithm: {properties[self.__COMPRESSION_PROPERTY]}. 
\" f\"Supported algorithms are {', '.join(self.__VALID_COMPRESSION_ALGORITHMS)}\" ) if self.__READ_KWARGS_PROPERTY not in properties.keys(): properties[self.__READ_KWARGS_PROPERTY] = {} if self.__WRITE_KWARGS_PROPERTY not in properties.keys(): properties[self.__WRITE_KWARGS_PROPERTY] = {} if self.__EXPOSED_TYPE_PROPERTY not in properties.keys(): properties[self.__EXPOSED_TYPE_PROPERTY] = self.__EXPOSED_TYPE_PANDAS self._check_exposed_type(properties[self.__EXPOSED_TYPE_PROPERTY], self.__VALID_STRING_EXPOSED_TYPES) super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, **properties, ) self._path = properties.get(self.__PATH_KEY, properties.get(self.__DEFAULT_PATH_KEY)) if not self._path: self._path = self._build_path(self.storage_type()) properties[self.__PATH_KEY] = self._path if default_value is not None and not os.path.exists(self._path): self._write(default_value) self._last_edit_date = datetime.now() self._edits.append( Edit( { \"timestamp\": self._last_edit_date, \"writer_identifier\": \"TAIPY\", \"comments\": \"Default data written.\", } ) ) if not self._last_edit_date and (isfile(self._path) or isdir(self._path)): self._last_edit_date = datetime.now() self._TAIPY_PROPERTIES.update( { self.__EXPOSED_TYPE_PROPERTY, self.__PATH_KEY, self.__DEFAULT_PATH_KEY, self.__DEFAULT_DATA_KEY, self.__ENGINE_PROPERTY, self.__COMPRESSION_PROPERTY, self.__READ_KWARGS_PROPERTY, self.__WRITE_KWARGS_PROPERTY, } ) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE @property # type: ignore @_self_reload(DataNode._MANAGER_NAME) def path(self): return self._path @path.setter def path(self, value): tmp_old_path = self._path self._path = value self.properties[self.__PATH_KEY] = value _replace_in_backup_file(old_file_path=tmp_old_path, new_file_path=self._path) def _read(self): return self.read_with_kwargs() def _read_as(self, read_kwargs: Dict): custom_class = self.properties[self.__EXPOSED_TYPE_PROPERTY] list_of_dicts = self._read_as_pandas_dataframe(read_kwargs).to_dict(orient=\"records\") return [custom_class(**dct) for dct in list_of_dicts] def _read_as_numpy(self, read_kwargs: Dict) -> np.ndarray: return self._read_as_pandas_dataframe(read_kwargs).to_numpy() def _read_as_pandas_dataframe(self, read_kwargs: Dict) -> pd.DataFrame: return pd.read_parquet(self._path, **read_kwargs) def _read_as_modin_dataframe(self, read_kwargs: Dict) -> modin_pd.DataFrame: return modin_pd.read_parquet(self._path, **read_kwargs) def _append(self, data: Any): self.write_with_kwargs(data, engine=\"fastparquet\", append=True) def _write(self, data: Any): self.write_with_kwargs(data) def write_with_kwargs(self, data: Any, job_id: Optional[JobId] = None, **write_kwargs): \"\"\"Write the data referenced by this data node. Keyword arguments here which are also present in the Data Node config will overwrite them. Parameters: data (Any): The data to write. job_id (JobId^): An optional identifier of the writer. **write_kwargs (dict[str, any]): The keyword arguments passed to the function `pandas.DataFrame.to_parquet()`. 
\"\"\" kwargs = { self.__ENGINE_PROPERTY: self.properties[self.__ENGINE_PROPERTY], self.__COMPRESSION_PROPERTY: self.properties[self.__COMPRESSION_PROPERTY], } kwargs.update(self.properties[self.__WRITE_KWARGS_PROPERTY]) kwargs.update(write_kwargs) if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)): data.to_parquet(self._path, **kwargs) else: pd.DataFrame(data).to_parquet(self._path, **kwargs) self.track_edit(timestamp=datetime.now(), job_id=job_id) def read_with_kwargs(self, **read_kwargs): \"\"\"Read data from this data node. Keyword arguments here which are also present in the Data Node config will overwrite them. Parameters: **read_kwargs (dict[str, any]): The keyword arguments passed to the function `pandas.read_parquet()`. \"\"\" # return None if data was never written if not self.last_edit_date: self._DataNode__logger.warning( f\"Data node {self.id} from config {self.config_id} is being read but has never been written.\" ) return None kwargs = self.properties[self.__READ_KWARGS_PROPERTY] kwargs.update( { self.__ENGINE_PROPERTY: self.properties[self.__ENGINE_PROPERTY], } ) kwargs.update(read_kwargs) if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_PANDAS: return self._read_as_pandas_dataframe(kwargs) if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_MODIN: return self._read_as_modin_dataframe(kwargs) if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_NUMPY: return self._read_as_numpy(kwargs) return self._read_as(kwargs) "} {"text": "from datetime import datetime, timedelta from inspect import isclass from typing import Any, Dict, List, Optional, Set, Tuple, Union from taipy.config.common.scope import Scope from .._version._version_manager_factory import _VersionManagerFactory from ..common._mongo_connector import _connect_mongodb from ..data.operator import JoinOperator, Operator from ..exceptions.exceptions import InvalidCustomDocument, MissingRequiredProperty from .data_node import DataNode from .data_node_id import DataNodeId, Edit class MongoCollectionDataNode(DataNode): \"\"\"Data Node stored in a Mongo collection. Attributes: config_id (str): Identifier of the data node configuration. It must be a valid Python identifier. scope (Scope^): The scope of this data node. id (str): The unique identifier of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or None. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. 
properties (dict[str, Any]): A dictionary of additional properties. Note that the _properties_ parameter must at least contain an entry for _\"db_name\"_ and _\"collection_name\"_: - _\"db_name\"_ `(str)`: The database name.\\n - _\"collection_name\"_ `(str)`: The collection in the database to read from and to write the data to.\\n - _\"custom_document\"_ `(Any)`: The custom document class to store, encode, and decode data when reading and writing to a Mongo collection.\\n - _\"db_username\"_ `(str)`: The database username.\\n - _\"db_password\"_ `(str)`: The database password.\\n - _\"db_host\"_ `(str)`: The database host. The default value is _\"localhost\"_.\\n - _\"db_port\"_ `(int)`: The database port. The default value is 27017.\\n - _\"db_driver\"_ `(str)`: The database driver.\\n - _\"db_extra_args\"_ `(Dict[str, Any])`: A dictionary of additional arguments to be passed into database connection string.\\n \"\"\" __STORAGE_TYPE = \"mongo_collection\" __DB_NAME_KEY = \"db_name\" __COLLECTION_KEY = \"collection_name\" __DB_USERNAME_KEY = \"db_username\" __DB_PASSWORD_KEY = \"db_password\" __DB_HOST_KEY = \"db_host\" __DB_PORT_KEY = \"db_port\" __DB_EXTRA_ARGS_KEY = \"db_extra_args\" __DB_DRIVER_KEY = \"db_driver\" __DB_HOST_DEFAULT = \"localhost\" __DB_PORT_DEFAULT = 27017 _CUSTOM_DOCUMENT_PROPERTY = \"custom_document\" _REQUIRED_PROPERTIES: List[str] = [ __DB_NAME_KEY, __COLLECTION_KEY, ] def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: List[Edit] = None, version: str = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties: Dict = None, ): if properties is None: properties = {} required = self._REQUIRED_PROPERTIES if missing := set(required) - set(properties.keys()): raise MissingRequiredProperty( f\"The following properties \" f\"{', '.join(x for x in missing)} were not informed and are required.\" ) self._check_custom_document(properties[self._CUSTOM_DOCUMENT_PROPERTY]) super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, **properties, ) mongo_client = _connect_mongodb( db_host=properties.get(self.__DB_HOST_KEY, self.__DB_HOST_DEFAULT), db_port=properties.get(self.__DB_PORT_KEY, self.__DB_PORT_DEFAULT), db_username=properties.get(self.__DB_USERNAME_KEY, \"\"), db_password=properties.get(self.__DB_PASSWORD_KEY, \"\"), db_driver=properties.get(self.__DB_DRIVER_KEY, \"\"), db_extra_args=frozenset(properties.get(self.__DB_EXTRA_ARGS_KEY, {}).items()), ) self.collection = mongo_client[properties.get(self.__DB_NAME_KEY, \"\")][ properties.get(self.__COLLECTION_KEY, \"\") ] self.custom_document = properties[self._CUSTOM_DOCUMENT_PROPERTY] self._decoder = self._default_decoder custom_decoder = getattr(self.custom_document, \"decode\", None) if callable(custom_decoder): self._decoder = custom_decoder self._encoder = self._default_encoder custom_encoder = getattr(self.custom_document, \"encode\", None) if callable(custom_encoder): self._encoder = custom_encoder if not self._last_edit_date: self._last_edit_date = datetime.now() self._TAIPY_PROPERTIES.update( { self.__COLLECTION_KEY, self.__DB_NAME_KEY, self._CUSTOM_DOCUMENT_PROPERTY, 
self.__DB_USERNAME_KEY, self.__DB_PASSWORD_KEY, self.__DB_HOST_KEY, self.__DB_PORT_KEY, self.__DB_DRIVER_KEY, self.__DB_EXTRA_ARGS_KEY, } ) def _check_custom_document(self, custom_document): if not isclass(custom_document): raise InvalidCustomDocument( f\"Invalid custom document of {custom_document}. Only custom class are supported.\" ) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE def filter(self, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND): cursor = self._read_by_query(operators, join_operator) return list(map(lambda row: self._decoder(row), cursor)) def _read(self): cursor = self._read_by_query() return list(map(lambda row: self._decoder(row), cursor)) def _read_by_query(self, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND): \"\"\"Query from a Mongo collection, exclude the _id field\"\"\" if not operators: return self.collection.find() if not isinstance(operators, List): operators = [operators] conditions = [] for key, value, operator in operators: if operator == Operator.EQUAL: conditions.append({key: value}) elif operator == Operator.NOT_EQUAL: conditions.append({key: {\"$ne\": value}}) elif operator == Operator.GREATER_THAN: conditions.append({key: {\"$gt\": value}}) elif operator == Operator.GREATER_OR_EQUAL: conditions.append({key: {\"$gte\": value}}) elif operator == Operator.LESS_THAN: conditions.append({key: {\"$lt\": value}}) elif operator == Operator.LESS_OR_EQUAL: conditions.append({key: {\"$lte\": value}}) query = {} if join_operator == JoinOperator.AND: query = {\"$and\": conditions} elif join_operator == JoinOperator.OR: query = {\"$or\": conditions} else: raise NotImplementedError(f\"Join operator {join_operator} is not supported.\") return self.collection.find(query) def _append(self, data) -> None: \"\"\"Append data to a Mongo collection.\"\"\" if not isinstance(data, list): data = [data] if len(data) == 0: return if isinstance(data[0], dict): self._insert_dicts(data) else: self._insert_dicts([self._encoder(row) for row in data]) def _write(self, data) -> None: \"\"\"Check data against a collection of types to handle insertion on the database. Parameters: data (Any): the data to write to the database. \"\"\" if not isinstance(data, list): data = [data] if len(data) == 0: self.collection.drop() return if isinstance(data[0], dict): self._insert_dicts(data, drop=True) else: self._insert_dicts([self._encoder(row) for row in data], drop=True) def _insert_dicts(self, data: List[Dict], drop=False) -> None: \"\"\" This method will insert data contained in a list of dictionaries into a collection. Parameters: data (List[Dict]): a list of dictionaries drop (bool): drop the collection before inserting the data to overwrite the data in the collection. \"\"\" if drop: self.collection.drop() self.collection.insert_many(data) def _default_decoder(self, document: Dict) -> Any: \"\"\"Decode a Mongo dictionary to a custom document object for reading. Parameters: document (Dict): the document dictionary return by Mongo query. Returns: A custom document object. \"\"\" return self.custom_document(**document) def _default_encoder(self, document_object: Any) -> Dict: \"\"\"Encode a custom document object to a dictionary for writing to MongoDB. Args: document_object: the custom document class. Returns: The document dictionary. 
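As a hedged illustration of the encoder/decoder hooks described above (the class and its fields are invented for the example), a custom document class only needs an optional encode method, used in place of the default encoder when writing, and an optional decode classmethod, used in place of the default decoder on each document read back.

from dataclasses import dataclass

@dataclass
class Order:
    _id: str
    amount: float

    def encode(self) -> dict:
        # Replaces the default encoder (document_object.__dict__) when writing.
        return {'_id': self._id, 'amount': self.amount}

    @classmethod
    def decode(cls, document: dict) -> 'Order':
        # Replaces the default decoder (keyword construction) on each query result.
        return cls(_id=document['_id'], amount=document['amount'])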
\"\"\" return document_object.__dict__ "} {"text": "from collections.abc import Hashable from functools import reduce from itertools import chain from operator import and_, or_ from typing import Dict, Iterable, List, Tuple, Union import modin.pandas as modin_pd import numpy as np import pandas as pd from pandas.core.common import is_bool_indexer from .operator import JoinOperator, Operator class _FilterDataNode: @staticmethod def __is_pandas_object(data) -> bool: return isinstance(data, (pd.DataFrame, modin_pd.DataFrame)) or isinstance(data, (pd.Series, modin_pd.DataFrame)) @staticmethod def __is_multi_sheet_excel(data) -> bool: if isinstance(data, Dict): has_df_children = all([isinstance(e, (pd.DataFrame, modin_pd.DataFrame)) for e in data.values()]) has_list_children = all([isinstance(e, List) for e in data.values()]) has_np_array_children = all([isinstance(e, np.ndarray) for e in data.values()]) return has_df_children or has_list_children or has_np_array_children return False @staticmethod def __is_list_of_dict(data) -> bool: return all(isinstance(x, Dict) for x in data) @staticmethod def _filter_by_key(data, key): if isinstance(key, int): return _FilterDataNode.__getitem_int(data, key) if isinstance(key, slice) or (isinstance(key, tuple) and any(isinstance(e, slice) for e in key)): return _FilterDataNode.__getitem_slice(data, key) if isinstance(key, Hashable): return _FilterDataNode.__getitem_hashable(data, key) if isinstance(key, (pd.DataFrame, modin_pd.DataFrame)): return _FilterDataNode.__getitem_dataframe(data, key) if is_bool_indexer(key): return _FilterDataNode.__getitem_bool_indexer(data, key) if isinstance(key, Iterable): return _FilterDataNode.__getitem_iterable(data, key) return None @staticmethod def __getitem_int(data, key): return data[key] @staticmethod def __getitem_hashable(data, key): if _FilterDataNode.__is_pandas_object(data) or _FilterDataNode.__is_multi_sheet_excel(data): return data.get(key) return [getattr(entry, key, None) for entry in data] @staticmethod def __getitem_slice(data, key): return data[key] @staticmethod def __getitem_dataframe(data, key: Union[pd.DataFrame, modin_pd.DataFrame]): if _FilterDataNode.__is_pandas_object(data): return data[key] if _FilterDataNode.__is_list_of_dict(data): filtered_data = list() for i, row in key.iterrows(): filtered_row = dict() for col in row.index: filtered_row[col] = data[i][col] if row[col] else None filtered_data.append(filtered_row) return filtered_data return None @staticmethod def __getitem_bool_indexer(data, key): if _FilterDataNode.__is_pandas_object(data): return data[key] return [e for i, e in enumerate(data) if key[i]] @staticmethod def __getitem_iterable(data, keys): if _FilterDataNode.__is_pandas_object(data): return data[keys] filtered_data = [] for entry in data: filtered_data.append({k: getattr(entry, k) for k in keys if hasattr(entry, k)}) return filtered_data @staticmethod def _filter(data, operators: Union[List, Tuple], join_operator=JoinOperator.AND): if len(operators) == 0: return data if isinstance(data, Dict): return {k: _FilterDataNode._filter(v, operators, join_operator) for k, v in data.items()} if not ((isinstance(operators[0], list)) or (isinstance(operators[0], tuple))): if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)): return _FilterDataNode.__filter_dataframe_per_key_value(data, operators[0], operators[1], operators[2]) if isinstance(data, np.ndarray): list_operators = [operators] return _FilterDataNode.__filter_numpy_array(data, list_operators) if isinstance(data, List): return 
_FilterDataNode.__filter_list_per_key_value(data, operators[0], operators[1], operators[2]) else: if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)): return _FilterDataNode.__filter_dataframe(data, operators, join_operator=join_operator) if isinstance(data, np.ndarray): return _FilterDataNode.__filter_numpy_array(data, operators, join_operator=join_operator) if isinstance(data, List): return _FilterDataNode.__filter_list(data, operators, join_operator=join_operator) raise NotImplementedError @staticmethod def __filter_dataframe( df_data: Union[pd.DataFrame, modin_pd.DataFrame], operators: Union[List, Tuple], join_operator=JoinOperator.AND ): filtered_df_data = [] if join_operator == JoinOperator.AND: how = \"inner\" elif join_operator == JoinOperator.OR: how = \"outer\" else: return NotImplementedError for key, value, operator in operators: filtered_df_data.append(_FilterDataNode.__filter_dataframe_per_key_value(df_data, key, value, operator)) if isinstance(df_data, modin_pd.DataFrame): if filtered_df_data: return _FilterDataNode.__modin_dataframe_merge(filtered_df_data, how) return modin_pd.DataFrame() return _FilterDataNode.__dataframe_merge(filtered_df_data, how) if filtered_df_data else pd.DataFrame() @staticmethod def __filter_dataframe_per_key_value( df_data: Union[pd.DataFrame, modin_pd.DataFrame], key: str, value, operator: Operator ): df_by_col = df_data[key] if operator == Operator.EQUAL: df_by_col = df_by_col == value if operator == Operator.NOT_EQUAL: df_by_col = df_by_col != value if operator == Operator.LESS_THAN: df_by_col = df_by_col < value if operator == Operator.LESS_OR_EQUAL: df_by_col = df_by_col <= value if operator == Operator.GREATER_THAN: df_by_col = df_by_col > value if operator == Operator.GREATER_OR_EQUAL: df_by_col = df_by_col >= value return df_data[df_by_col] @staticmethod def __dataframe_merge(df_list: List, how=\"inner\"): return reduce(lambda df1, df2: pd.merge(df1, df2, how=how), df_list) @staticmethod def __modin_dataframe_merge(df_list: List, how=\"inner\"): return reduce(lambda df1, df2: modin_pd.merge(df1, df2, how=how), df_list) @staticmethod def __filter_numpy_array(data: np.ndarray, operators: Union[List, Tuple], join_operator=JoinOperator.AND): conditions = [] for key, value, operator in operators: conditions.append(_FilterDataNode.__get_filter_condition_per_key_value(data, key, value, operator)) if join_operator == JoinOperator.AND: join_conditions = reduce(and_, conditions) elif join_operator == JoinOperator.OR: join_conditions = reduce(or_, conditions) else: return NotImplementedError return data[join_conditions] @staticmethod def __get_filter_condition_per_key_value(array_data: np.ndarray, key, value, operator: Operator): if not isinstance(key, int): key = int(key) if operator == Operator.EQUAL: return array_data[:, key] == value if operator == Operator.NOT_EQUAL: return array_data[:, key] != value if operator == Operator.LESS_THAN: return array_data[:, key] < value if operator == Operator.LESS_OR_EQUAL: return array_data[:, key] <= value if operator == Operator.GREATER_THAN: return array_data[:, key] > value if operator == Operator.GREATER_OR_EQUAL: return array_data[:, key] >= value return NotImplementedError @staticmethod def __filter_list(list_data: List, operators: Union[List, Tuple], join_operator=JoinOperator.AND): filtered_list_data = [] for key, value, operator in operators: filtered_list_data.append(_FilterDataNode.__filter_list_per_key_value(list_data, key, value, operator)) if len(filtered_list_data) == 0: return 
filtered_list_data if join_operator == JoinOperator.AND: return _FilterDataNode.__list_intersect(filtered_list_data) elif join_operator == JoinOperator.OR: merged_list = list(chain.from_iterable(filtered_list_data)) if all(isinstance(e, Dict) for e in merged_list): return list({frozenset(item.items()) for item in merged_list}) return list(set(merged_list)) else: return NotImplementedError @staticmethod def __filter_list_per_key_value(list_data: List, key: str, value, operator: Operator): filtered_list = [] for row in list_data: if isinstance(row, Dict): row_value = row.get(key, None) else: row_value = getattr(row, key, None) if operator == Operator.EQUAL and row_value == value: filtered_list.append(row) if operator == Operator.NOT_EQUAL and row_value != value: filtered_list.append(row) if operator == Operator.LESS_THAN and row_value < value: filtered_list.append(row) if operator == Operator.LESS_OR_EQUAL and row_value <= value: filtered_list.append(row) if operator == Operator.GREATER_THAN and row_value > value: filtered_list.append(row) if operator == Operator.GREATER_OR_EQUAL and row_value >= value: filtered_list.append(row) return filtered_list @staticmethod def __list_intersect(list_data): return list(set(list_data.pop()).intersection(*map(set, list_data))) "} {"text": "from datetime import datetime, timedelta from typing import Any, Dict, List, Optional, Set from taipy.config.common.scope import Scope from .._version._version_manager_factory import _VersionManagerFactory from ..exceptions.exceptions import MissingReadFunction, MissingRequiredProperty, MissingWriteFunction from .data_node import DataNode from .data_node_id import DataNodeId, Edit class GenericDataNode(DataNode): \"\"\"Generic Data Node that uses custom read and write functions. The read and write function for this data node type can be implemented is Python. Attributes: config_id (str): Identifier of the data node configuration. It must be a valid Python identifier. scope (Scope^): The scope of this data node. id (str): The unique identifier of the data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or `None`. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. properties (dict[str, Any]): A dictionary of additional properties. Note that the _properties_ parameter must at least contain an entry for either _\"read_fct\"_ or _\"write_fct\"_ representing the read and write functions. 
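A hedged sketch of what such a read/write pair might look like (the file name and functions are invented): the optional read_fct_args and write_fct_args lists are unpacked as positional arguments, with the data passed first to the write function.

import json

def read_orders(path):
    # Called as read_fct(*read_fct_args).
    with open(path) as f:
        return json.load(f)

def write_orders(data, path):
    # Called as write_fct(data, *write_fct_args).
    with open(path, 'w') as f:
        json.dump(data, f)

generic_properties = {
    'read_fct': read_orders,
    'read_fct_args': ['orders.json'],
    'write_fct': write_orders,
    'write_fct_args': ['orders.json'],
}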
Entries for _\"read_fct_args\"_ and _\"write_fct_args\"_ respectively represent potential parameters for the _\"read_fct\"_ and _\"write_fct\"_ functions. \"\"\" __STORAGE_TYPE = \"generic\" _OPTIONAL_READ_FUNCTION_PROPERTY = \"read_fct\" __READ_FUNCTION_ARGS_PROPERTY = \"read_fct_args\" _OPTIONAL_WRITE_FUNCTION_PROPERTY = \"write_fct\" __WRITE_FUNCTION_ARGS_PROPERTY = \"write_fct_args\" _REQUIRED_PROPERTIES: List[str] = [] _REQUIRED_AT_LEAST_ONE_PROPERTY: List[str] = [_OPTIONAL_READ_FUNCTION_PROPERTY, _OPTIONAL_WRITE_FUNCTION_PROPERTY] def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: List[Edit] = None, version: str = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties: Dict = None, ): if properties is None: properties = {} if missing := set(self._REQUIRED_PROPERTIES) - set(properties.keys()): raise MissingRequiredProperty( f\"The following properties \" f\"{', '.join(x for x in missing)} were not informed and are required.\" ) missing_optional_fcts = set(self._REQUIRED_AT_LEAST_ONE_PROPERTY) - set(properties.keys()) if len(missing_optional_fcts) == len(self._REQUIRED_AT_LEAST_ONE_PROPERTY): raise MissingRequiredProperty( f\"None of the following properties \" f\"{', '.join(x for x in missing)} were informed and at least one must be populated.\" ) for missing_optional_fct in missing_optional_fcts: properties[missing_optional_fct] = None super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, **properties, ) if not self._last_edit_date: self._last_edit_date = datetime.now() self._TAIPY_PROPERTIES.update( { self.__READ_FUNCTION_ARGS_PROPERTY, self.__WRITE_FUNCTION_ARGS_PROPERTY, self._OPTIONAL_READ_FUNCTION_PROPERTY, self._OPTIONAL_WRITE_FUNCTION_PROPERTY, } ) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE def _read(self): if read_fct := self.properties[self._OPTIONAL_READ_FUNCTION_PROPERTY]: if read_fct_args := self.properties.get(self.__READ_FUNCTION_ARGS_PROPERTY, None): if not isinstance(read_fct_args, list): return read_fct(*[read_fct_args]) return read_fct(*read_fct_args) return read_fct() raise MissingReadFunction(f\"The read function is not defined in data node config {self.config_id}.\") def _write(self, data: Any): if write_fct := self.properties[self._OPTIONAL_WRITE_FUNCTION_PROPERTY]: if write_fct_args := self.properties.get(self.__WRITE_FUNCTION_ARGS_PROPERTY, None): if not isinstance(write_fct_args, list): return write_fct(data, *[write_fct_args]) return write_fct(data, *write_fct_args) return write_fct(data) raise MissingWriteFunction(f\"The write function is not defined in data node config {self.config_id}.\") "} {"text": "from ..exceptions.exceptions import InvalidExposedType class _AbstractTabularDataNode(object): \"\"\"Abstract base class for tabular data node implementations (CSVDataNode, ParquetDataNode, ExcelDataNode, SQLTableDataNode and SQLDataNode) that are tabular representable.\"\"\" @staticmethod def _check_exposed_type(exposed_type, valid_string_exposed_types): if isinstance(exposed_type, str) and exposed_type not in valid_string_exposed_types: raise InvalidExposedType( f\"Invalid string exposed 
type {exposed_type}. Supported values are \" f\"{', '.join(valid_string_exposed_types)}\" ) "} {"text": "from enum import Enum class Operator(Enum): \"\"\"Enumeration of operators for Data Node filtering. The possible values are: - `EQUAL` - `NOT_EQUAL` - `LESS_THAN` - `LESS_OR_EQUAL` - `GREATER_THAN` - `GREATER_OR_EQUAL` \"\"\" EQUAL = 1 NOT_EQUAL = 2 LESS_THAN = 3 LESS_OR_EQUAL = 4 GREATER_THAN = 5 GREATER_OR_EQUAL = 6 class JoinOperator(Enum): \"\"\" Enumeration of join operators for Data Node filtering. The possible values are `AND` and `OR`. \"\"\" AND = 1 OR = 2 "} {"text": "from .csv import CSVDataNode from .data_node import DataNode from .excel import ExcelDataNode from .generic import GenericDataNode from .in_memory import InMemoryDataNode from .json import JSONDataNode from .mongo import MongoCollectionDataNode from .operator import JoinOperator, Operator from .parquet import ParquetDataNode from .pickle import PickleDataNode from .sql import SQLDataNode from .sql_table import SQLTableDataNode "} {"text": "import pathlib class _AbstractFileDataNode(object): \"\"\"Abstract base class for data node implementations (CSVDataNode, ParquetDataNode, ExcelDataNode, PickleDataNode and JSONDataNode) that are file based.\"\"\" __EXTENSION_MAP = {\"csv\": \"csv\", \"excel\": \"xlsx\", \"parquet\": \"parquet\", \"pickle\": \"p\", \"json\": \"json\"} def _build_path(self, storage_type): from taipy.config.config import Config folder = f\"{storage_type}s\" dir_path = pathlib.Path(Config.core.storage_folder) / folder if not dir_path.exists(): dir_path.mkdir(parents=True, exist_ok=True) return dir_path / f\"{self.id}.{self.__EXTENSION_MAP.get(storage_type)}\" "} {"text": "import os from collections import defaultdict from datetime import datetime, timedelta from os.path import isfile from typing import Any, Dict, List, Optional, Set, Tuple, Union import modin.pandas as modin_pd import numpy as np import pandas as pd from openpyxl import load_workbook from taipy.config.common.scope import Scope from .._backup._backup import _replace_in_backup_file from .._entity._reload import _self_reload from .._version._version_manager_factory import _VersionManagerFactory from ..exceptions.exceptions import ExposedTypeLengthMismatch, NonExistingExcelSheet, SheetNameLengthMismatch from ..job.job_id import JobId from ._abstract_file import _AbstractFileDataNode from ._abstract_tabular import _AbstractTabularDataNode from .data_node import DataNode from .data_node_id import DataNodeId, Edit class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode): \"\"\"Data Node stored as an Excel file. The Excel file format is _xlsx_. Attributes: config_id (str): Identifier of this data node configuration. It must be a valid Python identifier. scope (Scope^): The scope of this data node. id (str): The unique identifier of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or `None`. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. 
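The Operator and JoinOperator enumerations above are consumed as (key, value, operator) conditions by the filtering helpers shown earlier; a minimal standalone sketch of the same idea applied to a pandas DataFrame (data and column names are invented, and only two operators are handled for brevity):

import pandas as pd

df = pd.DataFrame({'city': ['Paris', 'Lyon', 'Paris'], 'sales': [120, 80, 45]})

# Conditions combined with a logical AND, mirroring JoinOperator.AND.
conditions = [('city', 'Paris', 'EQUAL'), ('sales', 100, 'GREATER_THAN')]

mask = pd.Series(True, index=df.index)
for key, value, operator in conditions:
    if operator == 'EQUAL':
        mask &= df[key] == value
    elif operator == 'GREATER_THAN':
        mask &= df[key] > value

print(df[mask])  # keeps only the Paris row with sales above 100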
Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. path (str): The path to the Excel file. properties (dict[str, Any]): A dictionary of additional properties. The _properties_ must have a _\"default_path\"_ or _\"path\"_ entry with the path of the Excel file: - _\"default_path\"_ `(str)`: The path of the Excel file.\\n - _\"has_header\"_ `(bool)`: If True, indicates that the Excel file has a header.\\n - _\"sheet_name\"_ `(Union[List[str], str])`: The list of sheet names to be used. This can be a unique name.\\n - _\"exposed_type\"_: The exposed type of the data read from Excel file. The default value is `pandas`.\\n \"\"\" __STORAGE_TYPE = \"excel\" __EXPOSED_TYPE_PROPERTY = \"exposed_type\" __EXPOSED_TYPE_NUMPY = \"numpy\" __EXPOSED_TYPE_PANDAS = \"pandas\" __EXPOSED_TYPE_MODIN = \"modin\" __VALID_STRING_EXPOSED_TYPES = [__EXPOSED_TYPE_PANDAS, __EXPOSED_TYPE_MODIN, __EXPOSED_TYPE_NUMPY] __PATH_KEY = \"path\" __DEFAULT_DATA_KEY = \"default_data\" __DEFAULT_PATH_KEY = \"default_path\" __HAS_HEADER_PROPERTY = \"has_header\" __SHEET_NAME_PROPERTY = \"sheet_name\" _REQUIRED_PROPERTIES: List[str] = [] def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: List[Edit] = None, version: str = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties: Dict = None, ): if properties is None: properties = {} default_value = properties.pop(self.__DEFAULT_DATA_KEY, None) self._path = properties.get(self.__PATH_KEY, properties.get(self.__DEFAULT_PATH_KEY)) properties[self.__PATH_KEY] = self._path if self.__SHEET_NAME_PROPERTY not in properties.keys(): properties[self.__SHEET_NAME_PROPERTY] = None if self.__HAS_HEADER_PROPERTY not in properties.keys(): properties[self.__HAS_HEADER_PROPERTY] = True if self.__EXPOSED_TYPE_PROPERTY not in properties.keys(): properties[self.__EXPOSED_TYPE_PROPERTY] = self.__EXPOSED_TYPE_PANDAS self._check_exposed_type(properties[self.__EXPOSED_TYPE_PROPERTY], self.__VALID_STRING_EXPOSED_TYPES) super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, **properties, ) if not self._path: self._path = self._build_path(self.storage_type()) properties[self.__PATH_KEY] = self._path if default_value is not None and not os.path.exists(self._path): self._write(default_value) self._last_edit_date = datetime.now() self._edits.append( Edit( { \"timestamp\": self._last_edit_date, \"writer_identifier\": \"TAIPY\", \"comments\": \"Default data written.\", } ) ) if not self._last_edit_date and isfile(self._path): self._last_edit_date = datetime.now() self._TAIPY_PROPERTIES.update( { self.__EXPOSED_TYPE_PROPERTY, self.__PATH_KEY, 
self.__DEFAULT_PATH_KEY, self.__DEFAULT_DATA_KEY, self.__HAS_HEADER_PROPERTY, self.__SHEET_NAME_PROPERTY, } ) @property # type: ignore @_self_reload(DataNode._MANAGER_NAME) def path(self): return self._path @path.setter def path(self, value): tmp_old_path = self._path self._path = value self.properties[self.__PATH_KEY] = value _replace_in_backup_file(old_file_path=tmp_old_path, new_file_path=self._path) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE @staticmethod def _check_exposed_type(exposed_type, valid_string_exposed_types): if isinstance(exposed_type, str): _AbstractTabularDataNode._check_exposed_type(exposed_type, valid_string_exposed_types) elif isinstance(exposed_type, list): for t in exposed_type: _AbstractTabularDataNode._check_exposed_type(t, valid_string_exposed_types) elif isinstance(exposed_type, dict): for t in exposed_type.values(): _AbstractTabularDataNode._check_exposed_type(t, valid_string_exposed_types) def _read(self): if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_PANDAS: return self._read_as_pandas_dataframe() if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_MODIN: return self._read_as_modin_dataframe() if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_NUMPY: return self._read_as_numpy() return self._read_as() def __sheet_name_to_list(self, properties): if properties[self.__SHEET_NAME_PROPERTY]: sheet_names = properties[self.__SHEET_NAME_PROPERTY] else: excel_file = load_workbook(properties[self.__PATH_KEY]) sheet_names = excel_file.sheetnames excel_file.close() return sheet_names if isinstance(sheet_names, (List, Set, Tuple)) else [sheet_names] def _read_as(self): excel_file = load_workbook(self._path) exposed_type = self.properties[self.__EXPOSED_TYPE_PROPERTY] work_books = defaultdict() sheet_names = excel_file.sheetnames provided_sheet_names = self.__sheet_name_to_list(self.properties) for sheet_name in provided_sheet_names: if sheet_name not in sheet_names: raise NonExistingExcelSheet(sheet_name, self._path) if isinstance(exposed_type, List): if len(provided_sheet_names) != len(self.properties[self.__EXPOSED_TYPE_PROPERTY]): raise ExposedTypeLengthMismatch( f\"Expected {len(provided_sheet_names)} exposed types, got \" f\"{len(self.properties[self.__EXPOSED_TYPE_PROPERTY])}\" ) for i, sheet_name in enumerate(provided_sheet_names): work_sheet = excel_file[sheet_name] sheet_exposed_type = exposed_type if not isinstance(sheet_exposed_type, str): if isinstance(exposed_type, dict): sheet_exposed_type = exposed_type.get(sheet_name, self.__EXPOSED_TYPE_PANDAS) elif isinstance(exposed_type, List): sheet_exposed_type = exposed_type[i] if isinstance(sheet_exposed_type, str): if sheet_exposed_type == self.__EXPOSED_TYPE_NUMPY: work_books[sheet_name] = self._read_as_pandas_dataframe(sheet_name).to_numpy() elif sheet_exposed_type == self.__EXPOSED_TYPE_PANDAS: work_books[sheet_name] = self._read_as_pandas_dataframe(sheet_name) continue res = list() for row in work_sheet.rows: res.append([col.value for col in row]) if self.properties[self.__HAS_HEADER_PROPERTY] and res: header = res.pop(0) for i, row in enumerate(res): res[i] = sheet_exposed_type(**dict([[h, r] for h, r in zip(header, row)])) else: for i, row in enumerate(res): res[i] = sheet_exposed_type(*row) work_books[sheet_name] = res excel_file.close() if len(provided_sheet_names) == 1: return work_books[provided_sheet_names[0]] return work_books def _read_as_numpy(self): sheets = self._read_as_pandas_dataframe() if 
isinstance(sheets, dict): return {sheet_name: df.to_numpy() for sheet_name, df in sheets.items()} return sheets.to_numpy() def _do_read_excel(self, engine, sheet_names, kwargs) -> pd.DataFrame: df = pd.read_excel( self._path, sheet_name=sheet_names, **kwargs, ) # We are using pandas to load modin dataframes because of a modin issue # https://github.com/modin-project/modin/issues/4924 if engine == \"modin\": if isinstance(df, dict): # Check if it s a multiple sheet Excel file for key, value in df.items(): df[key] = modin_pd.DataFrame(value) return df return modin_pd.DataFrame(df) return df def __get_sheet_names_and_header(self, sheet_names): kwargs: Dict[str, Any] = {} if sheet_names is None: sheet_names = self.properties[self.__SHEET_NAME_PROPERTY] if not self.properties[self.__HAS_HEADER_PROPERTY]: kwargs[\"header\"] = None return sheet_names, kwargs def _read_as_pandas_dataframe(self, sheet_names=None) -> Union[Dict[Union[int, str], pd.DataFrame], pd.DataFrame]: sheet_names, kwargs = self.__get_sheet_names_and_header(sheet_names) try: return self._do_read_excel(\"pandas\", sheet_names, kwargs) except pd.errors.EmptyDataError: return pd.DataFrame() def _read_as_modin_dataframe( self, sheet_names=None ) -> Union[Dict[Union[int, str], modin_pd.DataFrame], modin_pd.DataFrame]: sheet_names, kwargs = self.__get_sheet_names_and_header(sheet_names) try: if kwargs.get(\"header\", None): return modin_pd.read_excel( self._path, sheet_name=sheet_names, **kwargs, ) else: return self._do_read_excel(\"modin\", sheet_names, kwargs) except pd.errors.EmptyDataError: return modin_pd.DataFrame() def __append_excel_with_single_sheet(self, append_excel_fct, *args, **kwargs): sheet_name = self.properties.get(self.__SHEET_NAME_PROPERTY) with pd.ExcelWriter(self._path, mode=\"a\", engine=\"openpyxl\", if_sheet_exists=\"overlay\") as writer: if sheet_name: if not isinstance(sheet_name, str): sheet_name = sheet_name[0] append_excel_fct( writer, *args, **kwargs, sheet_name=sheet_name, startrow=writer.sheets[sheet_name].max_row ) else: sheet_name = list(writer.sheets.keys())[0] append_excel_fct(writer, *args, **kwargs, startrow=writer.sheets[sheet_name].max_row) def __append_excel_with_multiple_sheets(self, data: Any, columns: List[str] = None): with pd.ExcelWriter(self._path, mode=\"a\", engine=\"openpyxl\", if_sheet_exists=\"overlay\") as writer: # Each key stands for a sheet name for sheet_name in data.keys(): if isinstance(data[sheet_name], np.ndarray): df = pd.DataFrame(data[sheet_name]) else: df = data[sheet_name] if columns: data[sheet_name].columns = columns df.to_excel( writer, sheet_name=sheet_name, index=False, header=False, startrow=writer.sheets[sheet_name].max_row ) def _append(self, data: Any): if isinstance(data, Dict) and all( [isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) for x in data.values()] ): self.__append_excel_with_multiple_sheets(data) elif isinstance(data, (pd.DataFrame, modin_pd.DataFrame)): self.__append_excel_with_single_sheet(data.to_excel, index=False, header=False) else: self.__append_excel_with_single_sheet(pd.DataFrame(data).to_excel, index=False, header=False) def __write_excel_with_single_sheet(self, write_excel_fct, *args, **kwargs): sheet_name = self.properties.get(self.__SHEET_NAME_PROPERTY) if sheet_name: if not isinstance(sheet_name, str): if len(sheet_name) > 1: raise SheetNameLengthMismatch else: sheet_name = sheet_name[0] write_excel_fct(*args, **kwargs, sheet_name=sheet_name) else: write_excel_fct(*args, **kwargs) def 
__write_excel_with_multiple_sheets(self, data: Any, columns: List[str] = None): with pd.ExcelWriter(self._path) as writer: # Each key stands for a sheet name for key in data.keys(): if isinstance(data[key], np.ndarray): df = pd.DataFrame(data[key]) else: df = data[key] if columns: data[key].columns = columns df.to_excel(writer, key, index=False) def _write(self, data: Any): if isinstance(data, Dict) and all( [isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) for x in data.values()] ): self.__write_excel_with_multiple_sheets(data) elif isinstance(data, (pd.DataFrame, modin_pd.DataFrame)): self.__write_excel_with_single_sheet(data.to_excel, self._path, index=False) else: self.__write_excel_with_single_sheet(pd.DataFrame(data).to_excel, self._path, index=False) def write_with_column_names(self, data: Any, columns: List[str] = None, job_id: Optional[JobId] = None): \"\"\"Write a set of columns. Parameters: data (Any): The data to write. columns (List[str]): The list of column names to write. job_id (JobId^): An optional identifier of the writer. \"\"\" if isinstance(data, Dict) and all( [isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) for x in data.values()] ): self.__write_excel_with_multiple_sheets(data, columns=columns) else: df = pd.DataFrame(data) if columns: df.columns = columns self.__write_excel_with_single_sheet(df.to_excel, self.path, index=False) self.track_edit(timestamp=datetime.now(), job_id=job_id) "} {"text": "import os from typing import Dict, Iterable, List, Optional, Set, Union from taipy.config._config import _Config from taipy.config.common.scope import Scope from taipy.config.config import Config from .._backup._backup import _append_to_backup_file, _remove_from_backup_file from .._manager._manager import _Manager from .._version._version_mixin import _VersionMixin from ..config.data_node_config import DataNodeConfig from ..cycle.cycle_id import CycleId from ..exceptions.exceptions import InvalidDataNodeType from ..notification import Event, EventEntityType, EventOperation, Notifier, _make_event from ..scenario.scenario_id import ScenarioId from ..sequence.sequence_id import SequenceId from ._abstract_file import _AbstractFileDataNode from ._data_fs_repository import _DataFSRepository from .data_node import DataNode from .data_node_id import DataNodeId from .pickle import PickleDataNode class _DataManager(_Manager[DataNode], _VersionMixin): __DATA_NODE_CLASS_MAP = DataNode._class_map() # type: ignore _ENTITY_NAME = DataNode.__name__ _EVENT_ENTITY_TYPE = EventEntityType.DATA_NODE _repository: _DataFSRepository __NAME_KEY = \"name\" @classmethod def _bulk_get_or_create( cls, data_node_configs: List[DataNodeConfig], cycle_id: Optional[CycleId] = None, scenario_id: Optional[ScenarioId] = None, ) -> Dict[DataNodeConfig, DataNode]: data_node_configs = [Config.data_nodes[dnc.id] for dnc in data_node_configs] dn_configs_and_owner_id = [] for dn_config in data_node_configs: scope = dn_config.scope owner_id: Union[Optional[SequenceId], Optional[ScenarioId], Optional[CycleId]] if scope == Scope.SCENARIO: owner_id = scenario_id elif scope == Scope.CYCLE: owner_id = cycle_id else: owner_id = None dn_configs_and_owner_id.append((dn_config, owner_id)) data_nodes = cls._repository._get_by_configs_and_owner_ids( dn_configs_and_owner_id, cls._build_filters_with_version(None) ) return { dn_config: data_nodes.get((dn_config, owner_id)) or cls._create_and_set(dn_config, owner_id, None) for dn_config, owner_id in dn_configs_and_owner_id } @classmethod def 
_create_and_set( cls, data_node_config: DataNodeConfig, owner_id: Optional[str], parent_ids: Optional[Set[str]] ) -> DataNode: data_node = cls.__create(data_node_config, owner_id, parent_ids) cls._set(data_node) if isinstance(data_node, _AbstractFileDataNode): _append_to_backup_file(new_file_path=data_node._path) Notifier.publish(_make_event(data_node, EventOperation.CREATION)) return data_node @classmethod def __create( cls, data_node_config: DataNodeConfig, owner_id: Optional[str], parent_ids: Optional[Set[str]] ) -> DataNode: try: version = cls._get_latest_version() props = data_node_config._properties.copy() if data_node_config.storage_type: storage_type = data_node_config.storage_type else: storage_type = Config.sections[DataNodeConfig.name][_Config.DEFAULT_KEY].storage_type return cls.__DATA_NODE_CLASS_MAP[storage_type]( config_id=data_node_config.id, scope=data_node_config.scope or DataNodeConfig._DEFAULT_SCOPE, validity_period=data_node_config.validity_period, owner_id=owner_id, parent_ids=parent_ids, version=version, properties=props, ) except KeyError: raise InvalidDataNodeType(data_node_config.storage_type) @classmethod def _get_all(cls, version_number: Optional[str] = None) -> List[DataNode]: \"\"\" Returns all entities. \"\"\" filters = cls._build_filters_with_version(version_number) return cls._repository._load_all(filters) @classmethod def _clean_pickle_file(cls, data_node: DataNode): if not isinstance(data_node, PickleDataNode): return if data_node.is_generated and os.path.exists(data_node.path): os.remove(data_node.path) @classmethod def _clean_pickle_files(cls, data_nodes: Iterable[DataNode]): for data_node in data_nodes: cls._clean_pickle_file(data_node) @classmethod def _remove_dn_file_path_in_backup_file(cls, data_node: DataNode): if isinstance(data_node, _AbstractFileDataNode): _remove_from_backup_file(to_remove_file_path=data_node.path) @classmethod def _remove_dn_file_paths_in_backup_file(cls, data_nodes: Iterable[DataNode]): for data_node in data_nodes: cls._remove_dn_file_path_in_backup_file(data_node) @classmethod def _delete(cls, data_node_id: DataNodeId): data_node = cls._get(data_node_id, None) if data_node: cls._clean_pickle_file(data_node) cls._remove_dn_file_path_in_backup_file(data_node) super()._delete(data_node_id) @classmethod def _delete_many(cls, data_node_ids: Iterable[DataNodeId]): data_nodes = [] for data_node_id in data_node_ids: if data_node := cls._get(data_node_id): data_nodes.append(data_node) cls._clean_pickle_files(data_nodes) cls._remove_dn_file_paths_in_backup_file(data_nodes) super()._delete_many(data_node_ids) @classmethod def _delete_all(cls): data_nodes = cls._get_all() cls._clean_pickle_files(data_nodes) cls._remove_dn_file_paths_in_backup_file(data_nodes) super()._delete_all() @classmethod def _delete_by_version(cls, version_number: str): data_nodes = cls._get_all(version_number) cls._clean_pickle_files(data_nodes) cls._remove_dn_file_paths_in_backup_file(data_nodes) cls._repository._delete_by(attribute=\"version\", value=version_number) Notifier.publish( Event(EventEntityType.DATA_NODE, EventOperation.DELETION, metadata={\"delete_by_version\": version_number}) ) @classmethod def _get_by_config_id(cls, config_id: str, version_number: Optional[str] = None) -> List[DataNode]: \"\"\" Get all datanodes by its config id. 
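The owner resolution performed in _bulk_get_or_create above reduces to a small rule mapping the configured scope to the entity the data node belongs to; the sketch below isolates that rule with a stand-in Scope enum and invented identifiers.

from enum import Enum

class Scope(Enum):  # stand-in for taipy.config.common.scope.Scope
    GLOBAL = 1
    CYCLE = 2
    SCENARIO = 3

def resolve_owner_id(scope, cycle_id=None, scenario_id=None):
    # SCENARIO-scoped nodes belong to their scenario, CYCLE-scoped nodes to
    # their cycle, and any other scope has no owner.
    if scope == Scope.SCENARIO:
        return scenario_id
    if scope == Scope.CYCLE:
        return cycle_id
    return None

print(resolve_owner_id(Scope.SCENARIO, cycle_id='CYCLE_2023', scenario_id='SCENARIO_1'))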
\"\"\" filters = cls._build_filters_with_version(version_number) if not filters: filters = [{}] for fil in filters: fil.update({\"config_id\": config_id}) return cls._repository._load_all(filters) "} {"text": "from datetime import datetime, timedelta from typing import Any, Dict, List, Optional, Set, Tuple, Union import modin.pandas as modin_pd import numpy as np import pandas as pd from sqlalchemy import MetaData, Table from taipy.config.common.scope import Scope from .._version._version_manager_factory import _VersionManagerFactory from ..exceptions.exceptions import MissingRequiredProperty from ._abstract_sql import _AbstractSQLDataNode from .data_node_id import DataNodeId, Edit class SQLTableDataNode(_AbstractSQLDataNode): \"\"\"Data Node stored in a SQL table. Attributes: config_id (str): Identifier of the data node configuration. It must be a valid Python identifier. scope (Scope^): The scope of this data node. id (str): The unique identifier of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or None. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. properties (dict[str, Any]): A dictionary of additional properties. Note that the _properties_ parameter must at least contain an entry for _\"db_name\"_, _\"db_engine\"_, _\"table_name\"_: - _\"db_name\"_ `(str)`: The database name, or the name of the SQLite database file. - _\"db_engine\"_ `(str)`: The database engine. For now, the accepted values are _\"sqlite\"_, _\"mssql\"_, _\"mysql\"_, or _\"postgresql\"_. - _\"table_name\"_ `(str)`: The name of the SQL table. - _\"db_username\"_ `(str)`: The database username. - _\"db_password\"_ `(str)`: The database password. - _\"db_host\"_ `(str)`: The database host. The default value is _\"localhost\"_. - _\"db_port\"_ `(int)`: The database port. The default value is 1433. - _\"db_driver\"_ `(str)`: The database driver. - _\"sqlite_folder_path\"_ (str): The path to the folder that contains SQLite file. The default value is the current working folder. - _\"sqlite_file_extension\"_ (str): The filename extension of the SQLite file. The default value is \".db\". - _\"db_extra_args\"_ `(Dict[str, Any])`: A dictionary of additional arguments to be passed into database connection string. - _\"exposed_type\"_: The exposed type of the data read from SQL query. The default value is `pandas`. 
\"\"\" __STORAGE_TYPE = \"sql_table\" __TABLE_KEY = \"table_name\" def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: Optional[List[Edit]] = None, version: Optional[str] = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties: Optional[Dict] = None, ): if properties is None: properties = {} if properties.get(self.__TABLE_KEY) is None: raise MissingRequiredProperty(f\"Property {self.__TABLE_KEY} is not informed and is required.\") super().__init__( config_id, scope, id=id, owner_id=owner_id, parent_ids=parent_ids, last_edit_date=last_edit_date, edits=edits, version=version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period=validity_period, edit_in_progress=edit_in_progress, editor_id=editor_id, editor_expiration_date=editor_expiration_date, properties=properties, ) self._TAIPY_PROPERTIES.update({self.__TABLE_KEY}) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE def _get_base_read_query(self) -> str: return f\"SELECT * FROM {self.properties[self.__TABLE_KEY]}\" def _do_append(self, data, engine, connection) -> None: self.__insert_data(data, engine, connection) def _do_write(self, data, engine, connection) -> None: self.__insert_data(data, engine, connection, delete_table=True) def __insert_data(self, data, engine, connection, delete_table: bool = False) -> None: \"\"\" Insert data into a SQL table. Parameters: data (List[Dict]): a list of dictionaries, where each dictionary represents a row of the table. table: a SQLAlchemy object that represents a table. connection: a SQLAlchemy connection to write the data. delete_table (bool): indicates if the table should be deleted before inserting the data. \"\"\" table = self._create_table(engine) if isinstance(data, (modin_pd.DataFrame, pd.DataFrame)): self.__insert_dataframe(data, table, connection, delete_table) return if isinstance(data, np.ndarray): data = data.tolist() if not isinstance(data, list): data = [data] if len(data) == 0: self.__delete_all_rows(table, connection, delete_table) return if isinstance(data[0], (tuple, list)): self.__insert_tuples(data, table, connection, delete_table) elif isinstance(data[0], dict): self.__insert_dicts(data, table, connection, delete_table) # If data is a primitive type, it will be inserted as a tuple of one element. else: self.__insert_tuples([(x,) for x in data], table, connection, delete_table) def _create_table(self, engine) -> Table: return Table( self.properties[self.__TABLE_KEY], MetaData(), autoload_with=engine, ) @classmethod def __insert_dicts(cls, data: List[Dict], table: Any, connection: Any, delete_table: bool) -> None: \"\"\" This method will insert the data contained in a list of dictionaries into a table. The query itself is handled by SQLAlchemy, so it's only needed to pass the correct data type. 
\"\"\" cls.__delete_all_rows(table, connection, delete_table) connection.execute(table.insert(), data) @classmethod def __insert_dataframe( cls, df: Union[modin_pd.DataFrame, pd.DataFrame], table: Any, connection: Any, delete_table: bool ) -> None: cls.__insert_dicts(df.to_dict(orient=\"records\"), table, connection, delete_table) @classmethod def __insert_tuples(cls, data: List[Union[Tuple, List]], table: Any, connection: Any, delete_table: bool) -> None: \"\"\" This method will look up the length of the first object of the list and build the insert through creation of a string of '?' equivalent to the length of the element. The '?' character is used as placeholder for a tuple of same size. \"\"\" cls.__delete_all_rows(table, connection, delete_table) markers = \",\".join(\"?\" * len(data[0])) ins = \"INSERT INTO {tablename} VALUES ({markers})\" ins = ins.format(tablename=table.name, markers=markers) connection.execute(ins, data) @classmethod def __delete_all_rows(cls, table: Any, connection: Any, delete_table: bool) -> None: if delete_table: connection.execute(table.delete()) "} {"text": "import csv import os from datetime import datetime, timedelta from os.path import isfile from typing import Any, Dict, List, Optional, Set import modin.pandas as modin_pd import numpy as np import pandas as pd from taipy.config.common.scope import Scope from .._backup._backup import _replace_in_backup_file from .._entity._reload import _self_reload from .._version._version_manager_factory import _VersionManagerFactory from ..job.job_id import JobId from ._abstract_file import _AbstractFileDataNode from ._abstract_tabular import _AbstractTabularDataNode from .data_node import DataNode from .data_node_id import DataNodeId, Edit class CSVDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode): \"\"\"Data Node stored as a CSV file. Attributes: config_id (str): Identifier of the data node configuration. This string must be a valid Python identifier. scope (Scope^): The scope of this data node. id (str): The unique identifier of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or `None`. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. path (str): The path to the CSV file. properties (dict[str, Any]): A dictionary of additional properties. 
The _properties_ must have a _\"default_path\"_ or _\"path\"_ entry with the path of the CSV file: - _\"default_path\"_ `(str)`: The default path of the CSV file.\\n - _\"encoding\"_ `(str)`: The encoding of the CSV file. The default value is `utf-8`.\\n - _\"default_data\"_: The default data of the data nodes instantiated from this csv data node.\\n - _\"has_header\"_ `(bool)`: If True, indicates that the CSV file has a header.\\n - _\"exposed_type\"_: The exposed type of the data read from CSV file. The default value is `pandas`.\\n \"\"\" __STORAGE_TYPE = \"csv\" __EXPOSED_TYPE_PROPERTY = \"exposed_type\" __EXPOSED_TYPE_NUMPY = \"numpy\" __EXPOSED_TYPE_PANDAS = \"pandas\" __EXPOSED_TYPE_MODIN = \"modin\" __VALID_STRING_EXPOSED_TYPES = [__EXPOSED_TYPE_PANDAS, __EXPOSED_TYPE_MODIN, __EXPOSED_TYPE_NUMPY] __PATH_KEY = \"path\" __DEFAULT_PATH_KEY = \"default_path\" __ENCODING_KEY = \"encoding\" __DEFAULT_DATA_KEY = \"default_data\" __HAS_HEADER_PROPERTY = \"has_header\" _REQUIRED_PROPERTIES: List[str] = [] def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: Optional[List[Edit]] = None, version: Optional[str] = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties: Optional[Dict] = None, ): if properties is None: properties = {} default_value = properties.pop(self.__DEFAULT_DATA_KEY, None) if self.__ENCODING_KEY not in properties.keys(): properties[self.__ENCODING_KEY] = \"utf-8\" if self.__HAS_HEADER_PROPERTY not in properties.keys(): properties[self.__HAS_HEADER_PROPERTY] = True if self.__EXPOSED_TYPE_PROPERTY not in properties.keys(): properties[self.__EXPOSED_TYPE_PROPERTY] = self.__EXPOSED_TYPE_PANDAS self._check_exposed_type(properties[self.__EXPOSED_TYPE_PROPERTY], self.__VALID_STRING_EXPOSED_TYPES) super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, **properties, ) self._path = properties.get(self.__PATH_KEY, properties.get(self.__DEFAULT_PATH_KEY)) if not self._path: self._path = self._build_path(self.storage_type()) properties[self.__PATH_KEY] = self._path if not self._last_edit_date and isfile(self._path): self._last_edit_date = datetime.now() if default_value is not None and not os.path.exists(self._path): self._write(default_value) self._last_edit_date = datetime.now() self._edits.append( Edit( { \"timestamp\": self._last_edit_date, \"writer_identifier\": \"TAIPY\", \"comments\": \"Default data written.\", } ) ) self._TAIPY_PROPERTIES.update( { self.__EXPOSED_TYPE_PROPERTY, self.__PATH_KEY, self.__DEFAULT_PATH_KEY, self.__ENCODING_KEY, self.__DEFAULT_DATA_KEY, self.__HAS_HEADER_PROPERTY, } ) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE @property # type: ignore @_self_reload(DataNode._MANAGER_NAME) def path(self): return self._path @path.setter def path(self, value): tmp_old_path = self._path self._path = value self.properties[self.__PATH_KEY] = value _replace_in_backup_file(old_file_path=tmp_old_path, new_file_path=self._path) def _read(self): if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_PANDAS: return self._read_as_pandas_dataframe() if self.properties[self.__EXPOSED_TYPE_PROPERTY] == 
self.__EXPOSED_TYPE_MODIN: return self._read_as_modin_dataframe() if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_NUMPY: return self._read_as_numpy() return self._read_as() def _read_as(self): custom_class = self.properties[self.__EXPOSED_TYPE_PROPERTY] with open(self._path, encoding=self.properties[self.__ENCODING_KEY]) as csvFile: res = list() if self.properties[self.__HAS_HEADER_PROPERTY]: reader = csv.DictReader(csvFile) for line in reader: res.append(custom_class(**line)) else: reader = csv.reader( csvFile, ) for line in reader: res.append(custom_class(*line)) return res def _read_as_numpy(self) -> np.ndarray: return self._read_as_pandas_dataframe().to_numpy() def _read_as_pandas_dataframe( self, usecols: Optional[List[int]] = None, column_names: Optional[List[str]] = None ) -> pd.DataFrame: try: if self.properties[self.__HAS_HEADER_PROPERTY]: if column_names: return pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY])[column_names] return pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY]) else: if usecols: return pd.read_csv( self._path, encoding=self.properties[self.__ENCODING_KEY], header=None, usecols=usecols ) return pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY], header=None) except pd.errors.EmptyDataError: return pd.DataFrame() def _read_as_modin_dataframe( self, usecols: Optional[List[int]] = None, column_names: Optional[List[str]] = None ) -> modin_pd.DataFrame: try: if self.properties[self.__HAS_HEADER_PROPERTY]: if column_names: return modin_pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY])[column_names] return modin_pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY]) else: if usecols: return modin_pd.read_csv( self._path, header=None, usecols=usecols, encoding=self.properties[self.__ENCODING_KEY] ) return modin_pd.read_csv(self._path, header=None, encoding=self.properties[self.__ENCODING_KEY]) except pd.errors.EmptyDataError: return modin_pd.DataFrame() def _append(self, data: Any): if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)): data.to_csv(self._path, mode=\"a\", index=False, encoding=self.properties[self.__ENCODING_KEY], header=False) else: pd.DataFrame(data).to_csv( self._path, mode=\"a\", index=False, encoding=self.properties[self.__ENCODING_KEY], header=False ) def _write(self, data: Any): if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)): data.to_csv(self._path, index=False, encoding=self.properties[self.__ENCODING_KEY]) else: pd.DataFrame(data).to_csv(self._path, index=False, encoding=self.properties[self.__ENCODING_KEY]) def write_with_column_names(self, data: Any, columns: Optional[List[str]] = None, job_id: Optional[JobId] = None): \"\"\"Write a selection of columns. Parameters: data (Any): The data to write. columns (Optional[List[str]]): The list of column names to write. job_id (JobId^): An optional identifier of the writer. 
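{"text": "# Hedged sketch (assumed Config.configure_csv_data_node API and file name): configuring the CSV data node described above and writing only a subset of columns.\nfrom taipy import Config\n\nsales_cfg = Config.configure_csv_data_node(\n    id=\"sales_history\",\n    default_path=\"sales.csv\",\n    has_header=True,\n    exposed_type=\"pandas\",\n)\n# Once instantiated (for example through a scenario), selected columns could be persisted with:\n# sales_dn.write_with_column_names(data, columns=[\"date\", \"amount\"])"}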
\"\"\" if not columns: df = pd.DataFrame(data) else: df = pd.DataFrame(data, columns=columns) df.to_csv(self._path, index=False, encoding=self.properties[self.__ENCODING_KEY]) self.track_edit(timestamp=datetime.now(), job_id=job_id) "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._data_fs_repository import _DataFSRepository from ._data_manager import _DataManager from ._data_sql_repository import _DataSQLRepository class _DataManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _DataFSRepository, \"sql\": _DataSQLRepository} @classmethod def _build_manager(cls) -> Type[_DataManager]: # type: ignore if cls._using_enterprise(): data_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".data._data_manager\", \"_DataManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".data._data_manager_factory\", \"_DataManagerFactory\" )._build_repository # type: ignore else: data_manager = _DataManager build_repository = cls._build_repository data_manager._repository = build_repository() # type: ignore return data_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "import os import pickle from datetime import datetime, timedelta from typing import Any, List, Optional, Set import modin.pandas as pd from taipy.config.common.scope import Scope from .._backup._backup import _replace_in_backup_file from .._entity._reload import _self_reload from .._version._version_manager_factory import _VersionManagerFactory from ._abstract_file import _AbstractFileDataNode from .data_node import DataNode from .data_node_id import DataNodeId, Edit class PickleDataNode(DataNode, _AbstractFileDataNode): \"\"\"Data Node stored as a pickle file. Attributes: config_id (str): Identifier of the data node configuration. It must be a valid Python identifer. scope (Scope^): The scope of this data node. id (str): The unique identifier of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or `None`. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. properties (dict[str, Any]): A dictionary of additional properties. When creating a pickle data node, if the _properties_ dictionary contains a _\"default_data\"_ entry, the data node is automatically written with the corresponding _\"default_data\"_ value. 
If the _properties_ dictionary contains a _\"default_path\"_ or _\"path\"_ entry, the data will be stored using the corresponding value as the name of the pickle file. \"\"\" __STORAGE_TYPE = \"pickle\" __PATH_KEY = \"path\" __DEFAULT_PATH_KEY = \"default_path\" __DEFAULT_DATA_KEY = \"default_data\" __IS_GENERATED_KEY = \"is_generated\" _REQUIRED_PROPERTIES: List[str] = [] def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: Optional[List[Edit]] = None, version: str = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties=None, ): if properties is None: properties = {} default_value = properties.pop(self.__DEFAULT_DATA_KEY, None) self._path = properties.get(self.__PATH_KEY, properties.get(self.__DEFAULT_PATH_KEY)) if self._path is not None: properties[self.__PATH_KEY] = self._path self._is_generated = properties.get(self.__IS_GENERATED_KEY, self._path is None) properties[self.__IS_GENERATED_KEY] = self._is_generated super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, **properties, ) if self._path is None: self._path = self._build_path(self.storage_type()) if not self._last_edit_date and os.path.exists(self._path): self._last_edit_date = datetime.now() if default_value is not None and not os.path.exists(self._path): self._write(default_value) self._last_edit_date = datetime.now() self._edits.append( Edit( { \"timestamp\": self._last_edit_date, \"writer_identifier\": \"TAIPY\", \"comments\": \"Default data written.\", } ) ) self._TAIPY_PROPERTIES.update( { self.__PATH_KEY, self.__DEFAULT_PATH_KEY, self.__DEFAULT_DATA_KEY, self.__IS_GENERATED_KEY, } ) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE @property # type: ignore @_self_reload(DataNode._MANAGER_NAME) def path(self) -> Any: return self._path @path.setter def path(self, value): tmp_old_path = self._path self._path = value self.properties[self.__PATH_KEY] = value self.properties[self.__IS_GENERATED_KEY] = False _replace_in_backup_file(old_file_path=tmp_old_path, new_file_path=self._path) @property # type: ignore @_self_reload(DataNode._MANAGER_NAME) def is_generated(self) -> bool: return self._is_generated def _read(self): os.environ[\"MODIN_PERSISTENT_PICKLE\"] = \"True\" with open(self._path, \"rb\") as pf: return pickle.load(pf) def _write(self, data): if isinstance(data, (pd.DataFrame, pd.Series)): os.environ[\"MODIN_PERSISTENT_PICKLE\"] = \"True\" with open(self._path, \"wb\") as pf: pickle.dump(data, pf) "} {"text": "from datetime import datetime, timedelta from typing import Dict, List, Optional, Set from sqlalchemy import text from taipy.config.common.scope import Scope from .._version._version_manager_factory import _VersionManagerFactory from ..exceptions.exceptions import MissingAppendQueryBuilder, MissingRequiredProperty from ._abstract_sql import _AbstractSQLDataNode from .data_node_id import DataNodeId, Edit class SQLDataNode(_AbstractSQLDataNode): \"\"\"Data Node stored in a SQL database. Attributes: config_id (str): Identifier of the data node configuration. It must be a valid Python identifier. scope (Scope^): The scope of this data node. 
id (str): The unique identifier of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or None. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. properties (dict[str, Any]): A dictionary of additional properties. Note that the _properties_ parameter must at least contain an entry for _\"db_name\"_, _\"db_engine\"_, _\"read_query\"_, and _\"write_query_builder\"_: - _\"db_name\"_ `(str)`: The database name, or the name of the SQLite database file. - _\"db_engine\"_ `(str)`: The database engine. Possible values are _\"sqlite\"_, _\"mssql\"_, _\"mysql\"_, or _\"postgresql\"_. - _\"read_query\"_ `(str)`: The SQL query string used to read the data from the database. - _\"write_query_builder\"_ `(Callable)`: A callback function that takes the data as an input parameter and returns a list of SQL queries to be executed when writing data to the data node. - _\"append_query_builder\"_ `(Callable)`: A callback function that takes the data as an input parameter and returns a list of SQL queries to be executed when appending data to the data node. - _\"db_username\"_ `(str)`: The database username. - _\"db_password\"_ `(str)`: The database password. - _\"db_host\"_ `(str)`: The database host. The default value is _\"localhost\"_. - _\"db_port\"_ `(int)`: The database port. The default value is 1433. - _\"db_driver\"_ `(str)`: The database driver. - _\"sqlite_folder_path\"_ (str): The path to the folder that contains SQLite file. The default value is the current working folder. - _\"sqlite_file_extension\"_ (str): The filename extension of the SQLite file. The default value is \".db\". - _\"db_extra_args\"_ `(Dict[str, Any])`: A dictionary of additional arguments to be passed into database connection string. - _\"exposed_type\"_: The exposed type of the data read from SQL query. The default value is `pandas`. 
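{"text": "# Hedged sketch (assumed Config.configure_sql_data_node API plus made-up table and column names): a SQL data node whose write_query_builder returns plain statements and (statement, parameters) pairs, matching the contract listed above.\nfrom taipy import Config\n\ndef write_query_builder(data):\n    # data is assumed to be a pandas DataFrame with 'ds' and 'yhat' columns.\n    return [\n        \"DELETE FROM forecasts\",\n        (\"INSERT INTO forecasts (ds, yhat) VALUES (:ds, :yhat)\", data.to_dict(orient=\"records\")),\n    ]\n\nforecast_cfg = Config.configure_sql_data_node(\n    id=\"forecasts\",\n    db_name=\"forecast_db\",\n    db_engine=\"sqlite\",\n    read_query=\"SELECT * FROM forecasts\",\n    write_query_builder=write_query_builder,\n)"}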
\"\"\" __STORAGE_TYPE = \"sql\" __READ_QUERY_KEY = \"read_query\" _WRITE_QUERY_BUILDER_KEY = \"write_query_builder\" _APPEND_QUERY_BUILDER_KEY = \"append_query_builder\" def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: Optional[List[Edit]] = None, version: Optional[str] = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties: Optional[Dict] = None, ): if properties is None: properties = {} if properties.get(self.__READ_QUERY_KEY) is None: raise MissingRequiredProperty(f\"Property {self.__READ_QUERY_KEY} is not informed and is required.\") if properties.get(self._WRITE_QUERY_BUILDER_KEY) is None: raise MissingRequiredProperty(f\"Property {self._WRITE_QUERY_BUILDER_KEY} is not informed and is required.\") super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, properties=properties, ) self._TAIPY_PROPERTIES.update( { self.__READ_QUERY_KEY, self._WRITE_QUERY_BUILDER_KEY, self._APPEND_QUERY_BUILDER_KEY, } ) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE def _get_base_read_query(self) -> str: return self.properties.get(self.__READ_QUERY_KEY) def _do_append(self, data, engine, connection) -> None: if not self.properties.get(self._APPEND_QUERY_BUILDER_KEY): raise MissingAppendQueryBuilder queries = self.properties.get(self._APPEND_QUERY_BUILDER_KEY)(data) self.__execute_queries(queries, connection) def _do_write(self, data, engine, connection) -> None: queries = self.properties.get(self._WRITE_QUERY_BUILDER_KEY)(data) self.__execute_queries(queries, connection) def __execute_queries(self, queries, connection) -> None: if not isinstance(queries, List): queries = [queries] for query in queries: if isinstance(query, str): connection.execute(text(query)) else: statement = query[0] parameters = query[1] connection.execute(text(statement), parameters) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
from .._repository._sql_repository import _SQLRepository from ._data_converter import _DataNodeConverter from ._data_model import _DataNodeModel class _DataSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_DataNodeModel, converter=_DataNodeConverter) "} {"text": "import os import uuid from abc import abstractmethod from datetime import datetime, timedelta from typing import Any, Dict, List, Optional, Set, Tuple, Union import networkx as nx from taipy.config.common._validate_id import _validate_id from taipy.config.common.scope import Scope from taipy.logger._taipy_logger import _TaipyLogger from .._entity._entity import _Entity from .._entity._labeled import _Labeled from .._entity._properties import _Properties from .._entity._reload import _Reloader, _self_reload, _self_setter from .._version._version_manager_factory import _VersionManagerFactory from ..common._warnings import _warn_deprecated from ..exceptions.exceptions import DataNodeIsBeingEdited, NoData from ..job.job_id import JobId from ..notification.event import Event, EventEntityType, EventOperation, _make_event from ._filter import _FilterDataNode from .data_node_id import DataNodeId, Edit from .operator import JoinOperator class DataNode(_Entity, _Labeled): \"\"\"Reference to a dataset. A Data Node is an abstract class that holds metadata related to the dataset it refers to. In particular, a data node holds the name, the scope, the owner identifier, the last edit date, and some additional properties of the data.
A Data Node also contains information and methods needed to access the dataset. This information depends on the type of storage, and it is held by subclasses (such as SQL Data Node, CSV Data Node, ...). !!! note It is recommended not to instantiate subclasses of `DataNode` directly. Attributes: config_id (str): Identifier of the data node configuration. It must be a valid Python identifier. scope (Scope^): The scope of this data node. id (str): The unique identifier of this data node. name (str): A user-readable name of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or None. parent_ids (Optional[Set[str]]): The set of identifiers of the parent tasks. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The list of Edits (an alias for dict) containing metadata about each data edition including but not limited to timestamp, comments, job_id: timestamp: The time instant of the writing comments: Representation of a free text to explain or comment on a data change job_id: Only populated when the data node is written by a task execution and corresponds to the job's id. Additional metadata related to the edition made to the data node can also be provided in Edits. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if the data node is locked for modification. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. kwargs: A dictionary of additional properties. 
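{"text": "# Hedged sketch (assumes a scenario configuration named scenario_cfg exposing a data node \"sales_history\" and a started Core service): typical interaction with any DataNode subclass.\nimport taipy as tp\n\ntp.Core().run()                      # assumed prerequisite for entity management\nscenario = tp.create_scenario(scenario_cfg)\ndn = scenario.sales_history          # data nodes are exposed on the scenario by config_id\ndn.write([10, 20, 30])               # records an Edit and refreshes last_edit_date\nprint(dn.read())                     # -> [10, 20, 30]\nprint(dn.last_edit_date, dn.job_ids)"}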
\"\"\" _ID_PREFIX = \"DATANODE\" __ID_SEPARATOR = \"_\" __logger = _TaipyLogger._get_logger() _REQUIRED_PROPERTIES: List[str] = [] _MANAGER_NAME = \"data\" __PATH_KEY = \"path\" __EDIT_TIMEOUT = 30 _TAIPY_PROPERTIES: Set[str] = set() def __init__( self, config_id, scope: Scope = Scope(Scope.SCENARIO), id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: Optional[List[Edit]] = None, version: Optional[str] = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, **kwargs, ): self.config_id = _validate_id(config_id) self.id = id or DataNodeId(self.__ID_SEPARATOR.join([self._ID_PREFIX, self.config_id, str(uuid.uuid4())])) self.owner_id = owner_id self._parent_ids = parent_ids or set() self._scope = scope self._last_edit_date = last_edit_date self._edit_in_progress = edit_in_progress self._version = version or _VersionManagerFactory._build_manager()._get_latest_version() self._validity_period = validity_period self._editor_id: Optional[str] = editor_id self._editor_expiration_date: Optional[datetime] = editor_expiration_date # Track edits self._edits = edits or list() self._properties = _Properties(self, **kwargs) def get_parents(self): \"\"\"Get all parents of this data node.\"\"\" from ... import core as tp return tp.get_parents(self) @property # type: ignore @_self_reload(_MANAGER_NAME) def parent_ids(self): \"\"\"List of parent ids of this data node.\"\"\" return self._parent_ids @property # type: ignore @_self_reload(_MANAGER_NAME) def edits(self): \"\"\"Get all `Edit^`s of this data node.\"\"\" return self._edits def get_last_edit(self) -> Optional[Edit]: \"\"\"Get last `Edit^` of this data node. Returns: None if there has been no `Edit^` on this data node. 
\"\"\" if self._edits: return self._edits[-1] return None @property # type: ignore @_self_reload(_MANAGER_NAME) def last_edit_date(self): last_modified_datetime = self.__get_last_modified_datetime() if last_modified_datetime and last_modified_datetime > self._last_edit_date: return last_modified_datetime else: return self._last_edit_date @last_edit_date.setter # type: ignore @_self_setter(_MANAGER_NAME) def last_edit_date(self, val): self._last_edit_date = val @property # type: ignore @_self_reload(_MANAGER_NAME) def scope(self): return self._scope @scope.setter # type: ignore @_self_setter(_MANAGER_NAME) def scope(self, val): self._scope = val @property # type: ignore @_self_reload(_MANAGER_NAME) def validity_period(self) -> Optional[timedelta]: return self._validity_period if self._validity_period else None @validity_period.setter # type: ignore @_self_setter(_MANAGER_NAME) def validity_period(self, val): self._validity_period = val @property # type: ignore @_self_reload(_MANAGER_NAME) def expiration_date(self) -> datetime: \"\"\"Datetime instant of the expiration date of this data node.\"\"\" last_edit_date = self.last_edit_date validity_period = self._validity_period if not last_edit_date: raise NoData(f\"Data node {self.id} from config {self.config_id} has not been written yet.\") return last_edit_date + validity_period if validity_period else last_edit_date @property # type: ignore def name(self) -> Optional[str]: return self.properties.get(\"name\") @name.setter # type: ignore def name(self, val): self.properties[\"name\"] = val @property def version(self): return self._version @property def cacheable(self): \"\"\"Deprecated. Use `skippable` attribute of a `Task^` instead.\"\"\" _warn_deprecated(\"cacheable\", suggest=\"the skippable feature\") return self.properties.get(\"cacheable\", False) @cacheable.setter def cacheable(self, val): _warn_deprecated(\"cacheable\", suggest=\"the skippable feature\") @property # type: ignore @_self_reload(_MANAGER_NAME) def edit_in_progress(self): return self._edit_in_progress @edit_in_progress.setter # type: ignore @_self_setter(_MANAGER_NAME) def edit_in_progress(self, val): self._edit_in_progress = val @property # type: ignore @_self_reload(_MANAGER_NAME) def editor_id(self): return self._editor_id @editor_id.setter # type: ignore @_self_setter(_MANAGER_NAME) def editor_id(self, val): self._editor_id = val @property # type: ignore @_self_reload(_MANAGER_NAME) def editor_expiration_date(self): return self._editor_expiration_date @editor_expiration_date.setter # type: ignore @_self_setter(_MANAGER_NAME) def editor_expiration_date(self, val): self._editor_expiration_date = val @property # type: ignore @_self_reload(_MANAGER_NAME) def job_ids(self): \"\"\"List of the jobs having edited this data node.\"\"\" return [edit.get(\"job_id\") for edit in self.edits if edit.get(\"job_id\")] @property def properties(self): \"\"\"Dictionary of custom properties.\"\"\" self._properties = _Reloader()._reload(self._MANAGER_NAME, self)._properties return self._properties def _get_user_properties(self) -> Dict[str, Any]: \"\"\"Get user properties.\"\"\" return {key: value for key, value in self.properties.items() if key not in self._TAIPY_PROPERTIES} def __eq__(self, other): return self.id == other.id def __ne__(self, other): return not self == other def __hash__(self): return hash(self.id) def __getstate__(self): return vars(self) def __setstate__(self, state): vars(self).update(state) def __getattr__(self, attribute_name): protected_attribute_name = 
_validate_id(attribute_name) if protected_attribute_name in self._properties: return self._properties[protected_attribute_name] raise AttributeError(f\"{attribute_name} is not an attribute of data node {self.id}\") def __get_last_modified_datetime(self) -> Optional[datetime]: path = self._properties.get(self.__PATH_KEY, None) if path and os.path.isfile(path): return datetime.fromtimestamp(os.path.getmtime(path)) last_modified_datetime = None if path and os.path.isdir(path): for filename in os.listdir(path): filepath = os.path.join(path, filename) if os.path.isfile(filepath): file_mtime = datetime.fromtimestamp(os.path.getmtime(filepath)) if last_modified_datetime is None or file_mtime > last_modified_datetime: last_modified_datetime = file_mtime return last_modified_datetime @classmethod @abstractmethod def storage_type(cls) -> str: raise NotImplementedError def read_or_raise(self) -> Any: \"\"\"Read the data referenced by this data node. Returns: The data referenced by this data node. Raises: NoData^: If the data has not been written yet. \"\"\" if not self.last_edit_date: raise NoData(f\"Data node {self.id} from config {self.config_id} has not been written yet.\") return self._read() def read(self) -> Any: \"\"\"Read the data referenced by this data node. Returns: The data referenced by this data node. None if the data has not been written yet. \"\"\" try: return self.read_or_raise() except NoData: self.__logger.warning( f\"Data node {self.id} from config {self.config_id} is being read but has never been \" f\"written.\" ) return None def append(self, data, job_id: Optional[JobId] = None, **kwargs: Dict[str, Any]): \"\"\"Append some data to this data node. Parameters: data (Any): The data to write to this data node. job_id (JobId^): An optional identifier of the writer. **kwargs (dict[str, any]): Extra information to attach to the edit document corresponding to this write. \"\"\" from ._data_manager_factory import _DataManagerFactory self._append(data) self.track_edit(job_id=job_id, **kwargs) self.unlock_edit() _DataManagerFactory._build_manager()._set(self) def write(self, data, job_id: Optional[JobId] = None, **kwargs: Dict[str, Any]): \"\"\"Write some data to this data node. Parameters: data (Any): The data to write to this data node. job_id (JobId^): An optional identifier of the writer. **kwargs (dict[str, any]): Extra information to attach to the edit document corresponding to this write. \"\"\" from ._data_manager_factory import _DataManagerFactory self._write(data) self.track_edit(job_id=job_id, **kwargs) self.unlock_edit() _DataManagerFactory._build_manager()._set(self) def track_edit(self, **options): \"\"\"Creates and adds a new entry in the edits attribute without writing the data. Parameters: options (dict[str, any)): track `timestamp`, `comments`, `job_id`. The others are user-custom, users can use options to attach any information to an external edit of a data node. \"\"\" edit = {} for k, v in options.items(): if v is not None: edit[k] = v if \"timestamp\" not in edit: edit[\"timestamp\"] = datetime.now() self.last_edit_date = edit.get(\"timestamp\") self._edits.append(edit) def lock_edit(self, editor_id: Optional[str] = None): \"\"\"Lock the data node modification. Note: The data node can be unlocked with the method `(DataNode.)unlock_edit()^`. Parameters: editor_id (Optional[str]): The editor's identifier. 
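{"text": "# Hedged sketch (dn and new_data are assumed to exist, editor identifiers are arbitrary strings): how the editor lock and edit tracking described above could be used from application code.\ndn.lock_edit(editor_id=\"alice\")\n# While the lock is held (30 minutes by default), another editor calling\n# dn.lock_edit(editor_id=\"bob\") would raise DataNodeIsBeingEdited.\ndn.write(new_data, comments=\"Manual correction\")  # tracks an Edit with the comment and unlocks the node\nlast_edit = dn.get_last_edit()  # e.g. {\"timestamp\": ..., \"comments\": \"Manual correction\"}"}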
\"\"\" if editor_id: if ( self.edit_in_progress and self.editor_id != editor_id and self.editor_expiration_date and self.editor_expiration_date > datetime.now() ): raise DataNodeIsBeingEdited(self.id, self._editor_id) self.editor_id = editor_id # type: ignore self.editor_expiration_date = datetime.now() + timedelta(minutes=self.__EDIT_TIMEOUT) # type: ignore else: self.editor_id = None # type: ignore self.editor_expiration_date = None # type: ignore self.edit_in_progress = True # type: ignore def unlock_edit(self, editor_id: Optional[str] = None): \"\"\"Unlocks the data node modification. Note: The data node can be locked with the method `(DataNode.)lock_edit()^`. Parameters: editor_id (Optional[str]): The editor's identifier. \"\"\" if ( editor_id and self.editor_id != editor_id and self.editor_expiration_date and self.editor_expiration_date > datetime.now() ): raise DataNodeIsBeingEdited(self.id, self._editor_id) else: self.editor_id = None # type: ignore self.editor_expiration_date = None # type: ignore self.edit_in_progress = False # type: ignore def filter(self, operators: Union[List, Tuple], join_operator=JoinOperator.AND): \"\"\"Read and filter the data referenced by this data node. The data is filtered by the provided list of 3-tuples (key, value, `Operator^`). If multiple filter operators are provided, filtered data will be joined based on the join operator (*AND* or *OR*). Parameters: operators (Union[List[Tuple], Tuple]): A 3-element tuple or a list of 3-element tuples, each is in the form of (key, value, `Operator^`). join_operator (JoinOperator^): The operator used to join the multiple filter 3-tuples. Returns: The filtered data. Raises: NotImplementedError: If the data type is not supported. \"\"\" data = self._read() return _FilterDataNode._filter(data, operators, join_operator) def __getitem__(self, item): data = self._read() return _FilterDataNode._filter_by_key(data, item) @abstractmethod def _read(self): raise NotImplementedError @abstractmethod def _append(self, data): raise NotImplementedError @abstractmethod def _write(self, data): raise NotImplementedError @property # type: ignore @_self_reload(_MANAGER_NAME) def is_ready_for_reading(self) -> bool: \"\"\"Indicate if this data node is ready for reading. Returns: False if the data is locked for modification or if the data has never been written. True otherwise. \"\"\" if self._edit_in_progress: return False if not self._last_edit_date: # Never been written so it is not up-to-date return False return True @property # type: ignore @_self_reload(_MANAGER_NAME) def is_valid(self) -> bool: \"\"\"Indicate if this data node is valid. Returns: False if the data ever been written or the expiration date has passed.
True otherwise. \"\"\" if not self._last_edit_date: # Never been written so it is not valid return False if not self._validity_period: # No validity period and has already been written, so it is valid return True if datetime.now() > self.expiration_date: # expiration_date has been passed return False return True @property def is_up_to_date(self) -> bool: \"\"\"Indicate if this data node is up-to-date. Returns: False if a preceding data node has been updated before the selected data node or the selected data is invalid.
True otherwise. \"\"\" from ..scenario.scenario import Scenario from ..taipy import get_parents parent_scenarios: Set[Scenario] = get_parents(self)[\"scenario\"] # type: ignore for parent_scenario in parent_scenarios: for ancestor_node in nx.ancestors(parent_scenario._build_dag(), self): if ( isinstance(ancestor_node, DataNode) and ancestor_node.last_edit_date and ancestor_node.last_edit_date > self.last_edit_date ): return False return self.is_valid @staticmethod def _class_map(): def all_subclasses(cls): subclasses = set(cls.__subclasses__()) for s in cls.__subclasses__(): subclasses.update(all_subclasses(s)) return subclasses class_map = {} for c in all_subclasses(DataNode): try: if c.storage_type() is not None: class_map[c.storage_type()] = c except NotImplementedError: pass return class_map def get_label(self) -> str: \"\"\"Returns the data node simple label prefixed by its owner label. Returns: The label of the data node as a string. \"\"\" return self._get_label() def get_simple_label(self) -> str: \"\"\"Returns the data node simple label. Returns: The simple label of the data node as a string. \"\"\" return self._get_simple_label() @_make_event.register(DataNode) def make_event_for_datanode( data_node: DataNode, operation: EventOperation, /, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ) -> Event: metadata = {\"config_id\": data_node.config_id, **kwargs} return Event( entity_type=EventEntityType.DATA_NODE, entity_id=data_node.id, operation=operation, attribute_name=attribute_name, attribute_value=attribute_value, metadata=metadata, ) "} {"text": "import dataclasses import json import os from datetime import date, datetime, timedelta from enum import Enum from os.path import isfile from pydoc import locate from typing import Any, Dict, List, Optional, Set from taipy.config.common.scope import Scope from .._backup._backup import _replace_in_backup_file from .._entity._reload import _self_reload from .._version._version_manager_factory import _VersionManagerFactory from ._abstract_file import _AbstractFileDataNode from .data_node import DataNode from .data_node_id import DataNodeId, Edit class JSONDataNode(DataNode, _AbstractFileDataNode): \"\"\"Data Node stored as a JSON file. Attributes: config_id (str): Identifier of the data node configuration. This string must be a valid Python identifier. scope (Scope^): The scope of this data node. id (str): The unique identifier of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or `None`. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. 
editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. path (str): The path to the JSON file. encoder (json.JSONEncoder): The JSON encoder that is used to write into the JSON file. decoder (json.JSONDecoder): The JSON decoder that is used to read from the JSON file. properties (dict[str, Any]): A dictionary of additional properties. The _properties_ must have a _\"default_path\"_ or _\"path\"_ entry with the path of the JSON file: - _\"default_path\"_ `(str)`: The default path of the JSON file.\\n - _\"encoding\"_ `(str)`: The encoding of the JSON file. The default value is `utf-8`.\\n - _\"default_data\"_: The default data of the data nodes instantiated from this JSON data node.\\n \"\"\" __STORAGE_TYPE = \"json\" __DEFAULT_DATA_KEY = \"default_data\" __DEFAULT_PATH_KEY = \"default_path\" __PATH_KEY = \"path\" __ENCODING_KEY = \"encoding\" _ENCODER_KEY = \"encoder\" _DECODER_KEY = \"decoder\" _REQUIRED_PROPERTIES: List[str] = [] def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: Optional[List[Edit]] = None, version: Optional[str] = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties: Optional[Dict] = None, ): if properties is None: properties = {} default_value = properties.pop(self.__DEFAULT_DATA_KEY, None) if self.__ENCODING_KEY not in properties.keys(): properties[self.__ENCODING_KEY] = \"utf-8\" super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, **properties, ) self._path = properties.get(self.__PATH_KEY, properties.get(self.__DEFAULT_PATH_KEY)) if not self._path: self._path = self._build_path(self.storage_type()) properties[self.__PATH_KEY] = self._path self._decoder = self._properties.get(self._DECODER_KEY, _DefaultJSONDecoder) self._encoder = self._properties.get(self._ENCODER_KEY, _DefaultJSONEncoder) if default_value is not None and not os.path.exists(self._path): self._write(default_value) self._last_edit_date = datetime.now() self._edits.append( Edit( { \"timestamp\": self._last_edit_date, \"writer_identifier\": \"TAIPY\", \"comments\": \"Default data written.\", } ) ) if not self._last_edit_date and isfile(self._path): # type: ignore self._last_edit_date = datetime.now() self._TAIPY_PROPERTIES.update( { self.__PATH_KEY, self.__DEFAULT_PATH_KEY, self.__ENCODING_KEY, self.__DEFAULT_DATA_KEY, self._ENCODER_KEY, self._DECODER_KEY, } ) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE @property # type: ignore @_self_reload(DataNode._MANAGER_NAME) def path(self): return self._path @path.setter def path(self, value): tmp_old_path = self._path self._path = value self.properties[self.__PATH_KEY] = value _replace_in_backup_file(old_file_path=tmp_old_path, new_file_path=self._path) @property # type: ignore @_self_reload(DataNode._MANAGER_NAME) def encoder(self): return self._encoder @encoder.setter def encoder(self, encoder: json.JSONEncoder): self.properties[self._ENCODER_KEY] = encoder @property # type: ignore @_self_reload(DataNode._MANAGER_NAME) def decoder(self): return self._decoder
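{"text": "# Hedged sketch (assumed Config.configure_json_data_node API with encoder/decoder arguments): plugging a custom JSON encoder into the JSON data node described above.\nimport json\nfrom taipy import Config\n\nclass AppJSONEncoder(json.JSONEncoder):\n    def default(self, o):\n        # Serialize sets as sorted lists; defer everything else to the default behavior.\n        if isinstance(o, set):\n            return sorted(o)\n        return super().default(o)\n\nsettings_cfg = Config.configure_json_data_node(\n    id=\"app_settings\",\n    default_path=\"settings.json\",\n    encoder=AppJSONEncoder,\n)"}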
@decoder.setter def decoder(self, decoder: json.JSONDecoder): self.properties[self._DECODER_KEY] = decoder def _read(self): with open(self._path, \"r\", encoding=self.properties[self.__ENCODING_KEY]) as f: return json.load(f, cls=self._decoder) def _append(self, data: Any): with open(self._path, \"r+\", encoding=self.properties[self.__ENCODING_KEY]) as f: file_data = json.load(f, cls=self._decoder) if isinstance(file_data, List): if isinstance(data, List): file_data.extend(data) else: file_data.append(data) elif isinstance(data, Dict): file_data.update(data) f.seek(0) json.dump(file_data, f, indent=4, cls=self._encoder) def _write(self, data: Any): with open(self._path, \"w\", encoding=self.properties[self.__ENCODING_KEY]) as f: # type: ignore json.dump(data, f, indent=4, cls=self._encoder) class _DefaultJSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, Enum): return { \"__type__\": f\"Enum-{o.__class__.__module__}-{o.__class__.__qualname__}-{o.name}\", \"__value__\": o.value, } if isinstance(o, (datetime, date)): return {\"__type__\": \"Datetime\", \"__value__\": o.isoformat()} if dataclasses.is_dataclass(o): return { \"__type__\": f\"dataclass-{o.__class__.__module__}-{o.__class__.__qualname__}\", \"__value__\": dataclasses.asdict(o), } return super().default(o) class _DefaultJSONDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def object_hook(self, source): if _type := source.get(\"__type__\"): if _type.startswith(\"Enum\"): _, module, classname, name = _type.split(\"-\") _enum_class = locate(f\"{module}.{classname}\") return _enum_class[name] if _type == \"Datetime\": return datetime.fromisoformat(source.get(\"__value__\")) if _type.startswith(\"dataclass\"): _, module, classname = _type.split(\"-\") _data_class = locate(f\"{module}.{classname}\") return _data_class(**source.get(\"__value__\")) return source "} {"text": "from dataclasses import dataclass from typing import Any, Dict, List, Optional from sqlalchemy import JSON, Boolean, Column, Enum, Float, String, Table, UniqueConstraint from taipy.config.common.scope import Scope from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry from .data_node_id import Edit @mapper_registry.mapped @dataclass class _DataNodeModel(_BaseModel): __table__ = Table( \"data_node\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"config_id\", String), Column(\"scope\", Enum(Scope)), Column(\"storage_type\", String), Column(\"owner_id\", String), Column(\"parent_ids\", JSON), Column(\"last_edit_date\", String), Column(\"edits\", JSON), Column(\"version\", String), Column(\"validity_days\", Float), Column(\"validity_seconds\", Float), Column(\"edit_in_progress\", Boolean), Column(\"editor_id\", String), Column(\"editor_expiration_date\", String), Column(\"data_node_properties\", JSON), ) __table_args__ = (UniqueConstraint(\"config_id\", \"owner_id\", name=\"_config_owner_uc\"),) id: str config_id: str scope: Scope storage_type: str owner_id: Optional[str] parent_ids: List[str] last_edit_date: Optional[str] edits: List[Edit] version: str validity_days: Optional[float] validity_seconds: Optional[float] edit_in_progress: bool editor_id: Optional[str] editor_expiration_date: Optional[str] data_node_properties: Dict[str, Any] @staticmethod def from_dict(data: Dict[str, Any]): return _DataNodeModel( id=data[\"id\"], config_id=data[\"config_id\"], 
scope=Scope._from_repr(data[\"scope\"]), storage_type=data[\"storage_type\"], owner_id=data.get(\"owner_id\"), parent_ids=data.get(\"parent_ids\", []), last_edit_date=data.get(\"last_edit_date\"), edits=_BaseModel._deserialize_attribute(data[\"edits\"]), version=data[\"version\"], validity_days=data[\"validity_days\"], validity_seconds=data[\"validity_seconds\"], edit_in_progress=bool(data.get(\"edit_in_progress\", False)), editor_id=data.get(\"editor_id\", None), editor_expiration_date=data.get(\"editor_expiration_date\"), data_node_properties=_BaseModel._deserialize_attribute(data[\"data_node_properties\"]), ) def to_list(self): return [ self.id, self.config_id, repr(self.scope), self.storage_type, self.owner_id, _BaseModel._serialize_attribute(self.parent_ids), self.last_edit_date, _BaseModel._serialize_attribute(self.edits), self.version, self.validity_days, self.validity_seconds, self.edit_in_progress, self.editor_id, self.editor_expiration_date, _BaseModel._serialize_attribute(self.data_node_properties), ] "} {"text": "from datetime import datetime, timedelta from typing import Any, Dict, List, Optional, Set from taipy.config.common.scope import Scope from .._version._version_manager_factory import _VersionManagerFactory from .data_node import DataNode from .data_node_id import DataNodeId, Edit in_memory_storage: Dict[str, Any] = {} class InMemoryDataNode(DataNode): \"\"\"Data Node stored in memory. Warning: This Data Node implementation is not compatible with a parallel execution of taipy tasks, but only with a task executor in development mode. The purpose of `InMemoryDataNode` is to be used for development or debugging. Attributes: config_id (str): Identifier of the data node configuration. It must be a valid Python identifier. scope (Scope^): The scope of this data node. id (str): The unique identifier of this data node. owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or `None`. parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`. last_edit_date (datetime): The date and time of the last modification. edits (List[Edit^]): The ordered list of edits for that job. version (str): The string indicates the application version of the data node to instantiate. If not provided, the current version is used. validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for which the data node can be considered up-to-date. Once the validity period has passed, the data node is considered stale and relevant tasks will run even if they are skippable (see the [Task management page](../core/entities/task-mgt.md) for more details). If _validity_period_ is set to `None`, the data node is always up-to-date. edit_in_progress (bool): True if a task computing the data node has been submitted and not completed yet. False otherwise. editor_id (Optional[str]): The identifier of the user who is currently editing the data node. editor_expiration_date (Optional[datetime]): The expiration date of the editor lock. properties (dict[str, Any]): A dictionary of additional properties. When creating an _In Memory_ data node, if the _properties_ dictionary contains a _\"default_data\"_ entry, the data node is automatically written with the corresponding _\"default_data\"_ value. 
\"\"\" __STORAGE_TYPE = \"in_memory\" __DEFAULT_DATA_VALUE = \"default_data\" _REQUIRED_PROPERTIES: List[str] = [] def __init__( self, config_id: str, scope: Scope, id: Optional[DataNodeId] = None, owner_id: Optional[str] = None, parent_ids: Optional[Set[str]] = None, last_edit_date: Optional[datetime] = None, edits: List[Edit] = None, version: str = None, validity_period: Optional[timedelta] = None, edit_in_progress: bool = False, editor_id: Optional[str] = None, editor_expiration_date: Optional[datetime] = None, properties=None, ): if properties is None: properties = {} default_value = properties.pop(self.__DEFAULT_DATA_VALUE, None) super().__init__( config_id, scope, id, owner_id, parent_ids, last_edit_date, edits, version or _VersionManagerFactory._build_manager()._get_latest_version(), validity_period, edit_in_progress, editor_id, editor_expiration_date, **properties ) if default_value is not None and self.id not in in_memory_storage: self._write(default_value) self._last_edit_date = datetime.now() self._edits.append( Edit( { \"timestamp\": self._last_edit_date, \"writer_identifier\": \"TAIPY\", \"comments\": \"Default data written.\", } ) ) self._TAIPY_PROPERTIES.update({self.__DEFAULT_DATA_VALUE}) @classmethod def storage_type(cls) -> str: return cls.__STORAGE_TYPE def _read(self): return in_memory_storage.get(self.id) def _write(self, data): in_memory_storage[self.id] = data "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._filesystem_repository import _FileSystemRepository from ._data_converter import _DataNodeConverter from ._data_model import _DataNodeModel class _DataFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_DataNodeModel, converter=_DataNodeConverter, dir_name=\"data_nodes\") "} {"text": "from typing import Any, Dict, NewType DataNodeId = NewType(\"DataNodeId\", str) DataNodeId.__doc__ = \"\"\"Type that holds a `DataNode^` identifier.\"\"\" Edit = NewType(\"Edit\", Dict[str, Any]) Edit.__doc__ = \"\"\"Type that holds a `DataNode^` edit information.\"\"\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
from .._repository._filesystem_repository import _FileSystemRepository from ._submission_converter import _SubmissionConverter from ._submission_model import _SubmissionModel class _SubmissionFSRepository(_FileSystemRepository): def __init__(self): super().__init__(model_type=_SubmissionModel, converter=_SubmissionConverter, dir_name=\"submission\") "} {"text": "from typing import NewType SubmissionId = NewType(\"SubmissionId\", str) SubmissionId.__doc__ = \"\"\"Type that holds a `Submission^` identifier.\"\"\" "} {"text": "from ..common._repr_enum import _ReprEnum class SubmissionStatus(_ReprEnum): \"\"\"Execution status of a `Submission^`. It is implemented as an enumeration. The possible values are: - `SUBMITTED`: A `SUBMITTED` submission has been submitted for execution but not processed yet by the orchestrator. - `UNDEFINED`: AN `UNDEFINED` submission's jobs have been submitted for execution but got some undefined status changes. - `PENDING`: A `PENDING` submission has been enqueued by the orchestrator. It is waiting for an executor to be available for its execution. - `BLOCKED`: A `BLOCKED` submission has been blocked because it has been finished with a job being blocked. - `RUNNING`: A `RUNNING` submission has its jobs currently being executed. - `CANCELED`: A `CANCELED` submission has been submitted but its execution has been canceled. - `FAILED`: A `FAILED` submission has a job failed during its execution. - `COMPLETED`: A `COMPLETED` submission has successfully been executed. \"\"\" SUBMITTED = 0 UNDEFINED = 1 BLOCKED = 2 PENDING = 3 RUNNING = 4 CANCELED = 5 FAILED = 6 COMPLETED = 7 "} {"text": "from dataclasses import dataclass from typing import Any, Dict, List, Union from sqlalchemy import JSON, Column, Enum, String, Table from .._repository._base_taipy_model import _BaseModel from .._repository.db._sql_base_model import mapper_registry from ..job.job_id import JobId from .submission_status import SubmissionStatus @mapper_registry.mapped @dataclass class _SubmissionModel(_BaseModel): __table__ = Table( \"submission\", mapper_registry.metadata, Column(\"id\", String, primary_key=True), Column(\"entity_id\", String), Column(\"job_ids\", JSON), Column(\"creation_date\", String), Column(\"submission_status\", Enum(SubmissionStatus)), Column(\"version\", String), ) id: str entity_id: str job_ids: Union[List[JobId], List] creation_date: str submission_status: SubmissionStatus version: str @staticmethod def from_dict(data: Dict[str, Any]): return _SubmissionModel( id=data[\"id\"], entity_id=data[\"entity_id\"], job_ids=_BaseModel._deserialize_attribute(data[\"job_ids\"]), creation_date=data[\"creation_date\"], submission_status=SubmissionStatus._from_repr(data[\"submission_status\"]), version=data[\"version\"], ) def to_list(self): return [ self.id, self.entity_id, _BaseModel._serialize_attribute(self.job_ids), self.creation_date, repr(self.submission_status), self.version, ] "} {"text": "from datetime import datetime from .._repository._abstract_converter import _AbstractConverter from ..job.job import Job, JobId from ..submission._submission_model import _SubmissionModel from ..submission.submission import Submission from .submission import SubmissionId class _SubmissionConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, submission: Submission) -> _SubmissionModel: return _SubmissionModel( id=submission.id, entity_id=submission._entity_id, job_ids=[job.id if isinstance(job, Job) else JobId(str(job)) for job in list(submission._jobs)], 
creation_date=submission._creation_date.isoformat(), submission_status=submission._submission_status, version=submission._version, ) @classmethod def _model_to_entity(cls, model: _SubmissionModel) -> Submission: submission = Submission( entity_id=model.entity_id, id=SubmissionId(model.id), jobs=model.job_ids, creation_date=datetime.fromisoformat(model.creation_date), submission_status=model.submission_status, version=model.version, ) return submission "} {"text": "from typing import List, Optional, Union from .._manager._manager import _Manager from .._repository._abstract_repository import _AbstractRepository from .._version._version_mixin import _VersionMixin from ..notification import EventEntityType, EventOperation, Notifier, _make_event from ..scenario.scenario import Scenario from ..sequence.sequence import Sequence from ..submission.submission import Submission from ..task.task import Task class _SubmissionManager(_Manager[Submission], _VersionMixin): _ENTITY_NAME = Submission.__name__ _repository: _AbstractRepository _EVENT_ENTITY_TYPE = EventEntityType.SUBMISSION @classmethod def _get_all(cls, version_number: Optional[str] = None) -> List[Submission]: \"\"\" Returns all entities. \"\"\" filters = cls._build_filters_with_version(version_number) return cls._repository._load_all(filters) @classmethod def _create( cls, entity_id: str, ) -> Submission: submission = Submission(entity_id=entity_id) cls._set(submission) Notifier.publish(_make_event(submission, EventOperation.CREATION)) return submission @classmethod def _get_latest(cls, entity: Union[Scenario, Sequence, Task]) -> Optional[Submission]: entity_id = entity.id if not isinstance(entity, str) else entity submissions_of_task = list(filter(lambda submission: submission.entity_id == entity_id, cls._get_all())) if len(submissions_of_task) == 0: return None if len(submissions_of_task) == 1: return submissions_of_task[0] else: return max(submissions_of_task) "} {"text": "import uuid from datetime import datetime from typing import Any, List, Optional, Union from .._entity._entity import _Entity from .._entity._labeled import _Labeled from .._entity._reload import _self_reload, _self_setter from .._version._version_manager_factory import _VersionManagerFactory from ..job._job_manager_factory import _JobManagerFactory from ..job.job import Job, JobId from ..notification.event import Event, EventEntityType, EventOperation, _make_event from .submission_id import SubmissionId from .submission_status import SubmissionStatus class Submission(_Entity, _Labeled): \"\"\"Hold the jobs and submission status when a Scenario^, Sequence^ or Task^ is submitted. Attributes: entity_id (str): The identifier of the entity that was submitted. id (str): The identifier of the `Submission^` entity. jobs (Optional[Union[List[Job], List[JobId]]]): A list of jobs. creation_date (Optional[datetime]): The date of this submission's creation. submission_status (Optional[SubmissionStatus]): The current status of this submission. version (Optional[str]): The string indicates the application version of the submission to instantiate. If not provided, the latest version is used. 
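{"text": "# Hedged sketch (import paths assumed from the module layout shown; the entity id is made up): a Submission is normally created by the orchestrator, but constructing one directly illustrates its defaults.\nfrom taipy.core.submission.submission import Submission\nfrom taipy.core.submission.submission_status import SubmissionStatus\n\nsubmission = Submission(entity_id=\"SCENARIO_example_id\")\nassert submission.submission_status == SubmissionStatus.SUBMITTED  # initial status\nprint(submission.id, submission.entity_id, submission.creation_date)\n# Submissions compare by creation_date, which is why _get_latest() can simply use max()."}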
\"\"\" _ID_PREFIX = \"SUBMISSION\" _MANAGER_NAME = \"submission\" __SEPARATOR = \"_\" def __init__( self, entity_id: str, id: Optional[str] = None, jobs: Optional[Union[List[Job], List[JobId]]] = None, creation_date: Optional[datetime] = None, submission_status: Optional[SubmissionStatus] = None, version: Optional[str] = None, ): self._entity_id = entity_id self.id = id or self.__new_id() self._jobs: Union[List[Job], List[JobId], List] = jobs or [] self._creation_date = creation_date or datetime.now() self._submission_status = submission_status or SubmissionStatus.SUBMITTED self._version = version or _VersionManagerFactory._build_manager()._get_latest_version() @staticmethod def __new_id() -> str: \"\"\"Generate a unique Submission identifier.\"\"\" return SubmissionId(Submission.__SEPARATOR.join([Submission._ID_PREFIX, str(uuid.uuid4())])) @property def entity_id(self) -> str: return self._entity_id @property def creation_date(self): return self._creation_date def get_label(self) -> str: \"\"\"Returns the submission simple label prefixed by its owner label. Returns: The label of the submission as a string. \"\"\" return self._get_label() def get_simple_label(self) -> str: \"\"\"Returns the submission simple label. Returns: The simple label of the submission as a string. \"\"\" return self._get_simple_label() @property # type: ignore @_self_reload(_MANAGER_NAME) def jobs(self) -> List[Job]: jobs = [] job_manager = _JobManagerFactory._build_manager() for job in self._jobs: jobs.append(job_manager._get(job)) return jobs @jobs.setter # type: ignore @_self_setter(_MANAGER_NAME) def jobs(self, jobs: Union[List[Job], List[JobId]]): self._jobs = jobs def __hash__(self): return hash(self.id) def __eq__(self, other): return self.id == other.id @property # type: ignore @_self_reload(_MANAGER_NAME) def submission_status(self): return self._submission_status @submission_status.setter # type: ignore @_self_setter(_MANAGER_NAME) def submission_status(self, submission_status): self._submission_status = submission_status def __lt__(self, other): return self.creation_date.timestamp() < other.creation_date.timestamp() def __le__(self, other): return self.creation_date.timestamp() <= other.creation_date.timestamp() def __gt__(self, other): return self.creation_date.timestamp() > other.creation_date.timestamp() def __ge__(self, other): return self.creation_date.timestamp() >= other.creation_date.timestamp() def _update_submission_status(self, _: Job): abandoned = False canceled = False blocked = False pending = False running = False completed = False for job in self.jobs: if not job: continue if job.is_failed(): self.submission_status = SubmissionStatus.FAILED # type: ignore return if job.is_canceled(): canceled = True continue if job.is_blocked(): blocked = True continue if job.is_pending() or job.is_submitted(): pending = True continue if job.is_running(): running = True continue if job.is_completed() or job.is_skipped(): completed = True continue if job.is_abandoned(): abandoned = True if canceled: self.submission_status = SubmissionStatus.CANCELED # type: ignore return if abandoned: self.submission_status = SubmissionStatus.UNDEFINED # type: ignore return if running: self.submission_status = SubmissionStatus.RUNNING # type: ignore return if pending: self.submission_status = SubmissionStatus.PENDING # type: ignore return if blocked: self.submission_status = SubmissionStatus.BLOCKED # type: ignore return if completed: self.submission_status = SubmissionStatus.COMPLETED # type: ignore return 
self.submission_status = SubmissionStatus.UNDEFINED # type: ignore @_make_event.register(Submission) def _make_event_for_submission( submission: Submission, operation: EventOperation, /, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ) -> Event: metadata = {\"creation_date\": submission.creation_date, \"version\": submission._version} return Event( entity_type=EventEntityType.SUBMISSION, entity_id=submission.id, operation=operation, attribute_name=attribute_name, attribute_value=attribute_value, metadata={**metadata, **kwargs}, ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from typing import Type from .._manager._manager_factory import _ManagerFactory from ..common._utils import _load_fct from ._submission_fs_repository import _SubmissionFSRepository from ._submission_manager import _SubmissionManager from ._submission_sql_repository import _SubmissionSQLRepository class _SubmissionManagerFactory(_ManagerFactory): __REPOSITORY_MAP = {\"default\": _SubmissionFSRepository, \"sql\": _SubmissionSQLRepository} @classmethod def _build_manager(cls) -> Type[_SubmissionManager]: # type: ignore if cls._using_enterprise(): submission_manager = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".submission._submission_manager\", \"_SubmissionManager\" ) # type: ignore build_repository = _load_fct( cls._TAIPY_ENTERPRISE_CORE_MODULE + \".submission._submission_manager_factory\", \"_SubmissionManagerFactory\", )._build_repository # type: ignore else: submission_manager = _SubmissionManager build_repository = cls._build_repository submission_manager._repository = build_repository() # type: ignore return submission_manager # type: ignore @classmethod def _build_repository(cls): return cls._get_repository_with_repo_map(cls.__REPOSITORY_MAP)() "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .._repository._sql_repository import _SQLRepository from ._submission_converter import _SubmissionConverter from ._submission_model import _SubmissionModel class _SubmissionSQLRepository(_SQLRepository): def __init__(self): super().__init__(model_type=_SubmissionModel, converter=_SubmissionConverter) "} {"text": "\"\"\" A single-page Taipy application. Please refer to https://docs.taipy.io/en/latest/manuals/gui/ for more details. \"\"\" import webbrowser from taipy.gui import Markdown, notify import taipy as tp value = 0 logo = \"images/taipy_logo.jpg\" page = Markdown( \"\"\"
<|navbar|lov={[(\"page1\", \"Homepage\"), (\"https://docs.taipy.io/en/latest/manuals/about/\", \"Taipy Docs\"), (\"https://docs.taipy.io/en/latest/getting_started/\", \"Getting Started\")]}|>
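<!-- Each lov entry above pairs an internal page name or an external URL with the label shown in the navigation bar. -->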
<|
<|{logo}|image|height=200px|width=200px|on_action=image_action|>
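<!-- Clicking the logo triggers image_action (defined below), which opens https://taipy.io in the browser. -->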
|> # Taipy Application <|{value}|slider|on_change=on_slider|> <|Push|button|on_action=on_push|> \"\"\" ) def image_action(state): webbrowser.open(\"https://taipy.io\") def on_push(state): ... def on_slider(state): if state.value == 100: notify(state, \"success\", \"Taipy is running!\") def on_change(state, var_name: str, var_value): ... gui = tp.Gui(page=page) if __name__ == '__main__': # Execute by the _Python_ interpretor, for debug only. tp.run(gui, title=\"Taipy Application (development)\") else: # Execute by _Gunicorn_, for production environment. app = tp.run(gui, title=\"Taipy Application\", run_server=False)"} {"text": "from taipy import Gui # A dark mode is available in Taipy # However, we will use the light mode for the Getting Started Gui(page=\"# Getting started with *Taipy*\").run(dark_mode=False)"} {"text": ""} {"text": ""} {"text": "\"\"\"The setup script.\"\"\" import json import os import sysconfig from importlib.util import find_spec from pathlib import Path from setuptools import find_namespace_packages, find_packages, setup from setuptools.command.build_py import build_py with open(\"README.md\", \"rb\") as readme_file: readme = readme_file.read().decode(\"UTF-8\") with open(f\"src{os.sep}taipy{os.sep}version.json\") as version_file: version = json.load(version_file) version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}' if vext := version.get(\"ext\"): version_string = f\"{version_string}.{vext}\" requirements = [ \"backports.zoneinfo>=0.2.1,<0.3;python_version<'3.9'\", \"cookiecutter>=2.1.1,<2.2\", \"taipy-gui@git+https://git@github.com/Avaiga/taipy-gui.git@develop\", \"taipy-rest@git+https://git@github.com/Avaiga/taipy-rest.git@develop\", \"taipy-templates@git+https://git@github.com/Avaiga/taipy-templates.git@develop\", ] test_requirements = [\"pytest>=3.8\"] extras_require = { \"ngrok\": [\"pyngrok>=5.1,<6.0\"], \"image\": [ \"python-magic>=0.4.24,<0.5;platform_system!='Windows'\", \"python-magic-bin>=0.4.14,<0.5;platform_system=='Windows'\", ], \"rdp\": [\"rdp>=0.8\"], \"arrow\": [\"pyarrow>=10.0.1,<11.0\"], \"mssql\": [\"pyodbc>=4\"], } def _build_webapp(): already_exists = Path(\"./src/taipy/gui_core/lib/taipy-gui-core.js\").exists() if not already_exists: # default site-packages path is from the current python interpreter site_packages_path = sysconfig.get_path(\"purelib\") # taipy-gui should be available through setup_requires option # taipy-gui at this step is installed in a backend site-packages separated from the one being used by pip if find_spec(\"taipy\") and find_spec(\"taipy.gui\"): import taipy site_packages_path = Path(taipy.__file__).absolute().parent.parent # Specify the correct path to taipy-gui in gui/.env file env_file_path = Path(__file__).absolute().parent / \"frontend\" / \"taipy\" / \".env\" if not os.path.exists(env_file_path): with open(env_file_path, \"w\") as env_file: env_file.write(f\"TAIPY_GUI_DIR={site_packages_path}\\n\") os.system(\"cd frontend/taipy && npm ci && npm run build\") class NPMInstall(build_py): def run(self): _build_webapp() build_py.run(self) setup( author=\"Avaiga\", author_email=\"dev@taipy.io\", python_requires=\">=3.8\", classifiers=[ \"Intended Audience :: Developers\", \"License :: OSI Approved :: Apache Software License\", \"Natural Language :: English\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Programming Language :: Python :: 3.10\", \"Programming Language :: Python :: 
3.11\", ], description=\"A 360\u00b0 open-source platform from Python pilots to production-ready web apps.\", install_requires=requirements, entry_points={ \"console_scripts\": [ \"taipy = taipy._entrypoint:_entrypoint\", ] }, license=\"Apache License 2.0\", long_description=readme, long_description_content_type=\"text/markdown\", keywords=\"taipy\", name=\"taipy\", package_dir={\"\": \"src\"}, packages=find_namespace_packages(where=\"src\") + find_packages(include=[\"taipy\"]), include_package_data=True, test_suite=\"tests\", url=\"https://github.com/avaiga/taipy\", version=version_string, zip_safe=False, extras_require=extras_require, cmdclass={\"build_py\": NPMInstall}, ) "} {"text": "import re import sys repo_name = sys.argv[1] branch_name = sys.argv[2] # Regex pattern ]*?)(?]*?)> pattern = re.compile(\"]*?)(?]*?)>\") replacement = r'' with open(\"README.md\") as readme_file: readme_str = readme_file.read() modified_readme = re.sub(pattern, replacement.format(repo_name=repo_name, branch_name=branch_name), readme_str) with open(\"README.md\", \"w\") as readme_file: readme_file.write(modified_readme) "} {"text": "# ############################################################ # Generate Python interface definition files # ############################################################ from src.taipy.gui.config import Config import json import os import typing as t # ############################################################ # Generate gui pyi file (gui/gui.pyi) # ############################################################ gui_py_file = \"./src/taipy/gui/gui.py\" gui_pyi_file = gui_py_file + \"i\" os.system(f\"pipenv run stubgen {gui_py_file} --no-import --parse-only --export-less -o ./\") gui_config = \"\".join( f\", {k}: {v.__name__} = ...\" if \" t.List[t.Dict[str, t.Any]]: properties = element[\"properties\"] if \"inherits\" not in element: return properties for inherit in element[\"inherits\"]: inherit_element = next((e for e in viselements[\"undocumented\"] if e[0] == inherit), None) if inherit_element is None: inherit_element = next((e for e in viselements[\"blocks\"] if e[0] == inherit), None) if inherit_element is None: inherit_element = next((e for e in viselements[\"controls\"] if e[0] == inherit), None) if inherit_element is None: raise RuntimeError(f\"Can't find element with name {inherit}\") properties += get_properties(inherit_element[1], viselements) return properties def build_doc(element: t.Dict[str, t.Any]): if \"doc\" not in element: return \"\" doc = str(element[\"doc\"]).replace(\"\\n\", f'\\n{16*\" \"}') return f\"{element['name']} ({element['type']}): {doc} {'(default: '+element['default_value'] + ')' if 'default_value' in element else ''}\" # noqa: E501 for control_element in viselements[\"controls\"]: name = control_element[0] property_list = [] property_names = [] for property in get_properties(control_element[1], viselements): if property[\"name\"] not in property_names and \"[\" not in property[\"name\"]: property_list.append(property) property_names.append(property[\"name\"]) properties = \", \".join([f\"{p} = ...\" for p in property_names]) doc_arguments = f\"\\n{12*' '}\".join([build_doc(p) for p in property_list]) # append properties to __init__.pyi with open(builder_pyi_file, \"a\") as file: file.write( control_template.replace(\"{{name}}\", name) .replace(\"{{properties}}\", properties) .replace(\"{{doc_arguments}}\", doc_arguments) ) for block_element in viselements[\"blocks\"]: name = block_element[0] property_list = [] property_names = [] for property 
in get_properties(block_element[1], viselements): if property[\"name\"] not in property_names and \"[\" not in property[\"name\"]: property_list.append(property) property_names.append(property[\"name\"]) properties = \", \".join([f\"{p} = ...\" for p in property_names]) doc_arguments = f\"{8*' '}\".join([build_doc(p) for p in property_list]) # append properties to __init__.pyi with open(builder_pyi_file, \"a\") as file: file.write( block_template.replace(\"{{name}}\", name) .replace(\"{{properties}}\", properties) .replace(\"{{doc_arguments}}\", doc_arguments) ) os.system(f\"pipenv run isort {gui_pyi_file}\") os.system(f\"pipenv run black {gui_pyi_file}\") os.system(f\"pipenv run isort {builder_pyi_file}\") os.system(f\"pipenv run black {builder_pyi_file}\") "} {"text": "import pytest def pytest_addoption(parser): parser.addoption(\"--e2e-base-url\", action=\"store\", default=\"/\", help=\"base url for e2e testing\") parser.addoption(\"--e2e-port\", action=\"store\", default=\"5000\", help=\"port for e2e testing\") @pytest.fixture(scope=\"session\") def e2e_base_url(request): return request.config.getoption(\"--e2e-base-url\") @pytest.fixture(scope=\"session\") def e2e_port(request): return request.config.getoption(\"--e2e-port\") "} {"text": "from unittest import mock from src.taipy._run import _run from taipy.core import Core from taipy.gui import Gui from taipy.rest import Rest @mock.patch(\"taipy.gui.Gui.run\") def test_run_pass_with_gui(gui_run): _run(Gui()) gui_run.assert_called_once() @mock.patch(\"taipy.core.Core.run\") def test_run_pass_with_core(core_run): _run(Core()) core_run.assert_called_once() @mock.patch(\"taipy.rest.Rest.run\") @mock.patch(\"taipy.core.Core.run\") def test_run_pass_with_rest(rest_run, core_run): _run(Rest()) rest_run.assert_called_once() core_run.assert_called_once() @mock.patch(\"taipy.rest.Rest.run\") @mock.patch(\"taipy.core.Core.run\") def test_run_pass_with_core_and_rest(core_run, rest_run): _run(Core(), Rest()) core_run.assert_called_once() rest_run.assert_called_once() @mock.patch(\"taipy.gui.Gui.run\") @mock.patch(\"taipy.rest.Rest.run\") @mock.patch(\"taipy.core.Core.run\") def test_run_pass_with_gui_and_rest(core_run, rest_run, gui_run): _run(Gui(), Rest()) gui_run.assert_called_once() core_run.assert_called_once() rest_run.assert_not_called() @mock.patch(\"taipy.gui.Gui.run\") @mock.patch(\"taipy.core.Core.run\") def test_run_pass_with_gui_and_core(core_run, gui_run): _run(Gui(), Core()) gui_run.assert_called_once() core_run.assert_called_once() "} {"text": "\"\"\"Unit test package for taipy.\"\"\" "} {"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.common._utils import _load_fct from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.exceptions.exceptions import NonExistingVersion from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.conftest import init_config, init_managers from tests.core.utils import 
assert_true_after_time def test_core_cli_no_arguments(init_sql_repo): with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() assert not Config.core.force core.stop() def test_core_cli_development_mode(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() core.stop() def test_core_cli_dev_mode(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"-dev\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() core.stop() def test_core_cli_experiment_mode(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_latest_version() assert not Config.core.force core.stop() def test_core_cli_experiment_mode_with_version(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"2.1\" assert not Config.core.force core.stop() def test_core_cli_experiment_mode_with_force_version(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\", \"--taipy-force\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"2.1\" assert Config.core.force core.stop() def test_core_cli_production_mode(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--production\"]): core = Core() core.run() assert Config.core.mode == \"production\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_latest_version() assert not Config.core.force core.stop() def test_dev_mode_clean_all_entities_of_the_latest_version(init_sql_repo): scenario_config = config_scenario() init_managers() # Create a scenario in development mode with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Initial assertion assert len(_DataManager._get_all(version_number=\"all\")) == 2 assert len(_TaskManager._get_all(version_number=\"all\")) == 1 assert len(_SequenceManager._get_all(version_number=\"all\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 1 assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 1 # Create a new scenario in experiment mode with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Assert number of entities in 2nd version assert len(_DataManager._get_all(version_number=\"all\")) == 4 assert len(_TaskManager._get_all(version_number=\"all\")) == 2 assert len(_SequenceManager._get_all(version_number=\"all\")) == 2 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 2 assert ( len(_CycleManager._get_all(version_number=\"all\")) == 1 ) # No new cycle is created since old dev version use the same cycle assert len(_JobManager._get_all(version_number=\"all\")) == 
2 # Run development mode again with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() # The 1st dev version should be deleted run with development mode assert len(_DataManager._get_all(version_number=\"all\")) == 2 assert len(_TaskManager._get_all(version_number=\"all\")) == 1 assert len(_SequenceManager._get_all(version_number=\"all\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 1 assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 1 # Submit new dev version scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) # Assert number of entities with 1 dev version and 1 exp version assert len(_DataManager._get_all(version_number=\"all\")) == 4 assert len(_TaskManager._get_all(version_number=\"all\")) == 2 assert len(_SequenceManager._get_all(version_number=\"all\")) == 2 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 2 assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 2 # Assert number of entities of the latest version only assert len(_DataManager._get_all(version_number=\"latest\")) == 2 assert len(_TaskManager._get_all(version_number=\"latest\")) == 1 assert len(_SequenceManager._get_all(version_number=\"latest\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"latest\")) == 1 assert len(_JobManager._get_all(version_number=\"latest\")) == 1 # Assert number of entities of the development version only assert len(_DataManager._get_all(version_number=\"development\")) == 2 assert len(_TaskManager._get_all(version_number=\"development\")) == 1 assert len(_SequenceManager._get_all(version_number=\"development\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"development\")) == 1 assert len(_JobManager._get_all(version_number=\"development\")) == 1 # Assert number of entities of an unknown version with pytest.raises(NonExistingVersion): assert _DataManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _TaskManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _SequenceManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _ScenarioManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _JobManager._get_all(version_number=\"foo\") core.stop() def twice_doppelganger(a): return a * 2 def test_dev_mode_clean_all_entities_when_config_is_alternated(init_sql_repo): data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice_doppelganger, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) # Create a scenario in development mode with the doppelganger function with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Delete the twice_doppelganger function # and clear cache of _load_fct() to simulate a new run del globals()[\"twice_doppelganger\"] _load_fct.cache_clear() # Create a scenario in development mode with another function scenario_config = config_scenario() with 
patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() def test_version_number_when_switching_mode(init_sql_repo): with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() ver_dev = _VersionManager._get_development_version() assert ver_1 == ver_dev assert len(_VersionManager._get_all()) == 1 core.stop() # Run with dev mode, the version number is the same with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_2 = _VersionManager._get_latest_version() assert ver_2 == ver_dev assert len(_VersionManager._get_all()) == 1 core.stop() # When run with experiment mode, a new version is created with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() ver_3 = _VersionManager._get_latest_version() assert ver_3 != ver_dev assert len(_VersionManager._get_all()) == 2 core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() ver_4 = _VersionManager._get_latest_version() assert ver_4 == \"2.1\" assert len(_VersionManager._get_all()) == 3 core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() ver_5 = _VersionManager._get_latest_version() assert ver_5 != ver_3 assert ver_5 != ver_4 assert ver_5 != ver_dev assert len(_VersionManager._get_all()) == 4 core.stop() # When run with production mode, the latest version is used as production with patch(\"sys.argv\", [\"prog\", \"--production\"]): core = Core() core.run() ver_6 = _VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_6 == ver_5 assert production_versions == [ver_6] assert len(_VersionManager._get_all()) == 4 core.stop() # When run with production mode, the \"2.1\" version is used as production with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() ver_7 = _VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_7 == \"2.1\" assert production_versions == [ver_7, ver_6] assert len(_VersionManager._get_all()) == 4 core.stop() # Run with dev mode, the version number is the same as the first dev version to overide it with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_7 = _VersionManager._get_latest_version() assert ver_1 == ver_7 assert len(_VersionManager._get_all()) == 4 core.stop() def test_production_mode_load_all_entities_from_previous_production_version(init_sql_repo): scenario_config = config_scenario() init_managers() with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() production_ver_1 = _VersionManager._get_latest_version() assert _VersionManager._get_production_versions() == [production_ver_1] # When run production mode on a new app, a dev version is created alongside assert _VersionManager._get_development_version() not in _VersionManager._get_production_versions() assert len(_VersionManager._get_all()) == 2 scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert 
len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() production_ver_2 = _VersionManager._get_latest_version() assert _VersionManager._get_production_versions() == [production_ver_1, production_ver_2] assert len(_VersionManager._get_all()) == 3 # All entities from previous production version should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 core.stop() def test_force_override_experiment_version(init_sql_repo): scenario_config = config_scenario() init_managers() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() assert ver_1 == \"1.0\" # When create new experiment version, a development version entity is also created as a placeholder assert len(_VersionManager._get_all()) == 2 # 2 version include 1 experiment 1 development scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() Config.configure_global_app(foo=\"bar\") # Without --taipy-force parameter, a SystemExit will be raised with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() core.stop() # With --taipy-force parameter with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\", \"--taipy-force\"]): core = Core() core.run() core.stop() ver_2 = _VersionManager._get_latest_version() assert ver_2 == \"1.0\" assert len(_VersionManager._get_all()) == 2 # 2 version include 1 experiment 1 development # All entities from previous submit should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 def test_force_override_production_version(init_sql_repo): scenario_config = config_scenario() init_managers() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_1 == \"1.0\" assert production_versions == [\"1.0\"] # When create new production version, a development version entity is also created as a placeholder assert len(_VersionManager._get_all()) == 2 # 2 version include 1 production 1 development scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() Config.configure_global_app(foo=\"bar\") 
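# Changing any configuration value (here the global foo property) after entities already exist for
# version 1.0 makes the stored and current configurations diverge. The next run detects the conflict
# and exits unless the version is explicitly forced, e.g. an invocation along the lines of
#     python main.py --production 1.0 --taipy-force
# (illustrative command line only; the tests below patch sys.argv instead of launching a process).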
# Without --taipy-force parameter, a SystemExit will be raised with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() # With --taipy-force parameter with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\", \"--taipy-force\"]): core = Core() core.run() ver_2 = _VersionManager._get_latest_version() assert ver_2 == \"1.0\" assert len(_VersionManager._get_all()) == 2 # 2 version include 1 production 1 development # All entities from previous submit should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 core.stop() def test_modify_config_properties_without_force(caplog, init_sql_repo): scenario_config = config_scenario() init_managers() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() init_config() Config.configure_core(repository_type=\"sql\", repository_properties={\"db_location\": init_sql_repo}) scenario_config_2 = config_scenario_2() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_2) _ScenarioManager._submit(scenario) core.stop() error_message = str(caplog.text) assert 'DATA_NODE \"d3\" was added' in error_message assert 'DATA_NODE \"d0\" was removed' in error_message assert 'DATA_NODE \"d2\" has attribute \"default_path\" modified' in error_message assert 'CORE \"root_folder\" was modified' in error_message assert 'JOB \"mode\" was modified' in error_message assert 'JOB \"max_nb_of_workers\" was modified' in error_message assert 'SCENARIO \"my_scenario\" has attribute \"frequency\" modified' in error_message assert 'SCENARIO \"my_scenario\" has attribute \"tasks\" modified' in error_message assert 'TASK \"my_task\" has attribute \"inputs\" modified' in error_message assert 'TASK \"my_task\" has attribute \"function\" modified' in error_message assert 'TASK \"my_task\" has attribute \"outputs\" modified' in error_message assert 'DATA_NODE \"d2\" has attribute \"has_header\" modified' in error_message assert 'DATA_NODE \"d2\" has attribute \"exposed_type\" modified' in error_message def test_modify_job_configuration_dont_stop_application(caplog, init_sql_repo): scenario_config = config_scenario() init_managers() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): Config.configure_job_executions(mode=\"development\") core = Core() core.run(force_restart=True) scenario = _ScenarioManager._create(scenario_config) jobs = _ScenarioManager._submit(scenario) assert all([job.is_finished() for job in jobs]) core.stop() init_config() Config.configure_core(repository_type=\"sql\", repository_properties={\"db_location\": init_sql_repo}) scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) core = Core() core.run(force_restart=True) scenario = _ScenarioManager._create(scenario_config) jobs = _ScenarioManager._submit(scenario) assert_true_after_time(lambda: all(job.is_finished() for job in jobs)) error_message = str(caplog.text) 
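# Unlike the data-node and scenario changes asserted in the previous test, modifying only the JOB
# section is reported in the log without stopping the application, which is why the jobs submitted
# above still run to completion.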
assert 'JOB \"mode\" was modified' in error_message assert 'JOB \"max_nb_of_workers\" was modified' in error_message core.stop() assert_true_after_time(lambda: core._dispatcher is None) def twice(a): return a * 2 def config_scenario(): Config.configure_data_node(id=\"d0\") data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) scenario_config.add_sequences({\"my_sequence\": [task_config]}) return scenario_config def double_twice(a): return a * 2, a * 2 def config_scenario_2(): Config.configure_core( root_folder=\"foo_root\", # Changing the \"storage_folder\" will fail since older versions are stored in older folder # storage_folder=\"foo_storage\", ) Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=5) data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) # Modify properties of \"d2\" data_node_2_config = Config.configure_data_node( id=\"d2\", storage_type=\"csv\", default_path=\"bar.csv\", has_header=False, exposed_type=\"numpy\" ) # Add new data node \"d3\" data_node_3_config = Config.configure_data_node( id=\"d3\", storage_type=\"csv\", default_path=\"baz.csv\", has_header=False, exposed_type=\"numpy\" ) # Modify properties of \"my_task\", including the function and outputs list Config.configure_task(\"my_task\", double_twice, data_node_3_config, [data_node_1_config, data_node_2_config]) task_config_1 = Config.configure_task(\"my_task_1\", double_twice, data_node_3_config, [data_node_2_config]) # Modify properties of \"my_scenario\", where tasks is now my_task_1 scenario_config = Config.configure_scenario(\"my_scenario\", [task_config_1], frequency=Frequency.MONTHLY) scenario_config.add_sequences({\"my_sequence\": [task_config_1]}) return scenario_config "} {"text": "import os import pickle import shutil from datetime import datetime from queue import Queue import pandas as pd import pytest from sqlalchemy import create_engine, text from src.taipy.core._core import Core from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._repository.db._sql_connection import _SQLConnection from src.taipy.core._version._version import _Version from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.config import ( CoreSection, DataNodeConfig, JobConfig, MigrationConfig, ScenarioConfig, TaskConfig, _ConfigIdChecker, _CoreSectionChecker, _DataNodeConfigChecker, _JobConfigChecker, _ScenarioConfigChecker, _TaskConfigChecker, ) from src.taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory from src.taipy.core.cycle._cycle_model import _CycleModel from src.taipy.core.cycle.cycle import Cycle from src.taipy.core.cycle.cycle_id import CycleId from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data._data_model import _DataNodeModel from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.job._job_manager_factory import _JobManagerFactory from src.taipy.core.job.job import Job from src.taipy.core.job.job_id import JobId from src.taipy.core.notification.notifier import 
Notifier from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.scenario._scenario_model import _ScenarioModel from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.scenario.scenario_id import ScenarioId from src.taipy.core.sequence._sequence_manager_factory import _SequenceManagerFactory from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission._submission_model import _SubmissionModel from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task from taipy.config import _inject_section from taipy.config._config import _Config from taipy.config._serializer._toml_serializer import _TomlSerializer from taipy.config.checker._checker import _Checker from taipy.config.checker.issue_collector import IssueCollector from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config current_time = datetime.now() _OrchestratorFactory._build_orchestrator() @pytest.fixture(scope=\"function\") def csv_file(tmpdir_factory) -> str: csv = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.csv\") csv.to_csv(str(fn), index=False) return fn.strpath @pytest.fixture(scope=\"function\") def excel_file(tmpdir_factory) -> str: excel = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.xlsx\") excel.to_excel(str(fn), index=False) return fn.strpath @pytest.fixture(scope=\"function\") def excel_file_with_sheet_name(tmpdir_factory) -> str: excel = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.xlsx\") excel.to_excel(str(fn), sheet_name=\"sheet_name\", index=False) return fn.strpath @pytest.fixture(scope=\"function\") def json_file(tmpdir_factory) -> str: json_data = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.json\") json_data.to_json(str(fn), orient=\"records\") return fn.strpath @pytest.fixture(scope=\"function\") def excel_file_with_multi_sheet(tmpdir_factory) -> str: excel_multi_sheet = { \"Sheet1\": pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]), \"Sheet2\": pd.DataFrame([{\"a\": 7, \"b\": 8, \"c\": 9}, {\"a\": 10, \"b\": 11, \"c\": 12}]), } fn = tmpdir_factory.mktemp(\"data\").join(\"df.xlsx\") with pd.ExcelWriter(str(fn)) as writer: for key in excel_multi_sheet.keys(): excel_multi_sheet[key].to_excel(writer, key, index=False) return fn.strpath @pytest.fixture(scope=\"function\") def pickle_file_path(tmpdir_factory) -> str: data = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.p\") with open(str(fn), \"wb\") as f: pickle.dump(data, f) return fn.strpath @pytest.fixture(scope=\"function\") def parquet_file_path(tmpdir_factory) -> str: data = pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) fn = tmpdir_factory.mktemp(\"data\").join(\"df.parquet\") data.to_parquet(str(fn)) return fn.strpath @pytest.fixture(scope=\"function\") def tmp_sqlite_db_file_path(tmpdir_factory): fn = tmpdir_factory.mktemp(\"data\") 
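# Build a throwaway SQLite database file holding a two-row example table (columns foo and bar);
# the fixture returns a (folder, db_name, file_extension) triple so tests that need a real SQL
# source can point at it.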
db_name = \"df\" file_extension = \".db\" db = create_engine(\"sqlite:///\" + os.path.join(fn.strpath, f\"{db_name}{file_extension}\")) conn = db.connect() conn.execute(text(\"CREATE TABLE example (foo int, bar int);\")) conn.execute(text(\"INSERT INTO example (foo, bar) VALUES (1, 2);\")) conn.execute(text(\"INSERT INTO example (foo, bar) VALUES (3, 4);\")) conn.commit() conn.close() db.dispose() return fn.strpath, db_name, file_extension @pytest.fixture(scope=\"function\") def tmp_sqlite_sqlite3_file_path(tmpdir_factory): fn = tmpdir_factory.mktemp(\"data\") db_name = \"df\" file_extension = \".sqlite3\" db = create_engine(\"sqlite:///\" + os.path.join(fn.strpath, f\"{db_name}{file_extension}\")) conn = db.connect() conn.execute(text(\"CREATE TABLE example (foo int, bar int);\")) conn.execute(text(\"INSERT INTO example (foo, bar) VALUES (1, 2);\")) conn.execute(text(\"INSERT INTO example (foo, bar) VALUES (3, 4);\")) conn.commit() conn.close() db.dispose() return fn.strpath, db_name, file_extension @pytest.fixture(scope=\"function\") def default_data_frame(): return pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]) @pytest.fixture(scope=\"function\") def default_multi_sheet_data_frame(): return { \"Sheet1\": pd.DataFrame([{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]), \"Sheet2\": pd.DataFrame([{\"a\": 7, \"b\": 8, \"c\": 9}, {\"a\": 10, \"b\": 11, \"c\": 12}]), } @pytest.fixture(scope=\"session\", autouse=True) def cleanup_files(): yield if os.path.exists(\".data\"): shutil.rmtree(\".data\", ignore_errors=True) if os.path.exists(\".my_data\"): shutil.rmtree(\".my_data\", ignore_errors=True) @pytest.fixture(scope=\"function\") def current_datetime(): return current_time @pytest.fixture(scope=\"function\") def scenario(cycle): return Scenario( \"sc\", set(), {}, set(), ScenarioId(\"sc_id\"), current_time, is_primary=False, tags={\"foo\"}, version=\"random_version_number\", cycle=None, ) @pytest.fixture(scope=\"function\") def data_node(): return InMemoryDataNode(\"data_node_config_id\", Scope.SCENARIO, version=\"random_version_number\") @pytest.fixture(scope=\"function\") def data_node_model(): return _DataNodeModel( \"my_dn_id\", \"test_data_node\", Scope.SCENARIO, \"csv\", \"name\", \"owner_id\", list({\"parent_id_1\", \"parent_id_2\"}), datetime(1985, 10, 14, 2, 30, 0).isoformat(), [dict(timestamp=datetime(1985, 10, 14, 2, 30, 0).isoformat(), job_id=\"job_id\")], \"latest\", None, None, False, {\"path\": \"/path\", \"has_header\": True, \"prop\": \"ENV[FOO]\", \"exposed_type\": \"pandas\"}, ) @pytest.fixture(scope=\"function\") def task(data_node): dn = InMemoryDataNode(\"dn_config_id\", Scope.SCENARIO, version=\"random_version_number\") return Task(\"task_config_id\", {}, print, [data_node], [dn]) @pytest.fixture(scope=\"function\") def scenario_model(cycle): return _ScenarioModel( ScenarioId(\"sc_id\"), \"sc\", set(), set(), {}, creation_date=current_time.isoformat(), primary_scenario=False, subscribers=[], tags=[\"foo\"], version=\"random_version_number\", cycle=None, ) @pytest.fixture(scope=\"function\") def cycle(): example_date = datetime.fromisoformat(\"2021-11-11T11:11:01.000001\") return Cycle( Frequency.DAILY, {}, creation_date=example_date, start_date=example_date, end_date=example_date, name=\"cc\", id=CycleId(\"cc_id\"), ) @pytest.fixture(scope=\"class\") def sequence(): return Sequence( {}, [], SequenceId(\"sequence_id\"), owner_id=\"owner_id\", parent_ids=set([\"parent_id_1\", \"parent_id_2\"]), 
version=\"random_version_number\", ) @pytest.fixture(scope=\"function\") def job(task): return Job(JobId(\"job\"), task, \"foo\", \"bar\", version=\"random_version_number\") @pytest.fixture(scope=\"function\") def _version(): return _Version(id=\"foo\", config=Config._applied_config) @pytest.fixture(scope=\"function\") def cycle_model(): return _CycleModel( CycleId(\"cc_id\"), \"cc\", Frequency.DAILY, {}, creation_date=\"2021-11-11T11:11:01.000001\", start_date=\"2021-11-11T11:11:01.000001\", end_date=\"2021-11-11T11:11:01.000001\", ) @pytest.fixture(scope=\"function\") def tmp_sqlite(tmpdir_factory): fn = tmpdir_factory.mktemp(\"db\") return os.path.join(fn.strpath, \"test.db\") @pytest.fixture(scope=\"function\", autouse=True) def clean_repository(): from sqlalchemy.orm import close_all_sessions close_all_sessions() init_config() init_orchestrator() init_managers() init_config() init_notifier() yield def init_config(): Config.unblock_update() Config._default_config = _Config()._default_config() Config._python_config = _Config() Config._file_config = _Config() Config._env_file_config = _Config() Config._applied_config = _Config() Config._collector = IssueCollector() Config._serializer = _TomlSerializer() _Checker._checkers = [] _inject_section( JobConfig, \"job_config\", JobConfig(\"development\"), [(\"configure_job_executions\", JobConfig._configure)], True ) _inject_section( CoreSection, \"core\", CoreSection.default_config(), [(\"configure_core\", CoreSection._configure)], add_to_unconflicted_sections=True, ) _inject_section( DataNodeConfig, \"data_nodes\", DataNodeConfig.default_config(), [ (\"configure_data_node\", DataNodeConfig._configure), (\"configure_data_node_from\", DataNodeConfig._configure_from), (\"set_default_data_node_configuration\", DataNodeConfig._set_default_configuration), (\"configure_csv_data_node\", DataNodeConfig._configure_csv), (\"configure_json_data_node\", DataNodeConfig._configure_json), (\"configure_sql_table_data_node\", DataNodeConfig._configure_sql_table), (\"configure_sql_data_node\", DataNodeConfig._configure_sql), (\"configure_mongo_collection_data_node\", DataNodeConfig._configure_mongo_collection), (\"configure_in_memory_data_node\", DataNodeConfig._configure_in_memory), (\"configure_pickle_data_node\", DataNodeConfig._configure_pickle), (\"configure_excel_data_node\", DataNodeConfig._configure_excel), (\"configure_generic_data_node\", DataNodeConfig._configure_generic), ], ) _inject_section( TaskConfig, \"tasks\", TaskConfig.default_config(), [ (\"configure_task\", TaskConfig._configure), (\"set_default_task_configuration\", TaskConfig._set_default_configuration), ], ) _inject_section( ScenarioConfig, \"scenarios\", ScenarioConfig.default_config(), [ (\"configure_scenario\", ScenarioConfig._configure), (\"set_default_scenario_configuration\", ScenarioConfig._set_default_configuration), ], ) _inject_section( MigrationConfig, \"migration_functions\", MigrationConfig.default_config(), [(\"add_migration_function\", MigrationConfig._add_migration_function)], True, ) _Checker.add_checker(_ConfigIdChecker) _Checker.add_checker(_CoreSectionChecker) _Checker.add_checker(_DataNodeConfigChecker) _Checker.add_checker(_JobConfigChecker) # We don't need to add _MigrationConfigChecker because it is run only when the Core service is run. 
_Checker.add_checker(_TaskConfigChecker) _Checker.add_checker(_ScenarioConfigChecker) Config.configure_core(read_entity_retry=0) Core._is_running = False def init_managers(): _CycleManagerFactory._build_manager()._delete_all() _ScenarioManagerFactory._build_manager()._delete_all() _SequenceManagerFactory._build_manager()._delete_all() _JobManagerFactory._build_manager()._delete_all() _TaskManagerFactory._build_manager()._delete_all() _DataManagerFactory._build_manager()._delete_all() _VersionManagerFactory._build_manager()._delete_all() _SubmissionManagerFactory._build_manager()._delete_all() def init_orchestrator(): if _OrchestratorFactory._orchestrator is None: _OrchestratorFactory._build_orchestrator() _OrchestratorFactory._build_dispatcher() _OrchestratorFactory._orchestrator.jobs_to_run = Queue() _OrchestratorFactory._orchestrator.blocked_jobs = [] def init_notifier(): Notifier._topics_registrations_list = {} @pytest.fixture def sql_engine(): return create_engine(\"sqlite:///:memory:\") @pytest.fixture def init_sql_repo(tmp_sqlite): Config.configure_core(repository_type=\"sql\", repository_properties={\"db_location\": tmp_sqlite}) # Clean SQLite database if _SQLConnection._connection: _SQLConnection._connection.close() _SQLConnection._connection = None _SQLConnection.init_db() return tmp_sqlite "} {"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.exceptions.exceptions import CoreServiceIsAlreadyRunning from taipy.config import Config from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked class TestCore: def test_run_core_trigger_config_check(self, caplog): Config.configure_data_node(id=\"d0\", storage_type=\"toto\") with patch(\"sys.argv\", [\"prog\"]): with pytest.raises(SystemExit): core = Core() core.run() expected_error_message = ( \"`storage_type` field of DataNodeConfig `d0` must be either csv, sql_table,\" \" sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory.\" ' Current value of property `storage_type` is \"toto\".' 
) assert expected_error_message in caplog.text core.stop() def test_run_core_as_a_service_development_mode(self): _OrchestratorFactory._dispatcher = None with patch(\"sys.argv\", [\"prog\"]): core = Core() assert core._orchestrator is None assert core._dispatcher is None assert _OrchestratorFactory._dispatcher is None core.run() assert core._orchestrator is not None assert core._orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator is not None assert _OrchestratorFactory._orchestrator == _Orchestrator assert core._dispatcher is not None assert isinstance(core._dispatcher, _DevelopmentJobDispatcher) assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) core.stop() def test_run_core_as_a_service_standalone_mode(self): _OrchestratorFactory._dispatcher = None with patch(\"sys.argv\", [\"prog\"]): core = Core() assert core._orchestrator is None assert core._dispatcher is None assert _OrchestratorFactory._dispatcher is None Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) core.run() assert core._orchestrator is not None assert core._orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator is not None assert _OrchestratorFactory._orchestrator == _Orchestrator assert core._dispatcher is not None assert isinstance(core._dispatcher, _StandaloneJobDispatcher) assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher) assert core._dispatcher.is_running() assert _OrchestratorFactory._dispatcher.is_running() core.stop() def test_core_service_can_only_be_run_once(self): with patch(\"sys.argv\", [\"prog\"]): core_instance_1 = Core() core_instance_2 = Core() core_instance_1.run() with pytest.raises(CoreServiceIsAlreadyRunning): core_instance_1.run() with pytest.raises(CoreServiceIsAlreadyRunning): core_instance_2.run() # Stop the Core service and run it again should work core_instance_1.stop() core_instance_1.run() core_instance_1.stop() core_instance_2.run() core_instance_2.stop() def test_block_config_update_when_core_service_is_running_development_mode(self): _OrchestratorFactory._dispatcher = None with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_data_node(id=\"i1\") core.stop() def test_block_config_update_when_core_service_is_running_standalone_mode(self): _OrchestratorFactory._dispatcher = None with patch(\"sys.argv\", [\"prog\"]): core = Core() Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) core.run() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_data_node(id=\"i1\") core.stop() "} {"text": "import os import pathlib from time import sleep from unittest.mock import patch import pandas as pd import src.taipy.core.taipy as tp from src.taipy.core import Core, Status from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from taipy.config import Config # ################################ USER FUNCTIONS ################################## def sum(a, b): a = a[\"number\"] b = b[\"number\"] return a + b def subtract(a, b): a = a[\"number\"] b = b[\"number\"] return a - b def mult(a, b): return a * b def mult_by_2(a): return a def divide(a, b): return a / b def average(a): return [a.sum() / len(a)] def div_constant_with_sleep(a): sleep(1) return a[\"number\"] / 10 def return_a_number(): return 10 def return_a_number_with_sleep(): sleep(1) return 10 # 
################################ TEST METHODS ################################## def test_skipped_jobs(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_orchestrator() input_config = Config.configure_data_node(\"input\") intermediate_config = Config.configure_data_node(\"intermediate\") output_config = Config.configure_data_node(\"output\") task_config_1 = Config.configure_task(\"first\", mult_by_2, input_config, intermediate_config, skippable=True) task_config_2 = Config.configure_task(\"second\", mult_by_2, intermediate_config, output_config, skippable=True) scenario_config = Config.configure_scenario(\"scenario\", [task_config_1, task_config_2]) with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = tp.create_scenario(scenario_config) scenario.input.write(2) scenario.submit() assert len(tp.get_jobs()) == 2 for job in tp.get_jobs(): assert job.status == Status.COMPLETED scenario.submit() assert len(tp.get_jobs()) == 4 skipped = [] for job in tp.get_jobs(): if job.status != Status.COMPLETED: assert job.status == Status.SKIPPED skipped.append(job) assert len(skipped) == 2 core.stop() def test_complex(): # d1 --- t1 # | # | --- t2 --- d5 --- | t10 --- d12 # | | | # | | | # d2 | --- t5 --- d7 --- t7 --- d9 --- t8 --- d10 --- t9 --- d11 # | | | # d3 --- | | | | # | | | t6 --- d8 ------------------- # | t3 --- d6 ---| # | | # | | # t4 d4 Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_orchestrator() csv_path_inp = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") excel_path_inp = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") csv_path_sum = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/sum.csv\") excel_path_sum = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/sum.xlsx\") excel_path_out = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/res.xlsx\") csv_path_out = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/res.csv\") inp_csv_dn_1 = Config.configure_csv_data_node(\"dn_csv_in_1\", default_path=csv_path_inp) inp_csv_dn_2 = Config.configure_csv_data_node(\"dn_csv_in_2\", default_path=csv_path_inp) inp_excel_dn_1 = Config.configure_excel_data_node(\"dn_excel_in_1\", default_path=excel_path_inp, sheet_name=\"Sheet1\") inp_excel_dn_2 = Config.configure_excel_data_node(\"dn_excel_in_2\", default_path=excel_path_inp, sheet_name=\"Sheet1\") placeholder = Config.configure_data_node(\"dn_placeholder\", default_data=10) dn_csv_sum = Config.configure_csv_data_node(\"dn_sum_csv\", default_path=csv_path_sum) dn_excel_sum = Config.configure_excel_data_node(\"dn_sum_excel\", default_path=excel_path_sum, sheet_name=\"Sheet1\") dn_subtract_csv_excel = Config.configure_pickle_data_node(\"dn_subtract_csv_excel\") dn_mult = Config.configure_pickle_data_node(\"dn_mult\") dn_div = Config.configure_pickle_data_node(\"dn_div\") output_csv_dn = Config.configure_csv_data_node(\"csv_out\", csv_path_out) output_excel_dn = Config.configure_excel_data_node(\"excel_out\", excel_path_out) task_print_csv = Config.configure_task(\"task_print_csv\", print, input=inp_csv_dn_1) task_print_excel = Config.configure_task(\"task_print_excel\", print, input=inp_excel_dn_1) task_sum_csv = Config.configure_task(\"task_sum_csv\", sum, input=[inp_csv_dn_2, inp_csv_dn_1], output=dn_csv_sum) task_sum_excel = Config.configure_task( \"task_sum_excel\", sum, input=[inp_excel_dn_2, 
inp_excel_dn_1], output=dn_excel_sum ) task_subtract_csv_excel = Config.configure_task( \"task_subtract_csv_excel\", subtract, input=[dn_csv_sum, dn_excel_sum], output=dn_subtract_csv_excel ) task_insert_placeholder = Config.configure_task(\"task_insert_placeholder\", return_a_number, output=[placeholder]) task_mult = Config.configure_task( \"task_mult_by_placeholder\", mult, input=[dn_subtract_csv_excel, placeholder], output=dn_mult ) task_div = Config.configure_task(\"task_div_by_placeholder\", divide, input=[dn_mult, placeholder], output=dn_div) task_avg_div = Config.configure_task(\"task_avg_div\", average, input=dn_div, output=output_csv_dn) task_avg_mult = Config.configure_task(\"task_avg_mult\", average, input=dn_mult, output=output_excel_dn) scenario_config = Config.configure_scenario( \"scenario\", [ task_print_csv, task_print_excel, task_sum_csv, task_sum_excel, task_subtract_csv_excel, task_insert_placeholder, task_mult, task_div, task_avg_div, task_avg_mult, ], ) with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = tp.create_scenario(scenario_config) tp.submit(scenario) core.stop() csv_sum_res = pd.read_csv(csv_path_sum) excel_sum_res = pd.read_excel(excel_path_sum) csv_out = pd.read_csv(csv_path_out) excel_out = pd.read_excel(excel_path_out) assert csv_sum_res.to_numpy().flatten().tolist() == [i * 20 for i in range(1, 11)] assert excel_sum_res.to_numpy().flatten().tolist() == [i * 2 for i in range(1, 11)] assert average(csv_sum_res[\"number\"] - excel_sum_res[\"number\"]) == csv_out.to_numpy()[0] assert average((csv_sum_res[\"number\"] - excel_sum_res[\"number\"]) * 10) == excel_out.to_numpy()[0] for path in [csv_path_sum, excel_path_sum, csv_path_out, excel_path_out]: os.remove(path) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
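# Illustrative sketch of the skippable-task behaviour exercised by test_skipped_jobs
# above (hypothetical helper; assumes the public API: `import taipy as tp`,
# `from taipy import Core, Status`, `from taipy.config import Config`).
def _example_skippable_tasks():
    import taipy as tp
    from taipy import Core, Status
    from taipy.config import Config

    def double(x):
        return x * 2

    inp = Config.configure_data_node(\"input\")
    mid = Config.configure_data_node(\"intermediate\")
    out = Config.configure_data_node(\"output\")
    # skippable=True lets the orchestrator skip a task whose inputs did not change.
    first = Config.configure_task(\"first\", double, inp, mid, skippable=True)
    second = Config.configure_task(\"second\", double, mid, out, skippable=True)
    scenario_cfg = Config.configure_scenario(\"scenario\", [first, second])

    core = Core()
    core.run()
    scenario = tp.create_scenario(scenario_cfg)
    scenario.input.write(2)
    scenario.submit()  # first submission: both jobs complete
    scenario.submit()  # nothing changed: both new jobs end up with Status.SKIPPED
    skipped = [job for job in tp.get_jobs() if job.status == Status.SKIPPED]
    assert len(skipped) == 2
    core.stop()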
"} {"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.common._utils import _load_fct from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.exceptions.exceptions import NonExistingVersion from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.conftest import init_config from tests.core.utils import assert_true_after_time def test_core_cli_no_arguments(): with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() assert not Config.core.force core.stop() def test_core_cli_development_mode(): with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() core.stop() def test_core_cli_dev_mode(): with patch(\"sys.argv\", [\"prog\", \"-dev\"]): core = Core() core.run() assert Config.core.mode == \"development\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version() core.stop() def test_core_cli_experiment_mode(): with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_latest_version() assert not Config.core.force core.stop() def test_core_cli_experiment_mode_with_version(): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"2.1\" assert not Config.core.force core.stop() def test_core_cli_experiment_mode_with_force_version(): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\", \"--taipy-force\"]): init_config() core = Core() core.run() assert Config.core.mode == \"experiment\" assert Config.core.version_number == \"2.1\" assert Config.core.force core.stop() def test_core_cli_production_mode(): with patch(\"sys.argv\", [\"prog\", \"--production\"]): core = Core() core.run() assert Config.core.mode == \"production\" assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_latest_version() assert not Config.core.force core.stop() def test_dev_mode_clean_all_entities_of_the_latest_version(): scenario_config = config_scenario() # Create a scenario in development mode with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Initial assertion assert len(_DataManager._get_all(version_number=\"all\")) == 2 assert len(_TaskManager._get_all(version_number=\"all\")) == 1 assert len(_SequenceManager._get_all(version_number=\"all\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 1 
assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 1 # Create a new scenario in experiment mode with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Assert number of entities in 2nd version assert len(_DataManager._get_all(version_number=\"all\")) == 4 assert len(_TaskManager._get_all(version_number=\"all\")) == 2 assert len(_SequenceManager._get_all(version_number=\"all\")) == 2 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 2 assert ( len(_CycleManager._get_all(version_number=\"all\")) == 1 ) # No new cycle is created since old dev version use the same cycle assert len(_JobManager._get_all(version_number=\"all\")) == 2 # Run development mode again with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() # The 1st dev version should be deleted run with development mode assert len(_DataManager._get_all(version_number=\"all\")) == 2 assert len(_TaskManager._get_all(version_number=\"all\")) == 1 assert len(_SequenceManager._get_all(version_number=\"all\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 1 assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 1 # Submit new dev version scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Assert number of entities with 1 dev version and 1 exp version assert len(_DataManager._get_all(version_number=\"all\")) == 4 assert len(_TaskManager._get_all(version_number=\"all\")) == 2 assert len(_SequenceManager._get_all(version_number=\"all\")) == 2 assert len(_ScenarioManager._get_all(version_number=\"all\")) == 2 assert len(_CycleManager._get_all(version_number=\"all\")) == 1 assert len(_JobManager._get_all(version_number=\"all\")) == 2 # Assert number of entities of the latest version only assert len(_DataManager._get_all(version_number=\"latest\")) == 2 assert len(_TaskManager._get_all(version_number=\"latest\")) == 1 assert len(_SequenceManager._get_all(version_number=\"latest\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"latest\")) == 1 assert len(_JobManager._get_all(version_number=\"latest\")) == 1 # Assert number of entities of the development version only assert len(_DataManager._get_all(version_number=\"development\")) == 2 assert len(_TaskManager._get_all(version_number=\"development\")) == 1 assert len(_SequenceManager._get_all(version_number=\"development\")) == 1 assert len(_ScenarioManager._get_all(version_number=\"development\")) == 1 assert len(_JobManager._get_all(version_number=\"development\")) == 1 # Assert number of entities of an unknown version with pytest.raises(NonExistingVersion): assert _DataManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _TaskManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _SequenceManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _ScenarioManager._get_all(version_number=\"foo\") with pytest.raises(NonExistingVersion): assert _JobManager._get_all(version_number=\"foo\") def twice_doppelganger(a): return a * 2 def test_dev_mode_clean_all_entities_when_config_is_alternated(): data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", 
scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice_doppelganger, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) # Create a scenario in development mode with the doppelganger function with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() # Delete the twice_doppelganger function # and clear cache of _load_fct() to simulate a new run del globals()[\"twice_doppelganger\"] _load_fct.cache_clear() # Create a scenario in development mode with another function scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() def test_version_number_when_switching_mode(): with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() ver_dev = _VersionManager._get_development_version() assert ver_1 == ver_dev assert len(_VersionManager._get_all()) == 1 core.stop() # Run with dev mode, the version number is the same with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_2 = _VersionManager._get_latest_version() assert ver_2 == ver_dev assert len(_VersionManager._get_all()) == 1 core.stop() # When run with experiment mode, a new version is created with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() ver_3 = _VersionManager._get_latest_version() assert ver_3 != ver_dev assert len(_VersionManager._get_all()) == 2 core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() ver_4 = _VersionManager._get_latest_version() assert ver_4 == \"2.1\" assert len(_VersionManager._get_all()) == 3 core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\"]): core = Core() core.run() ver_5 = _VersionManager._get_latest_version() assert ver_5 != ver_3 assert ver_5 != ver_4 assert ver_5 != ver_dev assert len(_VersionManager._get_all()) == 4 core.stop() # When run with production mode, the latest version is used as production with patch(\"sys.argv\", [\"prog\", \"--production\"]): core = Core() core.run() ver_6 = _VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_6 == ver_5 assert production_versions == [ver_6] assert len(_VersionManager._get_all()) == 4 core.stop() # When run with production mode, the \"2.1\" version is used as production with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() ver_7 = _VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_7 == \"2.1\" assert production_versions == [ver_6, ver_7] assert len(_VersionManager._get_all()) == 4 core.stop() # Run with dev mode, the version number is the same as the first dev version to overide it with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() ver_7 = _VersionManager._get_latest_version() assert ver_1 == ver_7 assert len(_VersionManager._get_all()) == 4 core.stop() def test_production_mode_load_all_entities_from_previous_production_version(): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = 
Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() production_ver_1 = _VersionManager._get_latest_version() assert _VersionManager._get_production_versions() == [production_ver_1] # When run production mode on a new app, a dev version is created alongside assert _VersionManager._get_development_version() not in _VersionManager._get_production_versions() assert len(_VersionManager._get_all()) == 2 scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() production_ver_2 = _VersionManager._get_latest_version() assert _VersionManager._get_production_versions() == [production_ver_1, production_ver_2] assert len(_VersionManager._get_all()) == 3 # All entities from previous production version should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 core.stop() def test_force_override_experiment_version(): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() ver_1 = _VersionManager._get_latest_version() assert ver_1 == \"1.0\" # When create new experiment version, a development version entity is also created as a placeholder assert len(_VersionManager._get_all()) == 2 # 2 version include 1 experiment 1 development scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() Config.configure_global_app(foo=\"bar\") # Without --taipy-force parameter, a SystemExit will be raised with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() core.stop() # With --taipy-force parameter with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\", \"--taipy-force\"]): core = Core() core.run() ver_2 = _VersionManager._get_latest_version() assert ver_2 == \"1.0\" assert len(_VersionManager._get_all()) == 2 # 2 version include 1 experiment 1 development # All entities from previous submit should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 core.stop() def test_force_override_production_version(): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() ver_1 = 
_VersionManager._get_latest_version() production_versions = _VersionManager._get_production_versions() assert ver_1 == \"1.0\" assert production_versions == [\"1.0\"] # When create new production version, a development version entity is also created as a placeholder assert len(_VersionManager._get_all()) == 2 # 2 version include 1 production 1 development scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 1 core.stop() Config.configure_global_app(foo=\"bar\") # Without --taipy-force parameter, a SystemExit will be raised with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() # With --taipy-force parameter with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\", \"--taipy-force\"]): core = Core() core.run() ver_2 = _VersionManager._get_latest_version() assert ver_2 == \"1.0\" assert len(_VersionManager._get_all()) == 2 # 2 version include 1 production 1 development # All entities from previous submit should be saved scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 2 assert len(_CycleManager._get_all()) == 1 assert len(_JobManager._get_all()) == 2 core.stop() def test_modify_job_configuration_dont_stop_application(caplog): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() Config.configure_job_executions(mode=\"development\") core.run(force_restart=True) scenario = _ScenarioManager._create(scenario_config) jobs = _ScenarioManager._submit(scenario) assert all([job.is_finished() for job in jobs]) core.stop() init_config() scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=5) core.run(force_restart=True) scenario = _ScenarioManager._create(scenario_config) jobs = _ScenarioManager._submit(scenario) assert_true_after_time(lambda: all(job.is_finished() for job in jobs)) error_message = str(caplog.text) assert 'JOB \"mode\" was modified' in error_message assert 'JOB \"max_nb_of_workers\" was modified' in error_message core.stop() def test_modify_config_properties_without_force(caplog): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() init_config() scenario_config_2 = config_scenario_2() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_2) _ScenarioManager._submit(scenario) core.stop() error_message = str(caplog.text) assert 'DATA_NODE \"d3\" was added' in error_message assert 'DATA_NODE \"d0\" was removed' in error_message assert 'DATA_NODE \"d2\" has attribute \"default_path\" modified' in error_message assert 'CORE \"root_folder\" was modified' in error_message assert 'CORE \"repository_type\" was modified' in error_message assert 
'JOB \"mode\" was modified' in error_message assert 'JOB \"max_nb_of_workers\" was modified' in error_message assert 'SCENARIO \"my_scenario\" has attribute \"frequency\" modified' in error_message assert 'SCENARIO \"my_scenario\" has attribute \"tasks\" modified' in error_message assert 'TASK \"my_task\" has attribute \"inputs\" modified' in error_message assert 'TASK \"my_task\" has attribute \"function\" modified' in error_message assert 'TASK \"my_task\" has attribute \"outputs\" modified' in error_message assert 'DATA_NODE \"d2\" has attribute \"has_header\" modified' in error_message assert 'DATA_NODE \"d2\" has attribute \"exposed_type\" modified' in error_message assert 'CORE \"repository_properties\" was added' in error_message def twice(a): return a * 2 def config_scenario(): Config.configure_data_node(id=\"d0\") data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) scenario_config.add_sequences({\"my_sequence\": [task_config]}) return scenario_config def double_twice(a): return a * 2, a * 2 def config_scenario_2(): Config.configure_core( root_folder=\"foo_root\", # Changing the \"storage_folder\" will fail since older versions are stored in older folder # storage_folder=\"foo_storage\", repository_type=\"bar\", repository_properties={\"foo\": \"bar\"}, ) Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=5) data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) # Modify properties of \"d2\" data_node_2_config = Config.configure_data_node( id=\"d2\", storage_type=\"csv\", default_path=\"bar.csv\", has_header=False, exposed_type=\"numpy\" ) # Add new data node \"d3\" data_node_3_config = Config.configure_data_node( id=\"d3\", storage_type=\"csv\", default_path=\"baz.csv\", has_header=False, exposed_type=\"numpy\" ) # Modify properties of \"my_task\", including the function and outputs list Config.configure_task(\"my_task\", double_twice, data_node_3_config, [data_node_1_config, data_node_2_config]) task_config_1 = Config.configure_task(\"my_task_1\", double_twice, data_node_3_config, [data_node_2_config]) # Modify properties of \"my_scenario\", where tasks is now my_task_1 scenario_config = Config.configure_scenario(\"my_scenario\", [task_config_1], frequency=Frequency.MONTHLY) scenario_config.add_sequences({\"my_sequence\": [task_config_1]}) return scenario_config "} {"text": "import datetime import os import pathlib import shutil from unittest import mock import pytest import src.taipy.core.taipy as tp from src.taipy.core import ( Core, Cycle, CycleId, DataNodeId, JobId, Scenario, ScenarioId, Sequence, SequenceId, Task, TaskId, ) from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.config.job_config import JobConfig from src.taipy.core.config.scenario_config import ScenarioConfig from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.data._data_manager import _DataManager from 
src.taipy.core.data.pickle import PickleDataNode from src.taipy.core.exceptions.exceptions import DataNodeConfigIsNotGlobal, InvalidExportPath from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.job.job import Job from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked class TestTaipy: def test_set(self, scenario, cycle, sequence, data_node, task): with mock.patch(\"src.taipy.core.data._data_manager._DataManager._set\") as mck: tp.set(data_node) mck.assert_called_once_with(data_node) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._set\") as mck: tp.set(task) mck.assert_called_once_with(task) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._set\") as mck: tp.set(sequence) mck.assert_called_once_with(sequence) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._set\") as mck: tp.set(scenario) mck.assert_called_once_with(scenario) with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._set\") as mck: tp.set(cycle) mck.assert_called_once_with(cycle) def test_is_editable_is_called(self, cycle, job, data_node): with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._is_editable\") as mck: cycle_id = CycleId(\"CYCLE_id\") tp.is_editable(cycle_id) mck.assert_called_once_with(cycle_id) with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._is_editable\") as mck: tp.is_editable(cycle) mck.assert_called_once_with(cycle) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_editable\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.is_editable(scenario_id) mck.assert_called_once_with(scenario_id) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_editable\") as mck: scenario = Scenario(\"scenario_config_id\", [], {}) tp.is_editable(scenario) mck.assert_called_once_with(scenario) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_editable\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.is_editable(sequence_id) mck.assert_called_once_with(sequence_id) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_editable\") as mck: sequence = Sequence({}, [], \"sequence_id\") tp.is_editable(sequence) mck.assert_called_once_with(sequence) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_editable\") as mck: task_id = TaskId(\"TASK_id\") tp.is_editable(task_id) mck.assert_called_once_with(task_id) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_editable\") as mck: task = Task(\"task_config_id\", {}, print) tp.is_editable(task) mck.assert_called_once_with(task) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_editable\") as mck: job_id = JobId(\"JOB_id\") tp.is_editable(job_id) mck.assert_called_once_with(job_id) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_editable\") as mck: tp.is_editable(job) mck.assert_called_once_with(job) with mock.patch(\"src.taipy.core.data._data_manager._DataManager._is_editable\") as mck: dn_id = DataNodeId(\"DATANODE_id\") tp.is_editable(dn_id) mck.assert_called_once_with(dn_id) with 
mock.patch(\"src.taipy.core.data._data_manager._DataManager._is_editable\") as mck: tp.is_editable(data_node) mck.assert_called_once_with(data_node) def test_is_editable(self): current_date = datetime.datetime.now() cycle = Cycle(Frequency.DAILY, {}, current_date, current_date, current_date) scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) task = Task(\"task_config_id\", {}, print) job = Job(\"job_id\", task, \"submit_id\", scenario.id) dn = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) _CycleManager._set(cycle) _ScenarioManager._set(scenario) _TaskManager._set(task) _JobManager._set(job) _DataManager._set(dn) sequence = scenario.sequences[\"sequence\"] assert tp.is_editable(scenario) assert tp.is_editable(sequence) assert tp.is_editable(task) assert tp.is_editable(cycle) assert tp.is_editable(job) assert tp.is_editable(dn) def test_is_readable_is_called(self, cycle, job, data_node): with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._is_readable\") as mck: cycle_id = CycleId(\"CYCLE_id\") tp.is_readable(cycle_id) mck.assert_called_once_with(cycle_id) with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._is_readable\") as mck: tp.is_readable(cycle) mck.assert_called_once_with(cycle) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_readable\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.is_readable(scenario_id) mck.assert_called_once_with(scenario_id) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_readable\") as mck: scenario = Scenario(\"scenario_config_id\", [], {}) tp.is_readable(scenario) mck.assert_called_once_with(scenario) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_readable\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.is_readable(sequence_id) mck.assert_called_once_with(sequence_id) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_readable\") as mck: sequence = Sequence({}, [], \"sequence_id\") tp.is_readable(sequence) mck.assert_called_once_with(sequence) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_readable\") as mck: task_id = TaskId(\"TASK_id\") tp.is_readable(task_id) mck.assert_called_once_with(task_id) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_readable\") as mck: task = Task(\"task_config_id\", {}, print) tp.is_readable(task) mck.assert_called_once_with(task) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_readable\") as mck: job_id = JobId(\"JOB_id\") tp.is_readable(job_id) mck.assert_called_once_with(job_id) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_readable\") as mck: tp.is_readable(job) mck.assert_called_once_with(job) with mock.patch(\"src.taipy.core.data._data_manager._DataManager._is_readable\") as mck: dn_id = DataNodeId(\"DATANODE_id\") tp.is_readable(dn_id) mck.assert_called_once_with(dn_id) with mock.patch(\"src.taipy.core.data._data_manager._DataManager._is_readable\") as mck: tp.is_readable(data_node) mck.assert_called_once_with(data_node) def test_is_readable(self): current_date = datetime.datetime.now() cycle = Cycle(Frequency.DAILY, {}, current_date, current_date, current_date) scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) task = Task(\"task_config_id\", {}, print) job = Job(\"job_id\", task, \"submit_id\", scenario.id) dn = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) _CycleManager._set(cycle) 
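# Illustrative sketch of the inspection helpers exercised here (hypothetical helper;
# assumes the public `import taipy as tp` API): every persisted entity is readable and
# editable, but only scenarios, sequences, and tasks are submittable.
def _example_entity_inspection(scenario):
    import taipy as tp

    assert tp.is_readable(scenario)     # also True for cycles, jobs, and data nodes
    assert tp.is_editable(scenario)     # also True for cycles, jobs, and data nodes
    assert tp.is_submittable(scenario)  # False for cycles, jobs, and data nodes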
_ScenarioManager._set(scenario) _TaskManager._set(task) _JobManager._set(job) _DataManager._set(dn) sequence = scenario.sequences[\"sequence\"] assert tp.is_readable(scenario) assert tp.is_readable(sequence) assert tp.is_readable(task) assert tp.is_readable(cycle) assert tp.is_readable(job) assert tp.is_readable(dn) def test_is_submittable_is_called(self): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_submittable\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.is_submittable(scenario_id) mck.assert_called_once_with(scenario_id) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_submittable\") as mck: scenario = Scenario(\"scenario_config_id\", [], {}) tp.is_submittable(scenario) mck.assert_called_once_with(scenario) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_submittable\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.is_submittable(sequence_id) mck.assert_called_once_with(sequence_id) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._is_submittable\") as mck: sequence = Sequence({}, [], \"sequence_id\") tp.is_submittable(sequence) mck.assert_called_once_with(sequence) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_submittable\") as mck: task_id = TaskId(\"TASK_id\") tp.is_submittable(task_id) mck.assert_called_once_with(task_id) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._is_submittable\") as mck: task = Task(\"task_config_id\", {}, print) tp.is_submittable(task) mck.assert_called_once_with(task) def test_is_submittable(self): current_date = datetime.datetime.now() cycle = Cycle(Frequency.DAILY, {}, current_date, current_date, current_date) scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) task = Task(\"task_config_id\", {}, print) job = Job(\"job_id\", task, \"submit_id\", scenario.id) dn = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) _CycleManager._set(cycle) _ScenarioManager._set(scenario) _TaskManager._set(task) _JobManager._set(job) _DataManager._set(dn) sequence = scenario.sequences[\"sequence\"] assert tp.is_submittable(scenario) assert tp.is_submittable(sequence) assert tp.is_submittable(task) assert not tp.is_submittable(cycle) assert not tp.is_submittable(job) assert not tp.is_submittable(dn) def test_submit(self, scenario, sequence, task): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._submit\") as mck: tp.submit(scenario) mck.assert_called_once_with(scenario, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._submit\") as mck: tp.submit(sequence) mck.assert_called_once_with(sequence, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._submit\") as mck: tp.submit(task) mck.assert_called_once_with(task, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._submit\") as mck: tp.submit(scenario, False, False, None) mck.assert_called_once_with(scenario, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._submit\") as mck: tp.submit(sequence, False, False, None) mck.assert_called_once_with(sequence, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._submit\") as mck: tp.submit(task, False, False, None) 
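# Illustrative keyword-argument form of the submit calls exercised above (hypothetical
# helper; assumes the public `import taipy as tp` API):
def _example_submit_with_options(scenario):
    import taipy as tp

    tp.submit(scenario)                                     # defaults: force=False, wait=False, timeout=None
    tp.submit(scenario, force=True, wait=True, timeout=60)  # force re-execution and wait up to 60 seconds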
mck.assert_called_once_with(task, force=False, wait=False, timeout=None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._submit\") as mck: tp.submit(scenario, True, True, 60) mck.assert_called_once_with(scenario, force=True, wait=True, timeout=60) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._submit\") as mck: tp.submit(sequence, True, True, 60) mck.assert_called_once_with(sequence, force=True, wait=True, timeout=60) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._submit\") as mck: tp.submit(task, True, True, 60) mck.assert_called_once_with(task, force=True, wait=True, timeout=60) def test_warning_no_core_service_running(self, scenario): _OrchestratorFactory._remove_dispatcher() with pytest.warns(ResourceWarning) as warning: with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._submit\"): tp.submit(scenario) assert len(warning) == 1 assert warning[0].message.args[0] == \"The Core service is NOT running\" def test_get_tasks(self): with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._get_all\") as mck: tp.get_tasks() mck.assert_called_once_with() def test_get_task(self, task): with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._get\") as mck: task_id = TaskId(\"TASK_id\") tp.get(task_id) mck.assert_called_once_with(task_id) def test_task_exists(self): with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._exists\") as mck: task_id = TaskId(\"TASK_id\") tp.exists(task_id) mck.assert_called_once_with(task_id) def test_is_deletable(self, task): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_deletable\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.is_deletable(scenario_id) mck.assert_called_once_with(scenario_id) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_deletable\") as mck: scenario = Scenario(\"config_id\", [], {}) tp.is_deletable(scenario) mck.assert_called_once_with(scenario) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_deletable\") as mck: job_id = JobId(\"JOB_job_id\") tp.is_deletable(job_id) mck.assert_called_once_with(job_id) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_deletable\") as mck: job = Job(\"job_id\", task, \"submit_id\", task.id) tp.is_deletable(job) mck.assert_called_once_with(job) def test_is_promotable(self): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_promotable_to_primary\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.is_promotable(scenario_id) mck.assert_called_once_with(scenario_id) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_promotable_to_primary\") as mck: scenario = Scenario(\"config_id\", [], {}) tp.is_promotable(scenario) mck.assert_called_once_with(scenario) def test_delete_scenario(self): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._hard_delete\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.delete(scenario_id) mck.assert_called_once_with(scenario_id) def test_delete(self): with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._hard_delete\") as mck: cycle_id = CycleId(\"CYCLE_id\") tp.delete(cycle_id) mck.assert_called_once_with(cycle_id) def test_get_scenarios(self, cycle): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get_all\") as mck: tp.get_scenarios() mck.assert_called_once_with() with 
mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get_all_by_cycle\") as mck: tp.get_scenarios(cycle) mck.assert_called_once_with(cycle) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get_all_by_tag\") as mck: tp.get_scenarios(tag=\"tag\") mck.assert_called_once_with(\"tag\") def test_get_scenario(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.get(scenario_id) mck.assert_called_once_with(scenario_id) def test_scenario_exists(self): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._exists\") as mck: scenario_id = ScenarioId(\"SCENARIO_id\") tp.exists(scenario_id) mck.assert_called_once_with(scenario_id) def test_get_primary(self, cycle): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get_primary\") as mck: tp.get_primary(cycle) mck.assert_called_once_with(cycle) def test_get_primary_scenarios(self): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._get_primary_scenarios\") as mck: tp.get_primary_scenarios() mck.assert_called_once_with() def test_set_primary(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._set_primary\") as mck: tp.set_primary(scenario) mck.assert_called_once_with(scenario) def test_tag_and_untag(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._tag\") as mck: tp.tag(scenario, \"tag\") mck.assert_called_once_with(scenario, \"tag\") with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._untag\") as mck: tp.untag(scenario, \"tag\") mck.assert_called_once_with(scenario, \"tag\") def test_compare_scenarios(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._compare\") as mck: tp.compare_scenarios(scenario, scenario, data_node_config_id=\"dn\") mck.assert_called_once_with(scenario, scenario, data_node_config_id=\"dn\") def test_subscribe_scenario(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._subscribe\") as mck: tp.subscribe_scenario(print) mck.assert_called_once_with(print, [], None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._subscribe\") as mck: tp.subscribe_scenario(print, scenario=scenario) mck.assert_called_once_with(print, [], scenario) def test_unsubscribe_scenario(self, scenario): with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._unsubscribe\") as mck: tp.unsubscribe_scenario(print) mck.assert_called_once_with(print, None, None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._unsubscribe\") as mck: tp.unsubscribe_scenario(print, scenario=scenario) mck.assert_called_once_with(print, None, scenario) def test_subscribe_sequence(self, sequence): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._subscribe\") as mck: tp.subscribe_sequence(print) mck.assert_called_once_with(print, None, None) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._subscribe\") as mck: tp.subscribe_sequence(print, sequence=sequence) mck.assert_called_once_with(print, None, sequence) def test_unsubscribe_sequence(self, sequence): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._unsubscribe\") as mck: tp.unsubscribe_sequence(callback=print) mck.assert_called_once_with(print, None, 
None) with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._unsubscribe\") as mck: tp.unsubscribe_sequence(callback=print, sequence=sequence) mck.assert_called_once_with(print, None, sequence) def test_delete_sequence(self): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._hard_delete\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.delete(sequence_id) mck.assert_called_once_with(sequence_id) def test_get_sequence(self, sequence): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._get\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.get(sequence_id) mck.assert_called_once_with(sequence_id) def test_get_sequences(self): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._get_all\") as mck: tp.get_sequences() mck.assert_called_once_with() def test_sequence_exists(self): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._exists\") as mck: sequence_id = SequenceId(\"SEQUENCE_id\") tp.exists(sequence_id) mck.assert_called_once_with(sequence_id) def test_get_job(self): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._get\") as mck: job_id = JobId(\"JOB_id\") tp.get(job_id) mck.assert_called_once_with(job_id) def test_get_jobs(self): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._get_all\") as mck: tp.get_jobs() mck.assert_called_once_with() def test_job_exists(self): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._exists\") as mck: job_id = JobId(\"JOB_id\") tp.exists(job_id) mck.assert_called_once_with(job_id) def test_delete_job(self, task): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._delete\") as mck: job = Job(JobId(\"job_id\"), task, \"submit_id\", \"scenario_id\") tp.delete_job(job) mck.assert_called_once_with(job, False) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._delete\") as mck: job = Job(JobId(\"job_id\"), task, \"submit_id\", \"scenario_id\") tp.delete_job(job, False) mck.assert_called_once_with(job, False) with mock.patch(\"src.taipy.core.job._job_manager._JobManager._delete\") as mck: job = Job(JobId(\"job_id\"), task, \"submit_id\", \"scenario_id\") tp.delete_job(job, True) mck.assert_called_once_with(job, True) def test_delete_jobs(self): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._delete_all\") as mck: tp.delete_jobs() mck.assert_called_once_with() def test_get_latest_job(self, task): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._get_latest\") as mck: tp.get_latest_job(task) mck.assert_called_once_with(task) def test_get_latest_submission(self, task): with mock.patch(\"src.taipy.core.submission._submission_manager._SubmissionManager._get_latest\") as mck: tp.get_latest_submission(task) mck.assert_called_once_with(task) def test_cancel_job(self): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._cancel\") as mck: tp.cancel_job(\"job_id\") mck.assert_called_once_with(\"job_id\") def test_block_config_when_core_is_running_in_development_mode(self): input_cfg_1 = Config.configure_data_node(id=\"i1\", storage_type=\"pickle\", default_data=1, scope=Scope.SCENARIO) output_cfg_1 = Config.configure_data_node(id=\"o1\", storage_type=\"pickle\", scope=Scope.SCENARIO) task_cfg_1 = Config.configure_task(\"t1\", print, input_cfg_1, output_cfg_1) scenario_cfg_1 = Config.configure_scenario(\"s1\", [task_cfg_1], [], Frequency.DAILY) with mock.patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario_1 = 
tp.create_scenario(scenario_cfg_1) tp.submit(scenario_1) with pytest.raises(ConfigurationUpdateBlocked): Config.configure_scenario(\"block_scenario\", set([task_cfg_1])) core.stop() def test_block_config_when_core_is_running_in_standalone_mode(self): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE) input_cfg_1 = Config.configure_data_node(id=\"i1\", storage_type=\"pickle\", default_data=1, scope=Scope.SCENARIO) output_cfg_1 = Config.configure_data_node(id=\"o1\", storage_type=\"pickle\", scope=Scope.SCENARIO) task_cfg_1 = Config.configure_task(\"t1\", print, input_cfg_1, output_cfg_1) scenario_cfg_1 = Config.configure_scenario(\"s1\", [task_cfg_1], [], Frequency.DAILY) with mock.patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() scenario_1 = tp.create_scenario(scenario_cfg_1) tp.submit(scenario_1, wait=True) with pytest.raises(ConfigurationUpdateBlocked): Config.configure_scenario(\"block_scenario\", set([task_cfg_1])) core.stop() def test_get_data_node(self, data_node): with mock.patch(\"src.taipy.core.data._data_manager._DataManager._get\") as mck: tp.get(data_node.id) mck.assert_called_once_with(data_node.id) def test_get_data_nodes(self): with mock.patch(\"src.taipy.core.data._data_manager._DataManager._get_all\") as mck: tp.get_data_nodes() mck.assert_called_once_with() def test_data_node_exists(self): with mock.patch(\"src.taipy.core.data._data_manager._DataManager._exists\") as mck: data_node_id = DataNodeId(\"DATANODE_id\") tp.exists(data_node_id) mck.assert_called_once_with(data_node_id) def test_get_cycles(self): with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._get_all\") as mck: tp.get_cycles() mck.assert_called_once_with() def test_cycle_exists(self): with mock.patch(\"src.taipy.core.cycle._cycle_manager._CycleManager._exists\") as mck: cycle_id = CycleId(\"CYCLE_id\") tp.exists(cycle_id) mck.assert_called_once_with(cycle_id) def test_create_global_data_node(self): dn_cfg = DataNodeConfig(\"id\", \"pickle\", Scope.GLOBAL) with mock.patch(\"src.taipy.core.data._data_manager._DataManager._create_and_set\") as mck: dn = tp.create_global_data_node(dn_cfg) mck.assert_called_once_with(dn_cfg, None, None) dn = tp.create_global_data_node(dn_cfg) assert dn.scope == Scope.GLOBAL assert dn.config_id == dn_cfg.id # Create a global data node from the same configuration should return the same data node dn_2 = tp.create_global_data_node(dn_cfg) assert dn_2.id == dn.id dn_cfg.scope = Scope.SCENARIO with pytest.raises(DataNodeConfigIsNotGlobal): tp.create_global_data_node(dn_cfg) def test_create_scenario(self, scenario): scenario_config = ScenarioConfig(\"scenario_config\") with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._create\") as mck: tp.create_scenario(scenario_config) mck.assert_called_once_with(scenario_config, None, None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._create\") as mck: tp.create_scenario(scenario_config, datetime.datetime(2022, 2, 5)) mck.assert_called_once_with(scenario_config, datetime.datetime(2022, 2, 5), None) with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._create\") as mck: tp.create_scenario(scenario_config, datetime.datetime(2022, 2, 5), \"displayable_name\") mck.assert_called_once_with(scenario_config, datetime.datetime(2022, 2, 5), \"displayable_name\") def test_export_scenario_filesystem(self): shutil.rmtree(\"./tmp\", ignore_errors=True) input_cfg_1 = Config.configure_data_node(id=\"i1\", storage_type=\"pickle\", 
default_data=1, scope=Scope.SCENARIO) output_cfg_1 = Config.configure_data_node(id=\"o1\", storage_type=\"pickle\", scope=Scope.SCENARIO) task_cfg_1 = Config.configure_task(\"t1\", print, input_cfg_1, output_cfg_1) scenario_cfg_1 = Config.configure_scenario(\"s1\", [task_cfg_1], [], Frequency.DAILY) input_cfg_2 = Config.configure_data_node(id=\"i2\", storage_type=\"pickle\", default_data=2, scope=Scope.SCENARIO) output_cfg_2 = Config.configure_data_node(id=\"o2\", storage_type=\"pickle\", scope=Scope.SCENARIO) task_cfg_2 = Config.configure_task(\"t2\", print, input_cfg_2, output_cfg_2) scenario_cfg_2 = Config.configure_scenario(\"s2\", [task_cfg_2], [], Frequency.DAILY) scenario_1 = tp.create_scenario(scenario_cfg_1) job_1 = tp.submit(scenario_1)[0] # Export scenario 1 tp.export_scenario(scenario_1.id, \"./tmp/exp_scenario_1\") assert sorted(os.listdir(\"./tmp/exp_scenario_1/data_nodes\")) == sorted( [f\"{scenario_1.i1.id}.json\", f\"{scenario_1.o1.id}.json\"] ) assert sorted(os.listdir(\"./tmp/exp_scenario_1/tasks\")) == sorted([f\"{scenario_1.t1.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/scenarios\")) == sorted([f\"{scenario_1.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/jobs\")) == sorted([f\"{job_1.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/cycles\")) == sorted([f\"{scenario_1.cycle.id}.json\"]) scenario_2 = tp.create_scenario(scenario_cfg_2) job_2 = tp.submit(scenario_2)[0] # Export scenario 2 scenario_2.export(pathlib.Path.cwd() / \"./tmp/exp_scenario_2\") assert sorted(os.listdir(\"./tmp/exp_scenario_2/data_nodes\")) == sorted( [f\"{scenario_2.i2.id}.json\", f\"{scenario_2.o2.id}.json\"] ) assert sorted(os.listdir(\"./tmp/exp_scenario_2/tasks\")) == sorted([f\"{scenario_2.t2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_2/scenarios\")) == sorted([f\"{scenario_2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_2/jobs\")) == sorted([f\"{job_2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_2/cycles\")) == sorted([f\"{scenario_2.cycle.id}.json\"]) # Export scenario 2 into the folder containing scenario 1 files tp.export_scenario(scenario_2.id, \"./tmp/exp_scenario_1\") # Should have the files as scenario 1 only assert sorted(os.listdir(\"./tmp/exp_scenario_1/tasks\")) == sorted([f\"{scenario_2.t2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/scenarios\")) == sorted([f\"{scenario_2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/jobs\")) == sorted([f\"{job_2.id}.json\"]) assert sorted(os.listdir(\"./tmp/exp_scenario_1/cycles\")) == sorted([f\"{scenario_2.cycle.id}.json\"]) with pytest.raises(InvalidExportPath): tp.export_scenario(scenario_2.id, Config.core.storage_folder) shutil.rmtree(\"./tmp\", ignore_errors=True) def test_get_parents(self): def assert_result_parents_and_expected_parents(parents, expected_parents): for key, items in expected_parents.items(): assert len(parents[key]) == len(expected_parents[key]) parent_ids = [parent.id for parent in parents[key]] assert all([item.id in parent_ids for item in items]) dn_config_1 = Config.configure_data_node(id=\"d1\", storage_type=\"in_memory\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(id=\"d2\", storage_type=\"in_memory\", scope=Scope.SCENARIO) dn_config_3 = Config.configure_data_node(id=\"d3\", storage_type=\"in_memory\", scope=Scope.SCENARIO) dn_config_4 = Config.configure_data_node(id=\"d4\", storage_type=\"in_memory\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"t1\", print, 
dn_config_1, dn_config_2) task_config_2 = Config.configure_task(\"t2\", print, dn_config_2, dn_config_3) scenario_cfg_1 = Config.configure_scenario(\"s1\", [task_config_1, task_config_2], [dn_config_4], Frequency.DAILY) scenario = tp.create_scenario(scenario_cfg_1) tasks = scenario.tasks expected_parents = { \"scenario\": {scenario}, \"task\": {tasks[\"t1\"]}, } parents = tp.get_parents(scenario.tasks[\"t1\"].data_nodes[\"d1\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = { \"scenario\": {scenario}, \"task\": {tasks[\"t1\"], tasks[\"t2\"]}, } parents = tp.get_parents(scenario.tasks[\"t1\"].data_nodes[\"d2\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {\"scenario\": {scenario}, \"task\": {tasks[\"t2\"]}} parents = tp.get_parents(scenario.tasks[\"t2\"].data_nodes[\"d3\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {\"scenario\": {scenario}} parents = tp.get_parents(scenario.tasks[\"t1\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {\"scenario\": {scenario}} parents = tp.get_parents(scenario.tasks[\"t2\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {\"scenario\": {scenario}} parents = tp.get_parents(scenario.additional_data_nodes[\"d4\"]) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {} parents = tp.get_parents(scenario) assert_result_parents_and_expected_parents(parents, expected_parents) expected_parents = {} parents = tp.get_parents(scenario.cycle) assert_result_parents_and_expected_parents(parents, expected_parents) def test_get_cycles_scenarios(self): scenario_cfg_1 = Config.configure_scenario( \"s1\", set(), set(), Frequency.DAILY, ) scenario_cfg_2 = Config.configure_scenario(\"s2\", set(), set(), Frequency.WEEKLY) scenario_cfg_3 = Config.configure_scenario(\"s3\", set(), set(), Frequency.MONTHLY) scenario_cfg_4 = Config.configure_scenario(\"s4\", set(), set(), Frequency.YEARLY) scenario_cfg_5 = Config.configure_scenario(\"s5\", set(), set(), None) now = datetime.datetime.now() scenario_1_1 = tp.create_scenario(scenario_cfg_1, now) scenario_1_2 = tp.create_scenario(scenario_cfg_1, datetime.datetime.now()) scenario_1_3 = tp.create_scenario(scenario_cfg_1, now + datetime.timedelta(days=1)) scenario_1_4 = tp.create_scenario(scenario_cfg_1, now + datetime.timedelta(days=8)) scenario_1_5 = tp.create_scenario(scenario_cfg_1, now + datetime.timedelta(days=25)) scenario_2 = tp.create_scenario(scenario_cfg_2) scenario_3 = tp.create_scenario(scenario_cfg_3) scenario_4 = tp.create_scenario(scenario_cfg_4) scenario_5_1 = tp.create_scenario(scenario_cfg_5) scenario_5_2 = tp.create_scenario(scenario_cfg_5) scenario_5_3 = tp.create_scenario(scenario_cfg_5) expected_cycles_scenarios = { scenario_1_1.cycle: [scenario_1_1.id, scenario_1_2.id], scenario_1_3.cycle: [scenario_1_3.id], scenario_1_4.cycle: [scenario_1_4.id], scenario_1_5.cycle: [scenario_1_5.id], scenario_2.cycle: [scenario_2.id], scenario_3.cycle: [scenario_3.id], scenario_4.cycle: [scenario_4.id], None: [scenario_5_1.id, scenario_5_2.id, scenario_5_3.id], } cycles_scenarios = tp.get_cycles_scenarios() assert expected_cycles_scenarios.keys() == cycles_scenarios.keys() for cycle, scenarios in cycles_scenarios.items(): expected_scenarios = expected_cycles_scenarios[cycle] assert sorted([scenario.id for scenario in scenarios]) == sorted(expected_scenarios) def 
test_get_entities_by_config_id(self): scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) s_1_1 = tp.create_scenario(scenario_config_1) s_1_2 = tp.create_scenario(scenario_config_1) s_1_3 = tp.create_scenario(scenario_config_1) assert len(tp.get_scenarios()) == 3 s_2_1 = tp.create_scenario(scenario_config_2) s_2_2 = tp.create_scenario(scenario_config_2) assert len(tp.get_scenarios()) == 5 s1_scenarios = tp.get_entities_by_config_id(scenario_config_1.id) assert len(s1_scenarios) == 3 assert sorted([s_1_1.id, s_1_2.id, s_1_3.id]) == sorted([scenario.id for scenario in s1_scenarios]) s2_scenarios = tp.get_entities_by_config_id(scenario_config_2.id) assert len(s2_scenarios) == 2 assert sorted([s_2_1.id, s_2_2.id]) == sorted([scenario.id for scenario in s2_scenarios]) def test_get_entities_by_config_id_in_multiple_versions_environment(self): scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) _VersionManager._set_experiment_version(\"1.0\") tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_2) tp.create_scenario(scenario_config_2) assert len(tp.get_scenarios()) == 5 assert len(tp.get_entities_by_config_id(scenario_config_1.id)) == 3 assert len(tp.get_entities_by_config_id(scenario_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_1) tp.create_scenario(scenario_config_2) tp.create_scenario(scenario_config_2) assert len(tp.get_scenarios()) == 5 assert len(tp.get_entities_by_config_id(scenario_config_1.id)) == 3 assert len(tp.get_entities_by_config_id(scenario_config_2.id)) == 2 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
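# Illustrative sketch of the version-scoped lookup asserted above (hypothetical helper;
# assumes the public `import taipy as tp` API): get_entities_by_config_id only returns
# entities that belong to the current version.
def _example_entities_by_config_id(scenario_cfg):
    import taipy as tp

    tp.create_scenario(scenario_cfg)
    tp.create_scenario(scenario_cfg)
    scenarios = tp.get_entities_by_config_id(scenario_cfg.id)
    assert len(scenarios) == 2  # scenarios created under other versions are not returned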
"} {"text": "import os from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.csv import CSVDataNode from src.taipy.core.data.excel import ExcelDataNode from src.taipy.core.data.json import JSONDataNode from src.taipy.core.data.parquet import ParquetDataNode from src.taipy.core.data.pickle import PickleDataNode from taipy.config.config import Config def read_backup_file(path): with open(path, \"r\") as f: lines = f.readlines() return lines @pytest.fixture(scope=\"function\", autouse=True) def init_backup_file(): os.environ[\"TAIPY_BACKUP_FILE_PATH\"] = \".taipy_backups\" if os.path.exists(os.environ[\"TAIPY_BACKUP_FILE_PATH\"]): os.remove(os.environ[\"TAIPY_BACKUP_FILE_PATH\"]) yield if os.path.exists(\".taipy_backups\"): os.remove(\".taipy_backups\") del os.environ[\"TAIPY_BACKUP_FILE_PATH\"] backup_file_path = \".taipy_backups\" def test_backup_storage_folder_when_core_run(): with patch(\"sys.argv\", [\"prog\"]): core = Core() core.run() backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{Config.core.storage_folder}\\n\"] core.stop() def test_no_new_entry_when_file_is_in_storage_folder(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", path=\"dn_1.pickle\") dn_cfg_2 = Config.configure_data_node(\"dn_cfg_2\") # stored in .data folder dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) dn_1.write(\"DN1_CONTENT\") dn_2.write(\"DN2_CONTENT\") backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{dn_1.path}\\n\"] os.remove(dn_1.path) def test_backup_csv_files(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", \"csv\", path=\"example_1.csv\") dn_cfg_2 = Config.configure_data_node(\"dn_cfg_2\", \"csv\", path=\"example_2.csv\") csv_dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(csv_dn_1, CSVDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_1.path}\\n\"] csv_dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) assert isinstance(csv_dn_2, CSVDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_1.path}\\n\", f\"{csv_dn_2.path}\\n\"] csv_dn_1.path = \"example_3.csv\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_2.path}\\n\", f\"{csv_dn_1.path}\\n\"] csv_dn_2.path = \"example_4.csv\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_1.path}\\n\", f\"{csv_dn_2.path}\\n\"] _DataManager._delete(csv_dn_1.id) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_2.path}\\n\"] csv_dn_3 = _DataManager._create_and_set(dn_cfg_1, None, None) csv_dn_4 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(csv_dn_3, CSVDataNode) assert isinstance(csv_dn_4, CSVDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_2.path}\\n\", f\"{csv_dn_3.path}\\n\", f\"{csv_dn_4.path}\\n\"] csv_dn_4.path = \"example_5.csv\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{csv_dn_2.path}\\n\", f\"{csv_dn_3.path}\\n\", f\"{csv_dn_4.path}\\n\"] _DataManager._delete_all() backup_files = read_backup_file(backup_file_path) assert backup_files == [] def test_backup_excel_files(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", \"excel\", path=\"example_1.xlsx\") dn_cfg_2 = 
Config.configure_data_node(\"dn_cfg_2\", \"excel\", path=\"example_2.xlsx\") excel_dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(excel_dn_1, ExcelDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_1.path}\\n\"] excel_dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) assert isinstance(excel_dn_2, ExcelDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_1.path}\\n\", f\"{excel_dn_2.path}\\n\"] excel_dn_1.path = \"example_3.excel\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_2.path}\\n\", f\"{excel_dn_1.path}\\n\"] excel_dn_2.path = \"example_4.excel\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_1.path}\\n\", f\"{excel_dn_2.path}\\n\"] _DataManager._delete(excel_dn_1.id) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_2.path}\\n\"] excel_dn_3 = _DataManager._create_and_set(dn_cfg_1, None, None) excel_dn_4 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(excel_dn_3, ExcelDataNode) assert isinstance(excel_dn_4, ExcelDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_2.path}\\n\", f\"{excel_dn_3.path}\\n\", f\"{excel_dn_4.path}\\n\"] excel_dn_4.path = \"example_5.excel\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{excel_dn_2.path}\\n\", f\"{excel_dn_3.path}\\n\", f\"{excel_dn_4.path}\\n\"] _DataManager._delete_all() backup_files = read_backup_file(backup_file_path) assert backup_files == [] def test_backup_pickle_files(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", \"pickle\", path=\"example_1.p\") dn_cfg_2 = Config.configure_data_node(\"dn_cfg_2\", \"pickle\", path=\"example_2.p\") pickle_dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(pickle_dn_1, PickleDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_1.path}\\n\"] pickle_dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) assert isinstance(pickle_dn_2, PickleDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_1.path}\\n\", f\"{pickle_dn_2.path}\\n\"] pickle_dn_1.path = \"example_3.pickle\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_2.path}\\n\", f\"{pickle_dn_1.path}\\n\"] pickle_dn_2.path = \"example_4.pickle\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_1.path}\\n\", f\"{pickle_dn_2.path}\\n\"] _DataManager._delete(pickle_dn_1.id) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_2.path}\\n\"] pickle_dn_3 = _DataManager._create_and_set(dn_cfg_1, None, None) pickle_dn_4 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(pickle_dn_3, PickleDataNode) assert isinstance(pickle_dn_4, PickleDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_2.path}\\n\", f\"{pickle_dn_3.path}\\n\", f\"{pickle_dn_4.path}\\n\"] pickle_dn_4.path = \"example_5.pickle\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{pickle_dn_2.path}\\n\", f\"{pickle_dn_3.path}\\n\", f\"{pickle_dn_4.path}\\n\"] _DataManager._delete_all() backup_files = read_backup_file(backup_file_path) assert backup_files == [] def test_backup_json_files(): dn_cfg_1 = 
Config.configure_data_node(\"dn_cfg_1\", \"json\", path=\"example_1.json\") dn_cfg_2 = Config.configure_data_node(\"dn_cfg_2\", \"json\", path=\"example_2.json\") json_dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(json_dn_1, JSONDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_1.path}\\n\"] json_dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) assert isinstance(json_dn_2, JSONDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_1.path}\\n\", f\"{json_dn_2.path}\\n\"] json_dn_1.path = \"example_3.json\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_2.path}\\n\", f\"{json_dn_1.path}\\n\"] json_dn_2.path = \"example_4.json\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_1.path}\\n\", f\"{json_dn_2.path}\\n\"] _DataManager._delete(json_dn_1.id) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_2.path}\\n\"] json_dn_3 = _DataManager._create_and_set(dn_cfg_1, None, None) json_dn_4 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(json_dn_3, JSONDataNode) assert isinstance(json_dn_4, JSONDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_2.path}\\n\", f\"{json_dn_3.path}\\n\", f\"{json_dn_4.path}\\n\"] json_dn_4.path = \"example_5.json\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{json_dn_2.path}\\n\", f\"{json_dn_3.path}\\n\", f\"{json_dn_4.path}\\n\"] _DataManager._delete_all() backup_files = read_backup_file(backup_file_path) assert backup_files == [] def test_backup_parquet_files(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", \"parquet\", path=\"example_1.parquet\") dn_cfg_2 = Config.configure_data_node(\"dn_cfg_2\", \"parquet\", path=\"example_2.parquet\") parquet_dn_1 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(parquet_dn_1, ParquetDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_1.path}\\n\"] parquet_dn_2 = _DataManager._create_and_set(dn_cfg_2, None, None) assert isinstance(parquet_dn_2, ParquetDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_1.path}\\n\", f\"{parquet_dn_2.path}\\n\"] parquet_dn_1.path = \"example_3.parquet\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_2.path}\\n\", f\"{parquet_dn_1.path}\\n\"] parquet_dn_2.path = \"example_4.parquet\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_1.path}\\n\", f\"{parquet_dn_2.path}\\n\"] _DataManager._delete(parquet_dn_1.id) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_2.path}\\n\"] parquet_dn_3 = _DataManager._create_and_set(dn_cfg_1, None, None) parquet_dn_4 = _DataManager._create_and_set(dn_cfg_1, None, None) assert isinstance(parquet_dn_3, ParquetDataNode) assert isinstance(parquet_dn_4, ParquetDataNode) backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_2.path}\\n\", f\"{parquet_dn_3.path}\\n\", f\"{parquet_dn_4.path}\\n\"] parquet_dn_4.path = \"example_5.parquet\" backup_files = read_backup_file(backup_file_path) assert backup_files == [f\"{parquet_dn_2.path}\\n\", f\"{parquet_dn_3.path}\\n\", f\"{parquet_dn_4.path}\\n\"] _DataManager._delete_all() backup_files = 
read_backup_file(backup_file_path) assert backup_files == [] def test_no_backup_if_no_env_var(): dn_cfg_1 = Config.configure_data_node(\"dn_cfg_1\", \"csv\", path=\"example_1.csv\") _DataManager._create_and_set(dn_cfg_1, None, None) "} {"text": "import json import os from datetime import datetime, timedelta import pytest from src.taipy.core._repository._decoder import _Decoder from src.taipy.core._repository._encoder import _Encoder @pytest.fixture(scope=\"function\", autouse=True) def create_and_delete_json_file(): test_json_file = { \"name\": \"testing\", \"date\": datetime(1991, 1, 1), \"default_data\": \"data for testing encoder\", \"validity_period\": timedelta(days=1), } with open(\"data.json\", \"w\") as f: json.dump(test_json_file, f, ensure_ascii=False, indent=4, cls=_Encoder) yield os.unlink(\"data.json\") def test_json_encoder(): with open(\"data.json\") as json_file: data = json.load(json_file) assert data[\"name\"] == \"testing\" assert data[\"default_data\"] == \"data for testing encoder\" assert data[\"date\"] == { \"__type__\": \"Datetime\", \"__value__\": \"1991-01-01T00:00:00\", } assert data[\"date\"].get(\"__type__\") == \"Datetime\" assert data[\"date\"].get(\"__value__\") == \"1991-01-01T00:00:00\" def test_json_decoder(): with open(\"data.json\") as json_file: data = json.load(json_file, cls=_Decoder) assert data[\"name\"] == \"testing\" assert data[\"default_data\"] == \"data for testing encoder\" assert data[\"date\"] == datetime(1991, 1, 1) "} {"text": "import src.taipy.core.taipy as tp from src.taipy.core.config import Config def test_no_special_characters(): scenario_config = Config.configure_scenario(\"scenario_1\") scenario = tp.create_scenario(scenario_config, name=\"martin\") assert scenario.name == \"martin\" scenarios = tp.get_scenarios() assert len(scenarios) == 1 assert scenarios[0].name == \"martin\" def test_many_special_characters(): scenario_config = Config.configure_scenario(\"scenario_1\") special_characters = ( \"!#$%&'()*+,-./:;<=>?@[]^_`\\\\{\" \"\u00bb\u00bc\u00bd\u00be\u00bf\u00c0\u00c1\u00c2\u00c3\u00c4\u00c5\u00c6\u00c7\u00c8\u00c9\u00ca\u00cb\u00cc\u00cd\u00ce\u00cf\u00d0\u00d1\u00d2\u00d3\u00d4\u00d5\u00d6\" \"\u00d7\u00d8\u00d9\u00da\u00db\u00dc\u00dd\u00de\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e5\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f0\u00f1\u00f2\" \"\u00f3\u00f4\u00f5\u00f6\u00f7\u00f8\u00f9\u00fa\u00fb\u00fc\u00fd\u00fe\u00ff\u0100\u0101\u0102\u0103\u0104\u0105\u0106\u0107\u0108\u0109\u010a\u010b\u010c\u010d\u010e\" \"\u010f\u0110\u0111\u0112\u0113\u0114\u0115\u0116\u0117\u0118\u0119\u011a\u011b\u011c\u011d\u011e\u011f\u0120\u0121\u0122\u0123\u0124\u0125\u0126\u0127\u0128\u0129\u012a\" \"\u012b\u012c\u012d\u012e\u012f\u0130\u0132\u0133\u0134\u0135\u0136\u0137\u0138\u0139\u013a\u013b\u013c\u013d\u013e\u013f\u0140\u0141\u0142\u0143\u0144\u0145\u0146\u0147\" \"\u0148\u0149\u014a\u014b\u014c\u014d\u014e\u014f\u0150\u0151\u0152\u0153\u0154\u0155\u0156\u0157\u0158\u0159\u015a\u015b\u015c\u015d\u015e\u015f\u0160\u0161\u0162\u0163\" \"\u0164\u0165\u0166\u0167\u0168\u0169\u016a\u016b\u016c\u016d\u016e\u016f\u0170\u0171\u0172\u0173\u0174\u0175\u0176\u0177\u0178\u0179\u017a\u017b\u017c\u017d\u017e\u017f\" ) scenario = tp.create_scenario(scenario_config, name=special_characters) assert scenario.name == special_characters scenarios = tp.get_scenarios() assert len(scenarios) == 1 assert scenarios[0].name == special_characters "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not 
use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import json import os import pathlib import shutil import pytest from src.taipy.core.exceptions.exceptions import InvalidExportPath from taipy.config.config import Config from .mocks import MockConverter, MockFSRepository, MockModel, MockObj, MockSQLRepository class TestRepositoriesStorage: @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_save_and_fetch_model(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) fetched_model = r._load(m.id) assert m == fetched_model @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_exists(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) assert r._exists(m.id) assert not r._exists(\"not-existed-model\") @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_get_all(self, mock_repo, params, init_sql_repo): objs = [] r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) r._save(m) _objs = r._load_all() assert len(_objs) == 5 for obj in _objs: assert isinstance(obj, MockObj) assert sorted(objs, key=lambda o: o.id) == sorted(_objs, key=lambda o: o.id) @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_delete_all(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") r._save(m) _models = r._load_all() assert len(_models) == 5 r._delete_all() _models = r._load_all() assert len(_models) == 0 @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) def test_delete_many(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) r._delete_all() for i in range(5): m = MockObj(f\"uuid-{i}\", f\"Foo{i}\") r._save(m) _models = r._load_all() assert len(_models) == 5 r._delete_many([\"uuid-0\", \"uuid-1\"]) _models = r._load_all() assert len(_models) == 3 @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": 
MockConverter}), ], ) def test_search(self, mock_repo, params, init_sql_repo): r = mock_repo(**params) r._delete_all() m = MockObj(\"uuid\", \"foo\") r._save(m) m1 = r._search(\"name\", \"bar\") m2 = r._search(\"name\", \"foo\") assert m1 == [] assert m2 == [m] @pytest.mark.parametrize( \"mock_repo,params\", [ (MockFSRepository, {\"model_type\": MockModel, \"dir_name\": \"mock_model\", \"converter\": MockConverter}), (MockSQLRepository, {\"model_type\": MockModel, \"converter\": MockConverter}), ], ) @pytest.mark.parametrize(\"export_path\", [\"tmp\"]) def test_export(self, mock_repo, params, export_path, init_sql_repo): r = mock_repo(**params) m = MockObj(\"uuid\", \"foo\") r._save(m) r._export(\"uuid\", export_path) assert pathlib.Path(os.path.join(export_path, \"mock_model/uuid.json\")).exists() with open(os.path.join(export_path, \"mock_model/uuid.json\"), \"r\") as exported_file: exported_data = json.load(exported_file) assert exported_data[\"id\"] == \"uuid\" assert exported_data[\"name\"] == \"foo\" # Export to same location again should work r._export(\"uuid\", export_path) assert pathlib.Path(os.path.join(export_path, \"mock_model/uuid.json\")).exists() if mock_repo == MockFSRepository: with pytest.raises(InvalidExportPath): r._export(\"uuid\", Config.core.storage_folder) shutil.rmtree(export_path, ignore_errors=True) "} {"text": "import dataclasses import pathlib from dataclasses import dataclass from typing import Any, Dict, Optional from sqlalchemy import Column, String, Table from sqlalchemy.dialects import sqlite from sqlalchemy.orm import declarative_base, registry from sqlalchemy.schema import CreateTable from src.taipy.core._repository._abstract_converter import _AbstractConverter from src.taipy.core._repository._filesystem_repository import _FileSystemRepository from src.taipy.core._repository._sql_repository import _SQLRepository from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config class Base: __allow_unmapped__ = True Base = declarative_base(cls=Base) # type: ignore mapper_registry = registry() @dataclass class MockObj: def __init__(self, id: str, name: str, version: Optional[str] = None) -> None: self.id = id self.name = name if version: self._version = version else: self._version = _VersionManager._get_latest_version() @dataclass class MockModel(Base): # type: ignore __table__ = Table( \"mock_model\", mapper_registry.metadata, Column(\"id\", String(200), primary_key=True), Column(\"name\", String(200)), Column(\"version\", String(200)), ) id: str name: str version: str def to_dict(self): return dataclasses.asdict(self) @staticmethod def from_dict(data: Dict[str, Any]): return MockModel(id=data[\"id\"], name=data[\"name\"], version=data[\"version\"]) def _to_entity(self): return MockObj(id=self.id, name=self.name, version=self.version) @classmethod def _from_entity(cls, entity: MockObj): return MockModel(id=entity.id, name=entity.name, version=entity._version) def to_list(self): return [self.id, self.name, self.version] class MockConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, entity): return MockModel(id=entity.id, name=entity.name, version=entity._version) @classmethod def _model_to_entity(cls, model): return MockObj(id=model.id, name=model.name, version=model.version) class MockFSRepository(_FileSystemRepository): def __init__(self, **kwargs): super().__init__(**kwargs) @property def _storage_folder(self) -> pathlib.Path: return pathlib.Path(Config.core.storage_folder) # type: ignore class 
MockSQLRepository(_SQLRepository): def __init__(self, **kwargs): super().__init__(**kwargs) self.db.execute(str(CreateTable(MockModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect()))) "} {"text": "import json from datetime import timedelta from src.taipy.core.common.mongo_default_document import MongoDefaultDocument from taipy.config.common.scope import Scope from taipy.config.config import Config def test_set_default_data_node_configuration(): data_node1 = Config.configure_data_node(id=\"input_data1\") assert data_node1.storage_type == \"pickle\" assert data_node1.scope == Scope.SCENARIO assert data_node1.validity_period is None Config.set_default_data_node_configuration(\"in_memory\", scope=Scope.GLOBAL) data_node2 = Config.configure_data_node(id=\"input_data2\") assert data_node2.storage_type == \"in_memory\" assert data_node2.scope == Scope.GLOBAL assert data_node2.validity_period is None Config.set_default_data_node_configuration(\"csv\") data_node3 = Config.configure_data_node(id=\"input_data3\") assert data_node3.storage_type == \"csv\" assert data_node3.scope == Scope.SCENARIO assert data_node3.validity_period is None Config.set_default_data_node_configuration(\"json\", validity_period=timedelta(1)) data_node4 = Config.configure_data_node(id=\"input_data4\") assert data_node4.storage_type == \"json\" assert data_node4.scope == Scope.SCENARIO assert data_node4.validity_period == timedelta(1) def test_set_default_data_node_configuration_replace_old_default_config(): Config.set_default_data_node_configuration( \"in_memory\", prop1=\"1\", prop2=\"2\", prop3=\"3\", ) dn1 = Config.configure_data_node(id=\"dn1\") assert len(dn1.properties) == 3 Config.set_default_data_node_configuration( \"csv\", prop4=\"4\", prop5=\"5\", prop6=\"6\", ) dn2 = Config.configure_data_node(id=\"dn2\") assert dn2.storage_type == \"csv\" assert len(dn2.properties) == 6 # encoding, exposed_type, and has_header too assert dn2.prop4 == \"4\" assert dn2.prop5 == \"5\" assert dn2.prop6 == \"6\" assert dn2.prop1 is None assert dn2.prop2 is None assert dn2.prop3 is None def test_config_storage_type_different_from_default_data_node(): Config.set_default_data_node_configuration( storage_type=\"pickle\", custom_property={\"foo\": \"bar\"}, scope=Scope.GLOBAL, ) # Config a datanode with specific \"storage_type\" different than \"pickle\" # should ignore the default datanode csv_dn = Config.configure_data_node(id=\"csv_dn\", storage_type=\"csv\") assert len(csv_dn.properties) == 3 # encoding, exposed_type, and has_header assert csv_dn.properties.get(\"custom_property\") is None assert csv_dn.scope == Scope.SCENARIO def test_set_default_csv_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"csv\", default_path=\"default.csv\", has_header=False, exposed_type=\"numpy\", scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"csv\" assert dn1.scope == Scope.GLOBAL assert dn1.default_path == \"default.csv\" assert dn1.has_header is False assert dn1.exposed_type == \"numpy\" assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node(id=\"dn2\", default_path=\"dn2.csv\") assert dn2.storage_type == \"csv\" assert dn2.default_path == \"dn2.csv\" assert dn2.has_header is False assert dn2.exposed_type == \"numpy\" 
assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"csv\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"csv\", default_path=\"dn3.csv\", scope=Scope.SCENARIO, validity_period=timedelta(1), ) assert dn3.storage_type == \"csv\" assert dn3.default_path == \"dn3.csv\" assert dn3.has_header is False assert dn3.exposed_type == \"numpy\" assert dn3.scope == Scope.SCENARIO assert dn3.validity_period == timedelta(1) def test_set_default_json_data_node_configuration(): class MyCustomEncoder(json.JSONEncoder): ... class MyCustomDecoder(json.JSONDecoder): ... Config.set_default_data_node_configuration( storage_type=\"json\", default_path=\"default.json\", encoder=MyCustomEncoder, scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"json\" assert dn1.default_path == \"default.json\" assert dn1.encoder == MyCustomEncoder assert dn1.decoder is None assert dn1.scope == Scope.GLOBAL assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node(id=\"dn2\", default_path=\"dn2.json\") assert dn2.storage_type == \"json\" assert dn2.default_path == \"dn2.json\" assert dn2.encoder == MyCustomEncoder assert dn2.decoder is None assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"json\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"json\", default_path=\"dn3.json\", decoder=MyCustomDecoder, validity_period=timedelta(1), ) assert dn3.storage_type == \"json\" assert dn3.default_path == \"dn3.json\" assert dn3.encoder == MyCustomEncoder assert dn3.decoder == MyCustomDecoder assert dn3.scope == Scope.GLOBAL assert dn3.validity_period == timedelta(1) def test_set_default_parquet_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"parquet\", default_path=\"default.parquet\", compression=\"gzip\", exposed_type=\"numpy\", scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"parquet\" assert dn1.default_path == \"default.parquet\" assert dn1.engine == \"pyarrow\" assert dn1.compression == \"gzip\" assert dn1.read_kwargs is None assert dn1.write_kwargs is None assert dn1.exposed_type == \"numpy\" assert dn1.scope == Scope.GLOBAL assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node( id=\"dn2\", default_path=\"dn2.parquet\", engine=\"fastparquet\", ) assert dn2.storage_type == \"parquet\" assert dn2.default_path == \"dn2.parquet\" assert dn2.engine == \"fastparquet\" assert dn2.compression == \"gzip\" assert dn2.read_kwargs is None assert dn2.write_kwargs is None assert dn2.exposed_type == \"numpy\" assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"parquet\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"parquet\", 
default_path=\"dn3.parquet\", read_kwargs={\"filter\": \"foo\"}, scope=Scope.SCENARIO, validity_period=timedelta(1), ) assert dn3.storage_type == \"parquet\" assert dn3.default_path == \"dn3.parquet\" assert dn3.engine == \"pyarrow\" assert dn3.compression == \"gzip\" assert dn3.read_kwargs == {\"filter\": \"foo\"} assert dn3.write_kwargs is None assert dn3.exposed_type == \"numpy\" assert dn3.scope == Scope.SCENARIO assert dn3.validity_period == timedelta(1) def test_set_default_excel_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"excel\", default_path=\"default.xlsx\", has_header=False, exposed_type=\"numpy\", scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"excel\" assert dn1.scope == Scope.GLOBAL assert dn1.default_path == \"default.xlsx\" assert dn1.has_header is False assert dn1.sheet_name is None assert dn1.exposed_type == \"numpy\" assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node(id=\"dn2\", default_path=\"dn2.xlsx\", sheet_name=\"sheet_1\") assert dn2.storage_type == \"excel\" assert dn2.default_path == \"dn2.xlsx\" assert dn2.has_header is False assert dn2.sheet_name == \"sheet_1\" assert dn2.exposed_type == \"numpy\" assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"excel\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"excel\", default_path=\"dn3.xlsx\", scope=Scope.SCENARIO, validity_period=timedelta(1), ) assert dn3.storage_type == \"excel\" assert dn3.default_path == \"dn3.xlsx\" assert dn3.has_header is False assert dn3.sheet_name is None assert dn3.exposed_type == \"numpy\" assert dn3.scope == Scope.SCENARIO assert dn3.validity_period == timedelta(1) def test_set_default_pickle_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"pickle\", default_data=1, exposed_type=\"numpy\", scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"pickle\" assert dn1.scope == Scope.GLOBAL assert dn1.default_path is None assert dn1.default_data == 1 assert dn1.exposed_type == \"numpy\" assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node(id=\"dn2\", default_path=\"dn2.pkl\", default_data=2) assert dn2.storage_type == \"pickle\" assert dn2.default_path == \"dn2.pkl\" assert dn2.default_data == 2 assert dn2.exposed_type == \"numpy\" assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"pickle\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"pickle\", default_path=\"dn3.pkl\", scope=Scope.SCENARIO, validity_period=timedelta(1), ) assert dn3.storage_type == \"pickle\" assert dn3.default_path == \"dn3.pkl\" assert dn3.default_data == 1 assert dn3.exposed_type == \"numpy\" assert dn3.scope == Scope.SCENARIO assert dn3.validity_period == timedelta(1) def 
test_set_default_sql_table_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"sql_table\", db_username=\"default_user\", db_password=\"default_pwd\", db_name=\"default_db_name\", db_engine=\"mssql\", table_name=\"default_table\", db_port=1010, db_host=\"default_host\", db_driver=\"default server\", db_extra_args={\"default\": \"default\"}, scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"sql_table\" assert dn1.db_username == \"default_user\" assert dn1.db_password == \"default_pwd\" assert dn1.db_name == \"default_db_name\" assert dn1.db_engine == \"mssql\" assert dn1.table_name == \"default_table\" assert dn1.db_port == 1010 assert dn1.db_host == \"default_host\" assert dn1.db_driver == \"default server\" assert dn1.db_extra_args == {\"default\": \"default\"} assert dn1.scope == Scope.GLOBAL assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node( id=\"dn2\", table_name=\"table_2\", db_port=2020, db_host=\"host_2\", ) assert dn2.storage_type == \"sql_table\" assert dn2.db_username == \"default_user\" assert dn2.db_password == \"default_pwd\" assert dn2.db_name == \"default_db_name\" assert dn2.db_engine == \"mssql\" assert dn2.table_name == \"table_2\" assert dn2.db_port == 2020 assert dn2.db_host == \"host_2\" assert dn2.db_driver == \"default server\" assert dn2.db_extra_args == {\"default\": \"default\"} assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"sql_table\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"sql_table\", db_username=\"user_3\", db_password=\"pwd_3\", db_name=\"db_3\", db_engine=\"postgresql\", table_name=\"table_3\", validity_period=timedelta(1), ) assert dn3.storage_type == \"sql_table\" assert dn3.db_username == \"user_3\" assert dn3.db_password == \"pwd_3\" assert dn3.db_name == \"db_3\" assert dn3.db_engine == \"postgresql\" assert dn3.table_name == \"table_3\" assert dn3.db_port == 1010 assert dn3.db_host == \"default_host\" assert dn3.db_driver == \"default server\" assert dn3.db_extra_args == {\"default\": \"default\"} assert dn3.scope == Scope.GLOBAL assert dn3.validity_period == timedelta(1) def test_set_default_sql_data_node_configuration(): def query_builder(): ... 
Config.set_default_data_node_configuration( storage_type=\"sql\", db_username=\"default_user\", db_password=\"default_pwd\", db_name=\"default_db_name\", db_engine=\"mssql\", read_query=\"SELECT * FROM default_table\", write_query_builder=query_builder, append_query_builder=query_builder, db_port=1010, db_host=\"default_host\", db_driver=\"default server\", db_extra_args={\"default\": \"default\"}, scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"sql\" assert dn1.db_username == \"default_user\" assert dn1.db_password == \"default_pwd\" assert dn1.db_name == \"default_db_name\" assert dn1.db_engine == \"mssql\" assert dn1.read_query == \"SELECT * FROM default_table\" assert dn1.write_query_builder == query_builder assert dn1.append_query_builder == query_builder assert dn1.db_port == 1010 assert dn1.db_host == \"default_host\" assert dn1.db_driver == \"default server\" assert dn1.db_extra_args == {\"default\": \"default\"} assert dn1.scope == Scope.GLOBAL assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node( id=\"dn2\", table_name=\"table_2\", db_port=2020, db_host=\"host_2\", read_query=\"SELECT * FROM table_2\" ) assert dn2.storage_type == \"sql\" assert dn2.db_username == \"default_user\" assert dn2.db_password == \"default_pwd\" assert dn2.db_name == \"default_db_name\" assert dn2.db_engine == \"mssql\" assert dn2.read_query == \"SELECT * FROM table_2\" assert dn2.write_query_builder == query_builder assert dn2.append_query_builder == query_builder assert dn2.db_port == 2020 assert dn2.db_host == \"host_2\" assert dn2.db_driver == \"default server\" assert dn2.db_extra_args == {\"default\": \"default\"} assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"sql\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"sql\", db_username=\"user_3\", db_password=\"pwd_3\", db_name=\"db_3\", db_engine=\"postgresql\", read_query=\"SELECT * FROM table_3\", write_query_builder=query_builder, validity_period=timedelta(1), ) assert dn3.storage_type == \"sql\" assert dn3.db_username == \"user_3\" assert dn3.db_password == \"pwd_3\" assert dn3.db_name == \"db_3\" assert dn3.db_engine == \"postgresql\" assert dn3.read_query == \"SELECT * FROM table_3\" assert dn3.write_query_builder == query_builder assert dn3.append_query_builder == query_builder assert dn3.db_port == 1010 assert dn3.db_host == \"default_host\" assert dn3.db_driver == \"default server\" assert dn3.db_extra_args == {\"default\": \"default\"} assert dn3.scope == Scope.GLOBAL assert dn3.validity_period == timedelta(1) def test_set_default_mongo_collection_data_node_configuration(): Config.set_default_data_node_configuration( storage_type=\"mongo_collection\", db_name=\"default_db_name\", collection_name=\"default_collection\", db_port=1010, db_host=\"default_host\", db_driver=\"default server\", db_extra_args={\"default\": \"default\"}, scope=Scope.GLOBAL, validity_period=timedelta(2), ) # Config with generic config_data_node without storage_type # should return the default DataNode dn1 = Config.configure_data_node(id=\"dn1\") assert dn1.storage_type == \"mongo_collection\" assert dn1.db_username == \"\" assert dn1.db_password 
== \"\" assert dn1.db_name == \"default_db_name\" assert dn1.collection_name == \"default_collection\" assert dn1.custom_document == MongoDefaultDocument assert dn1.db_host == \"default_host\" assert dn1.db_port == 1010 assert dn1.db_driver == \"default server\" assert dn1.db_extra_args == {\"default\": \"default\"} assert dn1.scope == Scope.GLOBAL assert dn1.validity_period == timedelta(2) # Config with generic config_data_node without storage_type # with custom properties dn2 = Config.configure_data_node( id=\"dn2\", collection_name=\"collection_2\", db_port=2020, db_host=\"host_2\", ) assert dn2.storage_type == \"mongo_collection\" assert dn2.db_username == \"\" assert dn2.db_password == \"\" assert dn2.db_name == \"default_db_name\" assert dn2.collection_name == \"collection_2\" assert dn2.custom_document == MongoDefaultDocument assert dn2.db_host == \"host_2\" assert dn2.db_port == 2020 assert dn2.db_driver == \"default server\" assert dn2.db_extra_args == {\"default\": \"default\"} assert dn2.scope == Scope.GLOBAL assert dn2.validity_period == timedelta(2) # Config a datanode with specific \"storage_type\" = \"mongo_collection\" # should use properties from the default datanode dn3 = Config.configure_data_node( id=\"dn3\", storage_type=\"mongo_collection\", db_name=\"db_3\", collection_name=\"collection_3\", db_username=\"user_3\", db_password=\"pwd_3\", validity_period=timedelta(1), ) assert dn3.storage_type == \"mongo_collection\" assert dn3.db_username == \"user_3\" assert dn3.db_password == \"pwd_3\" assert dn3.db_name == \"db_3\" assert dn3.collection_name == \"collection_3\" assert dn3.custom_document == MongoDefaultDocument assert dn3.db_port == 1010 assert dn3.db_host == \"default_host\" assert dn3.db_driver == \"default server\" assert dn3.db_extra_args == {\"default\": \"default\"} assert dn3.scope == Scope.GLOBAL assert dn3.validity_period == timedelta(1) "} {"text": "import os from unittest import mock from src.taipy.core.config import DataNodeConfig from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def _configure_task_in_toml(): return NamedTemporaryFile( content=\"\"\" [TAIPY] [DATA_NODE.input] [DATA_NODE.output] [TASK.tasks1] function = \"builtins.print:function\" inputs = [ \"input:SECTION\",] outputs = [ \"output:SECTION\",] \"\"\" ) def _check_data_nodes_instance(dn_id, task_id): \"\"\"Check if the data node instance in the task config correctly points to the Config._applied_config, not the Config._python_config or the Config._file_config \"\"\" dn_config_applied_instance = Config.data_nodes[dn_id] for dn in Config.tasks[task_id].inputs: if dn.id == dn_id: dn_config_instance_via_task = dn for dn in Config.tasks[task_id].outputs: if dn.id == dn_id: dn_config_instance_via_task = dn dn_config_python_instance = None if Config._python_config._sections.get(\"DATA_NODE\", None): dn_config_python_instance = Config._python_config._sections[\"DATA_NODE\"][dn_id] dn_config_file_instance = None if Config._file_config._sections.get(\"DATA_NODE\", None): dn_config_file_instance = Config._file_config._sections[\"DATA_NODE\"][dn_id] if dn_config_python_instance: assert dn_config_python_instance.scope is None assert dn_config_python_instance is not dn_config_applied_instance assert dn_config_python_instance is not dn_config_instance_via_task if dn_config_file_instance: assert dn_config_file_instance.scope is None assert dn_config_file_instance is not dn_config_applied_instance 
assert dn_config_file_instance is not dn_config_instance_via_task assert dn_config_applied_instance.scope == DataNodeConfig._DEFAULT_SCOPE assert dn_config_instance_via_task is dn_config_applied_instance def test_data_node_instance_when_configure_task_in_python(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") Config.configure_task(\"tasks1\", print, input_config, output_config) _check_data_nodes_instance(\"input\", \"tasks1\") _check_data_nodes_instance(\"output\", \"tasks1\") def test_data_node_instance_when_configure_task_by_loading_toml(): toml_config = _configure_task_in_toml() Config.load(toml_config.filename) _check_data_nodes_instance(\"input\", \"tasks1\") _check_data_nodes_instance(\"output\", \"tasks1\") def test_data_node_instance_when_configure_task_by_overriding_toml(): toml_config = _configure_task_in_toml() Config.override(toml_config.filename) _check_data_nodes_instance(\"input\", \"tasks1\") _check_data_nodes_instance(\"output\", \"tasks1\") def test_task_config_creation(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") task_config = Config.configure_task(\"tasks1\", print, input_config, output_config) assert not task_config.skippable assert list(Config.tasks) == [\"default\", task_config.id] task2 = Config.configure_task(\"tasks2\", print, input_config, output_config, skippable=True) assert task2.skippable assert list(Config.tasks) == [\"default\", task_config.id, task2.id] def test_task_count(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") Config.configure_task(\"tasks1\", print, input_config, output_config) assert len(Config.tasks) == 2 Config.configure_task(\"tasks2\", print, input_config, output_config) assert len(Config.tasks) == 3 Config.configure_task(\"tasks3\", print, input_config, output_config) assert len(Config.tasks) == 4 def test_task_getitem(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") task_id = \"tasks1\" task_cfg = Config.configure_task(task_id, print, input_config, output_config) assert Config.tasks[task_id].id == task_cfg.id assert Config.tasks[task_id].properties == task_cfg.properties assert Config.tasks[task_id].function == task_cfg.function assert Config.tasks[task_id].input_configs == task_cfg.input_configs assert Config.tasks[task_id].output_configs == task_cfg.output_configs assert Config.tasks[task_id].skippable == task_cfg.skippable def test_task_creation_no_duplication(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") Config.configure_task(\"tasks1\", print, input_config, output_config) assert len(Config.tasks) == 2 Config.configure_task(\"tasks1\", print, input_config, output_config) assert len(Config.tasks) == 2 def test_task_config_with_env_variable_value(): input_config = Config.configure_data_node(\"input\") output_config = Config.configure_data_node(\"output\") with mock.patch.dict(os.environ, {\"FOO\": \"plop\", \"BAR\": \"baz\"}): Config.configure_task(\"task_name\", print, input_config, output_config, prop=\"ENV[BAR]\") assert Config.tasks[\"task_name\"].prop == \"baz\" assert Config.tasks[\"task_name\"].properties[\"prop\"] == \"baz\" assert Config.tasks[\"task_name\"]._properties[\"prop\"] == \"ENV[BAR]\" def test_clean_config(): dn1 = Config.configure_data_node(\"dn1\") dn2 = Config.configure_data_node(\"dn2\") task1_config 
= Config.configure_task(\"id1\", print, dn1, dn2) task2_config = Config.configure_task(\"id2\", print, dn2, dn1) assert Config.tasks[\"id1\"] is task1_config assert Config.tasks[\"id2\"] is task2_config task1_config._clean() task2_config._clean() # Check if the instance before and after _clean() is the same assert Config.tasks[\"id1\"] is task1_config assert Config.tasks[\"id2\"] is task2_config assert task1_config.id == \"id1\" assert task2_config.id == \"id2\" assert task1_config.function is task1_config.function is None assert task1_config.inputs == task1_config.inputs == [] assert task1_config.input_configs == task1_config.input_configs == [] assert task1_config.outputs == task1_config.outputs == [] assert task1_config.output_configs == task1_config.output_configs == [] assert task1_config.skippable is task1_config.skippable is False assert task1_config.properties == task1_config.properties == {} def test_deprecated_cacheable_attribute_remains_compatible(): dn_1_id = \"dn_1_id\" dn_1_config = Config.configure_data_node( id=dn_1_id, storage_type=\"pickle\", cacheable=False, scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_1_id].id == dn_1_id assert Config.data_nodes[dn_1_id].storage_type == \"pickle\" assert Config.data_nodes[dn_1_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": False} assert not Config.data_nodes[dn_1_id].cacheable dn_1_config.cacheable = True assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_1_id].cacheable dn_2_id = \"dn_2_id\" dn_2_config = Config.configure_data_node( id=dn_2_id, storage_type=\"pickle\", cacheable=True, scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_2_id].id == dn_2_id assert Config.data_nodes[dn_2_id].storage_type == \"pickle\" assert Config.data_nodes[dn_2_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_2_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_2_id].cacheable dn_2_config.cacheable = False assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": False} assert not Config.data_nodes[dn_1_id].cacheable dn_3_id = \"dn_3_id\" dn_3_config = Config.configure_data_node( id=dn_3_id, storage_type=\"pickle\", scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_3_id].id == dn_3_id assert Config.data_nodes[dn_3_id].storage_type == \"pickle\" assert Config.data_nodes[dn_3_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_3_id].properties == {} assert not Config.data_nodes[dn_3_id].cacheable dn_3_config.cacheable = True assert Config.data_nodes[dn_3_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_3_id].cacheable "} {"text": "import pytest from taipy.config.config import Config def test_job_config(): assert Config.job_config.mode == \"development\" job_c = Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) assert job_c.mode == \"standalone\" assert job_c.max_nb_of_workers == 2 assert Config.job_config.mode == \"standalone\" assert Config.job_config.max_nb_of_workers == 2 Config.configure_job_executions(foo=\"bar\") assert Config.job_config.foo == \"bar\" def test_clean_config(): job_config = Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2, prop=\"foo\") assert Config.job_config is job_config job_config._clean() # Check if the instance before and after _clean() is the same assert Config.job_config is job_config assert job_config.mode == \"development\" assert job_config._config == {\"max_nb_of_workers\": 1} assert job_config.properties == {} "} 
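The entry above exercises Config.configure_job_executions and Core start/stop. As a companion record (a minimal sketch only; it assumes the public taipy package exposes Core and Config exactly as the test entries above import and call them, and the scenario-submission step is left as a placeholder comment), the following entry shows the same job configuration applied before running the Core service:
{"text": "# Configure standalone job execution with 2 workers, then start and stop the Core service\nfrom taipy import Core\nfrom taipy.config.config import Config\n\n# Switch from the default development mode to standalone execution with 2 workers\nConfig.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2)\n\n# The Core service must be running for jobs to be dispatched with this configuration\ncore = Core()\ncore.run()\n# ... create and submit scenarios here ...\ncore.stop()"}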
{"text": "import datetime import json from src.taipy.core.config import CoreSection, DataNodeConfig, JobConfig, MigrationConfig, ScenarioConfig, TaskConfig from taipy.config import Config from taipy.config._serializer._json_serializer import _JsonSerializer from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from tests.core.utils.named_temporary_file import NamedTemporaryFile def multiply(a): return a * 2 def migrate_csv_path(dn): dn.path = \"foo.csv\" def compare_function(*data_node_results): comparison_result = {} current_result_index = 0 for current_result in data_node_results: comparison_result[current_result_index] = {} next_result_index = 0 for next_result in data_node_results: print(f\"comparing result {current_result_index} with result {next_result_index}\") comparison_result[current_result_index][next_result_index] = next_result - current_result next_result_index += 1 current_result_index += 1 return comparison_result class CustomClass: a = None b = None class CustomEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, datetime): result = {\"__type__\": \"Datetime\", \"__value__\": o.isoformat()} else: result = json.JSONEncoder.default(self, o) return result class CustomDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def object_hook(self, source): if source.get(\"__type__\") == \"Datetime\": return datetime.fromisoformat(source.get(\"__value__\")) else: return source def config_test_scenario(): test_csv_dn_cfg = Config.configure_csv_data_node( id=\"test_csv_dn\", path=\"./test.csv\", exposed_type=CustomClass, scope=Scope.GLOBAL, validity_period=datetime.timedelta(1), ) test_json_dn_cfg = Config.configure_json_data_node( id=\"test_json_dn\", default_path=\"./test.json\", encoder=CustomEncoder, decoder=CustomDecoder, ) test_pickle_dn_cfg = Config.configure_pickle_data_node( id=\"test_pickle_dn\", path=\"./test.p\", scope=Scope.SCENARIO, validity_period=datetime.timedelta(1), ) test_task_cfg = Config.configure_task( id=\"test_task\", input=test_csv_dn_cfg, function=multiply, output=test_json_dn_cfg ) test_scenario_cfg = Config.configure_scenario( id=\"test_scenario\", task_configs=[test_task_cfg], additional_data_node_configs=[test_pickle_dn_cfg], comparators={test_json_dn_cfg.id: compare_function}, frequency=Frequency.DAILY, ) test_scenario_cfg.add_sequences({\"sequence1\": [test_task_cfg]}) Config.add_migration_function(\"1.0\", test_csv_dn_cfg, migrate_csv_path) return test_scenario_cfg def test_read_write_toml_configuration_file(): expected_toml_config = f\"\"\" [TAIPY] [JOB] mode = \"development\" max_nb_of_workers = \"1:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{CoreSection._CURRENT_CORE_VERSION}\" [DATA_NODE.default] storage_type = \"pickle\" scope = \"SCENARIO:SCOPE\" [DATA_NODE.test_csv_dn] storage_type = \"csv\" scope = \"GLOBAL:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" path = \"./test.csv\" exposed_type = \"tests.core.config.test_config_serialization.CustomClass:class\" encoding = \"utf-8\" has_header = \"True:bool\" [DATA_NODE.test_json_dn] storage_type = \"json\" scope = \"SCENARIO:SCOPE\" default_path = \"./test.json\" encoder = \"tests.core.config.test_config_serialization.CustomEncoder:class\" decoder = 
\"tests.core.config.test_config_serialization.CustomDecoder:class\" encoding = \"utf-8\" [DATA_NODE.test_pickle_dn] storage_type = \"pickle\" scope = \"SCENARIO:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" path = \"./test.p\" [TASK.default] inputs = [] outputs = [] skippable = \"False:bool\" [TASK.test_task] function = \"tests.core.config.test_config_serialization.multiply:function\" inputs = [ \"test_csv_dn:SECTION\",] outputs = [ \"test_json_dn:SECTION\",] skippable = \"False:bool\" [SCENARIO.default] tasks = [] additional_data_nodes = [] [SCENARIO.test_scenario] tasks = [ \"test_task:SECTION\",] additional_data_nodes = [ \"test_pickle_dn:SECTION\",] frequency = \"DAILY:FREQUENCY\" [VERSION_MIGRATION.migration_fcts.\"1.0\"] test_csv_dn = \"tests.core.config.test_config_serialization.migrate_csv_path:function\" [SCENARIO.default.comparators] [SCENARIO.default.sequences] [SCENARIO.test_scenario.comparators] test_json_dn = [ \"tests.core.config.test_config_serialization.compare_function:function\",] [SCENARIO.test_scenario.sequences] sequence1 = [ \"test_task:SECTION\",] \"\"\".strip() config_test_scenario() tf = NamedTemporaryFile() Config.backup(tf.filename) actual_config = tf.read().strip() assert actual_config == expected_toml_config Config.load(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == expected_toml_config assert Config.unique_sections is not None assert len(Config.unique_sections) == 3 assert Config.unique_sections[JobConfig.name].mode == \"development\" assert Config.unique_sections[JobConfig.name].max_nb_of_workers == 1 assert Config.unique_sections[MigrationConfig.name].migration_fcts[\"1.0\"] == {\"test_csv_dn\": migrate_csv_path} assert Config.sections is not None assert len(Config.sections) == 3 assert Config.sections[DataNodeConfig.name] is not None assert len(Config.sections[DataNodeConfig.name]) == 4 assert Config.sections[DataNodeConfig.name][\"default\"] is not None assert Config.sections[DataNodeConfig.name][\"default\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"default\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].storage_type == \"csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].scope == Scope.GLOBAL assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].has_header is True assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].path == \"./test.csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].encoding == \"utf-8\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].exposed_type == CustomClass assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].storage_type == \"json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].default_path == \"./test.json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoding == \"utf-8\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoder == CustomEncoder assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].decoder == CustomDecoder assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].scope == Scope.SCENARIO assert 
Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].path == \"./test.p\" assert Config.sections[TaskConfig.name] is not None assert len(Config.sections[TaskConfig.name]) == 2 assert Config.sections[TaskConfig.name][\"default\"] is not None assert Config.sections[TaskConfig.name][\"default\"].inputs == [] assert Config.sections[TaskConfig.name][\"default\"].outputs == [] assert Config.sections[TaskConfig.name][\"default\"].function is None assert not Config.sections[TaskConfig.name][\"default\"].skippable assert [inp.id for inp in Config.sections[TaskConfig.name][\"test_task\"].inputs] == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id ] assert [outp.id for outp in Config.sections[TaskConfig.name][\"test_task\"].outputs] == [ Config.sections[DataNodeConfig.name][\"test_json_dn\"].id ] assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[ScenarioConfig.name] is not None assert len(Config.sections[ScenarioConfig.name]) == 2 assert Config.sections[ScenarioConfig.name][\"default\"] is not None assert Config.sections[ScenarioConfig.name][\"default\"].tasks == [] assert Config.sections[ScenarioConfig.name][\"default\"].additional_data_nodes == [] assert Config.sections[ScenarioConfig.name][\"default\"].data_nodes == [] assert len(Config.sections[ScenarioConfig.name][\"default\"].comparators) == 0 assert [task.id for task in Config.sections[ScenarioConfig.name][\"test_scenario\"].tasks] == [ Config.sections[TaskConfig.name][\"test_task\"].id ] assert [ additional_data_node.id for additional_data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].additional_data_nodes ] == [Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].id] assert sorted([data_node.id for data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].data_nodes]) == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id, Config.sections[DataNodeConfig.name][\"test_json_dn\"].id, Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].id, ] sequences = {} for sequence_name, sequence_tasks in Config.sections[ScenarioConfig.name][\"test_scenario\"].sequences.items(): sequences[sequence_name] = [task.id for task in sequence_tasks] assert sequences == {\"sequence1\": [Config.sections[TaskConfig.name][\"test_task\"].id]} assert dict(Config.sections[ScenarioConfig.name][\"test_scenario\"].comparators) == { \"test_json_dn\": [compare_function] } def test_read_write_json_configuration_file(): expected_json_config = ( \"\"\"{ \"TAIPY\": {}, \"JOB\": { \"mode\": \"development\", \"max_nb_of_workers\": \"1:int\" }, \"CORE\": { \"root_folder\": \"./taipy/\", \"storage_folder\": \".data/\", \"repository_type\": \"filesystem\", \"read_entity_retry\": \"0:int\", \"mode\": \"development\", \"version_number\": \"\", \"force\": \"False:bool\",\"\"\" + f\"\"\" \"core_version\": \"{CoreSection._CURRENT_CORE_VERSION}\" \"\"\" + \"\"\" }, \"VERSION_MIGRATION\": { \"migration_fcts\": { \"1.0\": { \"test_csv_dn\": \"tests.core.config.test_config_serialization.migrate_csv_path:function\" } } }, \"DATA_NODE\": { \"default\": { \"storage_type\": \"pickle\", \"scope\": \"SCENARIO:SCOPE\" }, \"test_csv_dn\": { \"storage_type\": \"csv\", \"scope\": \"GLOBAL:SCOPE\", \"validity_period\": \"1d0h0m0s:timedelta\", \"path\": \"./test.csv\", \"exposed_type\": 
\"tests.core.config.test_config_serialization.CustomClass:class\", \"encoding\": \"utf-8\", \"has_header\": \"True:bool\" }, \"test_json_dn\": { \"storage_type\": \"json\", \"scope\": \"SCENARIO:SCOPE\", \"default_path\": \"./test.json\", \"encoder\": \"tests.core.config.test_config_serialization.CustomEncoder:class\", \"decoder\": \"tests.core.config.test_config_serialization.CustomDecoder:class\", \"encoding\": \"utf-8\" }, \"test_pickle_dn\": { \"storage_type\": \"pickle\", \"scope\": \"SCENARIO:SCOPE\", \"validity_period\": \"1d0h0m0s:timedelta\", \"path\": \"./test.p\" } }, \"TASK\": { \"default\": { \"function\": null, \"inputs\": [], \"outputs\": [], \"skippable\": \"False:bool\" }, \"test_task\": { \"function\": \"tests.core.config.test_config_serialization.multiply:function\", \"inputs\": [ \"test_csv_dn:SECTION\" ], \"outputs\": [ \"test_json_dn:SECTION\" ], \"skippable\": \"False:bool\" } }, \"SCENARIO\": { \"default\": { \"comparators\": {}, \"tasks\": [], \"additional_data_nodes\": [], \"frequency\": null, \"sequences\": {} }, \"test_scenario\": { \"comparators\": { \"test_json_dn\": [ \"tests.core.config.test_config_serialization.compare_function:function\" ] }, \"tasks\": [ \"test_task:SECTION\" ], \"additional_data_nodes\": [ \"test_pickle_dn:SECTION\" ], \"frequency\": \"DAILY:FREQUENCY\", \"sequences\": { \"sequence1\": [ \"test_task:SECTION\" ] } } } } \"\"\".strip() ) Config._serializer = _JsonSerializer() config_test_scenario() tf = NamedTemporaryFile() Config.backup(tf.filename) actual_config = tf.read().strip() assert actual_config == expected_json_config Config.load(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == expected_json_config assert Config.unique_sections is not None assert len(Config.unique_sections) == 3 assert Config.unique_sections[JobConfig.name].mode == \"development\" assert Config.unique_sections[JobConfig.name].max_nb_of_workers == 1 assert Config.unique_sections[MigrationConfig.name].migration_fcts[\"1.0\"] == {\"test_csv_dn\": migrate_csv_path} assert Config.sections is not None assert len(Config.sections) == 3 assert Config.sections[DataNodeConfig.name] is not None assert len(Config.sections[DataNodeConfig.name]) == 4 assert Config.sections[DataNodeConfig.name][\"default\"] is not None assert Config.sections[DataNodeConfig.name][\"default\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"default\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].storage_type == \"csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].scope == Scope.GLOBAL assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].has_header is True assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].path == \"./test.csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].encoding == \"utf-8\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].exposed_type == CustomClass assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].storage_type == \"json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].default_path == \"./test.json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoding == \"utf-8\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoder == 
CustomEncoder assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].decoder == CustomDecoder assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].path == \"./test.p\" assert Config.sections[TaskConfig.name] is not None assert len(Config.sections[TaskConfig.name]) == 2 assert Config.sections[TaskConfig.name][\"default\"] is not None assert Config.sections[TaskConfig.name][\"default\"].inputs == [] assert Config.sections[TaskConfig.name][\"default\"].outputs == [] assert Config.sections[TaskConfig.name][\"default\"].function is None assert [inp.id for inp in Config.sections[TaskConfig.name][\"test_task\"].inputs] == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id ] assert [outp.id for outp in Config.sections[TaskConfig.name][\"test_task\"].outputs] == [ Config.sections[DataNodeConfig.name][\"test_json_dn\"].id ] assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[ScenarioConfig.name] is not None assert len(Config.sections[ScenarioConfig.name]) == 2 assert Config.sections[ScenarioConfig.name][\"default\"] is not None assert Config.sections[ScenarioConfig.name][\"default\"].tasks == [] assert Config.sections[ScenarioConfig.name][\"default\"].additional_data_nodes == [] assert Config.sections[ScenarioConfig.name][\"default\"].data_nodes == [] assert len(Config.sections[ScenarioConfig.name][\"default\"].comparators) == 0 assert [task.id for task in Config.sections[ScenarioConfig.name][\"test_scenario\"].tasks] == [ Config.sections[TaskConfig.name][\"test_task\"].id ] assert [ additional_data_node.id for additional_data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].additional_data_nodes ] == [Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].id] assert sorted([data_node.id for data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].data_nodes]) == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id, Config.sections[DataNodeConfig.name][\"test_json_dn\"].id, Config.sections[DataNodeConfig.name][\"test_pickle_dn\"].id, ] sequences = {} for sequence_name, sequence_tasks in Config.sections[ScenarioConfig.name][\"test_scenario\"].sequences.items(): sequences[sequence_name] = [task.id for task in sequence_tasks] assert sequences == {\"sequence1\": [Config.sections[TaskConfig.name][\"test_task\"].id]} assert dict(Config.sections[ScenarioConfig.name][\"test_scenario\"].comparators) == { \"test_json_dn\": [compare_function] } def test_read_write_toml_configuration_file_migrate_sequence_in_scenario(): old_toml_config = \"\"\" [TAIPY] [JOB] mode = \"development\" max_nb_of_workers = \"1:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" mode = \"development\" version_number = \"\" force = \"False:bool\" [DATA_NODE.default] storage_type = \"pickle\" scope = \"SCENARIO:SCOPE\" [DATA_NODE.test_csv_dn] storage_type = \"csv\" scope = \"GLOBAL:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" path = \"./test.csv\" exposed_type = \"tests.core.config.test_config_serialization.CustomClass:class\" has_header = \"True:bool\" [DATA_NODE.test_json_dn] storage_type = \"json\" scope = \"SCENARIO:SCOPE\" default_path = \"./test.json\" encoder = 
\"tests.core.config.test_config_serialization.CustomEncoder:class\" decoder = \"tests.core.config.test_config_serialization.CustomDecoder:class\" [TASK.default] inputs = [] outputs = [] skippable = \"False:bool\" [TASK.test_task] function = \"tests.core.config.test_config_serialization.multiply:function\" inputs = [ \"test_csv_dn:SECTION\",] outputs = [ \"test_json_dn:SECTION\",] skippable = \"False:bool\" [SCENARIO.default] [SCENARIO.test_scenario] tasks = [ \"test_task:SECTION\",] sequences.test_sequence = [ \"test_task:SECTION\",] frequency = \"DAILY:FREQUENCY\" [VERSION_MIGRATION.migration_fcts.\"1.0\"] test_csv_dn = \"tests.core.config.test_config_serialization.migrate_csv_path:function\" [SCENARIO.default.comparators] [SCENARIO.test_scenario.comparators] test_json_dn = [ \"tests.core.config.test_config_serialization.compare_function:function\",] \"\"\".strip() config_test_scenario() tf = NamedTemporaryFile() with open(tf.filename, \"w\") as fd: fd.writelines(old_toml_config) Config.restore(tf.filename) assert Config.unique_sections is not None assert len(Config.unique_sections) == 3 assert Config.unique_sections[CoreSection.name].root_folder == \"./taipy/\" assert Config.unique_sections[CoreSection.name].storage_folder == \".data/\" assert Config.unique_sections[CoreSection.name].repository_type == \"filesystem\" assert Config.unique_sections[CoreSection.name].repository_properties == {} assert Config.unique_sections[CoreSection.name].mode == \"development\" assert Config.unique_sections[CoreSection.name].version_number == \"\" assert Config.unique_sections[CoreSection.name].force is False assert Config.unique_sections[JobConfig.name].mode == \"development\" assert Config.unique_sections[JobConfig.name].max_nb_of_workers == 1 assert Config.unique_sections[MigrationConfig.name].migration_fcts[\"1.0\"] == {\"test_csv_dn\": migrate_csv_path} assert Config.sections is not None assert len(Config.sections) == 3 assert Config.sections[DataNodeConfig.name] is not None assert len(Config.sections[DataNodeConfig.name]) == 3 assert Config.sections[DataNodeConfig.name][\"default\"] is not None assert Config.sections[DataNodeConfig.name][\"default\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"default\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].storage_type == \"csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].scope == Scope.GLOBAL assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].has_header is True assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].path == \"./test.csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].exposed_type == CustomClass assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].storage_type == \"json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].default_path == \"./test.json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoder == CustomEncoder assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].decoder == CustomDecoder assert Config.sections[TaskConfig.name] is not None assert len(Config.sections[TaskConfig.name]) == 2 assert Config.sections[TaskConfig.name][\"default\"] is not None assert Config.sections[TaskConfig.name][\"default\"].inputs == [] assert Config.sections[TaskConfig.name][\"default\"].outputs == [] 
assert Config.sections[TaskConfig.name][\"default\"].function is None assert not Config.sections[TaskConfig.name][\"default\"].skippable assert [inp.id for inp in Config.sections[TaskConfig.name][\"test_task\"].inputs] == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id ] assert [outp.id for outp in Config.sections[TaskConfig.name][\"test_task\"].outputs] == [ Config.sections[DataNodeConfig.name][\"test_json_dn\"].id ] assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[ScenarioConfig.name] is not None assert len(Config.sections[ScenarioConfig.name]) == 2 assert Config.sections[ScenarioConfig.name][\"default\"] is not None assert Config.sections[ScenarioConfig.name][\"default\"].tasks == [] assert Config.sections[ScenarioConfig.name][\"default\"].additional_data_nodes == [] assert Config.sections[ScenarioConfig.name][\"default\"].data_nodes == [] assert len(Config.sections[ScenarioConfig.name][\"default\"].comparators) == 0 assert [task.id for task in Config.sections[ScenarioConfig.name][\"test_scenario\"].tasks] == [ Config.sections[TaskConfig.name][\"test_task\"].id ] assert [ additional_data_node.id for additional_data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].additional_data_nodes ] == [] assert sorted([data_node.id for data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].data_nodes]) == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id, Config.sections[DataNodeConfig.name][\"test_json_dn\"].id, ] assert Config.sections[ScenarioConfig.name][\"test_scenario\"].sequences == { \"test_sequence\": [Config.sections[TaskConfig.name][\"test_task\"]] } assert dict(Config.sections[ScenarioConfig.name][\"test_scenario\"].comparators) == { \"test_json_dn\": [compare_function] } def test_read_write_json_configuration_file_migrate_sequence_in_scenario(): old_json_config = \"\"\" { \"TAIPY\": {}, \"JOB\": { \"mode\": \"development\", \"max_nb_of_workers\": \"1:int\" }, \"CORE\": { \"root_folder\": \"./taipy/\", \"storage_folder\": \".data/\", \"repository_type\": \"filesystem\", \"read_entity_retry\": \"0:int\", \"mode\": \"development\", \"version_number\": \"\", \"force\": \"False:bool\" }, \"VERSION_MIGRATION\": { \"migration_fcts\": { \"1.0\": { \"test_csv_dn\": \"tests.core.config.test_config_serialization.migrate_csv_path:function\" } } }, \"DATA_NODE\": { \"default\": { \"storage_type\": \"pickle\", \"scope\": \"SCENARIO:SCOPE\" }, \"test_csv_dn\": { \"storage_type\": \"csv\", \"scope\": \"GLOBAL:SCOPE\", \"validity_period\": \"1d0h0m0s:timedelta\", \"path\": \"./test.csv\", \"exposed_type\": \"tests.core.config.test_config_serialization.CustomClass:class\", \"has_header\": \"True:bool\" }, \"test_json_dn\": { \"storage_type\": \"json\", \"scope\": \"SCENARIO:SCOPE\", \"default_path\": \"./test.json\", \"encoder\": \"tests.core.config.test_config_serialization.CustomEncoder:class\", \"decoder\": \"tests.core.config.test_config_serialization.CustomDecoder:class\" } }, \"TASK\": { \"default\": { \"function\": null, \"inputs\": [], \"outputs\": [], \"skippable\": \"False:bool\" }, \"test_task\": { \"function\": \"tests.core.config.test_config_serialization.multiply:function\", \"inputs\": [ \"test_csv_dn:SECTION\" ], \"outputs\": [ \"test_json_dn:SECTION\" ], \"skippable\": \"False:bool\" } }, \"SCENARIO\": { \"default\": { \"comparators\": {}, \"sequences\": {}, \"frequency\": null }, \"test_scenario\": { 
\"comparators\": { \"test_json_dn\": [ \"tests.core.config.test_config_serialization.compare_function:function\" ] }, \"tasks\": [ \"test_task:SECTION\" ], \"sequences\": { \"test_sequence\": [ \"test_task:SECTION\" ] }, \"frequency\": \"DAILY:FREQUENCY\" } } } \"\"\".strip() Config._serializer = _JsonSerializer() config_test_scenario() tf = NamedTemporaryFile() with open(tf.filename, \"w\") as fd: fd.writelines(old_json_config) Config.restore(tf.filename) assert Config.unique_sections is not None assert len(Config.unique_sections) == 3 assert Config.unique_sections[CoreSection.name].root_folder == \"./taipy/\" assert Config.unique_sections[CoreSection.name].storage_folder == \".data/\" assert Config.unique_sections[CoreSection.name].repository_type == \"filesystem\" assert Config.unique_sections[CoreSection.name].repository_properties == {} assert Config.unique_sections[CoreSection.name].mode == \"development\" assert Config.unique_sections[CoreSection.name].version_number == \"\" assert Config.unique_sections[CoreSection.name].force is False assert Config.unique_sections[JobConfig.name].mode == \"development\" assert Config.unique_sections[JobConfig.name].max_nb_of_workers == 1 assert Config.unique_sections[MigrationConfig.name].migration_fcts[\"1.0\"] == {\"test_csv_dn\": migrate_csv_path} assert Config.sections is not None assert len(Config.sections) == 3 assert Config.sections[DataNodeConfig.name] is not None assert len(Config.sections[DataNodeConfig.name]) == 3 assert Config.sections[DataNodeConfig.name][\"default\"] is not None assert Config.sections[DataNodeConfig.name][\"default\"].storage_type == \"pickle\" assert Config.sections[DataNodeConfig.name][\"default\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].storage_type == \"csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].scope == Scope.GLOBAL assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].validity_period == datetime.timedelta(1) assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].has_header is True assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].path == \"./test.csv\" assert Config.sections[DataNodeConfig.name][\"test_csv_dn\"].exposed_type == CustomClass assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].storage_type == \"json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].scope == Scope.SCENARIO assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].default_path == \"./test.json\" assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].encoder == CustomEncoder assert Config.sections[DataNodeConfig.name][\"test_json_dn\"].decoder == CustomDecoder assert Config.sections[TaskConfig.name] is not None assert len(Config.sections[TaskConfig.name]) == 2 assert Config.sections[TaskConfig.name][\"default\"] is not None assert Config.sections[TaskConfig.name][\"default\"].inputs == [] assert Config.sections[TaskConfig.name][\"default\"].outputs == [] assert Config.sections[TaskConfig.name][\"default\"].function is None assert [inp.id for inp in Config.sections[TaskConfig.name][\"test_task\"].inputs] == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id ] assert [outp.id for outp in Config.sections[TaskConfig.name][\"test_task\"].outputs] == [ Config.sections[DataNodeConfig.name][\"test_json_dn\"].id ] assert Config.sections[TaskConfig.name][\"test_task\"].function == multiply assert Config.sections[ScenarioConfig.name] is not None assert len(Config.sections[ScenarioConfig.name]) == 2 assert 
Config.sections[ScenarioConfig.name][\"default\"] is not None assert Config.sections[ScenarioConfig.name][\"default\"].tasks == [] assert Config.sections[ScenarioConfig.name][\"default\"].additional_data_nodes == [] assert Config.sections[ScenarioConfig.name][\"default\"].data_nodes == [] assert len(Config.sections[ScenarioConfig.name][\"default\"].comparators) == 0 assert [task.id for task in Config.sections[ScenarioConfig.name][\"test_scenario\"].tasks] == [ Config.sections[TaskConfig.name][\"test_task\"].id ] assert [ additional_data_node.id for additional_data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].additional_data_nodes ] == [] assert sorted([data_node.id for data_node in Config.sections[ScenarioConfig.name][\"test_scenario\"].data_nodes]) == [ Config.sections[DataNodeConfig.name][\"test_csv_dn\"].id, Config.sections[DataNodeConfig.name][\"test_json_dn\"].id, ] assert Config.sections[ScenarioConfig.name][\"test_scenario\"].sequences == { \"test_sequence\": [Config.sections[TaskConfig.name][\"test_task\"]] } assert dict(Config.sections[ScenarioConfig.name][\"test_scenario\"].comparators) == { \"test_json_dn\": [compare_function] } "} {"text": "from taipy.config.config import Config def migrate_pickle_path(dn): dn.path = \"s1.pkl\" def migrate_skippable(task): task.skippable = True def test_migration_config(): assert Config.migration_functions.migration_fcts == {} data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes1, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == {\"1.0\": {\"data_nodes1\": migrate_pickle_path}} assert migration_cfg.properties == {} data_nodes2 = Config.configure_data_node(\"data_nodes2\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes2, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == { \"1.0\": {\"data_nodes1\": migrate_pickle_path, \"data_nodes2\": migrate_pickle_path} } def test_clean_config(): assert Config.migration_functions.migration_fcts == {} data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") migration_cfg = Config.add_migration_function( target_version=\"1.0\", config=data_nodes1, migration_fct=migrate_pickle_path, ) assert migration_cfg.migration_fcts == {\"1.0\": {\"data_nodes1\": migrate_pickle_path}} assert migration_cfg.properties == {} migration_cfg._clean() assert migration_cfg.migration_fcts == {} assert migration_cfg._properties == {} "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from src.taipy.core.config import CoreSection from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.config.job_config import JobConfig from src.taipy.core.config.migration_config import MigrationConfig from src.taipy.core.config.scenario_config import ScenarioConfig from src.taipy.core.config.task_config import TaskConfig from taipy.config._config import _Config from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.global_app.global_app_config import GlobalAppConfig def _test_default_job_config(job_config: JobConfig): assert job_config is not None assert job_config.mode == JobConfig._DEFAULT_MODE def _test_default_core_section(core_section: CoreSection): assert core_section is not None assert core_section.mode == CoreSection._DEFAULT_MODE assert core_section.version_number == \"\" assert not core_section.force assert core_section.root_folder == \"./taipy/\" assert core_section.storage_folder == \".data/\" assert core_section.repository_type == \"filesystem\" assert core_section.repository_properties == {} assert len(core_section.properties) == 0 def _test_default_data_node_config(dn_config: DataNodeConfig): assert dn_config is not None assert dn_config.id is not None assert dn_config.storage_type == \"pickle\" assert dn_config.scope == Scope.SCENARIO assert dn_config.validity_period is None assert len(dn_config.properties) == 0 # type: ignore def _test_default_task_config(task_config: TaskConfig): assert task_config is not None assert task_config.id is not None assert task_config.input_configs == [] assert task_config.output_configs == [] assert task_config.function is None assert not task_config.skippable assert len(task_config.properties) == 0 # type: ignore def _test_default_scenario_config(scenario_config: ScenarioConfig): assert scenario_config is not None assert scenario_config.id is not None assert scenario_config.tasks == [] assert scenario_config.task_configs == [] assert scenario_config.additional_data_nodes == [] assert scenario_config.additional_data_node_configs == [] assert scenario_config.data_nodes == [] assert scenario_config.data_node_configs == [] assert scenario_config.sequences == {} assert len(scenario_config.properties) == 0 # type: ignore def _test_default_version_migration_config(version_migration_config: MigrationConfig): assert version_migration_config is not None assert version_migration_config.migration_fcts == {} assert len(version_migration_config.properties) == 0 # type: ignore def _test_default_global_app_config(global_config: GlobalAppConfig): assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_default_configuration(): default_config = Config._default_config assert default_config._global_config is not None _test_default_global_app_config(default_config._global_config) _test_default_global_app_config(Config.global_config) _test_default_global_app_config(GlobalAppConfig().default_config()) assert default_config._unique_sections is not None assert len(default_config._unique_sections) == 3 assert 
len(default_config._sections) == 3 _test_default_job_config(default_config._unique_sections[JobConfig.name]) _test_default_job_config(Config.job_config) _test_default_job_config(JobConfig().default_config()) _test_default_version_migration_config(default_config._unique_sections[MigrationConfig.name]) _test_default_version_migration_config(Config.migration_functions) _test_default_version_migration_config(MigrationConfig.default_config()) _test_default_core_section(default_config._unique_sections[CoreSection.name]) _test_default_core_section(Config.core) _test_default_core_section(CoreSection().default_config()) _test_default_data_node_config(default_config._sections[DataNodeConfig.name][_Config.DEFAULT_KEY]) _test_default_data_node_config(Config.data_nodes[_Config.DEFAULT_KEY]) _test_default_data_node_config(DataNodeConfig.default_config()) assert len(default_config._sections[DataNodeConfig.name]) == 1 assert len(Config.data_nodes) == 1 _test_default_task_config(default_config._sections[TaskConfig.name][_Config.DEFAULT_KEY]) _test_default_task_config(Config.tasks[_Config.DEFAULT_KEY]) _test_default_task_config(TaskConfig.default_config()) assert len(default_config._sections[TaskConfig.name]) == 1 assert len(Config.tasks) == 1 _test_default_scenario_config(default_config._sections[ScenarioConfig.name][_Config.DEFAULT_KEY]) Config.scenarios[_Config.DEFAULT_KEY] _test_default_scenario_config(Config.scenarios[_Config.DEFAULT_KEY]) _test_default_scenario_config(ScenarioConfig.default_config()) assert len(default_config._sections[ScenarioConfig.name]) == 1 assert len(Config.scenarios) == 1 "} {"text": "import os from unittest import mock from taipy.config.common.frequency import Frequency from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def my_func(): pass def _configure_scenario_in_toml(): return NamedTemporaryFile( content=\"\"\" [TAIPY] [TASK.task1] inputs = [] outputs = [] [TASK.task2] inputs = [] outputs = [] [SCENARIO.scenarios1] tasks = [ \"task1:SECTION\", \"task2:SECTION\"] \"\"\" ) def _check_tasks_instance(task_id, scenario_id): \"\"\"Check if the task instance in the task config correctly points to the Config._applied_config, not the Config._python_config or the Config._file_config \"\"\" task_config_applied_instance = Config.tasks[task_id] task_config_instance_via_scenario = None for task in Config.scenarios[scenario_id].tasks: if task.id == task_id: task_config_instance_via_scenario = task task_config_python_instance = None if Config._python_config._sections.get(\"TASK\", None): task_config_python_instance = Config._python_config._sections[\"TASK\"][task_id] task_config_file_instance = None if Config._file_config._sections.get(\"TASK\", None): task_config_file_instance = Config._file_config._sections[\"TASK\"][task_id] assert task_config_python_instance is not task_config_applied_instance assert task_config_python_instance is not task_config_instance_via_scenario assert task_config_file_instance is not task_config_applied_instance assert task_config_file_instance is not task_config_instance_via_scenario assert task_config_instance_via_scenario is task_config_applied_instance def test_task_instance_when_configure_scenario_in_python(): task1_config = Config.configure_task(\"task1\", []) task2_config = Config.configure_task(\"task2\", print) Config.configure_scenario(\"scenarios1\", [task1_config, task2_config]) _check_tasks_instance(\"task1\", \"scenarios1\") _check_tasks_instance(\"task2\", \"scenarios1\") def 
test_task_instance_when_configure_scenario_by_loading_toml(): toml_config = _configure_scenario_in_toml() Config.load(toml_config.filename) _check_tasks_instance(\"task1\", \"scenarios1\") _check_tasks_instance(\"task2\", \"scenarios1\") def test_task_instance_when_configure_scenario_by_overriding_toml(): toml_config = _configure_scenario_in_toml() Config.override(toml_config.filename) _check_tasks_instance(\"task1\", \"scenarios1\") _check_tasks_instance(\"task2\", \"scenarios1\") def test_scenario_creation(): dn_config_1 = Config.configure_data_node(\"dn1\") dn_config_2 = Config.configure_data_node(\"dn2\") dn_config_3 = Config.configure_data_node(\"dn3\") dn_config_4 = Config.configure_data_node(\"dn4\") task_config_1 = Config.configure_task(\"task1\", sum, [dn_config_1, dn_config_2], dn_config_3) task_config_2 = Config.configure_task(\"task2\", print, dn_config_3) scenario = Config.configure_scenario( \"scenarios1\", [task_config_1, task_config_2], [dn_config_4], comparators={\"dn_cfg\": [my_func]}, sequences={\"sequence\": []}, ) assert list(Config.scenarios) == [\"default\", scenario.id] scenario2 = Config.configure_scenario(\"scenarios2\", [task_config_1], frequency=Frequency.MONTHLY) assert list(Config.scenarios) == [\"default\", scenario.id, scenario2.id] def test_scenario_count(): task_config_1 = Config.configure_task(\"task1\", my_func) task_config_2 = Config.configure_task(\"task2\", print) Config.configure_scenario(\"scenarios1\", [task_config_1, task_config_2]) assert len(Config.scenarios) == 2 Config.configure_scenario(\"scenarios2\", [task_config_1]) assert len(Config.scenarios) == 3 Config.configure_scenario(\"scenarios3\", [task_config_2]) assert len(Config.scenarios) == 4 def test_scenario_getitem(): dn_config_1 = Config.configure_data_node(\"dn1\") dn_config_2 = Config.configure_data_node(\"dn2\") dn_config_3 = Config.configure_data_node(\"dn3\") dn_config_4 = Config.configure_data_node(\"dn4\") task_config_1 = Config.configure_task(\"task1\", sum, [dn_config_1, dn_config_2], dn_config_3) task_config_2 = Config.configure_task(\"task2\", print, dn_config_3) scenario_id = \"scenarios1\" scenario = Config.configure_scenario(scenario_id, [task_config_1, task_config_2], [dn_config_4]) assert Config.scenarios[scenario_id].id == scenario.id assert Config.scenarios[scenario_id].task_configs == scenario.task_configs assert Config.scenarios[scenario_id].tasks == scenario.tasks assert Config.scenarios[scenario_id].task_configs == scenario.tasks assert Config.scenarios[scenario_id].additional_data_node_configs == scenario.additional_data_node_configs assert Config.scenarios[scenario_id].additional_data_nodes == scenario.additional_data_nodes assert Config.scenarios[scenario_id].additional_data_node_configs == scenario.additional_data_nodes assert Config.scenarios[scenario_id].data_node_configs == scenario.data_node_configs assert Config.scenarios[scenario_id].data_nodes == scenario.data_nodes assert Config.scenarios[scenario_id].data_node_configs == scenario.data_nodes assert scenario.tasks == [task_config_1, task_config_2] assert scenario.additional_data_node_configs == [dn_config_4] assert set(scenario.data_nodes) == set([dn_config_4, dn_config_1, dn_config_2, dn_config_3]) assert Config.scenarios[scenario_id].properties == scenario.properties def test_scenario_creation_no_duplication(): task_config_1 = Config.configure_task(\"task1\", my_func) task_config_2 = Config.configure_task(\"task2\", print) dn_config = Config.configure_data_node(\"dn\") 
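# Descriptive note (added): configuring the scenarios1 id a second time with the same task and data node configs must not create a duplicate scenario entry, so the scenario count stays at 2 (default plus scenarios1).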
Config.configure_scenario(\"scenarios1\", [task_config_1, task_config_2], [dn_config]) assert len(Config.scenarios) == 2 Config.configure_scenario(\"scenarios1\", [task_config_1, task_config_2], [dn_config]) assert len(Config.scenarios) == 2 def test_scenario_get_set_and_remove_comparators(): task_config_1 = Config.configure_task(\"task1\", my_func) task_config_2 = Config.configure_task(\"task2\", print) dn_config_1 = \"dn_config_1\" scenario_config_1 = Config.configure_scenario( \"scenarios1\", [task_config_1, task_config_2], comparators={dn_config_1: my_func} ) assert scenario_config_1.comparators is not None assert scenario_config_1.comparators[dn_config_1] == [my_func] assert len(scenario_config_1.comparators.keys()) == 1 dn_config_2 = \"dn_config_2\" scenario_config_1.add_comparator(dn_config_2, my_func) assert len(scenario_config_1.comparators.keys()) == 2 scenario_config_1.delete_comparator(dn_config_1) assert len(scenario_config_1.comparators.keys()) == 1 scenario_config_1.delete_comparator(dn_config_2) assert len(scenario_config_1.comparators.keys()) == 0 scenario_config_2 = Config.configure_scenario(\"scenarios2\", [task_config_1, task_config_2]) assert scenario_config_2.comparators is not None scenario_config_2.add_comparator(dn_config_1, my_func) assert len(scenario_config_2.comparators.keys()) == 1 scenario_config_2.delete_comparator(\"dn_config_3\") def test_scenario_config_with_env_variable_value(): task_config_1 = Config.configure_task(\"task1\", my_func) task_config_2 = Config.configure_task(\"task2\", print) with mock.patch.dict(os.environ, {\"FOO\": \"bar\"}): Config.configure_scenario(\"scenario_name\", [task_config_1, task_config_2], prop=\"ENV[FOO]\") assert Config.scenarios[\"scenario_name\"].prop == \"bar\" assert Config.scenarios[\"scenario_name\"].properties[\"prop\"] == \"bar\" assert Config.scenarios[\"scenario_name\"]._properties[\"prop\"] == \"ENV[FOO]\" def test_clean_config(): task1_config = Config.configure_task(\"task1\", print, [], []) task2_config = Config.configure_task(\"task2\", print, [], []) scenario1_config = Config.configure_scenario( \"id1\", [task1_config, task2_config], [], Frequency.YEARLY, {\"foo\": \"bar\"}, prop=\"foo\", sequences={\"sequence_1\": []}, ) scenario2_config = Config.configure_scenario( \"id2\", [task2_config, task1_config], [], Frequency.MONTHLY, {\"foz\": \"baz\"}, prop=\"bar\", sequences={\"sequence_2\": []}, ) assert Config.scenarios[\"id1\"] is scenario1_config assert Config.scenarios[\"id2\"] is scenario2_config scenario1_config._clean() scenario2_config._clean() # Check if the instance before and after _clean() is the same assert Config.scenarios[\"id1\"] is scenario1_config assert Config.scenarios[\"id2\"] is scenario2_config assert scenario1_config.id == \"id1\" assert scenario2_config.id == \"id2\" assert scenario1_config.tasks == scenario1_config.task_configs == [] assert scenario1_config.additional_data_nodes == scenario1_config.additional_data_node_configs == [] assert scenario1_config.data_nodes == scenario1_config.data_node_configs == [] assert scenario1_config.sequences == scenario1_config.sequences == {} assert scenario1_config.frequency is scenario1_config.frequency is None assert scenario1_config.comparators == scenario1_config.comparators == {} assert scenario1_config.properties == scenario1_config.properties == {} assert scenario2_config.tasks == scenario2_config.task_configs == [] assert scenario2_config.additional_data_nodes == scenario2_config.additional_data_node_configs == [] assert 
scenario2_config.data_nodes == scenario2_config.data_node_configs == [] assert scenario2_config.sequences == scenario1_config.sequences == {} assert scenario2_config.frequency is scenario2_config.frequency is None assert scenario2_config.comparators == scenario2_config.comparators == {} assert scenario2_config.properties == scenario2_config.properties == {} def test_add_sequence(): task1_config = Config.configure_task(\"task1\", print, [], []) task2_config = Config.configure_task(\"task2\", print, [], []) task3_config = Config.configure_task(\"task3\", print, [], []) task4_config = Config.configure_task(\"task4\", print, [], []) scenario_config = Config.configure_scenario( \"id\", [task1_config, task2_config, task3_config, task4_config], [], Frequency.YEARLY, prop=\"foo\" ) assert Config.scenarios[\"id\"] is scenario_config assert scenario_config.id == \"id\" assert ( scenario_config.tasks == scenario_config.task_configs == [task1_config, task2_config, task3_config, task4_config] ) assert scenario_config.additional_data_nodes == scenario_config.additional_data_node_configs == [] assert scenario_config.data_nodes == scenario_config.data_node_configs == [] assert scenario_config.frequency is scenario_config.frequency == Frequency.YEARLY assert scenario_config.comparators == scenario_config.comparators == {} assert scenario_config.properties == {\"prop\": \"foo\"} scenario_config.add_sequences( { \"sequence1\": [task1_config], \"sequence2\": [task2_config, task3_config], \"sequence3\": [task1_config, task2_config, task4_config], } ) assert len(scenario_config.sequences) == 3 assert scenario_config.sequences[\"sequence1\"] == [task1_config] assert scenario_config.sequences[\"sequence2\"] == [task2_config, task3_config] assert scenario_config.sequences[\"sequence3\"] == [task1_config, task2_config, task4_config] scenario_config.remove_sequences(\"sequence1\") assert len(scenario_config.sequences) == 2 scenario_config.remove_sequences([\"sequence2\", \"sequence3\"]) assert len(scenario_config.sequences) == 0 "} {"text": "from unittest.mock import patch import pytest from src.taipy.core._init_version import _read_version from src.taipy.core.config.core_section import CoreSection from src.taipy.core.exceptions import ConfigCoreVersionMismatched from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile _MOCK_CORE_VERSION = \"3.1.1\" @pytest.fixture(scope=\"function\", autouse=True) def mock_core_version(): with patch(\"src.taipy.core.config.core_section._read_version\") as mock_read_version: mock_read_version.return_value = _MOCK_CORE_VERSION CoreSection._CURRENT_CORE_VERSION = _MOCK_CORE_VERSION Config.unique_sections[CoreSection.name] = CoreSection.default_config() Config._default_config._unique_sections[CoreSection.name] = CoreSection.default_config() yield @pytest.fixture(scope=\"session\", autouse=True) def reset_core_version(): yield CoreSection._CURRENT_CORE_VERSION = _read_version() class TestCoreVersionInCoreSectionConfig: major, minor, patch = _MOCK_CORE_VERSION.split(\".\") current_version = f\"{major}.{minor}.{patch}\" current_dev_version = f\"{major}.{minor}.{patch}.dev0\" compatible_future_version = f\"{major}.{minor}.{int(patch) + 1}\" compatible_future_dev_version = f\"{major}.{minor}.{int(patch) + 1}.dev0\" core_version_is_compatible = [ # Current version and dev version should be compatible (f\"{major}.{minor}.{patch}\", True), (f\"{major}.{minor}.{patch}.dev0\", True), # Future versions with same major and minor should be 
compatible (f\"{major}.{minor}.{int(patch) + 1}\", True), (f\"{major}.{minor}.{int(patch) + 1}.dev0\", True), # Past versions with same major and minor should be compatible (f\"{major}.{minor}.{int(patch) - 1}\", True), (f\"{major}.{minor}.{int(patch) - 1}.dev0\", True), # Future versions with different minor number should be incompatible (f\"{major}.{int(minor) + 1}.{patch}\", False), (f\"{major}.{int(minor) + 1}.{patch}.dev0\", False), # Past versions with different minor number should be incompatible (f\"{major}.{int(minor) - 1}.{patch}\", False), (f\"{major}.{int(minor) - 1}.{patch}.dev0\", False), ] @pytest.mark.parametrize(\"core_version, is_compatible\", core_version_is_compatible) def test_load_configuration_file(self, core_version, is_compatible): file_config = NamedTemporaryFile( f\"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{core_version}\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) if is_compatible: Config.load(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION else: with pytest.raises(ConfigCoreVersionMismatched): Config.load(file_config.filename) @pytest.mark.parametrize(\"core_version,is_compatible\", core_version_is_compatible) def test_override_configuration_file(self, core_version, is_compatible): file_config = NamedTemporaryFile( f\"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{core_version}\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) if is_compatible: Config.override(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION else: with pytest.raises(ConfigCoreVersionMismatched): Config.override(file_config.filename) def test_load_configuration_file_without_core_section(self): file_config = NamedTemporaryFile( \"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" [VERSION_MIGRATION.migration_fcts] \"\"\" ) Config.load(file_config.filename) assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION "} {"text": "import os from datetime import timedelta from unittest import mock from src.taipy.core.config import DataNodeConfig, ScenarioConfig, TaskConfig from src.taipy.core.config.core_section import CoreSection from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def test_write_configuration_file(): expected_config = f\"\"\" [TAIPY] [JOB] mode = \"standalone\" max_nb_of_workers = \"2:int\" [CORE] root_folder = \"./taipy/\" storage_folder = \".data/\" repository_type = \"filesystem\" read_entity_retry = \"0:int\" mode = \"development\" version_number = \"\" force = \"False:bool\" core_version = \"{CoreSection._CURRENT_CORE_VERSION}\" [VERSION_MIGRATION.migration_fcts] [DATA_NODE.default] storage_type = \"in_memory\" scope = 
\"SCENARIO:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" custom = \"default_custom_prop\" [DATA_NODE.dn1] storage_type = \"pickle\" scope = \"SCENARIO:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" custom = \"custom property\" default_data = \"dn1\" [DATA_NODE.dn2] storage_type = \"ENV[FOO]\" scope = \"SCENARIO:SCOPE\" validity_period = \"2d0h0m0s:timedelta\" foo = \"bar\" default_data = \"dn2\" baz = \"ENV[QUX]\" quux = \"ENV[QUUZ]:bool\" corge = [ \"grault\", \"ENV[GARPLY]\", \"ENV[WALDO]:int\", \"3.0:float\",] [DATA_NODE.dn3] storage_type = \"ENV[FOO]\" scope = \"SCENARIO:SCOPE\" validity_period = \"1d0h0m0s:timedelta\" foo = \"bar\" default_data = \"dn3\" quux = \"ENV[QUUZ]:bool\" [TASK.default] inputs = [] outputs = [] skippable = \"False:bool\" [TASK.t1] function = \"builtins.print:function\" inputs = [ \"dn1:SECTION\",] outputs = [ \"dn2:SECTION\",] skippable = \"False:bool\" description = \"t1 description\" [SCENARIO.default] tasks = [] additional_data_nodes = [] frequency = \"QUARTERLY:FREQUENCY\" owner = \"Michel Platini\" [SCENARIO.s1] tasks = [ \"t1:SECTION\",] additional_data_nodes = [ \"dn3:SECTION\",] frequency = \"QUARTERLY:FREQUENCY\" owner = \"Raymond Kopa\" [SCENARIO.default.comparators] [SCENARIO.default.sequences] [SCENARIO.s1.comparators] [SCENARIO.s1.sequences] sequence = [ \"t1:SECTION\",] \"\"\".strip() tf = NamedTemporaryFile() with mock.patch.dict( os.environ, {\"FOO\": \"in_memory\", \"QUX\": \"qux\", \"QUUZ\": \"true\", \"GARPLY\": \"garply\", \"WALDO\": \"17\"} ): Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) Config.set_default_data_node_configuration( storage_type=\"in_memory\", custom=\"default_custom_prop\", validity_period=timedelta(1), ) dn1_cfg_v2 = Config.configure_data_node( \"dn1\", storage_type=\"pickle\", scope=Scope.SCENARIO, default_data=\"dn1\", custom=\"custom property\" ) dn2_cfg_v2 = Config.configure_data_node( \"dn2\", storage_type=\"ENV[FOO]\", validity_period=timedelta(2), foo=\"bar\", default_data=\"dn2\", baz=\"ENV[QUX]\", quux=\"ENV[QUUZ]:bool\", corge=(\"grault\", \"ENV[GARPLY]\", \"ENV[WALDO]:int\", 3.0), ) dn3_cfg_v2 = Config.configure_data_node( \"dn3\", storage_type=\"ENV[FOO]\", foo=\"bar\", default_data=\"dn3\", quux=\"ENV[QUUZ]:bool\", ) assert dn2_cfg_v2.scope == Scope.SCENARIO t1_cfg_v2 = Config.configure_task(\"t1\", print, dn1_cfg_v2, dn2_cfg_v2, description=\"t1 description\") Config.set_default_scenario_configuration([], [], Frequency.QUARTERLY, owner=\"Michel Platini\") Config.configure_scenario( \"s1\", task_configs=[t1_cfg_v2], additional_data_node_configs=[dn3_cfg_v2], frequency=Frequency.QUARTERLY, owner=\"Raymond Kopa\", sequences={\"sequence\": [t1_cfg_v2]}, ) Config.backup(tf.filename) actual_config = tf.read().strip() # problem here assert actual_config == expected_config Config.override(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == expected_config def test_read_configuration_file(): file_config = NamedTemporaryFile( \"\"\" [DATA_NODE.default] has_header = true [DATA_NODE.my_datanode] path = \"/data/csv\" validity_period = \"1d0h0m0s:timedelta\" [DATA_NODE.my_datanode2] path = \"/data2/csv\" [DATA_NODE.my_datanode3] path = \"/data3/csv\" source = \"local\" [TASK.my_task] inputs = [\"my_datanode:SECTION\"] outputs = [\"my_datanode2:SECTION\"] description = \"task description\" [SCENARIO.my_scenario] tasks = [ \"my_task:SECTION\"] additional_data_nodes = [\"my_datanode3:SECTION\"] owner = \"John Doe\" 
[SCENARIO.my_scenario.sequences] sequence = [ \"my_task:SECTION\",] \"\"\" ) Config.configure_task(\"my_task\", print) Config.override(file_config.filename) assert len(Config.data_nodes) == 4 assert type(Config.data_nodes[\"my_datanode\"]) == DataNodeConfig assert type(Config.data_nodes[\"my_datanode2\"]) == DataNodeConfig assert type(Config.data_nodes[\"my_datanode3\"]) == DataNodeConfig assert Config.data_nodes[\"my_datanode\"].path == \"/data/csv\" assert Config.data_nodes[\"my_datanode2\"].path == \"/data2/csv\" assert Config.data_nodes[\"my_datanode3\"].path == \"/data3/csv\" assert Config.data_nodes[\"my_datanode\"].id == \"my_datanode\" assert Config.data_nodes[\"my_datanode2\"].id == \"my_datanode2\" assert Config.data_nodes[\"my_datanode3\"].id == \"my_datanode3\" assert Config.data_nodes[\"my_datanode\"].validity_period == timedelta(1) assert Config.data_nodes[\"my_datanode3\"].source == \"local\" assert len(Config.tasks) == 2 assert type(Config.tasks[\"my_task\"]) == TaskConfig assert Config.tasks[\"my_task\"].id == \"my_task\" assert Config.tasks[\"my_task\"].description == \"task description\" assert Config.tasks[\"my_task\"].function == print assert len(Config.tasks[\"my_task\"].inputs) == 1 assert type(Config.tasks[\"my_task\"].inputs[0]) == DataNodeConfig assert Config.tasks[\"my_task\"].inputs[0].path == \"/data/csv\" assert Config.tasks[\"my_task\"].inputs[0].id == \"my_datanode\" assert len(Config.tasks[\"my_task\"].outputs) == 1 assert type(Config.tasks[\"my_task\"].outputs[0]) == DataNodeConfig assert Config.tasks[\"my_task\"].outputs[0].path == \"/data2/csv\" assert Config.tasks[\"my_task\"].outputs[0].id == \"my_datanode2\" assert len(Config.scenarios) == 2 assert type(Config.scenarios[\"my_scenario\"]) == ScenarioConfig assert Config.scenarios[\"my_scenario\"].id == \"my_scenario\" assert Config.scenarios[\"my_scenario\"].owner == \"John Doe\" assert len(Config.scenarios[\"my_scenario\"].tasks) == 1 assert type(Config.scenarios[\"my_scenario\"].tasks[0]) == TaskConfig assert len(Config.scenarios[\"my_scenario\"].additional_data_nodes) == 1 assert type(Config.scenarios[\"my_scenario\"].additional_data_nodes[0]) == DataNodeConfig assert Config.scenarios[\"my_scenario\"].tasks[0].id == \"my_task\" assert Config.scenarios[\"my_scenario\"].tasks[0].description == \"task description\" assert Config.scenarios[\"my_scenario\"].additional_data_nodes[0].id == \"my_datanode3\" assert Config.scenarios[\"my_scenario\"].additional_data_nodes[0].source == \"local\" assert [task.id for task in Config.scenarios[\"my_scenario\"].sequences[\"sequence\"]] == [ Config.scenarios[\"my_scenario\"].tasks[0].id ] "} {"text": "from datetime import timedelta from taipy.config import Config from taipy.config.common.scope import Scope class TestConfig: def test_configure_csv_data_node(self): a, b, c, d, e, f = \"foo\", \"path\", True, \"numpy\", Scope.SCENARIO, timedelta(1) Config.configure_csv_data_node(a, b, c, d, e, f) assert len(Config.data_nodes) == 2 def test_configure_excel_data_node(self): a, b, c, d, e, f, g = \"foo\", \"path\", True, \"Sheet1\", \"numpy\", Scope.SCENARIO, timedelta(1) Config.configure_excel_data_node(a, b, c, d, e, f, g) assert len(Config.data_nodes) == 2 def test_configure_generic_data_node(self): a, b, c, d, e, f, g, h = \"foo\", print, print, tuple([]), tuple([]), Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_generic_data_node(a, b, c, d, e, f, g, property=h) assert len(Config.data_nodes) == 2 def test_configure_in_memory_data_node(self): a, b, c, d, e = 
\"foo\", 0, Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_in_memory_data_node(a, b, c, d, property=e) assert len(Config.data_nodes) == 2 def test_configure_pickle_data_node(self): a, b, c, d, e = \"foo\", 0, Scope.SCENARIO, timedelta(1), \"path\" Config.configure_pickle_data_node(a, b, c, d, path=e) assert len(Config.data_nodes) == 2 def test_configure_json_data_node(self): a, dp, ec, dc, sc, f, p = \"foo\", \"path\", \"ec\", \"dc\", Scope.SCENARIO, timedelta(1), \"qux\" Config.configure_json_data_node(a, dp, ec, dc, sc, f, path=p) assert len(Config.data_nodes) == 2 def test_configure_sql_table_data_node(self): a, b, c, d, e, f, g, h, i, extra_args, exposed_type, scope, vp, k = ( \"foo\", \"user\", \"pwd\", \"db\", \"engine\", \"table_name\", \"port\", \"host\", \"driver\", {\"foo\": \"bar\"}, \"exposed_type\", Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_sql_table_data_node(a, b, c, d, e, f, g, h, i, extra_args, exposed_type, scope, vp, property=k) assert len(Config.data_nodes) == 2 def test_configure_sql_data_node(self): a, b, c, d, e, f, g, h, i, j, k, extra_args, exposed_type, scope, vp, k = ( \"foo\", \"user\", \"pwd\", \"db\", \"engine\", \"read_query\", \"write_query_builder\", \"append_query_builder\", \"port\", \"host\", \"driver\", {\"foo\": \"bar\"}, \"exposed_type\", Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_sql_data_node(a, b, c, d, e, f, g, h, i, j, k, extra_args, exposed_type, scope, vp, property=k) assert len(Config.data_nodes) == 2 def test_configure_mongo_data_node(self): a, b, c, d, e, f, g, h, extra_args, scope, vp, k = ( \"foo\", \"db_name\", \"collection_name\", None, \"user\", \"pwd\", \"host\", \"port\", {\"foo\": \"bar\"}, Scope.SCENARIO, timedelta(1), \"qux\", ) Config.configure_mongo_collection_data_node(a, b, c, d, e, f, g, h, extra_args, scope, vp, property=k) assert len(Config.data_nodes) == 2 "} {"text": "import os from unittest import mock import pytest from taipy.config.config import Config from taipy.config.exceptions.exceptions import InconsistentEnvVariableError, MissingEnvVariableError from tests.core.utils.named_temporary_file import NamedTemporaryFile def test_override_default_configuration_with_code_configuration(): assert not Config.core.root_folder == \"foo\" assert len(Config.data_nodes) == 1 assert len(Config.tasks) == 1 assert len(Config.scenarios) == 1 Config.configure_job_executions(max_nb_of_workers=-1) Config.configure_core(root_folder=\"foo\") foo_config = Config.configure_data_node(\"foo\", \"in_memory\") xyz_config = Config.configure_data_node(\"xyz\") bar_config = Config.configure_task(\"bar\", print, [foo_config], []) qux_config = Config.configure_scenario(\"qux\", [bar_config], [xyz_config]) assert Config.job_config.max_nb_of_workers == -1 assert Config.core.root_folder == \"foo\" assert len(Config.data_nodes) == 3 assert \"default\" in Config.data_nodes assert foo_config.id in Config.data_nodes assert xyz_config.id in Config.data_nodes assert Config.data_nodes[foo_config.id].storage_type == \"in_memory\" assert Config.data_nodes[xyz_config.id].storage_type == \"pickle\" assert len(Config.tasks) == 2 assert \"default\" in Config.tasks assert bar_config.id in Config.tasks assert len(Config.tasks[bar_config.id].input_configs) == 1 assert Config.tasks[bar_config.id].input_configs[0].id == foo_config.id assert len(Config.tasks[bar_config.id].output_configs) == 0 assert Config.tasks[bar_config.id].function == print assert len(Config.scenarios) == 2 assert \"default\" in Config.scenarios assert 
qux_config.id in Config.scenarios assert len(Config.scenarios[qux_config.id].tasks) == 1 assert Config.scenarios[qux_config.id].tasks[0].id == bar_config.id assert len(Config.scenarios[qux_config.id].additional_data_nodes) == 1 assert Config.scenarios[qux_config.id].additional_data_nodes[0].id == xyz_config.id def test_override_default_config_with_code_config_including_env_variable_values(): Config.configure_core() assert Config.core.repository_type == \"filesystem\" Config.configure_core(repository_type=\"othertype\") assert Config.core.repository_type == \"othertype\" with mock.patch.dict(os.environ, {\"REPOSITORY_TYPE\": \"foo\"}): Config.configure_core(repository_type=\"ENV[REPOSITORY_TYPE]\") assert Config.core.repository_type == \"foo\" def test_override_default_configuration_with_file_configuration(): tf = NamedTemporaryFile( \"\"\" [TAIPY] [JOB] max_nb_of_workers = -1 [DATA_NODE.foo] [TASK.bar] [SCENARIO.qux] \"\"\" ) assert Config.job_config.max_nb_of_workers == 1 assert len(Config.data_nodes) == 1 assert len(Config.tasks) == 1 assert len(Config.scenarios) == 1 Config.override(tf.filename) assert Config.job_config.max_nb_of_workers == -1 assert len(Config.data_nodes) == 2 assert \"default\" in Config.data_nodes assert \"foo\" in Config.data_nodes assert len(Config.tasks) == 2 assert \"default\" in Config.tasks assert \"bar\" in Config.tasks assert \"default\" in Config.scenarios assert len(Config.scenarios) == 2 assert \"qux\" in Config.scenarios def test_override_default_config_with_file_config_including_env_variable_values(): tf = NamedTemporaryFile( \"\"\" [JOB] max_nb_of_workers = \"ENV[FOO]:int\" start_executor = \"ENV[BAR]\" \"\"\" ) assert Config.job_config.max_nb_of_workers == 1 assert not Config.job_config.start_executor with mock.patch.dict(os.environ, {\"FOO\": \"6\", \"BAR\": \"TRUe\"}): Config.override(tf.filename) assert Config.job_config.max_nb_of_workers == 6 assert Config.job_config.start_executor with mock.patch.dict(os.environ, {\"FOO\": \"foo\", \"BAR\": \"true\"}): with pytest.raises(InconsistentEnvVariableError): Config.override(tf.filename) with mock.patch.dict(os.environ, {\"FOO\": \"5\"}): with pytest.raises(MissingEnvVariableError): Config.override(tf.filename) def test_code_configuration_do_not_override_file_configuration(): config_from_filename = NamedTemporaryFile( \"\"\" [JOB] max_nb_of_workers = 2 \"\"\" ) Config.override(config_from_filename.filename) Config.configure_job_executions(max_nb_of_workers=21) assert Config.job_config.max_nb_of_workers == 2 # From file config def test_code_configuration_do_not_override_file_configuration_including_env_variable_values(): config_from_filename = NamedTemporaryFile( \"\"\" [JOB] max_nb_of_workers = 2 \"\"\" ) Config.override(config_from_filename.filename) with mock.patch.dict(os.environ, {\"FOO\": \"21\"}): Config.configure_job_executions(max_nb_of_workers=\"ENV[FOO]\") assert Config.job_config.max_nb_of_workers == 2 # From file config def test_file_configuration_override_code_configuration(): config_from_filename = NamedTemporaryFile( \"\"\" [JOB] max_nb_of_workers = 2 \"\"\" ) Config.configure_job_executions(max_nb_of_workers=21) Config.override(config_from_filename.filename) assert Config.job_config.max_nb_of_workers == 2 # From file config def test_file_configuration_override_code_configuration_including_env_variable_values(): config_from_filename = NamedTemporaryFile( \"\"\" [JOB] max_nb_of_workers = \"ENV[FOO]:int\" \"\"\" ) Config.configure_job_executions(max_nb_of_workers=21) with 
mock.patch.dict(os.environ, {\"FOO\": \"2\"}):\n        Config.override(config_from_filename.filename)\n        assert Config.job_config.max_nb_of_workers == 2  # From file config\n\n\ndef test_override_default_configuration_with_multiple_configurations():\n    file_config = NamedTemporaryFile(\n        \"\"\"\n[DATA_NODE.default]\nhas_header = true\n\n[DATA_NODE.my_datanode]\npath = \"/data/csv\"\n\n[JOB]\nmax_nb_of_workers = 10\n\n[TAIPY]\n\"\"\"\n    )\n    # Default config is applied\n    assert Config.job_config.max_nb_of_workers == 1\n    # Code config is applied\n    Config.configure_job_executions(max_nb_of_workers=-1)\n    assert Config.job_config.max_nb_of_workers == -1\n    # File config is applied\n    Config.override(file_config.filename)\n    assert Config.job_config.max_nb_of_workers == 10\n    assert Config.data_nodes[\"my_datanode\"].has_header\n    assert Config.data_nodes[\"my_datanode\"].path == \"/data/csv\"\n    assert Config.data_nodes[\"my_datanode\"].not_defined is None\n\n\ndef test_override_default_configuration_with_multiple_configurations_including_environment_variable_values():\n    file_config = NamedTemporaryFile(\n        \"\"\"\n[DATA_NODE.default]\nhas_header = true\n\n[DATA_NODE.my_datanode]\npath = \"ENV[FOO]\"\n\n[JOB]\nmax_nb_of_workers = 10\n\n[TAIPY]\n\"\"\"\n    )\n    with mock.patch.dict(os.environ, {\"FOO\": \"/data/csv\", \"BAR\": \"/baz/data/csv\"}):\n        # Default config is applied\n        assert Config.job_config.max_nb_of_workers == 1\n        # Code config is applied\n        Config.configure_job_executions(max_nb_of_workers=-1)\n        Config.configure_data_node(\"my_datanode\", path=\"ENV[BAR]\")\n        assert Config.job_config.max_nb_of_workers == -1\n        assert Config.data_nodes[\"my_datanode\"].path == \"/baz/data/csv\"\n        # File config is applied\n        Config.override(file_config.filename)\n        assert Config.job_config.max_nb_of_workers == 10\n        assert Config.data_nodes[\"my_datanode\"].has_header\n        assert Config.data_nodes[\"my_datanode\"].path == \"/data/csv\"\n        assert Config.data_nodes[\"my_datanode\"].not_defined is None"}
{"text": "from unittest.mock import patch\n\nfrom src.taipy.core import Core\nfrom src.taipy.core._version._version_manager_factory import _VersionManagerFactory\nfrom taipy.config import Config\nfrom tests.core.utils.named_temporary_file import NamedTemporaryFile\n\n\ndef test_core_section():\n    with patch(\"sys.argv\", [\"prog\"]):\n        core = Core()\n        core.run()\n        assert Config.core.mode == \"development\"\n        assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version()\n        assert not Config.core.force\n        core.stop()\n\n    with patch(\"sys.argv\", [\"prog\"]):\n        Config.configure_core(mode=\"experiment\", version_number=\"test_num\", force=True)\n        core = Core()\n        core.run()\n        assert Config.core.mode == \"experiment\"\n        assert Config.core.version_number == \"test_num\"\n        assert Config.core.force\n        core.stop()\n\n    toml_config = NamedTemporaryFile(\n        content=\"\"\"\n[TAIPY]\n\n[CORE]\nmode = \"production\"\nversion_number = \"test_num_2\"\nforce = \"true:bool\"\n\"\"\"\n    )\n    Config.load(toml_config.filename)\n    with patch(\"sys.argv\", [\"prog\"]):\n        core = Core()\n        core.run()\n        assert Config.core.mode == \"production\"\n        assert Config.core.version_number == \"test_num_2\"\n        assert Config.core.force\n        core.stop()\n\n    with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"test_num_3\", \"--no-taipy-force\"]):\n        core = Core()\n        core.run()\n        assert Config.core.mode == \"experiment\"\n        assert Config.core.version_number == \"test_num_3\"\n        assert not Config.core.force\n        core.stop()\n\n\ndef test_clean_config():\n    core_config = Config.configure_core(mode=\"experiment\", version_number=\"test_num\", force=True)\n    assert Config.core is core_config\n\n    core_config._clean()\n\n    # Check if the instance before and after _clean() is the same\n    assert Config.core is core_config\n\n    assert core_config.mode == \"development\"\n    assert core_config.version_number == \"\"\n    assert core_config.force is False\n    assert core_config.properties == {}"}
{"text": "import datetime\nimport os\nfrom unittest import mock\n\nimport pytest\n\nfrom src.taipy.core import MongoDefaultDocument\nfrom src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory\nfrom src.taipy.core.config import DataNodeConfig\nfrom src.taipy.core.config.job_config import JobConfig\nfrom taipy.config.common.scope import Scope\nfrom taipy.config.config import Config\nfrom taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked\n\n\ndef test_data_node_config_default_parameter():\n    csv_dn_cfg = Config.configure_data_node(\"data_node_1\", \"csv\")\n    assert csv_dn_cfg.scope == Scope.SCENARIO\n    assert csv_dn_cfg.has_header is True\n    assert csv_dn_cfg.exposed_type == \"pandas\"\n    assert csv_dn_cfg.validity_period is None\n\n    json_dn_cfg = Config.configure_data_node(\"data_node_2\", \"json\")\n    assert json_dn_cfg.scope == Scope.SCENARIO\n    assert json_dn_cfg.validity_period is None\n\n    parquet_dn_cfg = Config.configure_data_node(\"data_node_3\", \"parquet\")\n    assert parquet_dn_cfg.scope == Scope.SCENARIO\n    assert parquet_dn_cfg.engine == \"pyarrow\"\n    assert parquet_dn_cfg.compression == \"snappy\"\n    assert parquet_dn_cfg.exposed_type == \"pandas\"\n    assert parquet_dn_cfg.validity_period is None\n\n    excel_dn_cfg = Config.configure_data_node(\"data_node_4\", \"excel\")\n    assert excel_dn_cfg.scope == Scope.SCENARIO\n    assert excel_dn_cfg.has_header is True\n    assert excel_dn_cfg.exposed_type == \"pandas\"\n    assert excel_dn_cfg.validity_period is None\n\n    generic_dn_cfg = Config.configure_data_node(\"data_node_5\", \"generic\")\n    assert generic_dn_cfg.scope == Scope.SCENARIO\n    assert generic_dn_cfg.validity_period is None\n\n    in_memory_dn_cfg = Config.configure_data_node(\"data_node_6\", \"in_memory\")\n    assert in_memory_dn_cfg.scope == Scope.SCENARIO\n    assert in_memory_dn_cfg.validity_period is None\n\n    pickle_dn_cfg = Config.configure_data_node(\"data_node_7\", \"pickle\")\n    assert pickle_dn_cfg.scope == Scope.SCENARIO\n    assert pickle_dn_cfg.validity_period is None\n\n    sql_table_dn_cfg = Config.configure_data_node(\n        \"data_node_8\", \"sql_table\", db_name=\"test\", db_engine=\"mssql\", table_name=\"test\"\n    )\n    assert sql_table_dn_cfg.scope == Scope.SCENARIO\n    assert sql_table_dn_cfg.db_host == \"localhost\"\n    assert sql_table_dn_cfg.db_port == 1433\n    assert sql_table_dn_cfg.db_driver == \"\"\n    assert sql_table_dn_cfg.sqlite_file_extension == \".db\"\n    assert sql_table_dn_cfg.exposed_type == \"pandas\"\n    assert sql_table_dn_cfg.validity_period is None\n\n    sql_dn_cfg = Config.configure_data_node(\n        \"data_node_9\", \"sql\", db_name=\"test\", db_engine=\"mssql\", read_query=\"test\", write_query_builder=print\n    )\n    assert sql_dn_cfg.scope == Scope.SCENARIO\n    assert sql_dn_cfg.db_host == \"localhost\"\n    assert sql_dn_cfg.db_port == 1433\n    assert sql_dn_cfg.db_driver == \"\"\n    assert sql_dn_cfg.sqlite_file_extension == \".db\"\n    assert sql_dn_cfg.exposed_type == \"pandas\"\n    assert sql_dn_cfg.validity_period is None\n\n    mongo_dn_cfg = Config.configure_data_node(\n        \"data_node_10\", \"mongo_collection\", db_name=\"test\", collection_name=\"test\"\n    )\n    assert mongo_dn_cfg.scope == Scope.SCENARIO\n    assert mongo_dn_cfg.db_host == \"localhost\"\n    assert mongo_dn_cfg.db_port == 27017\n    assert mongo_dn_cfg.custom_document == MongoDefaultDocument\n    assert mongo_dn_cfg.db_username == \"\"\n    assert mongo_dn_cfg.db_password == \"\"\n    assert mongo_dn_cfg.db_driver == \"\"\n    assert
mongo_dn_cfg.validity_period is None def test_data_node_config_check(caplog): data_node_config = Config.configure_data_node(\"data_nodes1\", \"pickle\") assert list(Config.data_nodes) == [DataNodeConfig._DEFAULT_KEY, data_node_config.id] data_node2_config = Config.configure_data_node(\"data_nodes2\", \"pickle\") assert list(Config.data_nodes) == [DataNodeConfig._DEFAULT_KEY, data_node_config.id, data_node2_config.id] data_node3_config = Config.configure_data_node(\"data_nodes3\", \"csv\", has_header=True, default_path=\"\") assert list(Config.data_nodes) == [ \"default\", data_node_config.id, data_node2_config.id, data_node3_config.id, ] with pytest.raises(SystemExit): Config.configure_data_node(\"data_nodes\", storage_type=\"bar\") Config.check() expected_error_message = ( \"`storage_type` field of DataNodeConfig `data_nodes` must be either csv, sql_table,\" \" sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory. Current\" ' value of property `storage_type` is \"bar\".' ) assert expected_error_message in caplog.text with pytest.raises(SystemExit): Config.configure_data_node(\"data_nodes\", scope=\"bar\") Config.check() expected_error_message = ( \"`scope` field of DataNodeConfig `data_nodes` must be populated with a Scope value.\" ' Current value of property `scope` is \"bar\".' ) assert expected_error_message in caplog.text with pytest.raises(TypeError): Config.configure_data_node(\"data_nodes\", storage_type=\"sql\") with pytest.raises(SystemExit): Config.configure_data_node(\"data_nodes\", storage_type=\"generic\") Config.check() expected_error_message = ( \"`storage_type` field of DataNodeConfig `data_nodes` must be either csv, sql_table,\" \" sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory.\" ' Current value of property `storage_type` is \"bar\".' 
) assert expected_error_message in caplog.text def test_configure_data_node_from_another_configuration(): d1_cfg = Config.configure_sql_table_data_node( \"d1\", db_username=\"foo\", db_password=\"bar\", db_name=\"db\", db_engine=\"mssql\", db_port=8080, db_host=\"somewhere\", table_name=\"foo\", scope=Scope.GLOBAL, foo=\"bar\", ) d2_cfg = Config.configure_data_node_from( source_configuration=d1_cfg, id=\"d2\", table_name=\"table_2\", ) assert d2_cfg.id == \"d2\" assert d2_cfg.storage_type == \"sql_table\" assert d2_cfg.scope == Scope.GLOBAL assert d2_cfg.validity_period is None assert d2_cfg.db_username == \"foo\" assert d2_cfg.db_password == \"bar\" assert d2_cfg.db_name == \"db\" assert d2_cfg.db_engine == \"mssql\" assert d2_cfg.db_port == 8080 assert d2_cfg.db_host == \"somewhere\" assert d2_cfg.table_name == \"table_2\" assert d2_cfg.foo == \"bar\" d3_cfg = Config.configure_data_node_from( source_configuration=d1_cfg, id=\"d3\", scope=Scope.SCENARIO, validity_period=datetime.timedelta(days=1), table_name=\"table_3\", foo=\"baz\", ) assert d3_cfg.id == \"d3\" assert d3_cfg.storage_type == \"sql_table\" assert d3_cfg.scope == Scope.SCENARIO assert d3_cfg.validity_period == datetime.timedelta(days=1) assert d3_cfg.db_username == \"foo\" assert d3_cfg.db_password == \"bar\" assert d3_cfg.db_name == \"db\" assert d3_cfg.db_engine == \"mssql\" assert d3_cfg.db_port == 8080 assert d3_cfg.db_host == \"somewhere\" assert d3_cfg.table_name == \"table_3\" assert d3_cfg.foo == \"baz\" def test_data_node_count(): Config.configure_data_node(\"data_nodes1\", \"pickle\") assert len(Config.data_nodes) == 2 Config.configure_data_node(\"data_nodes2\", \"pickle\") assert len(Config.data_nodes) == 3 Config.configure_data_node(\"data_nodes3\", \"pickle\") assert len(Config.data_nodes) == 4 def test_data_node_getitem(): data_node_id = \"data_nodes1\" data_node_config = Config.configure_data_node(data_node_id, \"pickle\", default_path=\"foo.p\") assert Config.data_nodes[data_node_id].id == data_node_config.id assert Config.data_nodes[data_node_id].default_path == \"foo.p\" assert Config.data_nodes[data_node_id].storage_type == data_node_config.storage_type assert Config.data_nodes[data_node_id].scope == data_node_config.scope assert Config.data_nodes[data_node_id].properties == data_node_config.properties assert Config.data_nodes[data_node_id].cacheable == data_node_config.cacheable def test_data_node_creation_no_duplication(): Config.configure_data_node(\"data_nodes1\", \"pickle\") assert len(Config.data_nodes) == 2 Config.configure_data_node(\"data_nodes1\", \"pickle\") assert len(Config.data_nodes) == 2 def test_date_node_create_with_datetime(): data_node_config = Config.configure_data_node( id=\"datetime_data\", my_property=datetime.datetime(1991, 1, 1), foo=\"hello\", test=1, test_dict={\"type\": \"Datetime\", 2: \"daw\"}, ) assert data_node_config.foo == \"hello\" assert data_node_config.my_property == datetime.datetime(1991, 1, 1) assert data_node_config.test == 1 assert data_node_config.test_dict.get(\"type\") == \"Datetime\" def test_data_node_with_env_variable_value(): with mock.patch.dict(os.environ, {\"FOO\": \"pickle\", \"BAR\": \"baz\"}): Config.configure_data_node(\"data_node\", storage_type=\"ENV[FOO]\", prop=\"ENV[BAR]\") assert Config.data_nodes[\"data_node\"].prop == \"baz\" assert Config.data_nodes[\"data_node\"].properties[\"prop\"] == \"baz\" assert Config.data_nodes[\"data_node\"]._properties[\"prop\"] == \"ENV[BAR]\" assert Config.data_nodes[\"data_node\"].storage_type == \"pickle\" 
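        # The public attributes expose the resolved environment-variable values, while the underscored attributes below keep the raw ENV[...] template strings.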
assert Config.data_nodes[\"data_node\"]._storage_type == \"ENV[FOO]\" def test_data_node_with_env_variable_in_write_fct_args(): def read_fct(): ... def write_fct(): ... with mock.patch.dict(os.environ, {\"FOO\": \"bar\", \"BAZ\": \"qux\"}): Config.configure_data_node( \"data_node\", storage_type=\"generic\", read_fct=read_fct, write_fct=write_fct, write_fct_args=[\"ENV[FOO]\", \"my_param\", \"ENV[BAZ]\"], ) assert Config.data_nodes[\"data_node\"].write_fct_args == [\"bar\", \"my_param\", \"qux\"] def test_data_node_with_env_variable_in_read_fct_args(): def read_fct(): ... def write_fct(): ... with mock.patch.dict(os.environ, {\"FOO\": \"bar\", \"BAZ\": \"qux\"}): Config.configure_data_node( \"data_node\", storage_type=\"generic\", read_fct=read_fct, write_fct=write_fct, read_fct_args=[\"ENV[FOO]\", \"my_param\", \"ENV[BAZ]\"], ) assert Config.data_nodes[\"data_node\"].read_fct_args == [\"bar\", \"my_param\", \"qux\"] def test_block_datanode_config_update_in_development_mode(): data_node_id = \"data_node_id\" Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) data_node_config = Config.configure_data_node( id=data_node_id, storage_type=\"pickle\", default_path=\"foo.p\", scope=Scope.SCENARIO, ) assert Config.data_nodes[data_node_id].id == data_node_id assert Config.data_nodes[data_node_id].default_path == \"foo.p\" assert Config.data_nodes[data_node_id].storage_type == \"pickle\" assert Config.data_nodes[data_node_id].scope == Scope.SCENARIO assert Config.data_nodes[data_node_id].properties == {\"default_path\": \"foo.p\"} _OrchestratorFactory._build_dispatcher() with pytest.raises(ConfigurationUpdateBlocked): data_node_config.storage_type = \"foo\" with pytest.raises(ConfigurationUpdateBlocked): data_node_config.scope = Scope.SCENARIO with pytest.raises(ConfigurationUpdateBlocked): data_node_config.cacheable = True with pytest.raises(ConfigurationUpdateBlocked): data_node_config.properties = {\"foo\": \"bar\"} assert Config.data_nodes[data_node_id].id == data_node_id assert Config.data_nodes[data_node_id].default_path == \"foo.p\" assert Config.data_nodes[data_node_id].storage_type == \"pickle\" assert Config.data_nodes[data_node_id].scope == Scope.SCENARIO assert Config.data_nodes[data_node_id].properties == {\"default_path\": \"foo.p\"} def test_block_datanode_config_update_in_standalone_mode(): data_node_id = \"data_node_id\" Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE) data_node_config = Config.configure_data_node( id=data_node_id, storage_type=\"pickle\", default_path=\"foo.p\", scope=Scope.SCENARIO, ) assert Config.data_nodes[data_node_id].id == data_node_id assert Config.data_nodes[data_node_id].default_path == \"foo.p\" assert Config.data_nodes[data_node_id].storage_type == \"pickle\" assert Config.data_nodes[data_node_id].scope == Scope.SCENARIO assert Config.data_nodes[data_node_id].properties == {\"default_path\": \"foo.p\"} _OrchestratorFactory._build_dispatcher() with pytest.raises(ConfigurationUpdateBlocked): data_node_config.storage_type = \"foo\" with pytest.raises(ConfigurationUpdateBlocked): data_node_config.scope = Scope.SCENARIO with pytest.raises(ConfigurationUpdateBlocked): data_node_config.cacheable = True with pytest.raises(ConfigurationUpdateBlocked): data_node_config.properties = {\"foo\": \"bar\"} assert Config.data_nodes[data_node_id].id == data_node_id assert Config.data_nodes[data_node_id].default_path == \"foo.p\" assert Config.data_nodes[data_node_id].storage_type == \"pickle\" assert Config.data_nodes[data_node_id].scope == 
Scope.SCENARIO assert Config.data_nodes[data_node_id].properties == {\"default_path\": \"foo.p\"} def test_clean_config(): dn1_config = Config.configure_data_node( id=\"id1\", storage_type=\"csv\", default_path=\"foo.p\", scope=Scope.GLOBAL, validity_period=datetime.timedelta(2), ) dn2_config = Config.configure_data_node( id=\"id2\", storage_type=\"json\", default_path=\"bar.json\", scope=Scope.GLOBAL, validity_period=datetime.timedelta(2), ) assert Config.data_nodes[\"id1\"] is dn1_config assert Config.data_nodes[\"id2\"] is dn2_config dn1_config._clean() dn2_config._clean() # Check if the instance before and after _clean() is the same assert Config.data_nodes[\"id1\"] is dn1_config assert Config.data_nodes[\"id2\"] is dn2_config # Check if the value is similar to the default_config, but with difference instances assert dn1_config.id == \"id1\" assert dn2_config.id == \"id2\" assert dn1_config.storage_type == dn2_config.storage_type == \"pickle\" assert dn1_config.scope == dn2_config.scope == Scope.SCENARIO assert dn1_config.validity_period is dn2_config.validity_period is None assert dn1_config.default_path is dn2_config.default_path is None assert dn1_config.properties == dn2_config.properties == {} def test_deprecated_cacheable_attribute_remains_compatible(): dn_1_id = \"dn_1_id\" dn_1_config = Config.configure_data_node( id=dn_1_id, storage_type=\"pickle\", cacheable=False, scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_1_id].id == dn_1_id assert Config.data_nodes[dn_1_id].storage_type == \"pickle\" assert Config.data_nodes[dn_1_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": False} assert not Config.data_nodes[dn_1_id].cacheable dn_1_config.cacheable = True assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_1_id].cacheable dn_2_id = \"dn_2_id\" dn_2_config = Config.configure_data_node( id=dn_2_id, storage_type=\"pickle\", cacheable=True, scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_2_id].id == dn_2_id assert Config.data_nodes[dn_2_id].storage_type == \"pickle\" assert Config.data_nodes[dn_2_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_2_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_2_id].cacheable dn_2_config.cacheable = False assert Config.data_nodes[dn_1_id].properties == {\"cacheable\": False} assert not Config.data_nodes[dn_1_id].cacheable dn_3_id = \"dn_3_id\" dn_3_config = Config.configure_data_node( id=dn_3_id, storage_type=\"pickle\", scope=Scope.SCENARIO, ) assert Config.data_nodes[dn_3_id].id == dn_3_id assert Config.data_nodes[dn_3_id].storage_type == \"pickle\" assert Config.data_nodes[dn_3_id].scope == Scope.SCENARIO assert Config.data_nodes[dn_3_id].properties == {} assert not Config.data_nodes[dn_3_id].cacheable dn_3_config.cacheable = True assert Config.data_nodes[dn_3_id].properties == {\"cacheable\": True} assert Config.data_nodes[dn_3_id].cacheable "} {"text": "from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config import MigrationConfig from taipy.config.config import Config def mock_func(): pass def test_check_if_entity_property_key_used_is_predefined(caplog): with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() assert caplog.text == \"\" core.stop() caplog.clear() Config.unique_sections[MigrationConfig.name]._properties[\"_entity_owner\"] = None with patch(\"sys.argv\", 
[\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() assert ( \"Properties of MigrationConfig `VERSION_MIGRATION` cannot have `_entity_owner` as its property.\" in caplog.text ) caplog.clear() Config.unique_sections[MigrationConfig.name]._properties[\"_entity_owner\"] = \"entity_owner\" with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"Properties of MigrationConfig `VERSION_MIGRATION` cannot have `_entity_owner` as its property.\" ' Current value of property `_entity_owner` is \"entity_owner\".' ) assert expected_error_message in caplog.text def test_check_valid_version(caplog): data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") Config.add_migration_function(\"2.0\", data_nodes1, mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() assert \"The target version for a migration function must be a production version.\" in caplog.text caplog.clear() Config.unblock_update() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() assert caplog.text == \"\" core.stop() def test_check_callable_function(caplog): data_nodes1 = Config.configure_data_node(\"data_nodes1\", \"pickle\") Config.add_migration_function(\"1.0\", data_nodes1, 1) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"The migration function of config `data_nodes1` from version 1.0 must be populated with\" \" Callable value. Current value of property `migration_fcts` is 1.\" ) assert expected_error_message in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.0\", data_nodes1, \"bar\") with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): with pytest.raises(SystemExit): core = Core() core.run() core.stop() expected_error_message = ( \"The migration function of config `data_nodes1` from version 1.0 must be populated with\" ' Callable value. Current value of property `migration_fcts` is \"bar\".' ) assert expected_error_message in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.0\", data_nodes1, mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() def test_check_migration_from_productions_to_productions_exist(caplog): _VersionManager._set_production_version(\"1.0\", True) _VersionManager._set_production_version(\"1.1\", True) _VersionManager._set_production_version(\"1.2\", True) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() assert 'There is no migration function from production version \"1.0\" to version \"1.1\".' in caplog.text assert 'There is no migration function from production version \"1.1\" to version \"1.2\".' in caplog.text caplog.clear() Config.unblock_update() Config.add_migration_function(\"1.2\", \"data_nodes1\", mock_func) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() core.stop() assert 'There is no migration function from production version \"1.0\" to version \"1.1\".' 
in caplog.text "} {"text": "import pytest from taipy.config.checker.issue_collector import IssueCollector from taipy.config.config import Config class TestConfigIdChecker: def test_check_standalone_mode(self, caplog): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") Config.configure_scenario(id=\"bar\", task_configs=[], additional_data_node_configs=[]) Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"bar\", task_configs=[]) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"`bar` is used as the config_id of multiple configurations ['DATA_NODE', 'SCENARIO']\" ' Current value of property `config_id` is \"bar\".' ) assert expected_error_message in caplog.text Config.configure_task(id=\"bar\", function=print) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"`bar` is used as the config_id of multiple configurations ['DATA_NODE', 'TASK', 'SCENARIO']\" ' Current value of property `config_id` is \"bar\".' ) assert expected_error_message in caplog.text Config.configure_task(id=\"foo\", function=print) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 2 expected_error_message = ( \"`foo` is used as the config_id of multiple configurations ['DATA_NODE', 'TASK']\" ' Current value of property `config_id` is \"foo\".' ) assert expected_error_message in caplog.text "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pytest from src.taipy.core.config.job_config import JobConfig from taipy.config.checker.issue_collector import IssueCollector from taipy.config.config import Config class TestJobConfigChecker: def test_check_standalone_mode(self, caplog): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE, max_nb_of_workers=2) Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"DataNode `foo`: In-memory storage type can ONLY be used in development mode. Current\" ' value of property `storage_type` is \"in_memory\".' 
) assert expected_error_message in caplog.text "} {"text": "from copy import copy import pytest from src.taipy.core.config import TaskConfig from src.taipy.core.config.data_node_config import DataNodeConfig from taipy.config.checker.issue_collector import IssueCollector from taipy.config.config import Config class TestTaskConfigChecker: def test_check_config_id(self, caplog): config = Config._applied_config Config._compile_configs() Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 assert len(Config._collector.warnings) == 0 config._sections[TaskConfig.name][\"new\"] = copy(config._sections[TaskConfig.name][\"default\"]) config._sections[TaskConfig.name][\"new\"].id = None with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 2 assert \"config_id of TaskConfig `None` is empty\" in caplog.text assert \"function field of TaskConfig `new` is empty\" in caplog.text assert len(Config._collector.warnings) == 2 assert \"inputs field of TaskConfig `new` is empty.\" in caplog.text assert \"outputs field of TaskConfig `new` is empty.\" in caplog.text caplog.clear() config._sections[TaskConfig.name][\"new\"].id = \"new\" with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 assert len(Config._collector.warnings) == 2 def test_check_if_entity_property_key_used_is_predefined(self, caplog): Config._collector = IssueCollector() config = Config._applied_config Config._compile_configs() Config.check() assert len(Config._collector.errors) == 0 config._sections[TaskConfig.name][\"new\"] = copy(config._sections[TaskConfig.name][\"default\"]) config._sections[TaskConfig.name][\"new\"]._properties[\"_entity_owner\"] = None with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 2 assert \"function field of TaskConfig `new` is empty\" in caplog.text assert \"Properties of TaskConfig `default` cannot have `_entity_owner` as its property.\" in caplog.text caplog.clear() config._sections[TaskConfig.name][\"new\"] = copy(config._sections[TaskConfig.name][\"default\"]) config._sections[TaskConfig.name][\"new\"]._properties[\"_entity_owner\"] = \"entity_owner\" with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 2 assert \"function field of TaskConfig `new` is empty\" in caplog.text expected_error_message = ( \"Properties of TaskConfig `default` cannot have `_entity_owner` as its property.\" ' Current value of property `_entity_owner` is \"entity_owner\".' 
) assert expected_error_message in caplog.text def test_check_inputs(self, caplog): config = Config._applied_config Config._compile_configs() Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 assert len(Config._collector.warnings) == 0 config._sections[TaskConfig.name][\"new\"] = config._sections[TaskConfig.name][\"default\"] config._sections[TaskConfig.name][\"new\"].id, config._sections[TaskConfig.name][\"new\"].function = \"new\", print Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 assert len(Config._collector.warnings) == 2 assert \"inputs field of TaskConfig `new` is empty.\" in caplog.text assert \"outputs field of TaskConfig `new` is empty.\" in caplog.text config._sections[TaskConfig.name][\"new\"]._inputs = \"bar\" with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"inputs field of TaskConfig `new` must be populated with a list of DataNodeConfig\" \" objects. Current value of property `inputs` is ['b', 'a', 'r'].\" ) assert expected_error_message in caplog.text assert len(Config._collector.warnings) == 1 config._sections[TaskConfig.name][\"new\"]._inputs = [\"bar\"] with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"inputs field of TaskConfig `new` must be populated with a list of DataNodeConfig\" \" objects. Current value of property `inputs` is ['bar'].\" ) assert expected_error_message in caplog.text assert len(Config._collector.warnings) == 1 config._sections[TaskConfig.name][\"new\"]._inputs = [DataNodeConfig(\"bar\")] Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 0 assert len(Config._collector.warnings) == 1 config._sections[TaskConfig.name][\"new\"]._inputs = [\"bar\", DataNodeConfig(\"bar\")] with pytest.raises(SystemExit): Config._collector = IssueCollector() Config.check() assert len(Config._collector.errors) == 1 expected_error_message = ( \"inputs field of TaskConfig `new` must be populated with a list of\" \" DataNodeConfig objects. 
Current value of property `inputs` is\" \" ['bar', 0: registration_queue.get() # Test UPDATE Event scenario.is_primary = False assert registration_queue.qsize() == 1 tp.set_primary(scenario) assert registration_queue.qsize() == 2 tp.subscribe_scenario(print, None, scenario=scenario) assert registration_queue.qsize() == 3 tp.unsubscribe_scenario(print, None, scenario=scenario) assert registration_queue.qsize() == 4 tp.tag(scenario, \"testing\") assert registration_queue.qsize() == 5 tp.untag(scenario, \"testing\") assert registration_queue.qsize() == 6 scenario.properties[\"flag\"] = \"production\" assert registration_queue.qsize() == 7 scenario.properties.update({\"description\": \"a scenario\", \"test_mult\": True}) assert registration_queue.qsize() == 9 scenario.properties.pop(\"test_mult\") assert registration_queue.qsize() == 10 scenario.name = \"my_scenario\" assert registration_queue.qsize() == 11 cycle.name = \"new cycle name\" assert registration_queue.qsize() == 12 cycle.properties[\"valid\"] = True assert registration_queue.qsize() == 13 cycle.properties.update({\"re_run_periodically\": True}) assert registration_queue.qsize() == 14 cycle.properties.pop(\"re_run_periodically\") assert registration_queue.qsize() == 15 sequence.properties[\"name\"] = \"weather_forecast\" assert registration_queue.qsize() == 16 tp.subscribe_sequence(print, None, sequence) assert registration_queue.qsize() == 17 tp.unsubscribe_sequence(print, None, sequence) assert registration_queue.qsize() == 18 task.skippable = True assert registration_queue.qsize() == 19 task.properties[\"number_of_run\"] = 2 assert registration_queue.qsize() == 20 task.properties.update({\"debug\": True}) assert registration_queue.qsize() == 21 task.properties.pop(\"debug\") assert registration_queue.qsize() == 22 dn.editor_id = \"new editor id\" assert registration_queue.qsize() == 23 dn.properties[\"sorted\"] = True assert registration_queue.qsize() == 24 dn.properties.update({\"only_fetch_first_100\": True}) assert registration_queue.qsize() == 25 dn.properties.pop(\"only_fetch_first_100\") assert registration_queue.qsize() == 26 published_events = [] while registration_queue.qsize() != 0: published_events.append(registration_queue.get()) expected_event_types = [ EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.CYCLE, EventEntityType.CYCLE, EventEntityType.CYCLE, EventEntityType.CYCLE, EventEntityType.SEQUENCE, EventEntityType.SEQUENCE, EventEntityType.SEQUENCE, EventEntityType.TASK, EventEntityType.TASK, EventEntityType.TASK, EventEntityType.TASK, EventEntityType.DATA_NODE, EventEntityType.DATA_NODE, EventEntityType.DATA_NODE, EventEntityType.DATA_NODE, ] expected_attribute_names = [ \"is_primary\", \"is_primary\", \"subscribers\", \"subscribers\", \"tags\", \"tags\", \"properties\", \"properties\", \"properties\", \"properties\", \"properties\", \"name\", \"properties\", \"properties\", \"properties\", \"properties\", \"subscribers\", \"subscribers\", \"skippable\", \"properties\", \"properties\", \"properties\", \"editor_id\", \"properties\", \"properties\", \"properties\", ] expected_event_entity_id = [ scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, cycle.id, cycle.id, cycle.id, 
cycle.id, sequence.id, sequence.id, sequence.id, task.id, task.id, task.id, task.id, dn.id, dn.id, dn.id, dn.id, ] expected_event_operation_type = [EventOperation.UPDATE] * len(expected_event_types) assert all( [ event.entity_type == expected_event_types[i] and event.entity_id == expected_event_entity_id[i] and event.operation == expected_event_operation_type[i] and event.attribute_name == expected_attribute_names[i] for i, event in enumerate(published_events) ] ) def test_publish_update_event_in_context_manager(): _, registration_queue = Notifier.register() dn_config = Config.configure_data_node(\"dn_config\") task_config = Config.configure_task(\"task_config\", print, [dn_config]) scenario_config = Config.configure_scenario( \"scenario_config\", [task_config], frequency=Frequency.DAILY, flag=\"test\" ) scenario_config.add_sequences({\"sequence_config\": [task_config]}) scenario = tp.create_scenario(scenario_config) cycle = scenario.cycle task = scenario.tasks[task_config.id] dn = scenario.data_nodes[dn_config.id] sequence = scenario.sequences[\"sequence_config\"] scenario.properties.update({\"description\": \"a scenario\"}) assert registration_queue.qsize() == 6 while registration_queue.qsize() > 0: registration_queue.get() # Test UPDATE Event in Context Manager assert registration_queue.qsize() == 0 # If multiple entities is in context, the last to enter will be the first to exit # So the published event will have the order starting with scenario first and ending with dn with dn as d, task as t, sequence as s, cycle as c, scenario as sc: sc.is_primary = True assert registration_queue.qsize() == 0 tp.set_primary(sc) assert registration_queue.qsize() == 0 sc.properties[\"flag\"] = \"production\" assert registration_queue.qsize() == 0 sc.properties.update({\"description\": \"a scenario\"}) assert registration_queue.qsize() == 0 sc.properties.pop(\"description\") assert registration_queue.qsize() == 0 sc.name = \"my_scenario\" assert registration_queue.qsize() == 0 c.name = \"another new cycle name\" assert registration_queue.qsize() == 0 c.properties[\"valid\"] = True assert registration_queue.qsize() == 0 c.properties.update({\"re_run_periodically\": True}) assert registration_queue.qsize() == 0 s.properties[\"name\"] = \"weather_forecast\" assert registration_queue.qsize() == 0 t.skippable = True assert registration_queue.qsize() == 0 t.properties[\"number_of_run\"] = 2 assert registration_queue.qsize() == 0 t.properties.update({\"debug\": True}) assert registration_queue.qsize() == 0 d.editor_id = \"another new editor id\" assert registration_queue.qsize() == 0 d.properties[\"sorted\"] = True assert registration_queue.qsize() == 0 d.properties.update({\"only_fetch_first_100\": True}) assert registration_queue.qsize() == 0 published_events = [] assert registration_queue.qsize() == 16 while registration_queue.qsize() != 0: published_events.append(registration_queue.get()) expected_event_types = [ EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.SCENARIO, EventEntityType.CYCLE, EventEntityType.CYCLE, EventEntityType.CYCLE, EventEntityType.SEQUENCE, EventEntityType.TASK, EventEntityType.TASK, EventEntityType.TASK, EventEntityType.DATA_NODE, EventEntityType.DATA_NODE, EventEntityType.DATA_NODE, ] expected_attribute_names = [ \"is_primary\", \"is_primary\", \"properties\", \"properties\", \"properties\", \"properties\", \"name\", \"properties\", \"properties\", \"properties\", \"skippable\", 
\"properties\", \"properties\", \"editor_id\", \"properties\", \"properties\", ] expected_event_entity_id = [ scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, scenario.id, cycle.id, cycle.id, cycle.id, sequence.id, task.id, task.id, task.id, dn.id, dn.id, dn.id, ] assert all( [ event.entity_type == expected_event_types[i] and event.entity_id == expected_event_entity_id[i] and event.operation == EventOperation.UPDATE and event.attribute_name == expected_attribute_names[i] for i, event in enumerate(published_events) ] ) def test_publish_submission_event(): _, registration_queue = Notifier.register() dn_config = Config.configure_data_node(\"dn_config\") task_config = Config.configure_task(\"task_config\", print, [dn_config]) scenario_config = Config.configure_scenario( \"scenario_config\", [task_config], frequency=Frequency.DAILY, flag=\"test\" ) scenario_config.add_sequences({\"sequence_config\": [task_config]}) scenario = tp.create_scenario(scenario_config) assert registration_queue.qsize() == 5 while registration_queue.qsize() > 0: registration_queue.get() # Test SUBMISSION Event job = scenario.submit()[0] assert registration_queue.qsize() == 6 published_events = [] while registration_queue.qsize() != 0: published_events.append(registration_queue.get()) expected_operations = [ EventOperation.CREATION, EventOperation.CREATION, EventOperation.UPDATE, EventOperation.UPDATE, EventOperation.UPDATE, EventOperation.SUBMISSION, ] expected_attribute_names = [None, None, \"jobs\", \"status\", \"submission_status\", None] expected_event_types = [ EventEntityType.SUBMISSION, EventEntityType.JOB, EventEntityType.SUBMISSION, EventEntityType.JOB, EventEntityType.SUBMISSION, EventEntityType.SCENARIO, ] expected_event_entity_id = [job.submit_id, job.id, job.submit_id, job.id, job.submit_id, scenario.id] assert all( [ event.entity_type == expected_event_types[i] and event.entity_id == expected_event_entity_id[i] and event.operation == expected_operations[i] and event.attribute_name == expected_attribute_names[i] for i, event in enumerate(published_events) ] ) def test_publish_deletion_event(): _, registration_queue = Notifier.register() dn_config = Config.configure_data_node(\"dn_config\") task_config = Config.configure_task(\"task_config\", print, [dn_config]) scenario_config = Config.configure_scenario( \"scenario_config\", [task_config], frequency=Frequency.DAILY, flag=\"test\" ) scenario_config.add_sequences({\"sequence_config\": [task_config]}) scenario = tp.create_scenario(scenario_config) cycle = scenario.cycle task = scenario.tasks[task_config.id] dn = scenario.data_nodes[dn_config.id] sequence = scenario.sequences[\"sequence_config\"] job = scenario.submit()[0] assert registration_queue.qsize() == 11 while registration_queue.qsize() > 0: registration_queue.get() # Test DELETION Event tp.delete(scenario.id) assert registration_queue.qsize() == 7 published_events = [] while registration_queue.qsize() != 0: published_events.append(registration_queue.get()) expected_event_types = [ EventEntityType.CYCLE, EventEntityType.SEQUENCE, EventEntityType.SCENARIO, EventEntityType.TASK, EventEntityType.JOB, EventEntityType.DATA_NODE, EventEntityType.SUBMISSION, ] expected_event_entity_id = [cycle.id, sequence.id, scenario.id, task.id, job.id, dn.id, job.submit_id] expected_event_operation_type = [EventOperation.DELETION] * len(expected_event_types) assert all( [ event.entity_type == expected_event_types[i] and event.entity_id == expected_event_entity_id[i] and event.operation == 
expected_event_operation_type[i] and event.attribute_name is None for i, event in enumerate(published_events) ] ) scenario = tp.create_scenario(scenario_config) cycle = scenario.cycle assert registration_queue.qsize() == 5 # only to clear the queue while registration_queue.qsize() != 0: registration_queue.get() tp.clean_all_entities_by_version() assert registration_queue.qsize() == 5 published_events = [] while registration_queue.qsize() != 0: published_events.append(registration_queue.get()) expected_event_types = [ EventEntityType.JOB, EventEntityType.CYCLE, EventEntityType.SCENARIO, EventEntityType.TASK, EventEntityType.DATA_NODE, ] expected_event_entity_id = [None, cycle.id, scenario.id, None, None] assert all( [ event.entity_type == expected_event_types[i] and event.entity_id == expected_event_entity_id[i] and event.operation == EventOperation.DELETION and event.attribute_name is None for i, event in enumerate(published_events) ] ) "} {"text": "import pytest from src.taipy.core.exceptions.exceptions import InvalidEventOperation from src.taipy.core.notification._topic import _Topic from src.taipy.core.notification.event import EventEntityType, EventOperation def test_general_topic_creation(): topic_1 = _Topic(None, None, None, None) assert topic_1.entity_type is None assert topic_1.entity_id is None assert topic_1.operation is None assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.SCENARIO, \"scenario_id\") assert topic_2.entity_type == EventEntityType.SCENARIO assert topic_2.entity_id == \"scenario_id\" assert topic_2.operation is None assert topic_2.attribute_name is None topic_3 = _Topic(None, None, EventOperation.CREATION) assert topic_3.entity_type is None assert topic_3.entity_id is None assert topic_3.operation == EventOperation.CREATION assert topic_3.attribute_name is None topic_4 = _Topic(None, None, EventOperation.UPDATE, \"properties\") assert topic_4.entity_type is None assert topic_4.entity_id is None assert topic_4.operation == EventOperation.UPDATE assert topic_4.attribute_name == \"properties\" topic_5 = _Topic(entity_type=EventEntityType.JOB, operation=EventOperation.DELETION) assert topic_5.entity_type == EventEntityType.JOB assert topic_5.entity_id is None assert topic_5.operation == EventOperation.DELETION assert topic_5.attribute_name is None topic_6 = _Topic(entity_type=EventEntityType.SEQUENCE) assert topic_6.entity_type == EventEntityType.SEQUENCE assert topic_6.entity_id is None assert topic_6.operation is None assert topic_6.attribute_name is None def test_topic_creation_cycle(): topic_1 = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.CYCLE assert topic_1.entity_id == \"cycle_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.UPDATE, \"frequency\") assert topic_2.entity_type == EventEntityType.CYCLE assert topic_2.entity_id == \"cycle_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"frequency\" topic_3 = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.CYCLE assert topic_3.entity_id == \"cycle_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.CREATION) assert topic_4.entity_type == EventEntityType.CYCLE assert topic_4.entity_id == \"cycle_id\" 
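    # CYCLE topics accept the CREATION, UPDATE, and DELETION operations; SUBMISSION is rejected below with InvalidEventOperation.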
assert topic_4.operation == EventOperation.CREATION assert topic_4.attribute_name is None # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.CREATION, \"frequency\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.DELETION, \"frequency\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.CYCLE, \"cycle_id\", attribute_name=\"frequency\") with pytest.raises(InvalidEventOperation): _ = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.SUBMISSION) with pytest.raises(InvalidEventOperation): _ = _Topic(EventEntityType.CYCLE, \"cycle_id\", EventOperation.SUBMISSION, \"frequency\") def test_topic_creation_scenario(): topic_1 = _Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.SCENARIO assert topic_1.entity_id == \"scenario_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.UPDATE, \"is_primary\") assert topic_2.entity_type == EventEntityType.SCENARIO assert topic_2.entity_id == \"scenario_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"is_primary\" topic_3 = _Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.SCENARIO assert topic_3.entity_id == \"scenario_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.SUBMISSION) assert topic_4.entity_type == EventEntityType.SCENARIO assert topic_4.entity_id == \"scenario_id\" assert topic_4.operation == EventOperation.SUBMISSION assert topic_4.attribute_name is None topic_5 = _Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.UPDATE, \"properties\") assert topic_5.entity_type == EventEntityType.SCENARIO assert topic_5.entity_id == \"scenario_id\" assert topic_5.operation == EventOperation.UPDATE assert topic_5.attribute_name == \"properties\" # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.CREATION, \"is_primary\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.DELETION, \"is_primary\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SCENARIO, \"scenario_id\", EventOperation.SUBMISSION, \"is_primary\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SCENARIO, \"scenario_id\", attribute_name=\"is_primary\") def test_topic_creation_sequence(): topic_1 = _Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.SEQUENCE assert topic_1.entity_id == \"sequence_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.UPDATE, \"subscribers\") assert topic_2.entity_type == EventEntityType.SEQUENCE assert topic_2.entity_id == \"sequence_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"subscribers\" topic_3 = _Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.SEQUENCE assert topic_3.entity_id == 
\"sequence_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.SUBMISSION) assert topic_4.entity_type == EventEntityType.SEQUENCE assert topic_4.entity_id == \"sequence_id\" assert topic_4.operation == EventOperation.SUBMISSION assert topic_4.attribute_name is None topic_5 = _Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.DELETION) assert topic_5.entity_type == EventEntityType.SEQUENCE assert topic_5.entity_id == \"sequence_id\" assert topic_5.operation == EventOperation.DELETION assert topic_5.attribute_name is None # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.CREATION, \"subscribers\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.DELETION, \"subscribers\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SEQUENCE, \"sequence_id\", EventOperation.SUBMISSION, \"subscribers\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.SEQUENCE, \"sequence_id\", attribute_name=\"subscribers\") def test_topic_creation_task(): topic_1 = _Topic(EventEntityType.TASK, \"task_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.TASK assert topic_1.entity_id == \"task_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.TASK, \"task_id\", EventOperation.UPDATE, \"function\") assert topic_2.entity_type == EventEntityType.TASK assert topic_2.entity_id == \"task_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"function\" topic_3 = _Topic(EventEntityType.TASK, \"task_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.TASK assert topic_3.entity_id == \"task_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(EventEntityType.TASK, \"task_id\", EventOperation.SUBMISSION) assert topic_4.entity_type == EventEntityType.TASK assert topic_4.entity_id == \"task_id\" assert topic_4.operation == EventOperation.SUBMISSION assert topic_4.attribute_name is None topic_5 = _Topic(EventEntityType.TASK, \"task_id\", EventOperation.SUBMISSION) assert topic_5.entity_type == EventEntityType.TASK assert topic_5.entity_id == \"task_id\" assert topic_5.operation == EventOperation.SUBMISSION assert topic_5.attribute_name is None # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.TASK, \"task_id\", EventOperation.CREATION, \"function\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.TASK, \"task_id\", EventOperation.DELETION, \"function\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.TASK, \"task_id\", EventOperation.SUBMISSION, \"function\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.TASK, \"task_id\", attribute_name=\"function\") def test_topic_creation_datanode(): topic_1 = _Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.DATA_NODE assert topic_1.entity_id == \"dn_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.UPDATE, \"properties\") assert topic_2.entity_type == 
EventEntityType.DATA_NODE assert topic_2.entity_id == \"dn_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"properties\" topic_3 = _Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.DATA_NODE assert topic_3.entity_id == \"dn_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(None, \"dn_id\", EventOperation.UPDATE, \"scope\") assert topic_4.entity_type is None assert topic_4.entity_id == \"dn_id\" assert topic_4.operation == EventOperation.UPDATE assert topic_4.attribute_name == \"scope\" # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.CREATION, \"properties\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.DELETION, \"properties\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.DATA_NODE, \"dn_id\", attribute_name=\"properties\") with pytest.raises(InvalidEventOperation): _ = _Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.SUBMISSION) # with pytest.raises(InvalidEventOperation): # _ = Topic(EventEntityType.DATA_NODE, \"dn_id\", EventOperation.SUBMISSION, \"properties\") def test_topic_creation_job(): topic_1 = _Topic(EventEntityType.JOB, \"job_id\", EventOperation.CREATION) assert topic_1.entity_type == EventEntityType.JOB assert topic_1.entity_id == \"job_id\" assert topic_1.operation == EventOperation.CREATION assert topic_1.attribute_name is None topic_2 = _Topic(EventEntityType.JOB, \"job_id\", EventOperation.UPDATE, \"force\") assert topic_2.entity_type == EventEntityType.JOB assert topic_2.entity_id == \"job_id\" assert topic_2.operation == EventOperation.UPDATE assert topic_2.attribute_name == \"force\" topic_3 = _Topic(EventEntityType.JOB, \"job_id\", EventOperation.DELETION) assert topic_3.entity_type == EventEntityType.JOB assert topic_3.entity_id == \"job_id\" assert topic_3.operation == EventOperation.DELETION assert topic_3.attribute_name is None topic_4 = _Topic(EventEntityType.JOB, \"job_id\", EventOperation.CREATION) assert topic_4.entity_type == EventEntityType.JOB assert topic_4.entity_id == \"job_id\" assert topic_4.operation == EventOperation.CREATION assert topic_4.attribute_name is None # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.JOB, \"job_id\", EventOperation.CREATION, \"force\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.JOB, \"job_id\", EventOperation.DELETION, \"force\") # with pytest.raises(InvalidEventAttributeName): # _ = Topic(EventEntityType.JOB, \"job_id\", attribute_name=\"force\") with pytest.raises(InvalidEventOperation): _ = _Topic(EventEntityType.JOB, \"job_id\", EventOperation.SUBMISSION) # with pytest.raises(InvalidEventOperation): # _ = Topic(EventEntityType.JOB, \"job_id\", EventOperation.SUBMISSION, \"force\") def test_topic_equal(): assert _Topic() == _Topic() assert _Topic(EventEntityType.SCENARIO) == _Topic(EventEntityType.SCENARIO) assert _Topic(entity_id=\"sequence_id\") == _Topic(entity_id=\"sequence_id\") assert _Topic(operation=EventOperation.SUBMISSION) == _Topic(operation=EventOperation.SUBMISSION) assert _Topic(EventEntityType.JOB, \"JOB_id\", EventOperation.UPDATE, \"status\") == _Topic( EventEntityType.JOB, \"JOB_id\", EventOperation.UPDATE, \"status\" ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the 
\"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pytest from src.taipy.core.exceptions.exceptions import InvalidEventAttributeName, InvalidEventOperation from src.taipy.core.notification.event import Event, EventEntityType, EventOperation from taipy.config.common.frequency import Frequency def test_event_creation_cycle(): event_1 = Event( entity_type=EventEntityType.CYCLE, operation=EventOperation.CREATION, entity_id=\"cycle_id\", ) assert event_1.creation_date is not None assert event_1.entity_type == EventEntityType.CYCLE assert event_1.entity_id == \"cycle_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( entity_type=EventEntityType.CYCLE, operation=EventOperation.UPDATE, entity_id=\"cycle_id\", attribute_name=\"frequency\", attribute_value=Frequency.DAILY, ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.CYCLE assert event_2.entity_id == \"cycle_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"frequency\" event_3 = Event(entity_type=EventEntityType.CYCLE, entity_id=\"cycle_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == EventEntityType.CYCLE assert event_3.entity_id == \"cycle_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.CYCLE, operation=EventOperation.CREATION, entity_id=\"cycle_id\", attribute_name=\"frequency\", ) with pytest.raises(InvalidEventAttributeName): _ = Event(EventEntityType.CYCLE, EventOperation.DELETION, entity_id=\"cycle_id\", attribute_name=\"frequency\") with pytest.raises(InvalidEventOperation): _ = Event( entity_type=EventEntityType.CYCLE, operation=EventOperation.SUBMISSION, entity_id=\"cycle_id\", ) with pytest.raises(InvalidEventOperation): _ = Event( entity_type=EventEntityType.CYCLE, operation=EventOperation.SUBMISSION, entity_id=\"cycle_id\", attribute_name=\"frequency\", ) def test_event_creation_scenario(): event_1 = Event(entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.CREATION) assert event_1.creation_date is not None assert event_1.entity_type == EventEntityType.SCENARIO assert event_1.entity_id == \"scenario_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.UPDATE, attribute_name=\"is_primary\", attribute_value=True, ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.SCENARIO assert event_2.entity_id == \"scenario_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"is_primary\" assert event_2.attribute_value is True event_3 = Event(entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == 
EventEntityType.SCENARIO assert event_3.entity_id == \"scenario_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None event_4 = Event(entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.SUBMISSION) assert event_4.creation_date is not None assert event_4.entity_type == EventEntityType.SCENARIO assert event_4.entity_id == \"scenario_id\" assert event_4.operation == EventOperation.SUBMISSION assert event_4.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.CREATION, attribute_name=\"is_primary\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.DELETION, attribute_name=\"is_primary\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SCENARIO, entity_id=\"scenario_id\", operation=EventOperation.SUBMISSION, attribute_name=\"is_primary\", ) def test_event_creation_sequence(): event_1 = Event(entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.CREATION) assert event_1.creation_date is not None assert event_1.entity_type == EventEntityType.SEQUENCE assert event_1.entity_id == \"sequence_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.UPDATE, attribute_name=\"subscribers\", attribute_value=object(), ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.SEQUENCE assert event_2.entity_id == \"sequence_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"subscribers\" event_3 = Event(entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == EventEntityType.SEQUENCE assert event_3.entity_id == \"sequence_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None event_4 = Event(entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.SUBMISSION) assert event_4.creation_date is not None assert event_4.entity_type == EventEntityType.SEQUENCE assert event_4.entity_id == \"sequence_id\" assert event_4.operation == EventOperation.SUBMISSION assert event_4.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.CREATION, attribute_name=\"subscribers\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.DELETION, attribute_name=\"subscribers\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.SEQUENCE, entity_id=\"sequence_id\", operation=EventOperation.SUBMISSION, attribute_name=\"subscribers\", ) def test_event_creation_task(): event_1 = Event(entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.CREATION) assert event_1.creation_date is not None assert event_1.entity_type == EventEntityType.TASK assert event_1.entity_id == \"task_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( 
entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.UPDATE, attribute_name=\"function\", ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.TASK assert event_2.entity_id == \"task_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"function\" event_3 = Event(entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == EventEntityType.TASK assert event_3.entity_id == \"task_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None event_4 = Event(entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.SUBMISSION) assert event_4.creation_date is not None assert event_4.entity_type == EventEntityType.TASK assert event_4.entity_id == \"task_id\" assert event_4.operation == EventOperation.SUBMISSION assert event_4.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.CREATION, attribute_name=\"function\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.DELETION, attribute_name=\"function\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.TASK, entity_id=\"task_id\", operation=EventOperation.SUBMISSION, attribute_name=\"function\", ) def test_event_creation_datanode(): event_1 = Event(entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.CREATION) assert event_1.creation_date is not None assert event_1.entity_type == EventEntityType.DATA_NODE assert event_1.entity_id == \"dn_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.UPDATE, attribute_name=\"properties\", ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.DATA_NODE assert event_2.entity_id == \"dn_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"properties\" event_3 = Event(entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == EventEntityType.DATA_NODE assert event_3.entity_id == \"dn_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.CREATION, attribute_name=\"properties\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.DELETION, attribute_name=\"properties\", ) with pytest.raises(InvalidEventOperation): _ = Event(entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.SUBMISSION) with pytest.raises(InvalidEventOperation): _ = Event( entity_type=EventEntityType.DATA_NODE, entity_id=\"dn_id\", operation=EventOperation.SUBMISSION, attribute_name=\"properties\", ) def test_event_creation_job(): event_1 = Event(entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.CREATION) assert event_1.creation_date is not None assert event_1.entity_type 
== EventEntityType.JOB assert event_1.entity_id == \"job_id\" assert event_1.operation == EventOperation.CREATION assert event_1.attribute_name is None event_2 = Event( entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.UPDATE, attribute_name=\"force\" ) assert event_2.creation_date is not None assert event_2.entity_type == EventEntityType.JOB assert event_2.entity_id == \"job_id\" assert event_2.operation == EventOperation.UPDATE assert event_2.attribute_name == \"force\" event_3 = Event(entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.DELETION) assert event_3.creation_date is not None assert event_3.entity_type == EventEntityType.JOB assert event_3.entity_id == \"job_id\" assert event_3.operation == EventOperation.DELETION assert event_3.attribute_name is None with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.CREATION, attribute_name=\"force\", ) with pytest.raises(InvalidEventAttributeName): _ = Event( entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.DELETION, attribute_name=\"force\", ) with pytest.raises(InvalidEventOperation): _ = Event(entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.SUBMISSION) with pytest.raises(InvalidEventOperation): _ = Event( entity_type=EventEntityType.JOB, entity_id=\"job_id\", operation=EventOperation.SUBMISSION, attribute_name=\"force\", ) "} {"text": "from dataclasses import dataclass, field from math import exp from queue import SimpleQueue from colorama import init from src.taipy.core import taipy as tp from src.taipy.core.config import scenario_config from src.taipy.core.job.status import Status from src.taipy.core.notification.core_event_consumer import CoreEventConsumerBase from src.taipy.core.notification.event import Event, EventEntityType, EventOperation from src.taipy.core.notification.notifier import Notifier from taipy.config import Config, Frequency from tests.core.utils import assert_true_after_time class Snapshot: \"\"\" A captured snapshot of the recording core events consumer. \"\"\" def __init__(self): self.collected_events = [] self.entity_type_collected = {} self.operation_collected = {} self.attr_name_collected = {} def capture_event(self, event): self.collected_events.append(event) self.entity_type_collected[event.entity_type] = self.entity_type_collected.get(event.entity_type, 0) + 1 self.operation_collected[event.operation] = self.operation_collected.get(event.operation, 0) + 1 if event.attribute_name: self.attr_name_collected[event.attribute_name] = self.attr_name_collected.get(event.attribute_name, 0) + 1 class RecordingConsumer(CoreEventConsumerBase): \"\"\" A straightforward and no-thread core events consumer that allows to capture snapshots of received events. \"\"\" def __init__(self, registration_id: str, queue: SimpleQueue): super().__init__(registration_id, queue) def capture(self) -> Snapshot: \"\"\" Capture a snapshot of events received between the previous snapshot (or from the start of this consumer). 
\"\"\" snapshot = Snapshot() while not self.queue.empty(): event = self.queue.get() snapshot.capture_event(event) return snapshot def process_event(self, event: Event): # Nothing todo pass def start(self): # Nothing to do here pass def stop(self): # Nothing to do here either pass def identity(x): return x def test_events_published_for_scenario_creation(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) register_id_0, register_queue_0 = Notifier.register() all_evts = RecordingConsumer(register_id_0, register_queue_0) all_evts.start() # Create a scenario only trigger 6 creation events (for cycle, data node(x2), task, sequence and scenario) tp.create_scenario(sc_config) snapshot = all_evts.capture() assert len(snapshot.collected_events) == 6 assert snapshot.entity_type_collected.get(EventEntityType.CYCLE, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.DATA_NODE, 0) == 2 assert snapshot.entity_type_collected.get(EventEntityType.TASK, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.SEQUENCE, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.SCENARIO, 0) == 1 assert snapshot.operation_collected.get(EventOperation.CREATION, 0) == 6 all_evts.stop() def test_no_event_published_for_getting_scenario(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) scenario = tp.create_scenario(sc_config) register_id_0, register_queue_0 = Notifier.register() all_evts = RecordingConsumer(register_id_0, register_queue_0) all_evts.start() # Get all scenarios does not trigger any event tp.get_scenarios() snapshot = all_evts.capture() assert len(snapshot.collected_events) == 0 # Get one scenario does not trigger any event tp.get(scenario.id) snapshot = all_evts.capture() assert len(snapshot.collected_events) == 0 all_evts.stop() def test_events_published_for_writing_dn(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) scenario = tp.create_scenario(sc_config) register_id_0, register_queue_0 = Notifier.register() all_evts = RecordingConsumer(register_id_0, register_queue_0) all_evts.start() # Write input manually trigger 4 data node update events # for last_edit_date, editor_id, editor_expiration_date and edit_in_progress scenario.the_input.write(\"test\") snapshot = all_evts.capture() assert len(snapshot.collected_events) == 4 assert snapshot.entity_type_collected.get(EventEntityType.CYCLE, 0) == 0 assert snapshot.entity_type_collected.get(EventEntityType.DATA_NODE, 0) == 4 assert snapshot.entity_type_collected.get(EventEntityType.TASK, 0) == 0 assert snapshot.entity_type_collected.get(EventEntityType.SEQUENCE, 0) == 0 
assert snapshot.entity_type_collected.get(EventEntityType.SCENARIO, 0) == 0 assert snapshot.operation_collected.get(EventOperation.CREATION, 0) == 0 assert snapshot.operation_collected.get(EventOperation.UPDATE, 0) == 4 all_evts.stop() def test_events_published_for_scenario_submission(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) scenario = tp.create_scenario(sc_config) scenario.the_input.write(\"test\") register_id_0, register_queue_0 = Notifier.register() all_evts = RecordingConsumer(register_id_0, register_queue_0) all_evts.start() # Submit a scenario triggers: # 1 scenario submission event # 7 dn update events (for last_edit_date, editor_id(x2), editor_expiration_date(x2) and edit_in_progress(x2)) # 1 job creation event # 3 job update events (for status: PENDING, RUNNING and COMPLETED) # 1 submission creation event # 1 submission update event for jobs # 3 submission update events (for status: PENDING, RUNNING and COMPLETED) scenario.submit() snapshot = all_evts.capture() assert len(snapshot.collected_events) == 17 assert snapshot.entity_type_collected.get(EventEntityType.CYCLE, 0) == 0 assert snapshot.entity_type_collected.get(EventEntityType.DATA_NODE, 0) == 7 assert snapshot.entity_type_collected.get(EventEntityType.TASK, 0) == 0 assert snapshot.entity_type_collected.get(EventEntityType.SEQUENCE, 0) == 0 assert snapshot.entity_type_collected.get(EventEntityType.SCENARIO, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.JOB, 0) == 4 assert snapshot.entity_type_collected.get(EventEntityType.SUBMISSION, 0) == 5 assert snapshot.operation_collected.get(EventOperation.CREATION, 0) == 2 assert snapshot.operation_collected.get(EventOperation.UPDATE, 0) == 14 assert snapshot.operation_collected.get(EventOperation.SUBMISSION, 0) == 1 assert snapshot.attr_name_collected[\"last_edit_date\"] == 1 assert snapshot.attr_name_collected[\"editor_id\"] == 2 assert snapshot.attr_name_collected[\"editor_expiration_date\"] == 2 assert snapshot.attr_name_collected[\"edit_in_progress\"] == 2 assert snapshot.attr_name_collected[\"status\"] == 3 assert snapshot.attr_name_collected[\"jobs\"] == 1 assert snapshot.attr_name_collected[\"submission_status\"] == 3 all_evts.stop() def test_events_published_for_scenario_deletion(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) scenario = tp.create_scenario(sc_config) scenario.the_input.write(\"test\") scenario.submit() register_id_0, register_queue_0 = Notifier.register() all_evts = RecordingConsumer(register_id_0, register_queue_0) all_evts.start() # Delete a scenario trigger 8 deletion events # 1 scenario deletion event # 1 cycle deletion event # 2 dn deletion events (for input and output) # 1 task deletion event # 1 sequence deletion event # 1 job deletion event # 1 submission deletion event tp.delete(scenario.id) snapshot = all_evts.capture() assert len(snapshot.collected_events) == 8 assert 
snapshot.entity_type_collected.get(EventEntityType.CYCLE, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.DATA_NODE, 0) == 2 assert snapshot.entity_type_collected.get(EventEntityType.TASK, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.SEQUENCE, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.SCENARIO, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.SUBMISSION, 0) == 1 assert snapshot.entity_type_collected.get(EventEntityType.JOB, 0) == 1 assert snapshot.operation_collected.get(EventOperation.UPDATE, 0) == 0 assert snapshot.operation_collected.get(EventOperation.SUBMISSION, 0) == 0 assert snapshot.operation_collected.get(EventOperation.DELETION, 0) == 8 all_evts.stop() def test_job_events(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) register_id, register_queue = Notifier.register(entity_type=EventEntityType.JOB) consumer = RecordingConsumer(register_id, register_queue) consumer.start() # Create scenario scenario = tp.create_scenario(sc_config) snapshot = consumer.capture() assert len(snapshot.collected_events) == 0 # Submit scenario scenario.submit() snapshot = consumer.capture() # 2 events expected: one for creation, another for status update assert len(snapshot.collected_events) == 2 assert snapshot.collected_events[0].operation == EventOperation.CREATION assert snapshot.collected_events[0].entity_type == EventEntityType.JOB assert snapshot.collected_events[0].metadata.get(\"task_config_id\") == task_config.id assert snapshot.collected_events[1].operation == EventOperation.UPDATE assert snapshot.collected_events[1].entity_type == EventEntityType.JOB assert snapshot.collected_events[1].metadata.get(\"task_config_id\") == task_config.id assert snapshot.collected_events[1].attribute_name == \"status\" assert snapshot.collected_events[1].attribute_value == Status.BLOCKED job = tp.get_jobs()[0] tp.cancel_job(job) snapshot = consumer.capture() assert len(snapshot.collected_events) == 1 event = snapshot.collected_events[0] assert event.metadata.get(\"task_config_id\") == task_config.id assert event.attribute_name == \"status\" assert event.attribute_value == Status.CANCELED consumer.stop() def test_scenario_events(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) register_id, register_queue = Notifier.register(entity_type=EventEntityType.SCENARIO) consumer = RecordingConsumer(register_id, register_queue) consumer.start() scenario = tp.create_scenario(sc_config) snapshot = consumer.capture() assert len(snapshot.collected_events) == 1 assert snapshot.collected_events[0].operation == EventOperation.CREATION assert snapshot.collected_events[0].entity_type == EventEntityType.SCENARIO assert snapshot.collected_events[0].metadata.get(\"config_id\") == scenario.config_id scenario.submit() snapshot = consumer.capture() assert len(snapshot.collected_events) == 1 assert 
snapshot.collected_events[0].operation == EventOperation.SUBMISSION assert snapshot.collected_events[0].entity_type == EventEntityType.SCENARIO assert snapshot.collected_events[0].metadata.get(\"config_id\") == scenario.config_id # Delete scenario tp.delete(scenario.id) snapshot = consumer.capture() assert len(snapshot.collected_events) == 1 assert snapshot.collected_events[0].operation == EventOperation.DELETION assert snapshot.collected_events[0].entity_type == EventEntityType.SCENARIO consumer.stop() def test_data_node_events(): input_config = Config.configure_data_node(\"the_input\") output_config = Config.configure_data_node(\"the_output\") task_config = Config.configure_task(\"the_task\", identity, input=input_config, output=output_config) sc_config = Config.configure_scenario( \"the_scenario\", task_configs=[task_config], frequency=Frequency.DAILY, sequences={\"the_seq\": [task_config]} ) register_id, register_queue = Notifier.register(entity_type=EventEntityType.DATA_NODE) consumer = RecordingConsumer(register_id, register_queue) consumer.start() scenario = tp.create_scenario(sc_config) snapshot = consumer.capture() # We expect two creation events since we have two data nodes: assert len(snapshot.collected_events) == 2 assert snapshot.collected_events[0].operation == EventOperation.CREATION assert snapshot.collected_events[0].entity_type == EventEntityType.DATA_NODE assert snapshot.collected_events[0].metadata.get(\"config_id\") in [output_config.id, input_config.id] assert snapshot.collected_events[1].operation == EventOperation.CREATION assert snapshot.collected_events[1].entity_type == EventEntityType.DATA_NODE assert snapshot.collected_events[1].metadata.get(\"config_id\") in [output_config.id, input_config.id] # Delete scenario tp.delete(scenario.id) snapshot = consumer.capture() assert len(snapshot.collected_events) == 2 assert snapshot.collected_events[0].operation == EventOperation.DELETION assert snapshot.collected_events[0].entity_type == EventEntityType.DATA_NODE assert snapshot.collected_events[1].operation == EventOperation.DELETION assert snapshot.collected_events[1].entity_type == EventEntityType.DATA_NODE consumer.stop() "} {"text": "from queue import SimpleQueue from src.taipy.core import taipy as tp from src.taipy.core.notification.core_event_consumer import CoreEventConsumerBase from src.taipy.core.notification.event import Event, EventEntityType, EventOperation from src.taipy.core.notification.notifier import Notifier from taipy.config import Config, Frequency from tests.core.utils import assert_true_after_time class AllCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.event_collected = 0 self.event_entity_type_collected: dict = {} self.event_operation_collected: dict = {} super().__init__(registration_id, queue) def process_event(self, event: Event): self.event_collected += 1 self.event_entity_type_collected[event.entity_type] = ( self.event_entity_type_collected.get(event.entity_type, 0) + 1 ) self.event_operation_collected[event.operation] = self.event_operation_collected.get(event.operation, 0) + 1 class ScenarioCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.scenario_event_collected = 0 self.event_operation_collected: dict = {} super().__init__(registration_id, queue) def process_event(self, event: Event): self.scenario_event_collected += 1 self.event_operation_collected[event.operation] = 
self.event_operation_collected.get(event.operation, 0) + 1 class TaskCreationCoreEventConsumerProcessor(CoreEventConsumerBase): def __init__(self, registration_id: str, queue: SimpleQueue): self.task_event_collected = 0 self.creation_event_operation_collected = 0 super().__init__(registration_id, queue) def process_event(self, event: Event): self.task_event_collected += 1 self.creation_event_operation_collected += 1 def test_core_event_consumer(): register_id_0, register_queue_0 = Notifier.register() all_evt_csumer_0 = AllCoreEventConsumerProcessor(register_id_0, register_queue_0) register_id_1, register_queue_1 = Notifier.register(entity_type=EventEntityType.SCENARIO) sc_evt_csumer_1 = ScenarioCoreEventConsumerProcessor(register_id_1, register_queue_1) register_id_2, register_queue_2 = Notifier.register( entity_type=EventEntityType.TASK, operation=EventOperation.CREATION ) task_creation_evt_csumer_2 = TaskCreationCoreEventConsumerProcessor(register_id_2, register_queue_2) all_evt_csumer_0.start() sc_evt_csumer_1.start() task_creation_evt_csumer_2.start() dn_config = Config.configure_data_node(\"dn_config\") task_config = Config.configure_task(\"task_config\", print, [dn_config]) scenario_config = Config.configure_scenario( \"scenario_config\", [task_config], frequency=Frequency.DAILY, sequences={\"seq\": [task_config]} ) # Creating a scenario triggers 5 creation events scenario = tp.create_scenario(scenario_config) assert_true_after_time(lambda: all_evt_csumer_0.event_collected == 5, time=10) assert_true_after_time(lambda: len(all_evt_csumer_0.event_entity_type_collected) == 5, time=10) assert_true_after_time(lambda: all_evt_csumer_0.event_operation_collected[EventOperation.CREATION] == 5, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.scenario_event_collected == 1, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.event_operation_collected[EventOperation.CREATION] == 1, time=10) assert_true_after_time(lambda: len(sc_evt_csumer_1.event_operation_collected) == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.task_event_collected == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.creation_event_operation_collected == 1, time=10) # Deleting a scenario triggers 5 deletion events tp.delete(scenario.id) assert_true_after_time(lambda: all_evt_csumer_0.event_collected == 10, time=10) assert_true_after_time(lambda: len(all_evt_csumer_0.event_entity_type_collected) == 5, time=10) assert_true_after_time(lambda: all_evt_csumer_0.event_operation_collected[EventOperation.DELETION] == 5, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.scenario_event_collected == 2, time=10) assert_true_after_time(lambda: sc_evt_csumer_1.event_operation_collected[EventOperation.DELETION] == 1, time=10) assert_true_after_time(lambda: len(sc_evt_csumer_1.event_operation_collected) == 2, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.task_event_collected == 1, time=10) assert_true_after_time(lambda: task_creation_evt_csumer_2.creation_event_operation_collected == 1, time=10) all_evt_csumer_0.stop() sc_evt_csumer_1.stop() task_creation_evt_csumer_2.stop() "} {"text": " from queue import SimpleQueue from src.taipy.core.notification import EventEntityType, EventOperation from src.taipy.core.notification._registration import _Registration from src.taipy.core.notification._topic import _Topic def test_create_registration(): registration_0 = _Registration() assert isinstance(registration_0.registration_id, str) assert 
registration_0.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_0.queue, SimpleQueue) assert registration_0.queue.qsize() == 0 assert isinstance(registration_0.topic, _Topic) assert registration_0.topic.entity_type is None assert registration_0.topic.entity_id is None assert registration_0.topic.operation is None assert registration_0.topic.attribute_name is None registration_1 = _Registration( entity_type=EventEntityType.SCENARIO, entity_id=\"SCENARIO_scenario_id\", operation=EventOperation.CREATION ) assert isinstance(registration_1.registration_id, str) assert registration_1.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_1.queue, SimpleQueue) assert registration_1.queue.qsize() == 0 assert isinstance(registration_1.topic, _Topic) assert registration_1.topic.entity_type == EventEntityType.SCENARIO assert registration_1.topic.entity_id == \"SCENARIO_scenario_id\" assert registration_1.topic.operation == EventOperation.CREATION assert registration_1.topic.attribute_name is None registration_2 = _Registration( entity_type=EventEntityType.SEQUENCE, entity_id=\"SEQUENCE_scenario_id\", operation=EventOperation.UPDATE, attribute_name=\"tasks\", ) assert isinstance(registration_2.registration_id, str) assert registration_2.registration_id.startswith(_Registration._ID_PREFIX) assert isinstance(registration_2.queue, SimpleQueue) assert registration_2.queue.qsize() == 0 assert isinstance(registration_2.topic, _Topic) assert registration_2.topic.entity_type == EventEntityType.SEQUENCE assert registration_2.topic.entity_id == \"SEQUENCE_scenario_id\" assert registration_2.topic.operation == EventOperation.UPDATE assert registration_2.topic.attribute_name == \"tasks\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
import datetime from datetime import timedelta from src.taipy.core import CycleId from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.cycle.cycle import Cycle from taipy.config.common.frequency import Frequency def test_create_cycle_entity(current_datetime): cycle_1 = Cycle( Frequency.DAILY, {\"key\": \"value\"}, creation_date=current_datetime, start_date=current_datetime, end_date=current_datetime, name=\"foo\", ) assert cycle_1.id is not None assert cycle_1.name == \"foo\" assert cycle_1.properties == {\"key\": \"value\"} assert cycle_1.creation_date == current_datetime assert cycle_1.start_date == current_datetime assert cycle_1.end_date == current_datetime assert cycle_1.key == \"value\" assert cycle_1.frequency == Frequency.DAILY cycle_2 = Cycle(Frequency.YEARLY, {}, current_datetime, current_datetime, current_datetime) assert cycle_2.name == current_datetime.strftime(\"%Y\") assert cycle_2.frequency == Frequency.YEARLY cycle_3 = Cycle(Frequency.MONTHLY, {}, current_datetime, current_datetime, current_datetime) assert cycle_3.name == current_datetime.strftime(\"%B %Y\") assert cycle_3.frequency == Frequency.MONTHLY cycle_4 = Cycle(Frequency.WEEKLY, {}, current_datetime, current_datetime, current_datetime) assert cycle_4.name == current_datetime.strftime(\"Week %W %Y, from %d. %B\") assert cycle_4.frequency == Frequency.WEEKLY cycle_5 = Cycle(Frequency.DAILY, {}, current_datetime, current_datetime, current_datetime) assert cycle_5.name == current_datetime.strftime(\"%A, %d. %B %Y\") assert cycle_5.frequency == Frequency.DAILY def test_cycle_name(current_datetime): start_date = datetime.datetime(2023, 1, 2) cycle = Cycle(Frequency.DAILY, {}, current_datetime, start_date, start_date, \"name\", CycleId(\"id\")) assert cycle.name == \"name\" cycle = Cycle(Frequency.DAILY, {}, current_datetime, start_date, start_date, None, CycleId(\"id\")) assert cycle.name == \"Monday, 02. January 2023\" cycle = Cycle(Frequency.WEEKLY, {}, current_datetime, start_date, start_date, None, CycleId(\"id\")) assert cycle.name == \"Week 01 2023, from 02. 
January\" cycle = Cycle(Frequency.MONTHLY, {}, current_datetime, start_date, start_date, None, CycleId(\"id\")) assert cycle.name == \"January 2023\" cycle = Cycle(Frequency.QUARTERLY, {}, current_datetime, start_date, start_date, None, CycleId(\"id\")) assert cycle.name == \"2023 Q1\" cycle = Cycle(Frequency.YEARLY, {}, current_datetime, start_date, start_date, None, CycleId(\"id\")) assert cycle.name == \"2023\" def test_cycle_label(current_datetime): cycle = Cycle( Frequency.DAILY, {\"key\": \"value\"}, creation_date=current_datetime, start_date=current_datetime, end_date=current_datetime, ) assert cycle.get_label() == cycle.name assert cycle.get_simple_label() == cycle.name cycle._properties[\"label\"] = \"label\" assert cycle.get_label() == \"label\" assert cycle.get_simple_label() == \"label\" def test_add_property_to_scenario(current_datetime): cycle = Cycle( Frequency.WEEKLY, {\"key\": \"value\"}, current_datetime, current_datetime, current_datetime, name=\"foo\", ) assert cycle.properties == {\"key\": \"value\"} assert cycle.key == \"value\" cycle.properties[\"new_key\"] = \"new_value\" assert cycle.properties == {\"key\": \"value\", \"new_key\": \"new_value\"} assert cycle.key == \"value\" assert cycle.new_key == \"new_value\" def test_auto_set_and_reload(current_datetime): cycle_1 = Cycle( Frequency.WEEKLY, {\"key\": \"value\"}, current_datetime, current_datetime, current_datetime, name=\"foo\", ) _CycleManager._set(cycle_1) cycle_2 = _CycleManager._get(cycle_1) # auto set & reload on frequency attribute assert cycle_1.frequency == Frequency.WEEKLY cycle_1.frequency = Frequency.YEARLY assert cycle_1.frequency == Frequency.YEARLY assert cycle_2.frequency == Frequency.YEARLY cycle_2.frequency = Frequency.MONTHLY assert cycle_1.frequency == Frequency.MONTHLY assert cycle_2.frequency == Frequency.MONTHLY new_datetime_1 = current_datetime + timedelta(1) new_datetime_2 = current_datetime + timedelta(2) # auto set & reload on creation_date attribute assert cycle_1.creation_date == current_datetime assert cycle_2.creation_date == current_datetime cycle_1.creation_date = new_datetime_1 assert cycle_1.creation_date == new_datetime_1 assert cycle_2.creation_date == new_datetime_1 cycle_2.creation_date = new_datetime_2 assert cycle_1.creation_date == new_datetime_2 assert cycle_2.creation_date == new_datetime_2 # auto set & reload on start_date attribute assert cycle_1.start_date == current_datetime assert cycle_2.start_date == current_datetime cycle_1.start_date = new_datetime_1 assert cycle_1.start_date == new_datetime_1 assert cycle_2.start_date == new_datetime_1 cycle_2.start_date = new_datetime_2 assert cycle_1.start_date == new_datetime_2 assert cycle_2.start_date == new_datetime_2 # auto set & reload on end_date attribute assert cycle_1.end_date == current_datetime assert cycle_2.end_date == current_datetime cycle_1.end_date = new_datetime_1 assert cycle_1.end_date == new_datetime_1 assert cycle_2.end_date == new_datetime_1 cycle_2.end_date = new_datetime_2 assert cycle_1.end_date == new_datetime_2 assert cycle_2.end_date == new_datetime_2 # auto set & reload on names attribute assert cycle_1.name == \"foo\" assert cycle_2.name == \"foo\" cycle_1.name = \"fed\" assert cycle_1.name == \"fed\" assert cycle_2.name == \"fed\" cycle_2.name = \"def\" assert cycle_1.name == \"def\" assert cycle_2.name == \"def\" # auto set & reload on properties attribute assert cycle_1.properties == {\"key\": \"value\"} assert cycle_2.properties == {\"key\": \"value\"} cycle_1._properties[\"qux\"] = 4 
assert cycle_1.properties[\"qux\"] == 4 assert cycle_2.properties[\"qux\"] == 4 assert cycle_1.properties == {\"key\": \"value\", \"qux\": 4} assert cycle_2.properties == {\"key\": \"value\", \"qux\": 4} cycle_2._properties[\"qux\"] = 5 assert cycle_1.properties[\"qux\"] == 5 assert cycle_2.properties[\"qux\"] == 5 cycle_1.properties[\"temp_key_1\"] = \"temp_value_1\" cycle_1.properties[\"temp_key_2\"] = \"temp_value_2\" assert cycle_1.properties == { \"qux\": 5, \"key\": \"value\", \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } assert cycle_2.properties == { \"qux\": 5, \"key\": \"value\", \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } cycle_1.properties.pop(\"temp_key_1\") assert \"temp_key_1\" not in cycle_1.properties.keys() assert \"temp_key_1\" not in cycle_1.properties.keys() assert cycle_1.properties == { \"key\": \"value\", \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } assert cycle_2.properties == { \"key\": \"value\", \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } cycle_2.properties.pop(\"temp_key_2\") assert cycle_1.properties == {\"key\": \"value\", \"qux\": 5} assert cycle_2.properties == {\"key\": \"value\", \"qux\": 5} assert \"temp_key_2\" not in cycle_1.properties.keys() assert \"temp_key_2\" not in cycle_2.properties.keys() cycle_1.properties[\"temp_key_3\"] = 0 assert cycle_1.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 0} assert cycle_2.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 0} cycle_1.properties.update({\"temp_key_3\": 1}) assert cycle_1.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 1} assert cycle_2.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 1} cycle_1.properties.update(dict()) assert cycle_1.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 1} assert cycle_2.properties == {\"key\": \"value\", \"qux\": 5, \"temp_key_3\": 1} cycle_1.properties.pop(\"key\") cycle_1.properties[\"temp_key_4\"] = 0 cycle_1.properties[\"temp_key_5\"] = 0 new_datetime_3 = new_datetime_1 + timedelta(5) with cycle_1 as cycle: assert cycle.frequency == Frequency.MONTHLY assert cycle.creation_date == new_datetime_2 assert cycle.start_date == new_datetime_2 assert cycle.end_date == new_datetime_2 assert cycle.name == \"def\" assert cycle._is_in_context assert cycle.properties[\"qux\"] == 5 assert cycle.properties[\"temp_key_3\"] == 1 assert cycle.properties[\"temp_key_4\"] == 0 assert cycle.properties[\"temp_key_5\"] == 0 cycle.frequency = Frequency.YEARLY cycle.creation_date = new_datetime_3 cycle.start_date = new_datetime_3 cycle.end_date = new_datetime_3 cycle.name = \"abc\" assert cycle.name == \"def\" assert cycle._name == \"abc\" cycle.properties[\"qux\"] = 9 cycle.properties.pop(\"temp_key_3\") cycle.properties.pop(\"temp_key_4\") cycle.properties.update({\"temp_key_4\": 1}) cycle.properties.update({\"temp_key_5\": 2}) cycle.properties.pop(\"temp_key_5\") cycle.properties.update(dict()) assert cycle.frequency == Frequency.MONTHLY assert cycle.creation_date == new_datetime_2 assert cycle.start_date == new_datetime_2 assert cycle.end_date == new_datetime_2 assert cycle._is_in_context assert cycle.properties[\"qux\"] == 5 assert cycle.name == \"def\" assert cycle.properties[\"temp_key_3\"] == 1 assert cycle.properties[\"temp_key_4\"] == 0 assert cycle.properties[\"temp_key_5\"] == 0 assert cycle_1.frequency == Frequency.YEARLY assert cycle_1.creation_date == new_datetime_3 assert cycle_1.start_date == new_datetime_3 assert cycle_1.end_date == 
new_datetime_3 assert cycle_1.name == \"abc\" assert cycle_1.properties[\"qux\"] == 9 assert \"temp_key_3\" not in cycle_1.properties.keys() assert cycle_1.properties[\"temp_key_4\"] == 1 assert \"temp_key_5\" not in cycle_1.properties.keys() "} {"text": "import os import pytest from src.taipy.core.cycle._cycle_fs_repository import _CycleFSRepository from src.taipy.core.cycle._cycle_sql_repository import _CycleSQLRepository from src.taipy.core.cycle.cycle import Cycle, CycleId from src.taipy.core.exceptions import ModelNotFound class TestCycleRepositories: @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_save_and_load(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) obj = repository._load(cycle.id) assert isinstance(obj, Cycle) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_exists(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) assert repository._exists(cycle.id) assert not repository._exists(\"not-existed-cycle\") @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_load_all(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_load_all_with_filters(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") cycle._name = f\"cycle-{i}\" repository._save(cycle) objs = repository._load_all(filters=[{\"id\": \"cycle-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_CycleSQLRepository]) def test_delete(self, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) repository._delete(cycle.id) with pytest.raises(ModelNotFound): repository._load(cycle.id) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_delete_all(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_delete_many(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") repository._save(cycle) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_search(self, cycle, repo, init_sql_repo): repository = repo() for i in range(10): cycle.id = CycleId(f\"cycle-{i}\") cycle.name = f\"cycle-{i}\" repository._save(cycle) assert len(repository._load_all()) == 10 objs = repository._search(\"name\", \"cycle-2\") assert len(objs) == 1 assert isinstance(objs[0], Cycle) @pytest.mark.parametrize(\"repo\", [_CycleFSRepository, _CycleSQLRepository]) def test_export(self, tmpdir, cycle, repo, init_sql_repo): repository = repo() repository._save(cycle) repository._export(cycle.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _CycleFSRepository else os.path.join(tmpdir.strpath, \"cycle\") assert os.path.exists(os.path.join(dir_path, f\"{cycle.id}.json\")) "} {"text": "from datetime import datetime from 
src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.cycle.cycle import Cycle from src.taipy.core.cycle.cycle_id import CycleId from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config def test_save_and_get_cycle_entity(tmpdir, cycle, current_datetime): _CycleManager._repository.base_path = tmpdir assert len(_CycleManager._get_all()) == 0 _CycleManager._set(cycle) assert _CycleManager._exists(cycle.id) cycle_1 = _CycleManager._get(cycle.id) assert cycle_1.id == cycle.id assert cycle_1.name == cycle.name assert cycle_1.properties == cycle.properties assert cycle_1.creation_date == cycle.creation_date assert cycle_1.start_date == cycle.start_date assert cycle_1.end_date == cycle.end_date assert cycle_1.frequency == cycle.frequency assert len(_CycleManager._get_all()) == 1 assert _CycleManager._get(cycle.id) == cycle assert _CycleManager._get(cycle.id).name == cycle.name assert isinstance(_CycleManager._get(cycle.id).creation_date, datetime) assert _CycleManager._get(cycle.id).creation_date == cycle.creation_date assert _CycleManager._get(cycle.id).frequency == Frequency.DAILY cycle_2_id = CycleId(\"cycle_2\") assert _CycleManager._get(cycle_2_id) is None assert not _CycleManager._exists(cycle_2_id) cycle_3 = Cycle( Frequency.MONTHLY, {}, creation_date=current_datetime, start_date=current_datetime, end_date=current_datetime, name=\"bar\", id=cycle_1.id, ) _CycleManager._set(cycle_3) cycle_3 = _CycleManager._get(cycle_1.id) assert _CycleManager._exists(cycle_1.id) assert len(_CycleManager._get_all()) == 1 assert cycle_3.id == cycle_1.id assert cycle_3.name == cycle_3.name assert cycle_3.properties == cycle_3.properties assert cycle_3.creation_date == current_datetime assert cycle_3.start_date == current_datetime assert cycle_3.end_date == current_datetime assert cycle_3.frequency == cycle_3.frequency def test_create_and_delete_cycle_entity(tmpdir): _CycleManager._repository.base_path = tmpdir assert len(_CycleManager._get_all()) == 0 cycle_1 = _CycleManager._create(Frequency.DAILY, name=\"foo\", key=\"value\") assert cycle_1.id is not None assert cycle_1.name == \"foo\" assert cycle_1.properties == {\"key\": \"value\"} assert cycle_1.creation_date is not None assert cycle_1.start_date is not None assert cycle_1.end_date is not None assert cycle_1.start_date < cycle_1.creation_date < cycle_1.end_date assert cycle_1.key == \"value\" assert cycle_1.frequency == Frequency.DAILY cycle_1_id = cycle_1.id assert _CycleManager._exists(cycle_1.id) assert len(_CycleManager._get_all()) == 1 assert _CycleManager._get(cycle_1_id) == cycle_1 assert _CycleManager._get(cycle_1_id).name == \"foo\" assert isinstance(_CycleManager._get(cycle_1_id).creation_date, datetime) assert _CycleManager._get(cycle_1_id).frequency == Frequency.DAILY cycle_2_id = CycleId(\"cycle_2\") assert _CycleManager._get(cycle_2_id) is None assert not _CycleManager._exists(cycle_2_id) cycle_3 = _CycleManager._create(Frequency.MONTHLY, \"bar\") assert cycle_3.id is not None assert 
cycle_3.name == \"bar\" assert isinstance(cycle_3.creation_date, datetime) assert cycle_3.frequency == Frequency.MONTHLY cycle_3_id = cycle_3.id assert _CycleManager._exists(cycle_3_id) assert len(_CycleManager._get_all()) == 2 assert _CycleManager._get(cycle_3_id).name == \"bar\" cycle_4 = _CycleManager._create(Frequency.YEARLY, \"baz\") cycle_4_id = cycle_4.id assert _CycleManager._exists(cycle_4_id) assert len(_CycleManager._get_all()) == 3 _CycleManager._delete(cycle_4_id) assert len(_CycleManager._get_all()) == 2 assert not _CycleManager._exists(cycle_4_id) assert _CycleManager._get(cycle_4_id) is None _CycleManager._delete_all() assert len(_CycleManager._get_all()) == 0 assert not any(_CycleManager._exists(cycle_id) for cycle_id in [cycle_1_id, cycle_3_id, cycle_4_id]) def test_get_cycle_start_date_and_end_date(): creation_date_1 = datetime.fromisoformat(\"2021-11-11T11:11:01.000001\") daily_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.DAILY, creation_date=creation_date_1) weekly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.WEEKLY, creation_date=creation_date_1) monthly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.MONTHLY, creation_date=creation_date_1) yearly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.YEARLY, creation_date=creation_date_1) assert daily_start_date_1 == datetime.fromisoformat(\"2021-11-11T00:00:00.000000\") assert weekly_start_date_1 == datetime.fromisoformat(\"2021-11-08T00:00:00.000000\") assert monthly_start_date_1 == datetime.fromisoformat(\"2021-11-01T00:00:00.000000\") assert yearly_start_date_1 == datetime.fromisoformat(\"2021-01-01T00:00:00.000000\") daily_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.DAILY, start_date=daily_start_date_1) weekly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.WEEKLY, start_date=weekly_start_date_1) monthly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.MONTHLY, start_date=monthly_start_date_1) yearly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.YEARLY, start_date=yearly_start_date_1) assert daily_end_date_1 == datetime.fromisoformat(\"2021-11-11T23:59:59.999999\") assert weekly_end_date_1 == datetime.fromisoformat(\"2021-11-14T23:59:59.999999\") assert monthly_end_date_1 == datetime.fromisoformat(\"2021-11-30T23:59:59.999999\") assert yearly_end_date_1 == datetime.fromisoformat(\"2021-12-31T23:59:59.999999\") creation_date_2 = datetime.now() daily_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.DAILY, creation_date=creation_date_2) daily_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.DAILY, daily_start_date_2) assert daily_start_date_2.date() == creation_date_2.date() assert daily_end_date_2.date() == creation_date_2.date() assert daily_start_date_2 < creation_date_2 < daily_end_date_2 weekly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.WEEKLY, creation_date=creation_date_2) weekly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.WEEKLY, weekly_start_date_2) assert weekly_start_date_2 < creation_date_2 < weekly_end_date_2 monthly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.MONTHLY, creation_date=creation_date_2) monthly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.MONTHLY, monthly_start_date_2) assert monthly_start_date_2.month == creation_date_2.month and monthly_start_date_2.day == 1 assert monthly_end_date_2.month == creation_date_2.month assert monthly_start_date_2 < creation_date_2 < 
monthly_end_date_2 yearly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.YEARLY, creation_date=creation_date_2) yearly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.YEARLY, yearly_start_date_2) assert yearly_start_date_2.year == creation_date_2.year assert yearly_start_date_2 == datetime(creation_date_2.year, 1, 1) assert yearly_end_date_2.year == creation_date_2.year assert yearly_end_date_2.date() == datetime(creation_date_2.year, 12, 31).date() assert yearly_start_date_2 < creation_date_2 < yearly_end_date_2 def test_hard_delete_shared_entities(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_config_1 = Config.configure_data_node(\"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_config_2 = Config.configure_data_node(\"my_input_2\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_config_3 = Config.configure_data_node(\"my_input_3\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_config_4 = Config.configure_data_node(\"my_input_4\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_config_1, dn_config_2) task_config_2 = Config.configure_task(\"task_config_2\", print, dn_config_2, dn_config_3) task_config_3 = Config.configure_task(\"task_config_3\", print, dn_config_3, dn_config_4) # scope = global creation_date = datetime.now() scenario_config_1 = Config.configure_scenario( \"scenario_config_1\", [task_config_1, task_config_2, task_config_3], creation_date=creation_date, frequency=Frequency.DAILY, ) scenario_config_1.add_sequences( { \"sequence_1\": [task_config_1, task_config_2], \"sequence_2\": [task_config_1, task_config_2], \"sequence_3\": [task_config_3], } ) scenario_config_2 = Config.configure_scenario( \"scenario_config_2\", [task_config_2, task_config_3] ) # No Frequency so cycle attached to scenarios scenario_config_2.add_sequences({\"sequence_3\": [task_config_3]}) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config_1) scenario_2 = _ScenarioManager._create(scenario_config_1) scenario_3 = _ScenarioManager._create(scenario_config_2) scenario_1.submit() scenario_2.submit() scenario_3.submit() assert len(_ScenarioManager._get_all()) == 3 assert len(_SequenceManager._get_all()) == 7 assert len(_TaskManager._get_all()) == 7 assert len(_DataManager._get_all()) == 8 assert len(_JobManager._get_all()) == 8 assert len(_CycleManager._get_all()) == 1 _CycleManager._hard_delete(scenario_1.cycle.id) assert len(_CycleManager._get_all()) == 0 assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 2 assert len(_JobManager._get_all()) == 2 assert len(_DataManager._get_all()) == 3 def test_get_primary(tmpdir, cycle, current_datetime): _CycleManager._repository.base_path = tmpdir assert len(_CycleManager._get_all()) == 0 _CycleManager._set(cycle) cycle_1 = _CycleManager._get(cycle.id) cycle_2 = Cycle(Frequency.MONTHLY, {}, current_datetime, current_datetime, current_datetime, name=\"foo\") _CycleManager._set(cycle_2) cycle_2 = _CycleManager._get(cycle_2.id) cycles = _CycleManager._get_all() assert len(_CycleManager._get_all()) == 2 assert ( len(_CycleManager._get_cycles_by_frequency_and_start_date(cycle_1.frequency, cycle_1.start_date, cycles)) == 1 ) assert ( len(_CycleManager._get_cycles_by_frequency_and_start_date(cycle_2.frequency, cycle_2.start_date, cycles)) == 1 ) assert ( len( 
_CycleManager._get_cycles_by_frequency_and_start_date( Frequency.WEEKLY, datetime(2000, 1, 1, 1, 0, 0, 0), cycles ) ) == 0 ) assert ( len( _CycleManager._get_cycles_by_frequency_and_overlapping_date( cycle_1.frequency, cycle_1.creation_date, cycles ) ) == 1 ) assert ( _CycleManager._get_cycles_by_frequency_and_overlapping_date(cycle_1.frequency, cycle_1.creation_date, cycles)[0] == cycle_1 ) assert ( len( _CycleManager._get_cycles_by_frequency_and_overlapping_date( Frequency.WEEKLY, datetime(2000, 1, 1, 1, 0, 0, 0), cycles ) ) == 0 ) "} {"text": "from datetime import datetime from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory from src.taipy.core.cycle.cycle import Cycle from src.taipy.core.cycle.cycle_id import CycleId from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config def test_save_and_get_cycle_entity(init_sql_repo, cycle, current_datetime): _CycleManager._repository = _CycleManagerFactory._build_repository() _CycleManager._delete_all() assert len(_CycleManager._get_all()) == 0 _CycleManager._set(cycle) assert _CycleManager._exists(cycle.id) cycle_1 = _CycleManager._get(cycle.id) assert cycle_1.id == cycle.id assert cycle_1.name == cycle.name assert cycle_1.properties == cycle.properties assert cycle_1.creation_date == cycle.creation_date assert cycle_1.start_date == cycle.start_date assert cycle_1.end_date == cycle.end_date assert cycle_1.frequency == cycle.frequency assert len(_CycleManager._get_all()) == 1 assert _CycleManager._get(cycle.id) == cycle assert _CycleManager._get(cycle.id).name == cycle.name assert isinstance(_CycleManager._get(cycle.id).creation_date, datetime) assert _CycleManager._get(cycle.id).creation_date == cycle.creation_date assert _CycleManager._get(cycle.id).frequency == Frequency.DAILY cycle_2_id = CycleId(\"cycle_2\") assert _CycleManager._get(cycle_2_id) is None assert not _CycleManager._exists(cycle_2_id) cycle_3 = Cycle( Frequency.MONTHLY, {}, creation_date=current_datetime, start_date=current_datetime, end_date=current_datetime, name=\"bar\", id=cycle_1.id, ) _CycleManager._set(cycle_3) cycle_3 = _CycleManager._get(cycle_1.id) assert _CycleManager._exists(cycle_1.id) assert len(_CycleManager._get_all()) == 1 assert cycle_3.id == cycle_1.id assert cycle_3.name == cycle_3.name assert cycle_3.properties == cycle_3.properties assert cycle_3.creation_date == current_datetime assert cycle_3.start_date == current_datetime assert cycle_3.end_date == current_datetime assert cycle_3.frequency == cycle_3.frequency def test_create_and_delete_cycle_entity(init_sql_repo): _CycleManager._repository = _CycleManagerFactory._build_repository() _CycleManager._delete_all() assert len(_CycleManager._get_all()) == 0 cycle_1 = _CycleManager._create(Frequency.DAILY, name=\"foo\", key=\"value\", display_name=\"foo\") assert cycle_1.id is not None assert cycle_1.name == \"foo\" 
assert cycle_1.properties == {\"key\": \"value\", \"display_name\": \"foo\"} assert cycle_1.creation_date is not None assert cycle_1.start_date is not None assert cycle_1.end_date is not None assert cycle_1.start_date < cycle_1.creation_date < cycle_1.end_date assert cycle_1.key == \"value\" assert cycle_1.frequency == Frequency.DAILY cycle_1_id = cycle_1.id assert _CycleManager._exists(cycle_1.id) assert len(_CycleManager._get_all()) == 1 assert _CycleManager._get(cycle_1_id) == cycle_1 assert _CycleManager._get(cycle_1_id).name == \"foo\" assert isinstance(_CycleManager._get(cycle_1_id).creation_date, datetime) assert _CycleManager._get(cycle_1_id).frequency == Frequency.DAILY cycle_2_id = CycleId(\"cycle_2\") assert _CycleManager._get(cycle_2_id) is None assert not _CycleManager._exists(cycle_2_id) cycle_3 = _CycleManager._create(Frequency.MONTHLY, \"bar\") assert cycle_3.id is not None assert cycle_3.name == \"bar\" assert isinstance(cycle_3.creation_date, datetime) assert cycle_3.frequency == Frequency.MONTHLY cycle_3_id = cycle_3.id assert _CycleManager._exists(cycle_3_id) assert len(_CycleManager._get_all()) == 2 assert _CycleManager._get(cycle_3_id).name == \"bar\" cycle_4 = _CycleManager._create(Frequency.YEARLY, \"baz\") cycle_4_id = cycle_4.id assert _CycleManager._exists(cycle_4_id) assert len(_CycleManager._get_all()) == 3 _CycleManager._delete(cycle_4_id) assert len(_CycleManager._get_all()) == 2 assert not _CycleManager._exists(cycle_4_id) assert _CycleManager._get(cycle_4_id) is None _CycleManager._delete_all() assert len(_CycleManager._get_all()) == 0 assert not any(_CycleManager._exists(cycle_id) for cycle_id in [cycle_1_id, cycle_3_id, cycle_4_id]) def test_get_cycle_start_date_and_end_date(init_sql_repo): _CycleManager._repository = _CycleManagerFactory._build_repository() _CycleManager._delete_all() creation_date_1 = datetime.fromisoformat(\"2021-11-11T11:11:01.000001\") daily_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.DAILY, creation_date=creation_date_1) weekly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.WEEKLY, creation_date=creation_date_1) monthly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.MONTHLY, creation_date=creation_date_1) yearly_start_date_1 = _CycleManager._get_start_date_of_cycle(Frequency.YEARLY, creation_date=creation_date_1) assert daily_start_date_1 == datetime.fromisoformat(\"2021-11-11T00:00:00.000000\") assert weekly_start_date_1 == datetime.fromisoformat(\"2021-11-08T00:00:00.000000\") assert monthly_start_date_1 == datetime.fromisoformat(\"2021-11-01T00:00:00.000000\") assert yearly_start_date_1 == datetime.fromisoformat(\"2021-01-01T00:00:00.000000\") daily_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.DAILY, start_date=daily_start_date_1) weekly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.WEEKLY, start_date=weekly_start_date_1) monthly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.MONTHLY, start_date=monthly_start_date_1) yearly_end_date_1 = _CycleManager._get_end_date_of_cycle(Frequency.YEARLY, start_date=yearly_start_date_1) assert daily_end_date_1 == datetime.fromisoformat(\"2021-11-11T23:59:59.999999\") assert weekly_end_date_1 == datetime.fromisoformat(\"2021-11-14T23:59:59.999999\") assert monthly_end_date_1 == datetime.fromisoformat(\"2021-11-30T23:59:59.999999\") assert yearly_end_date_1 == datetime.fromisoformat(\"2021-12-31T23:59:59.999999\") creation_date_2 = datetime.now() daily_start_date_2 = 
_CycleManager._get_start_date_of_cycle(Frequency.DAILY, creation_date=creation_date_2) daily_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.DAILY, daily_start_date_2) assert daily_start_date_2.date() == creation_date_2.date() assert daily_end_date_2.date() == creation_date_2.date() assert daily_start_date_2 < creation_date_2 < daily_end_date_2 weekly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.WEEKLY, creation_date=creation_date_2) weekly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.WEEKLY, weekly_start_date_2) assert weekly_start_date_2 < creation_date_2 < weekly_end_date_2 monthly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.MONTHLY, creation_date=creation_date_2) monthly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.MONTHLY, monthly_start_date_2) assert monthly_start_date_2.month == creation_date_2.month and monthly_start_date_2.day == 1 assert monthly_end_date_2.month == creation_date_2.month assert monthly_start_date_2 < creation_date_2 < monthly_end_date_2 yearly_start_date_2 = _CycleManager._get_start_date_of_cycle(Frequency.YEARLY, creation_date=creation_date_2) yearly_end_date_2 = _CycleManager._get_end_date_of_cycle(Frequency.YEARLY, yearly_start_date_2) assert yearly_start_date_2.year == creation_date_2.year assert yearly_start_date_2 == datetime(creation_date_2.year, 1, 1) assert yearly_end_date_2.year == creation_date_2.year assert yearly_end_date_2.date() == datetime(creation_date_2.year, 12, 31).date() assert yearly_start_date_2 < creation_date_2 < yearly_end_date_2 def test_hard_delete_shared_entities(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _ScenarioManager._repository = _ScenarioManagerFactory._build_repository() dn_config_1 = Config.configure_data_node(\"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_config_2 = Config.configure_data_node(\"my_input_2\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_config_3 = Config.configure_data_node(\"my_input_3\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_config_4 = Config.configure_data_node(\"my_input_4\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_config_1, dn_config_2) task_config_2 = Config.configure_task(\"task_config_2\", print, dn_config_2, dn_config_3) task_config_3 = Config.configure_task(\"task_config_3\", print, dn_config_3, dn_config_4) # scope = global creation_date = datetime.now() scenario_config_1 = Config.configure_scenario( \"scenario_config_1\", [task_config_1, task_config_2, task_config_3], creation_date=creation_date, frequency=Frequency.DAILY, ) scenario_config_1.add_sequences( { \"sequence_1\": [task_config_1, task_config_2], \"sequence_2\": [task_config_1, task_config_2], \"sequence_3\": [task_config_3], } ) scenario_config_2 = Config.configure_scenario( \"scenario_config_2\", [task_config_2, task_config_3] ) # No Frequency so cycle attached to scenarios scenario_config_2.add_sequences({\"sequence_3\": [task_config_3]}) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config_1) scenario_2 = _ScenarioManager._create(scenario_config_1) scenario_3 = _ScenarioManager._create(scenario_config_2) scenario_1.submit() scenario_2.submit() scenario_3.submit() assert len(_ScenarioManager._get_all()) == 3 assert len(_SequenceManager._get_all()) == 7 assert len(_TaskManager._get_all()) == 7 assert 
len(_DataManager._get_all()) == 8 assert len(_JobManager._get_all()) == 8 assert len(_CycleManager._get_all()) == 1 _CycleManager._hard_delete(scenario_1.cycle.id) assert len(_CycleManager._get_all()) == 0 assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 2 assert len(_JobManager._get_all()) == 2 assert len(_DataManager._get_all()) == 3 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import dataclasses import pathlib from dataclasses import dataclass from typing import Any, Dict, Iterable, List, Optional, Union from src.taipy.core._manager._manager import _Manager from src.taipy.core._repository._abstract_converter import _AbstractConverter from src.taipy.core._repository._abstract_repository import _AbstractRepository from src.taipy.core._repository._filesystem_repository import _FileSystemRepository from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config @dataclass class MockModel: id: str name: str version: str def to_dict(self): return dataclasses.asdict(self) @staticmethod def from_dict(data: Dict[str, Any]): return MockModel(id=data[\"id\"], name=data[\"name\"], version=data[\"version\"]) @dataclass class MockEntity: def __init__(self, id: str, name: str, version: str = None) -> None: self.id = id self.name = name if version: self._version = version else: self._version = _VersionManager._get_latest_version() class MockConverter(_AbstractConverter): @classmethod def _entity_to_model(cls, entity: MockEntity) -> MockModel: return MockModel(id=entity.id, name=entity.name, version=entity._version) @classmethod def _model_to_entity(cls, model: MockModel) -> MockEntity: return MockEntity(id=model.id, name=model.name, version=model.version) class MockRepository(_AbstractRepository): # type: ignore def __init__(self, **kwargs): self.repo = _FileSystemRepository(**kwargs, converter=MockConverter) def _to_model(self, obj: MockEntity): return MockModel(obj.id, obj.name, obj._version) def _from_model(self, model: MockModel): return MockEntity(model.id, model.name, model.version) def _load(self, entity_id: str) -> MockEntity: return self.repo._load(entity_id) def _load_all(self, filters: Optional[List[Dict]] = None) -> List[MockEntity]: return self.repo._load_all(filters) def _save(self, entity: MockEntity): return self.repo._save(entity) def _exists(self, entity_id: str) -> bool: return self.repo._exists(entity_id) def _delete(self, entity_id: str): return self.repo._delete(entity_id) def _delete_all(self): return self.repo._delete_all() def _delete_many(self, ids: Iterable[str]): return self.repo._delete_many(ids) def _delete_by(self, attribute: str, value: str): return self.repo._delete_by(attribute, value) def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None) -> List[MockEntity]: return self.repo._search(attribute, value, filters) def _export(self, entity_id: str, folder_path: Union[str, pathlib.Path]): return 
self.repo._export(self, entity_id, folder_path) @property def _storage_folder(self) -> pathlib.Path: return pathlib.Path(Config.core.storage_folder) # type: ignore class MockManager(_Manager[MockEntity]): _ENTITY_NAME = MockEntity.__name__ _repository = MockRepository(model_type=MockModel, dir_name=\"foo\") class TestManager: def test_save_and_fetch_model(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) fetched_model = MockManager._get(m.id) assert m == fetched_model def test_exists(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) assert MockManager._exists(m.id) def test_get(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) assert MockManager._get(m.id) == m def test_get_all(self): MockManager._delete_all() objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) _objs = MockManager._get_all() assert len(_objs) == 5 def test_delete(self): m = MockEntity(\"uuid\", \"foo\") MockManager._set(m) MockManager._delete(m.id) assert MockManager._get(m.id) is None def test_delete_all(self): objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) MockManager._delete_all() assert MockManager._get_all() == [] def test_delete_many(self): objs = [] for i in range(5): m = MockEntity(f\"uuid-{i}\", f\"Foo{i}\") objs.append(m) MockManager._set(m) MockManager._delete_many([\"uuid-0\", \"uuid-1\"]) assert len(MockManager._get_all()) == 3 def test_is_editable(self): m = MockEntity(\"uuid\", \"Foo\") MockManager._set(m) assert MockManager._is_editable(m) def test_is_readable(self): m = MockEntity(\"uuid\", \"Foo\") MockManager._set(m) assert MockManager._is_readable(m) "} {"text": " class NotifyMock: \"\"\" A shared class for testing notification on jobStatus of sequence level and scenario level \"entity\" can be understood as either \"scenario\" or \"sequence\". \"\"\" def __init__(self, entity): self.scenario = entity self.nb_called = 0 self.__name__ = \"NotifyMock\" def __call__(self, entity, job): assert entity == self.scenario if self.nb_called == 0: assert job.is_pending() if self.nb_called == 1: assert job.is_running() if self.nb_called == 2: assert job.is_finished() self.nb_called += 1 def assert_called_3_times(self): assert self.nb_called == 3 def assert_not_called(self): assert self.nb_called == 0 def reset(self): self.nb_called = 0 "} {"text": " def assert_true_after_time(assertion, msg=None, time=120): from datetime import datetime from time import sleep loops = 0 start = datetime.now() while (datetime.now() - start).seconds < time: sleep(1) # Limit CPU usage try: if assertion(): return except BaseException as e: print(\"Raise : \", e) loops += 1 continue if msg: print(msg) assert assertion() "} {"text": "import os import tempfile class NamedTemporaryFile: def __init__(self, content=None): with tempfile.NamedTemporaryFile(\"w\", delete=False) as fd: if content: fd.write(content) self.filename = fd.name def read(self): with open(self.filename, \"r\") as fp: return fp.read() def __del__(self): os.unlink(self.filename) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. from typing import List from src.taipy.core import DataNode, Sequence, SequenceId, Task, TaskId from src.taipy.core._entity._dag import _DAG from taipy.config.common.scope import Scope def assert_x(x: int, *nodes): for node in nodes: assert node.x == x def assert_y(y: List[int], *nodes): for node in nodes: assert node.y in y y.remove(node.y) def assert_x_y(x: int, y: List[int], *nodes): assert_x(x, *nodes) for node in nodes: assert node.y in y y.remove(node.y) def assert_edge_exists(src, dest, dag: _DAG): list_of_tuples = [(edge.src.entity.id, edge.dest.entity.id) for edge in dag.edges] assert (src, dest) in list_of_tuples class TestDAG: def test_get_dag_1(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) dag = sequence._get_dag() # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert dag.length == 7 assert dag.width == 2 assert dag._grid_length == 7 assert dag._grid_width == 3 assert len(dag.nodes) == 11 assert_x_y(0, [0, 2], dag.nodes[\"s1\"], dag.nodes[\"s2\"]) assert_x_y(1, [1], dag.nodes[\"t1\"]) assert_x_y(2, [0, 2], dag.nodes[\"s3\"], dag.nodes[\"s4\"]) assert_x_y(3, [0, 2], dag.nodes[\"t2\"], dag.nodes[\"t4\"]) assert_x_y(4, [0, 2], dag.nodes[\"s5\"], dag.nodes[\"s7\"]) assert_x_y(5, [1], dag.nodes[\"t3\"]) assert_x_y(6, [1], dag.nodes[\"s6\"]) assert len(dag.edges) == 11 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"s2\", \"t1\", dag) assert_edge_exists(\"t1\", \"s3\", dag) assert_edge_exists(\"t1\", \"s4\", dag) assert_edge_exists(\"s3\", \"t2\", dag) assert_edge_exists(\"t2\", \"s5\", dag) assert_edge_exists(\"s5\", \"t3\", dag) assert_edge_exists(\"s4\", \"t3\", dag) assert_edge_exists(\"t3\", \"s6\", dag) assert_edge_exists(\"s4\", \"t4\", dag) assert_edge_exists(\"t4\", \"s7\", dag) def test_get_dag_2(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # 6 | t2 _____ # 5 | \\ # 4 | 
s5 _________________ t3 _______ s6 # 3 | s1 __ _ s4 _____/ # 2 | \\ _ t1 ____/ \\_ t4 _______ s7 # 1 | / # 0 | s2 -- # |________________________________________________ # 0 1 2 3 4 dag = sequence._get_dag() assert dag.length == 5 assert dag.width == 3 assert dag._grid_length == 5 assert dag._grid_width == 7 assert len(dag.nodes) == 10 assert_x_y(0, [0, 3, 6], dag.nodes[\"s1\"], dag.nodes[\"s2\"], dag.nodes[\"t2\"]) assert_x_y(1, [2, 4], dag.nodes[\"t1\"], dag.nodes[\"s5\"]) assert_x_y(2, [3], dag.nodes[\"s4\"]) assert_x_y(3, [2, 4], dag.nodes[\"t3\"], dag.nodes[\"t4\"]) assert_x_y(4, [2, 4], dag.nodes[\"s6\"], dag.nodes[\"s7\"]) assert len(dag.edges) == 9 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"s2\", \"t1\", dag) assert_edge_exists(\"t1\", \"s4\", dag) assert_edge_exists(\"t2\", \"s5\", dag) assert_edge_exists(\"s5\", \"t3\", dag) assert_edge_exists(\"s4\", \"t3\", dag) assert_edge_exists(\"t3\", \"s6\", dag) assert_edge_exists(\"s4\", \"t4\", dag) assert_edge_exists(\"t4\", \"s7\", dag) def test_get_dag_3(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"quuz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"corge\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"hugh\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2, data_node_3], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t2\")) task_3 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t3\")) task_4 = Task(\"garply\", {}, print, output=[data_node_6], id=TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_7], None, TaskId(\"t5\")) sequence = Sequence({}, [task_5, task_3, task_4, task_1, task_2], SequenceId(\"p1\")) # 12 | s7 __ # 11 | \\ # 10 | \\ # 9 | t4 _ \\_ t5 # 8 | \\ ____ t3 ___ # 7 | \\ / \\ # 6 | s3 _ \\__ s6 _ s4 _/ \\___ s5 # 5 | \\ / \\ # 4 | \\ / \\____ t2 # 3 | s2 ___\\__ t1 __/ # 2 | / # 1 | / # 0 | s1 _/ # |________________________________________________ # 0 1 2 3 4 dag = sequence._get_dag() assert dag.length == 5 assert dag.width == 5 assert dag._grid_length == 5 assert dag._grid_width == 13 assert len(dag.nodes) == 12 assert_x_y( 0, [0, 3, 6, 9, 12], dag.nodes[\"s1\"], dag.nodes[\"s2\"], dag.nodes[\"s3\"], dag.nodes[\"s7\"], dag.nodes[\"t4\"] ) assert_x_y(1, [3, 6, 9], dag.nodes[\"t1\"], dag.nodes[\"t5\"], dag.nodes[\"s6\"]) assert_x_y(2, [6], dag.nodes[\"s4\"]) assert_x_y(3, [4, 8], dag.nodes[\"t2\"], dag.nodes[\"t3\"]) assert_x_y(4, [6], dag.nodes[\"s5\"]) assert len(dag.edges) == 9 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"s2\", \"t1\", dag) assert_edge_exists(\"s3\", \"t1\", dag) assert_edge_exists(\"t1\", \"s4\", dag) assert_edge_exists(\"s4\", \"t2\", dag) assert_edge_exists(\"s4\", \"t3\", dag) assert_edge_exists(\"t3\", \"s5\", dag) assert_edge_exists(\"t4\", \"s6\", dag) assert_edge_exists(\"s7\", \"t5\", dag) def test_get_dag_4(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"quuz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"corge\", Scope.SCENARIO, \"s6\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2, 
data_node_3], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t2\")) task_3 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t3\")) task_4 = Task(\"garply\", {}, print, output=[data_node_6], id=TaskId(\"t4\")) sequence = Sequence({}, [task_3, task_4, task_1, task_2], SequenceId(\"p1\")) # 6 | t4 __ # 5 | \\ # 4 | s3 _ \\__ s6 ______ t3 ___ # 3 | \\ ___ s4 _/ \\___ s5 # 2 | s2 __\\__ t1 __/ \\______ t2 # 1 | / # 0 | s1 _/ # |________________________________________________ # 0 1 2 3 4 dag = sequence._get_dag() assert dag.length == 5 assert dag.width == 4 assert dag._grid_length == 5 assert dag._grid_width == 7 assert len(dag.nodes) == 10 assert_x_y(0, [0, 2, 4, 6], dag.nodes[\"s1\"], dag.nodes[\"s2\"], dag.nodes[\"s3\"], dag.nodes[\"t4\"]) assert_x_y(1, [2, 4], dag.nodes[\"t1\"], dag.nodes[\"s6\"]) assert_x_y(2, [3], dag.nodes[\"s4\"]) assert_x_y(3, [2, 4], dag.nodes[\"t2\"], dag.nodes[\"t3\"]) assert_x_y(4, [3], dag.nodes[\"s5\"]) assert len(dag.edges) == 8 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"s2\", \"t1\", dag) assert_edge_exists(\"s3\", \"t1\", dag) assert_edge_exists(\"t1\", \"s4\", dag) assert_edge_exists(\"s4\", \"t2\", dag) assert_edge_exists(\"s4\", \"t3\", dag) assert_edge_exists(\"t3\", \"s5\", dag) assert_edge_exists(\"t4\", \"s6\", dag) def test_get_dag_5(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") task_1 = Task(\"baz\", {}, print, [data_node_1], [data_node_2], TaskId(\"t1\")) sequence = Sequence({}, [task_1], SequenceId(\"p1\")) # 1 | # 0 | s1 __ t1 __ s2 # |_________________ # 0 1 2 dag = sequence._get_dag() assert dag.length == 3 assert dag.width == 1 assert dag._grid_length == 3 assert dag._grid_width == 1 assert len(dag.nodes) == 3 assert_x_y(0, [0], dag.nodes[\"s1\"]) assert_x_y(1, [0], dag.nodes[\"t1\"]) assert_x_y(2, [0], dag.nodes[\"s2\"]) assert len(dag.edges) == 2 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"t1\", \"s2\", dag) def test_get_dag_6(self): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") task_1 = Task(\"quux\", {}, print, [data_node_1, data_node_2], [data_node_3], TaskId(\"t1\")) task_2 = Task(\"quuz\", {}, print, [data_node_2], [data_node_4], TaskId(\"t2\")) sequence = Sequence({}, [task_1, task_2], SequenceId(\"p1\")) # 2 | # | # 1 | s1 ___ t1 __ s3 # | / # 0 | s2 /__ t2 __ s4 # |_________________ # 0 1 2 dag = sequence._get_dag() assert dag.length == 3 assert dag.width == 2 assert dag._grid_length == 3 assert dag._grid_width == 2 assert len(dag.nodes) == 6 assert_x_y(0, [0, 1], dag.nodes[\"s1\"], dag.nodes[\"s2\"]) assert_x_y(1, [0, 1], dag.nodes[\"t1\"], dag.nodes[\"t2\"]) assert_x_y(2, [0, 1], dag.nodes[\"s3\"], dag.nodes[\"s4\"]) assert len(dag.edges) == 5 assert_edge_exists(\"s1\", \"t1\", dag) assert_edge_exists(\"s2\", \"t1\", dag) assert_edge_exists(\"t1\", \"s3\", dag) assert_edge_exists(\"s2\", \"t2\", dag) assert_edge_exists(\"t2\", \"s4\", dag) "} {"text": "from unittest import mock import pytest from src.taipy.core import taipy from src.taipy.core._entity._labeled import _Labeled from taipy.config import Config, Frequency, Scope class MockOwner: label = \"owner_label\" def get_label(self): return self.label def test_get_label(): labeled_entity = _Labeled() with 
pytest.raises(NotImplementedError): labeled_entity.get_label() with pytest.raises(NotImplementedError): labeled_entity.get_simple_label() with pytest.raises(AttributeError): labeled_entity._get_label() with pytest.raises(AttributeError): labeled_entity._get_simple_label() labeled_entity.id = \"id\" assert labeled_entity._get_label() == \"id\" assert labeled_entity._get_simple_label() == \"id\" labeled_entity.config_id = \"the config id\" assert labeled_entity._get_label() == \"the config id\" assert labeled_entity._get_simple_label() == \"the config id\" labeled_entity._properties = {\"name\": \"a name\"} assert labeled_entity._get_label() == \"a name\" assert labeled_entity._get_simple_label() == \"a name\" labeled_entity.owner_id = \"owner_id\" with mock.patch(\"src.taipy.core.get\") as get_mck: get_mck.return_value = MockOwner() assert labeled_entity._get_label() == \"owner_label > a name\" assert labeled_entity._get_simple_label() == \"a name\" labeled_entity._properties[\"label\"] = \"a wonderful label\" assert labeled_entity._get_label() == \"a wonderful label\" assert labeled_entity._get_simple_label() == \"a wonderful label\" def mult(n1, n2): return n1 * n2 def test_get_label_complex_case(): dn1_cfg = Config.configure_data_node(\"dn1\", scope=Scope.GLOBAL) dn2_cfg = Config.configure_data_node(\"dn2\", scope=Scope.CYCLE) dn3_cfg = Config.configure_data_node(\"dn3\", scope=Scope.CYCLE) dn4_cfg = Config.configure_data_node(\"dn4\", scope=Scope.SCENARIO) dn5_cfg = Config.configure_data_node(\"dn5\", scope=Scope.SCENARIO) tA_cfg = Config.configure_task(\"t_A_C\", mult, [dn1_cfg, dn2_cfg], dn3_cfg) tB_cfg = Config.configure_task(\"t_B_S\", mult, [dn3_cfg, dn4_cfg], dn5_cfg) scenario_cfg = Config.configure_scenario(\"scenario_cfg\", [tA_cfg, tB_cfg], [], Frequency.DAILY) scenario_cfg.add_sequences( { \"sequence_C\": [tA_cfg], \"sequence_S\": [tA_cfg, tB_cfg], } ) scenario = taipy.create_scenario(scenario_cfg, name=\"My Name\") cycle = scenario.cycle cycle.name = \"Today\" sequence_C = scenario.sequence_C sequence_S = scenario.sequence_S tA = scenario.t_A_C tB = scenario.t_B_S dn1 = scenario.dn1 dn2 = scenario.dn2 dn3 = scenario.dn3 dn4 = scenario.dn4 dn5 = scenario.dn5 assert cycle.get_label() == scenario.cycle.name assert cycle.get_simple_label() == scenario.cycle.name assert scenario.get_label() == \"Today > My Name\" assert scenario.get_simple_label() == \"My Name\" assert sequence_C.get_label() == \"Today > My Name > sequence_C\" assert sequence_C.get_simple_label() == \"sequence_C\" assert sequence_S.get_label() == \"Today > My Name > sequence_S\" assert sequence_S.get_simple_label() == \"sequence_S\" assert tA.get_label() == \"Today > t_A_C\" assert tA.get_simple_label() == \"t_A_C\" assert tB.get_label() == \"Today > My Name > t_B_S\" assert tB.get_simple_label() == \"t_B_S\" assert dn1.get_label() == \"dn1\" assert dn1.get_simple_label() == \"dn1\" assert dn2.get_label() == \"Today > dn2\" assert dn2.get_simple_label() == \"dn2\" assert dn3.get_label() == \"Today > dn3\" assert dn3.get_simple_label() == \"dn3\" assert dn4.get_label() == \"Today > My Name > dn4\" assert dn4.get_simple_label() == \"dn4\" assert dn5.get_label() == \"Today > My Name > dn5\" assert dn5.get_simple_label() == \"dn5\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from src.taipy.core._entity._entity_ids import _EntityIds class TestEntityIds: def test_add_two_entity_ids(self): entity_ids_1 = _EntityIds() entity_ids_2 = _EntityIds() entity_ids_1_address = id(entity_ids_1) entity_ids_1.data_node_ids.update([\"data_node_id_1\", \"data_node_id_2\"]) entity_ids_1.task_ids.update([\"task_id_1\", \"task_id_2\"]) entity_ids_1.job_ids.update([\"job_id_1\", \"job_id_2\"]) entity_ids_1.sequence_ids.update([\"sequence_id_1\", \"sequence_id_2\"]) entity_ids_1.scenario_ids.update([\"scenario_id_1\", \"scenario_id_2\"]) entity_ids_1.cycle_ids.update([\"cycle_id_1\", \"cycle_id_2\"]) entity_ids_2.data_node_ids.update([\"data_node_id_2\", \"data_node_id_3\"]) entity_ids_2.task_ids.update([\"task_id_2\", \"task_id_3\"]) entity_ids_2.job_ids.update([\"job_id_2\", \"job_id_3\"]) entity_ids_2.sequence_ids.update([\"sequence_id_2\", \"sequence_id_3\"]) entity_ids_2.scenario_ids.update([\"scenario_id_2\", \"scenario_id_3\"]) entity_ids_2.cycle_ids.update([\"cycle_id_2\", \"cycle_id_3\"]) entity_ids_1 += entity_ids_2 # += operator should not change the address of entity_ids_1 assert id(entity_ids_1) == entity_ids_1_address assert entity_ids_1.data_node_ids == {\"data_node_id_1\", \"data_node_id_2\", \"data_node_id_3\"} assert entity_ids_1.task_ids == {\"task_id_1\", \"task_id_2\", \"task_id_3\"} assert entity_ids_1.job_ids == {\"job_id_1\", \"job_id_2\", \"job_id_3\"} assert entity_ids_1.sequence_ids == {\"sequence_id_1\", \"sequence_id_2\", \"sequence_id_3\"} assert entity_ids_1.scenario_ids == {\"scenario_id_1\", \"scenario_id_2\", \"scenario_id_3\"} assert entity_ids_1.cycle_ids == {\"cycle_id_1\", \"cycle_id_2\", \"cycle_id_3\"} "} {"text": "import filecmp import os import shutil import sys from unittest.mock import patch import mongomock import pytest from src.taipy.core._entity._migrate_cli import _MigrateCLI @pytest.fixture(scope=\"function\", autouse=True) def clean_data_folder(): if os.path.exists(\"tests/core/_entity/.data\"): shutil.rmtree(\"tests/core/_entity/.data\") yield def test_migrate_fs_default(caplog): _MigrateCLI.create_parser() # Test migrate with default .data folder with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", \"--skip-backup\"]): _MigrateCLI.parse_arguments() assert \"Starting entity migration from '.data' folder\" in caplog.text def test_migrate_fs_specified_folder(caplog): _MigrateCLI.create_parser() # Copy data_sample to .data folder for testing data_sample_path = \"tests/core/_entity/data_sample\" data_path = \"tests/core/_entity/.data\" shutil.copytree(data_sample_path, data_path) # Run with --skip-backup to only test the migration with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path, \"--skip-backup\"]): _MigrateCLI.parse_arguments() assert f\"Starting entity migration from '{data_path}' folder\" in caplog.text # Compare migrated .data folder with data_sample_migrated dircmp_result = filecmp.dircmp(data_path, \"tests/core/_entity/data_sample_migrated\") assert not 
dircmp_result.diff_files and not dircmp_result.left_only and not dircmp_result.right_only for subdir in dircmp_result.subdirs.values(): assert not subdir.diff_files and not subdir.left_only and not subdir.right_only def test_migrate_fs_backup_and_remove(caplog): _MigrateCLI.create_parser() # Copy data_sample to .data folder for testing data_sample_path = \"tests/core/_entity/data_sample\" data_path = \"tests/core/_entity/.data\" backup_path = \"tests/core/_entity/.data_backup\" shutil.copytree(data_sample_path, data_path) # Remove backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path, \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup folder '{backup_path}' does not exist.\" in caplog.text assert not os.path.exists(backup_path) # Run without --skip-backup to create the backup folder with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path]): _MigrateCLI.parse_arguments() assert f\"Backed up entities from '{data_path}' to '{backup_path}' folder before migration.\" in caplog.text assert os.path.exists(backup_path) # Remove backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path, \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert f\"Removed backup entities from the backup folder '{backup_path}'.\" in caplog.text assert not os.path.exists(backup_path) def test_migrate_fs_backup_and_restore(caplog): _MigrateCLI.create_parser() # Copy data_sample to .data folder for testing data_sample_path = \"tests/core/_entity/data_sample\" data_path = \"tests/core/_entity/.data\" backup_path = \"tests/core/_entity/.data_backup\" shutil.copytree(data_sample_path, data_path) # Restore backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path, \"--restore\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup folder '{backup_path}' does not exist.\" in caplog.text assert not os.path.exists(backup_path) # Run without --skip-backup to create the backup folder with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path]): _MigrateCLI.parse_arguments() assert os.path.exists(backup_path) # Restore the backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", data_path, \"--restore\"]): _MigrateCLI.parse_arguments() assert f\"Restored entities from the backup folder '{backup_path}' to '{data_path}'.\" in caplog.text assert not os.path.exists(backup_path) # Compare migrated .data folder with data_sample to ensure restoring the backup worked dircmp_result = filecmp.dircmp(data_path, \"tests/core/_entity/data_sample\") assert not dircmp_result.diff_files and not dircmp_result.left_only and not dircmp_result.right_only for subdir in dircmp_result.subdirs.values(): assert not subdir.diff_files and not subdir.left_only and not subdir.right_only def test_migrate_fs_non_existing_folder(caplog): _MigrateCLI.create_parser() # Test migrate with a non-existing folder with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"filesystem\", 
\"non-existing-folder\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert \"Folder 'non-existing-folder' does not exist.\" in caplog.text @patch(\"src.taipy.core._entity._migrate_cli._migrate_sql_entities\") def test_migrate_sql_specified_path(_migrate_sql_entities_mock, tmp_sqlite): _MigrateCLI.create_parser() # Test the _migrate_sql_entities is called once with the correct path with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite, \"--skip-backup\"]): _MigrateCLI.parse_arguments() assert _migrate_sql_entities_mock.assert_called_once_with(path=tmp_sqlite) def test_migrate_sql_backup_and_remove(caplog, tmp_sqlite): _MigrateCLI.create_parser() # Create the .sqlite file to test with open(tmp_sqlite, \"w\") as f: f.write(\"\") file_name, file_extension = tmp_sqlite.rsplit(\".\", 1) backup_sqlite = f\"{file_name}_backup.{file_extension}\" # Remove backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite, \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup database '{backup_sqlite}' does not exist.\" in caplog.text assert not os.path.exists(backup_sqlite) # Run without --skip-backup to create the backup database with pytest.raises(Exception): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite]): _MigrateCLI.parse_arguments() assert os.path.exists(backup_sqlite) # Remove backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite, \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert f\"Removed backup entities from the backup database '{backup_sqlite}'.\" in caplog.text assert not os.path.exists(backup_sqlite) @pytest.mark.skipif(sys.platform == \"win32\", reason=\"Does not run on windows due to PermissionError: [WinError 32]\") def test_migrate_sql_backup_and_restore(caplog, tmp_sqlite): _MigrateCLI.create_parser() # Create the .sqlite file to test with open(tmp_sqlite, \"w\") as f: f.write(\"\") file_name, file_extension = tmp_sqlite.rsplit(\".\", 1) backup_sqlite = f\"{file_name}_backup.{file_extension}\" # Restore backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite, \"--restore\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup database '{backup_sqlite}' does not exist.\" in caplog.text assert not os.path.exists(backup_sqlite) # Run without --skip-backup to create the backup database with pytest.raises(Exception): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite]): _MigrateCLI.parse_arguments() assert os.path.exists(backup_sqlite) # Restore the backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", tmp_sqlite, \"--restore\"]): _MigrateCLI.parse_arguments() assert f\"Restored entities from the backup database '{backup_sqlite}' to '{tmp_sqlite}'.\" in caplog.text assert not os.path.exists(backup_sqlite) def test_migrate_sql_non_existing_path(caplog): _MigrateCLI.create_parser() # Test migrate without providing a path with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\"]): 
_MigrateCLI.parse_arguments() assert err.value.code == 1 assert \"Missing the required sqlite path.\" in caplog.text caplog.clear() # Test migrate with a non-existing-path.sqlite file with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"sql\", \"non-existing-path.sqlite\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert \"File 'non-existing-path.sqlite' does not exist.\" in caplog.text @patch(\"src.taipy.core._entity._migrate_cli._migrate_mongo_entities\") def test_call_to_migrate_mongo(_migrate_mongo_entities_mock): _MigrateCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\"]): _MigrateCLI.parse_arguments() assert _migrate_mongo_entities_mock.assert_called_once_with() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\", \"host\", \"port\", \"user\", \"password\"]): _MigrateCLI.parse_arguments() assert _migrate_mongo_entities_mock.assert_called_once_with(\"host\", \"port\", \"user\", \"password\") @mongomock.patch(servers=((\"localhost\", 27017),)) def test_migrate_mongo_backup_and_remove(caplog): _MigrateCLI.create_parser() mongo_backup_path = \".mongo_backup\" # Remove backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\", \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup folder '{mongo_backup_path}' does not exist.\" in caplog.text assert not os.path.exists(mongo_backup_path) # Run without --skip-backup to create the backup database with pytest.raises(Exception): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\"]): _MigrateCLI.parse_arguments() assert os.path.exists(mongo_backup_path) # Remove backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\", \"--remove-backup\"]): _MigrateCLI.parse_arguments() assert f\"Removed backup entities from the backup folder '{mongo_backup_path}'.\" in caplog.text assert not os.path.exists(mongo_backup_path) @mongomock.patch(servers=((\"localhost\", 27017),)) def test_migrate_mongo_backup_and_restore(caplog): _MigrateCLI.create_parser() mongo_backup_path = \".mongo_backup\" # Restore backup when it does not exist should raise an error with pytest.raises(SystemExit) as err: with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\", \"--restore\"]): _MigrateCLI.parse_arguments() assert err.value.code == 1 assert f\"The backup folder '{mongo_backup_path}' does not exist.\" in caplog.text assert not os.path.exists(mongo_backup_path) # Run without --skip-backup to create the backup database with pytest.raises(Exception): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\"]): _MigrateCLI.parse_arguments() assert os.path.exists(mongo_backup_path) # Restore the backup with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"mongo\", \"--restore\"]): _MigrateCLI.parse_arguments() assert f\"Restored entities from the backup folder '{mongo_backup_path}'.\" in caplog.text assert not os.path.exists(mongo_backup_path) def test_not_provide_valid_repository_type(caplog): _MigrateCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\"]): 
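# Omitting --repository-type entirely makes argparse report the missing required argument and exit, which pytest.raises(SystemExit) captures; the logged message is checked below.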
_MigrateCLI.parse_arguments() assert \"the following arguments are required: --repository-type\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\"]): _MigrateCLI.parse_arguments() assert \"argument --repository-type: expected at least one argument\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"migrate\", \"--repository-type\", \"invalid-repository-type\"]): _MigrateCLI.parse_arguments() assert \"Unknown repository type invalid-repository-type\" in caplog.text "} {"text": "import pytest from src.taipy.core.common._utils import _retry_read_entity from taipy.config import Config def test_retry_decorator(mocker): func = mocker.Mock(side_effect=Exception()) @_retry_read_entity((Exception,)) def decorated_func(): func() with pytest.raises(Exception): decorated_func() # Called once in the normal flow and no retry # The Config.core.read_entity_retry is set to 0 at conftest.py assert Config.core.read_entity_retry == 0 assert func.call_count == 1 func.reset_mock() Config.core.read_entity_retry = 3 with pytest.raises(Exception): decorated_func() # Called once in the normal flow and 3 more times on the retry flow assert func.call_count == 4 def test_retry_decorator_exception_not_in_list(mocker): func = mocker.Mock(side_effect=KeyError()) Config.core.read_entity_retry = 3 @_retry_read_entity((Exception,)) def decorated_func(): func() with pytest.raises(KeyError): decorated_func() # Called only on the first time and not trigger retry because KeyError is not on the exceptions list assert func.called == 1 "} {"text": "from src.taipy.core.common.warn_if_inputs_not_ready import _warn_if_inputs_not_ready from src.taipy.core.data._data_manager_factory import _DataManagerFactory from taipy.config import Config def test_warn_inputs_all_not_ready(caplog): one = Config.configure_data_node(\"one\") two = Config.configure_data_node(\"two\") three = Config.configure_data_node(\"three\") data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}).values() _warn_if_inputs_not_ready(data_nodes) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in data_nodes ] assert all([expected_output in stdout for expected_output in expected_outputs]) def test_warn_inputs_all_ready(caplog): one = Config.configure_data_node(\"one\", default_data=1) two = Config.configure_data_node(\"two\", default_data=2) three = Config.configure_data_node(\"three\", default_data=3) data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}).values() _warn_if_inputs_not_ready(data_nodes) stdout = caplog.text not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in data_nodes ] assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_warn_inputs_one_ready(caplog): one = Config.configure_data_node(\"one\", default_data=1) two = Config.configure_data_node(\"two\") three = Config.configure_data_node(\"three\") data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}) _warn_if_inputs_not_ready(data_nodes.values()) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. 
Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [data_nodes[two], data_nodes[three]] ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [data_nodes[one]] ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_submit_task_with_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") excel_dn_cfg = Config.configure_excel_data_node(\"wrong_excel_file_path\", default_path=\"wrong_path.xlsx\") json_dn_cfg = Config.configure_json_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_path=\"wrong_path.pickle\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") input_dn_cfgs = [csv_dn_cfg, excel_dn_cfg, json_dn_cfg, pickle_dn_cfg, parquet_dn_cfg] dn_manager = _DataManagerFactory._build_manager() dns = [dn_manager._bulk_get_or_create([input_dn_cfg])[input_dn_cfg] for input_dn_cfg in input_dn_cfgs] _warn_if_inputs_not_ready(dns) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in dns ] assert all([expected_output in stdout for expected_output in expected_outputs]) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from datetime import datetime, timedelta from typing import Callable, Iterable, Optional from unittest.mock import ANY, patch import pytest from src.taipy.core import Job from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.common import _utils from src.taipy.core.common._utils import _Subscriber from src.taipy.core.config.job_config import JobConfig from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import ( DeletingPrimaryScenario, DifferentScenarioConfigs, InsufficientScenarioToCompare, NonExistingComparator, NonExistingScenario, NonExistingScenarioConfig, NonExistingTask, SequenceTaskConfigDoesNotExistInSameScenarioConfig, UnauthorizedTagError, ) from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.scenario.scenario_id import ScenarioId from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils import assert_true_after_time from tests.core.utils.NotifyMock import NotifyMock def test_set_and_get_scenario(cycle): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() scenario_id_1 = ScenarioId(\"scenario_id_1\") scenario_1 = Scenario(\"scenario_name_1\", [], {}, [], scenario_id_1) input_dn_2 = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_dn_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO) additional_dn_2 = InMemoryDataNode(\"zyx\", Scope.SCENARIO) task_name_2 = \"task_2\" task_2 = Task(task_name_2, {}, print, [input_dn_2], [output_dn_2], TaskId(\"task_id_2\")) scenario_id_2 = ScenarioId(\"scenario_id_2\") scenario_2 = Scenario( \"scenario_name_2\", [task_2], {}, [additional_dn_2], scenario_id_2, datetime.now(), True, cycle, sequences={\"sequence_2\": {\"tasks\": [task_2]}}, ) additional_dn_3 = InMemoryDataNode(\"baz\", Scope.SCENARIO) task_name_3 = \"task_3\" task_3 = Task(task_name_3, {}, print, id=TaskId(\"task_id_3\")) scenario_3_with_same_id = Scenario( \"scenario_name_3\", [task_3], {}, [additional_dn_3], scenario_id_1, datetime.now(), False, cycle, sequences={\"sequence_3\": {}}, ) # No existing scenario assert len(_ScenarioManager._get_all()) == 0 assert _ScenarioManager._get(scenario_id_1) is None assert _ScenarioManager._get(scenario_1) is None assert _ScenarioManager._get(scenario_id_2) is None assert _ScenarioManager._get(scenario_2) is None # Save one scenario. 
We expect to have only one scenario stored _ScenarioManager._set(scenario_1) assert len(_ScenarioManager._get_all()) == 1 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2) is None assert _ScenarioManager._get(scenario_2) is None # Save a second scenario. Now, we expect to have a total of two scenarios stored _TaskManager._set(task_2) _CycleManager._set(cycle) _ScenarioManager._set(scenario_2) _DataManager._set(additional_dn_2) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id assert _ScenarioManager._get(scenario_id_2).cycle == cycle assert _ScenarioManager._get(scenario_2).cycle == cycle assert _CycleManager._get(cycle.id).id == cycle.id # We save the first scenario again. 
We expect nothing to change _ScenarioManager._set(scenario_1) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id assert _CycleManager._get(cycle.id).id == cycle.id # We save a third scenario with same id as the first one. 
# We expect the first scenario to be updated _DataManager._set(additional_dn_3) _TaskManager._set(task_3) _TaskManager._set(scenario_2.tasks[task_name_2]) _ScenarioManager._set(scenario_3_with_same_id) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_3_with_same_id.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 1 assert _ScenarioManager._get(scenario_id_1).cycle == cycle assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_3_with_same_id.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 1 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 1 assert len(_ScenarioManager._get(scenario_1).sequences) == 1 assert _ScenarioManager._get(scenario_1).cycle == cycle assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id def test_raise_sequence_task_configs_not_in_scenario_config(): data_node = Config.configure_pickle_data_node(\"temp\") task_config_1 = Config.configure_task(\"task_1\", print, output=[data_node]) task_config_2 = Config.configure_task(\"task_2\", print, input=[data_node]) scenario_config_1 = Config.configure_scenario(\"scenario_1\") scenario_config_1.add_sequences({\"sequence_0\": []}) _ScenarioManager._create(scenario_config_1) scenario_config_1.add_sequences({\"sequence_1\": [task_config_1]}) with pytest.raises(SequenceTaskConfigDoesNotExistInSameScenarioConfig) as err: _ScenarioManager._create(scenario_config_1) assert err.value.args == ([task_config_1.id], \"sequence_1\", scenario_config_1.id) scenario_config_1._tasks = [task_config_1] _ScenarioManager._create(scenario_config_1) scenario_config_1.add_sequences({\"sequence_2\": [task_config_1]}) _ScenarioManager._create(scenario_config_1) scenario_config_1.add_sequences({\"sequence_3\": [task_config_1, task_config_2]}) with pytest.raises(SequenceTaskConfigDoesNotExistInSameScenarioConfig) as err: _ScenarioManager._create(scenario_config_1) assert err.value.args == ([task_config_2.id], \"sequence_3\", scenario_config_1.id) scenario_config_1._tasks = [task_config_1, task_config_2] _ScenarioManager._create(scenario_config_1) def test_get_all_on_multiple_versions_environment(): # Create 5 scenarios with 2 versions each # Only version 1.0 has the scenario with config_id = \"config_id_1\" # Only version 2.0 has the scenario with config_id = 
\"config_id_6\" for version in range(1, 3): for i in range(5): _ScenarioManager._set( Scenario(f\"config_id_{i+version}\", [], {}, [], ScenarioId(f\"id{i}_v{version}\"), version=f\"{version}.0\") ) _VersionManager._set_experiment_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"2.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_create_scenario_does_not_modify_config(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) creation_date_1 = datetime.now() name_1 = \"name_1\" scenario_config = Config.configure_scenario(\"sc\", None, None, Frequency.DAILY) assert scenario_config.properties.get(\"name\") is None assert len(scenario_config.properties) == 0 _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) assert len(scenario_config.properties) == 0 assert len(scenario.properties) == 1 assert scenario.properties.get(\"name\") == name_1 assert scenario.name == name_1 scenario.properties[\"foo\"] = \"bar\" _ScenarioManager._set(scenario) assert len(scenario_config.properties) == 0 assert len(scenario.properties) == 2 assert scenario.properties.get(\"foo\") == \"bar\" assert scenario.properties.get(\"name\") == name_1 assert scenario.name == name_1 scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1) assert scenario_2.name is None def test_create_and_delete_scenario(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) creation_date_1 = datetime.now() creation_date_2 = creation_date_1 + timedelta(minutes=10) name_1 = \"name_1\" _ScenarioManager._delete_all() assert len(_ScenarioManager._get_all()) == 0 scenario_config = Config.configure_scenario(\"sc\", None, None, Frequency.DAILY) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) assert scenario_1.config_id == \"sc\" assert scenario_1.sequences == {} assert scenario_1.tasks == {} assert scenario_1.additional_data_nodes == {} assert scenario_1.data_nodes == {} assert scenario_1.cycle.frequency == Frequency.DAILY assert scenario_1.is_primary assert scenario_1.cycle.creation_date == creation_date_1 assert scenario_1.cycle.start_date.date() == creation_date_1.date() assert scenario_1.cycle.end_date.date() == creation_date_1.date() assert scenario_1.creation_date == creation_date_1 assert 
scenario_1.name == name_1 assert scenario_1.properties[\"name\"] == name_1 assert scenario_1.tags == set() cycle_id_1 = scenario_1.cycle.id assert _CycleManager._get(cycle_id_1).id == cycle_id_1 _ScenarioManager._delete(scenario_1.id) assert _ScenarioManager._get(scenario_1.id) is None assert _CycleManager._get(cycle_id_1) is None # Recreate scenario_1 scenario_1 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date_2) assert scenario_2.config_id == \"sc\" assert scenario_2.sequences == {} assert scenario_2.tasks == {} assert scenario_2.additional_data_nodes == {} assert scenario_2.data_nodes == {} assert scenario_2.cycle.frequency == Frequency.DAILY assert not scenario_2.is_primary assert scenario_2.cycle.creation_date == creation_date_1 assert scenario_2.cycle.start_date.date() == creation_date_2.date() assert scenario_2.cycle.end_date.date() == creation_date_2.date() assert scenario_2.properties.get(\"name\") is None assert scenario_2.tags == set() assert scenario_1 != scenario_2 assert scenario_1.cycle == scenario_2.cycle assert len(_ScenarioManager._get_all()) == 2 with pytest.raises(DeletingPrimaryScenario): _ScenarioManager._delete( scenario_1.id, ) _ScenarioManager._delete( scenario_2.id, ) assert len(_ScenarioManager._get_all()) == 1 _ScenarioManager._delete(scenario_1.id) assert len(_ScenarioManager._get_all()) == 0 def test_is_deletable(): assert len(_ScenarioManager._get_all()) == 0 scenario_config = Config.configure_scenario(\"sc\", None, None, Frequency.DAILY) creation_date = datetime.now() scenario_1_primary = _ScenarioManager._create(scenario_config, creation_date=creation_date, name=\"1\") scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date, name=\"2\") assert len(_ScenarioManager._get_all()) == 2 assert scenario_1_primary.is_primary assert not _ScenarioManager._is_deletable(scenario_1_primary) assert not _ScenarioManager._is_deletable(scenario_1_primary.id) assert not scenario_2.is_primary assert _ScenarioManager._is_deletable(scenario_2) assert _ScenarioManager._is_deletable(scenario_2.id) _ScenarioManager._hard_delete(scenario_2.id) del scenario_2 assert len(_ScenarioManager._get_all()) == 1 assert scenario_1_primary.is_primary assert _ScenarioManager._is_deletable(scenario_1_primary) assert _ScenarioManager._is_deletable(scenario_1_primary.id) def test_assign_scenario_as_parent_of_task_and_additional_data_nodes(): dn_config_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.GLOBAL) dn_config_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.GLOBAL) dn_config_3 = Config.configure_data_node(\"dn_3\", \"in_memory\", scope=Scope.SCENARIO) additional_dn_config_1 = Config.configure_data_node(\"additional_dn_1\", \"in_memory\", scope=Scope.GLOBAL) additional_dn_config_2 = Config.configure_data_node(\"additional_dn_2\", \"in_memory\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"task_1\", print, [dn_config_1], [dn_config_2]) task_config_2 = Config.configure_task(\"task_2\", print, [dn_config_2], [dn_config_3]) task_config_3 = Config.configure_task(\"task_3\", print, [dn_config_2], [dn_config_3]) scenario_config_1 = Config.configure_scenario( \"scenario_1\", [task_config_1, task_config_2], [additional_dn_config_1, additional_dn_config_2] ) scenario_config_1.add_sequences({\"sequence_1\": [task_config_1, task_config_2]}) scenario_config_2 = Config.configure_scenario( \"scenario_2\", 
[task_config_1, task_config_2, task_config_3], [additional_dn_config_1, additional_dn_config_2] ) scenario_config_2.add_sequences( {\"sequence_1\": [task_config_1, task_config_2], \"sequence_2\": [task_config_1, task_config_3]} ) scenario_1 = _ScenarioManager._create(scenario_config_1) sequence_1_s1 = scenario_1.sequences[\"sequence_1\"] assert all([sequence.parent_ids == {scenario_1.id} for sequence in scenario_1.sequences.values()]) tasks = scenario_1.tasks.values() assert all([task.parent_ids == {scenario_1.id, sequence_1_s1.id} for task in tasks]) data_nodes = {} for task in tasks: data_nodes.update(task.data_nodes) assert data_nodes[\"dn_1\"].parent_ids == {scenario_1.tasks[\"task_1\"].id} assert data_nodes[\"dn_2\"].parent_ids == {scenario_1.tasks[\"task_1\"].id, scenario_1.tasks[\"task_2\"].id} assert data_nodes[\"dn_3\"].parent_ids == {scenario_1.tasks[\"task_2\"].id} additional_data_nodes = scenario_1.additional_data_nodes assert additional_data_nodes[\"additional_dn_1\"].parent_ids == {scenario_1.id} assert additional_data_nodes[\"additional_dn_2\"].parent_ids == {scenario_1.id} scenario_2 = _ScenarioManager._create(scenario_config_2) sequence_1_s2 = scenario_2.sequences[\"sequence_1\"] sequence_2_s2 = scenario_2.sequences[\"sequence_2\"] assert all([sequence.parent_ids == {scenario_2.id} for sequence in scenario_2.sequences.values()]) assert scenario_1.tasks[\"task_1\"] == scenario_2.tasks[\"task_1\"] assert scenario_1.tasks[\"task_1\"].parent_ids == { scenario_1.id, sequence_1_s1.id, scenario_2.id, sequence_1_s2.id, sequence_2_s2.id, } assert scenario_1.tasks[\"task_2\"].parent_ids == {scenario_1.id, sequence_1_s1.id} assert scenario_2.tasks[\"task_2\"].parent_ids == {scenario_2.id, sequence_1_s2.id} assert scenario_2.tasks[\"task_3\"].parent_ids == {scenario_2.id, sequence_2_s2.id} additional_data_nodes = scenario_2.additional_data_nodes assert additional_data_nodes[\"additional_dn_1\"].parent_ids == {scenario_1.id, scenario_2.id} assert additional_data_nodes[\"additional_dn_2\"].parent_ids == {scenario_2.id} _ScenarioManager._hard_delete(scenario_1.id) _ScenarioManager._hard_delete(scenario_2.id) _TaskManager._delete_all() _DataManager._delete_all() dn_config_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.GLOBAL) dn_config_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.GLOBAL) dn_config_3 = Config.configure_data_node(\"dn_3\", \"in_memory\", scope=Scope.GLOBAL) additional_dn_config_1 = Config.configure_data_node(\"additional_dn_1\", \"in_memory\", scope=Scope.GLOBAL) additional_dn_config_2 = Config.configure_data_node(\"additional_dn_2\", \"in_memory\", scope=Scope.GLOBAL) task_config_1 = Config.configure_task(\"task_1\", print, [dn_config_1], [dn_config_2]) task_config_2 = Config.configure_task(\"task_2\", print, [dn_config_2], [dn_config_3]) task_config_3 = Config.configure_task(\"task_3\", print, [dn_config_2], [dn_config_3]) scenario_config_1 = Config.configure_scenario( \"scenario_1\", [task_config_1, task_config_2], [additional_dn_config_1, additional_dn_config_2] ) scenario_config_1.add_sequences({\"sequence_1\": [task_config_1, task_config_2]}) scenario_config_2 = Config.configure_scenario( \"scenario_2\", [task_config_1, task_config_2, task_config_3], [additional_dn_config_1, additional_dn_config_2] ) scenario_config_2.add_sequences( {\"sequence_1\": [task_config_1, task_config_2], \"sequence_2\": [task_config_1, task_config_3]} ) scenario_1 = _ScenarioManager._create(scenario_config_1) sequence_1_s1 = 
scenario_1.sequences[\"sequence_1\"] assert scenario_1.sequences[\"sequence_1\"].parent_ids == {scenario_1.id} tasks = scenario_1.tasks.values() assert all([task.parent_ids == {scenario_1.id, sequence_1_s1.id} for task in tasks]) data_nodes = {} for task in tasks: data_nodes.update(task.data_nodes) assert data_nodes[\"dn_1\"].parent_ids == {scenario_1.tasks[\"task_1\"].id} assert data_nodes[\"dn_2\"].parent_ids == {scenario_1.tasks[\"task_1\"].id, scenario_1.tasks[\"task_2\"].id} assert data_nodes[\"dn_3\"].parent_ids == {scenario_1.tasks[\"task_2\"].id} additional_data_nodes = scenario_1.additional_data_nodes assert additional_data_nodes[\"additional_dn_1\"].parent_ids == {scenario_1.id} assert additional_data_nodes[\"additional_dn_2\"].parent_ids == {scenario_1.id} scenario_2 = _ScenarioManager._create(scenario_config_2) sequence_1_s2 = scenario_2.sequences[\"sequence_1\"] sequence_2_s2 = scenario_2.sequences[\"sequence_2\"] assert scenario_1.sequences[\"sequence_1\"].parent_ids == {scenario_1.id} assert scenario_2.sequences[\"sequence_1\"].parent_ids == {scenario_2.id} assert scenario_2.sequences[\"sequence_2\"].parent_ids == {scenario_2.id} tasks = {**scenario_1.tasks, **scenario_2.tasks} assert tasks[\"task_1\"].parent_ids == { scenario_1.id, scenario_2.id, sequence_1_s1.id, sequence_1_s2.id, sequence_2_s2.id, } assert tasks[\"task_2\"].parent_ids == {scenario_1.id, scenario_2.id, sequence_1_s1.id, sequence_1_s2.id} assert tasks[\"task_3\"].parent_ids == {scenario_2.id, sequence_2_s2.id} additional_data_nodes = scenario_2.additional_data_nodes assert additional_data_nodes[\"additional_dn_1\"].parent_ids == {scenario_1.id, scenario_2.id} assert additional_data_nodes[\"additional_dn_2\"].parent_ids == {scenario_1.id, scenario_2.id} def mult_by_2(nb: int): return nb * 2 def mult_by_3(nb: int): return nb * 3 def mult_by_4(nb: int): return nb * 4 def test_scenario_manager_only_creates_data_node_once(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) # dn_1 ---> mult_by_2 ---> dn_2 ---> mult_by_3 ---> dn_6 # dn_1 ---> mult_by_4 ---> dn_4 dn_config_1 = Config.configure_data_node(\"foo\", \"in_memory\", Scope.GLOBAL, default_data=1) dn_config_2 = Config.configure_data_node(\"bar\", \"in_memory\", Scope.CYCLE, default_data=0) dn_config_6 = Config.configure_data_node(\"baz\", \"in_memory\", Scope.CYCLE, default_data=0) dn_config_4 = Config.configure_data_node(\"qux\", \"in_memory\", Scope.SCENARIO, default_data=0) task_mult_by_2_config = Config.configure_task(\"mult_by_2\", mult_by_2, [dn_config_1], dn_config_2) task_mult_by_3_config = Config.configure_task(\"mult_by_3\", mult_by_3, [dn_config_2], dn_config_6) task_mult_by_4_config = Config.configure_task(\"mult_by_4\", mult_by_4, [dn_config_1], dn_config_4) scenario_config = Config.configure_scenario( \"awesome_scenario\", [task_mult_by_2_config, task_mult_by_3_config, task_mult_by_4_config], None, Frequency.DAILY ) scenario_config.add_sequences( {\"by_6\": [task_mult_by_2_config, task_mult_by_3_config], \"by_4\": [task_mult_by_4_config]} ) _OrchestratorFactory._build_dispatcher() assert len(_DataManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 0 assert len(_SequenceManager._get_all()) == 0 assert len(_ScenarioManager._get_all()) == 0 assert len(_CycleManager._get_all()) == 0 scenario_1 = _ScenarioManager._create(scenario_config) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 3 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 1 assert 
scenario_1.foo.read() == 1 assert scenario_1.bar.read() == 0 assert scenario_1.baz.read() == 0 assert scenario_1.qux.read() == 0 assert scenario_1.by_6._get_sorted_tasks()[0][0].config_id == task_mult_by_2_config.id assert scenario_1.by_6._get_sorted_tasks()[1][0].config_id == task_mult_by_3_config.id assert scenario_1.by_4._get_sorted_tasks()[0][0].config_id == task_mult_by_4_config.id assert scenario_1.tasks.keys() == {task_mult_by_2_config.id, task_mult_by_3_config.id, task_mult_by_4_config.id} scenario_1_sorted_tasks = scenario_1._get_sorted_tasks() expected = [{task_mult_by_2_config.id, task_mult_by_4_config.id}, {task_mult_by_3_config.id}] for i, list_tasks_by_level in enumerate(scenario_1_sorted_tasks): assert set([t.config_id for t in list_tasks_by_level]) == expected[i] assert scenario_1.cycle.frequency == Frequency.DAILY _ScenarioManager._create(scenario_config) assert len(_DataManager._get_all()) == 5 assert len(_TaskManager._get_all()) == 4 assert len(_SequenceManager._get_all()) == 4 assert len(_ScenarioManager._get_all()) == 2 def test_notification_subscribe(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) scenario_config = Config.configure_scenario( \"awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], ) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) notify_1 = NotifyMock(scenario) notify_2 = NotifyMock(scenario) mocker.patch.object(_utils, \"_load_fct\", side_effect=[notify_1, notify_2]) # test subscribing notification _ScenarioManager._subscribe(callback=notify_1, scenario=scenario) _ScenarioManager._submit(scenario) notify_1.assert_called_3_times() notify_1.reset() # test unsubscribing notification # test notis subscribe only on new jobs # _ScenarioManager._get(scenario) _ScenarioManager._unsubscribe(callback=notify_1, scenario=scenario) _ScenarioManager._subscribe(callback=notify_2, scenario=scenario) _ScenarioManager._submit(scenario) notify_1.assert_not_called() notify_2.assert_called_3_times() class Notify: def __call__(self, *args, **kwargs): self.args = args def assert_called_with(self, args): assert args in self.args def test_notification_subscribe_multiple_params(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) scenario_config = Config.configure_scenario( \"awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], ) notify = mocker.Mock() _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._subscribe(callback=notify, params=[\"foobar\", 123, 1.2], scenario=scenario) mocker.patch.object(_ScenarioManager, \"_get\", return_value=scenario) _ScenarioManager._submit(scenario) notify.assert_called_with(\"foobar\", 123, 1.2, scenario, ANY) def notify_multi_param(param, *args): assert len(param) == 3 def notify1(*args, **kwargs): ... def notify2(*args, **kwargs): ... 
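# Illustrative sketch (not part of the original test module), assuming the subscription
# behavior exercised above: a callback registered with extra `params` receives those params
# first, followed by the scenario and the job whose status changed. The callback name and
# parameter values below are hypothetical.
def example_subscriber(tag, threshold, scenario, job):
    # Invoked once per job status change of the submitted scenario.
    print(f"[{tag}] threshold={threshold}: job {job.id} of scenario {scenario.id} changed status")

# Hypothetical usage, mirroring the calls made in the surrounding tests:
# _ScenarioManager._subscribe(callback=example_subscriber, params=["run", 0.5], scenario=scenario)
# _ScenarioManager._submit(scenario)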
def test_notification_unsubscribe(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) scenario_config = Config.configure_scenario( \"awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], ) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) notify_1 = notify1 notify_2 = notify2 # test subscribing notification _ScenarioManager._subscribe(callback=notify_1, scenario=scenario) _ScenarioManager._unsubscribe(callback=notify_1, scenario=scenario) _ScenarioManager._subscribe(callback=notify_2, scenario=scenario) _ScenarioManager._submit(scenario.id) with pytest.raises(ValueError): _ScenarioManager._unsubscribe(callback=notify_1, scenario=scenario) _ScenarioManager._unsubscribe(callback=notify_2, scenario=scenario) def test_notification_unsubscribe_multi_param(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) scenario_config = Config.configure_scenario( \"awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], ) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) # test subscribing notification _ScenarioManager._subscribe(callback=notify_multi_param, params=[\"foobar\", 123, 0], scenario=scenario) _ScenarioManager._subscribe(callback=notify_multi_param, params=[\"foobar\", 123, 1], scenario=scenario) _ScenarioManager._subscribe(callback=notify_multi_param, params=[\"foobar\", 123, 2], scenario=scenario) assert len(scenario.subscribers) == 3 # if no params are passed, removes the first occurrence of the subscriber when theres more than one copy scenario.unsubscribe(notify_multi_param) assert len(scenario.subscribers) == 2 assert _Subscriber(notify_multi_param, [\"foobar\", 123, 0]) not in scenario.subscribers # If params are passed, find the corresponding pair of callback and params to remove scenario.unsubscribe(notify_multi_param, [\"foobar\", 123, 2]) assert len(scenario.subscribers) == 1 assert _Subscriber(notify_multi_param, [\"foobar\", 123, 2]) not in scenario.subscribers # If params are passed but is not on the list of subscribers, throws a ValueErrors with pytest.raises(ValueError): scenario.unsubscribe(notify_multi_param, [\"foobar\", 123, 10000]) def test_scenario_notification_subscribe_all(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) scenario_config = Config.configure_scenario( \"awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], ) other_scenario_config = Config.configure_scenario( \"other_scenario\", [ Config.configure_task( \"other_mult_by_2_2\", mult_by_2, [Config.configure_data_node(\"other_foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"other_bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], ) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) other_scenario = 
_ScenarioManager._create(other_scenario_config) notify_1 = NotifyMock(scenario) _ScenarioManager._subscribe(notify_1) assert len(_ScenarioManager._get(scenario.id).subscribers) == 1 assert len(_ScenarioManager._get(other_scenario.id).subscribers) == 1 def test_is_promotable_to_primary_scenario(): assert len(_ScenarioManager._get_all()) == 0 scenario_config = Config.configure_scenario(\"sc\", set(), set(), Frequency.DAILY) creation_date = datetime.now() scenario_1 = _ScenarioManager._create(scenario_config, creation_date=creation_date, name=\"1\") # primary scenario scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date, name=\"2\") assert len(_ScenarioManager._get_all()) == 2 assert scenario_1.is_primary assert not _ScenarioManager._is_promotable_to_primary(scenario_1) assert not _ScenarioManager._is_promotable_to_primary(scenario_1.id) assert not scenario_2.is_primary assert _ScenarioManager._is_promotable_to_primary(scenario_2) assert _ScenarioManager._is_promotable_to_primary(scenario_2.id) _ScenarioManager._set_primary(scenario_2) assert len(_ScenarioManager._get_all()) == 2 assert not scenario_1.is_primary assert _ScenarioManager._is_promotable_to_primary(scenario_1) assert _ScenarioManager._is_promotable_to_primary(scenario_1.id) assert scenario_2.is_primary assert not _ScenarioManager._is_promotable_to_primary(scenario_2) assert not _ScenarioManager._is_promotable_to_primary(scenario_2.id) def test_get_set_primary_scenario(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() cycle_1 = _CycleManager._create(Frequency.DAILY, name=\"foo\") scenario_1 = Scenario(\"sc_1\", [], {}, ScenarioId(\"sc_1\"), is_primary=False, cycle=cycle_1) scenario_2 = Scenario(\"sc_2\", [], {}, ScenarioId(\"sc_2\"), is_primary=False, cycle=cycle_1) _ScenarioManager._delete_all() _CycleManager._delete_all() assert len(_ScenarioManager._get_all()) == 0 assert len(_CycleManager._get_all()) == 0 _CycleManager._set(cycle_1) _ScenarioManager._set(scenario_1) _ScenarioManager._set(scenario_2) assert len(_ScenarioManager._get_primary_scenarios()) == 0 assert len(_ScenarioManager._get_all_by_cycle(cycle_1)) == 2 _ScenarioManager._set_primary(scenario_1) assert len(_ScenarioManager._get_primary_scenarios()) == 1 assert len(_ScenarioManager._get_all_by_cycle(cycle_1)) == 2 assert _ScenarioManager._get_primary(cycle_1) == scenario_1 _ScenarioManager._set_primary(scenario_2) assert len(_ScenarioManager._get_primary_scenarios()) == 1 assert len(_ScenarioManager._get_all_by_cycle(cycle_1)) == 2 assert _ScenarioManager._get_primary(cycle_1) == scenario_2 def test_hard_delete_one_single_scenario_with_scenario_data_nodes(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) scenario_config.add_sequences({\"sequence_config\": [task_config]}) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 
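# Illustrative helper (hypothetical, not used by these tests): the hard-delete tests below
# repeatedly compare how many scenarios, sequences, tasks, data nodes and jobs exist before
# and after `_ScenarioManager._hard_delete`. A small counter like this one, relying only on
# the manager classes already imported in this module, summarizes the same checks.
def count_core_entities():
    return {
        "scenarios": len(_ScenarioManager._get_all()),
        "sequences": len(_SequenceManager._get_all()),
        "tasks": len(_TaskManager._get_all()),
        "data_nodes": len(_DataManager._get_all()),
        "jobs": len(_JobManager._get_all()),
    }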
assert len(_JobManager._get_all()) == 1 _ScenarioManager._hard_delete(scenario.id) assert len(_ScenarioManager._get_all()) == 0 assert len(_SequenceManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 0 assert len(_DataManager._get_all()) == 0 assert len(_JobManager._get_all()) == 0 def test_hard_delete_one_scenario_among_two_with_scenario_data_nodes(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) scenario_config.add_sequences({\"sequence_config\": [task_config]}) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config) scenario_2 = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario_1.id) _ScenarioManager._submit(scenario_2.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 2 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 2 _ScenarioManager._hard_delete(scenario_1.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 assert _ScenarioManager._get(scenario_2.id) is not None def test_hard_delete_one_scenario_among_two_with_cycle_data_nodes(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.CYCLE) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) scenario_config.add_sequences({\"sequence_config\": [task_config]}) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config) scenario_2 = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario_1.id) _ScenarioManager._submit(scenario_2.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 2 _ScenarioManager._hard_delete(scenario_1.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 2 assert _ScenarioManager._get(scenario_2.id) is not None def test_hard_delete_shared_entities(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_config_1 = Config.configure_data_node(\"my_input_1\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_config_2 = Config.configure_data_node(\"my_input_2\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_config_3 = Config.configure_data_node(\"my_input_3\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") dn_config_4 = Config.configure_data_node(\"my_input_4\", \"in_memory\", scope=Scope.GLOBAL, 
default_data=\"testing\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_config_1, dn_config_2) task_config_2 = Config.configure_task(\"task_config_2\", print, dn_config_2, dn_config_3) task_config_3 = Config.configure_task(\"task_config_3\", print, dn_config_3, dn_config_4) # scope = global task_config_4 = Config.configure_task(\"task_config_4\", print, dn_config_1) # scope = cycle scenario_config_1 = Config.configure_scenario( \"scenario_config_1\", [task_config_1, task_config_2, task_config_3, task_config_4], frequency=Frequency.WEEKLY, ) scenario_config_1.add_sequences( { \"sequence_config_1\": [task_config_1, task_config_2], \"sequence_config_2\": [task_config_1, task_config_2], \"sequence_config_3\": [task_config_3], \"sequence_config_4\": [task_config_4], } ) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config_1) scenario_2 = _ScenarioManager._create(scenario_config_1) scenario_1.submit() scenario_2.submit() assert len(_CycleManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 8 assert len(_TaskManager._get_all()) == 6 assert len(_DataManager._get_all()) == 5 assert len(_JobManager._get_all()) == 8 _ScenarioManager._hard_delete(scenario_2.id) assert len(_CycleManager._get_all()) == 1 assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 4 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 6 def test_is_submittable(): assert len(_ScenarioManager._get_all()) == 0 dn_config = Config.configure_in_memory_data_node(\"dn\", 10) task_config = Config.configure_task(\"task\", print, [dn_config]) scenario_config = Config.configure_scenario(\"sc\", set([task_config]), set(), Frequency.DAILY) scenario = _ScenarioManager._create(scenario_config) assert len(_ScenarioManager._get_all()) == 1 assert _ScenarioManager._is_submittable(scenario) assert _ScenarioManager._is_submittable(scenario.id) assert not _ScenarioManager._is_submittable(\"Scenario_temp\") scenario.dn.edit_in_progress = True assert not _ScenarioManager._is_submittable(scenario) assert not _ScenarioManager._is_submittable(scenario.id) scenario.dn.edit_in_progress = False assert _ScenarioManager._is_submittable(scenario) assert _ScenarioManager._is_submittable(scenario.id) def test_submit(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = InMemoryDataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = InMemoryDataNode(\"fum\", Scope.SCENARIO, \"s8\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"thud\", {}, print, [data_node_6], [data_node_8], TaskId(\"t5\")) scenario = Scenario( \"scenario_name\", [task_5, task_4, task_2, 
task_1, task_3], {}, [], ScenarioId(\"sce_id\"), ) class MockOrchestrator(_Orchestrator): submit_calls = [] @classmethod def _lock_dn_output_and_create_job( cls, task: Task, submit_id: str, submit_entity_id: str, callbacks: Optional[Iterable[Callable]] = None, force: bool = False, ) -> Job: cls.submit_calls.append(task.id) return super()._lock_dn_output_and_create_job(task, submit_id, submit_entity_id, callbacks, force) with patch(\"src.taipy.core.task._task_manager._TaskManager._orchestrator\", new=MockOrchestrator): with pytest.raises(NonExistingScenario): _ScenarioManager._submit(scenario.id) with pytest.raises(NonExistingScenario): _ScenarioManager._submit(scenario) # scenario and sequence do exist, but tasks does not exist. # We expect an exception to be raised _ScenarioManager._set(scenario) with pytest.raises(NonExistingTask): _ScenarioManager._submit(scenario.id) with pytest.raises(NonExistingTask): _ScenarioManager._submit(scenario) # scenario, sequence, and tasks do exist. # We expect all the tasks to be submitted once, # and respecting specific constraints on the order _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _TaskManager._set(task_4) _TaskManager._set(task_5) _ScenarioManager._submit(scenario.id) submit_calls = _TaskManager._orchestrator().submit_calls assert len(submit_calls) == 5 assert set(submit_calls) == {task_1.id, task_2.id, task_4.id, task_3.id, task_5.id} assert submit_calls.index(task_2.id) < submit_calls.index(task_3.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_3.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_2.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_4.id) _ScenarioManager._submit(scenario) submit_calls = _TaskManager._orchestrator().submit_calls assert len(submit_calls) == 10 assert set(submit_calls) == {task_1.id, task_2.id, task_4.id, task_3.id, task_5.id} assert submit_calls.index(task_2.id) < submit_calls.index(task_3.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_3.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_2.id) assert submit_calls.index(task_1.id) < submit_calls.index(task_4.id) def my_print(a, b): print(a + b) def test_submit_task_with_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_path=\"wrong_path.pickle\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") json_dn_cfg = Config.configure_parquet_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_2_cfg = Config.configure_task(\"task2\", my_print, [csv_dn_cfg, parquet_dn_cfg], json_dn_cfg) scenario_cfg = Config.configure_scenario(\"scenario\", [task_cfg, task_2_cfg]) sc_manager = _ScenarioManagerFactory._build_manager() scenario = sc_manager._create(scenario_cfg) sc_manager._submit(scenario) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in scenario.get_inputs() ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. 
Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in scenario.data_nodes.values() if input_dn not in scenario.get_inputs() ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_submit_task_with_one_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_data=\"value\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") json_dn_cfg = Config.configure_parquet_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_2_cfg = Config.configure_task(\"task2\", my_print, [csv_dn_cfg, parquet_dn_cfg], json_dn_cfg) scenario_cfg = Config.configure_scenario(\"scenario\", [task_cfg, task_2_cfg]) sce_manager = _ScenarioManagerFactory._build_manager() scenario = sce_manager._create(scenario_cfg) sce_manager._submit(scenario) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in scenario.get_inputs() if input_dn.config_id == \"wrong_csv_file_path\" ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in scenario.data_nodes.values() if input_dn.config_id != \"wrong_csv_file_path\" ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def subtraction(n1, n2): return n1 - n2 def addition(n1, n2): return n1 + n2 def test_scenarios_comparison_development_mode(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) scenario_config = Config.configure_scenario( \"Awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], comparators={\"bar\": [subtraction], \"foo\": [subtraction, addition]}, ) _OrchestratorFactory._build_dispatcher() assert scenario_config.comparators is not None scenario_1 = _ScenarioManager._create(scenario_config) scenario_2 = _ScenarioManager._create(scenario_config) with pytest.raises(InsufficientScenarioToCompare): _ScenarioManager._compare(scenario_1, data_node_config_id=\"bar\") scenario_3 = Scenario(\"awesome_scenario_config\", [], {}) with pytest.raises(DifferentScenarioConfigs): _ScenarioManager._compare(scenario_1, scenario_3, data_node_config_id=\"bar\") _ScenarioManager._submit(scenario_1.id) _ScenarioManager._submit(scenario_2.id) bar_comparison = _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"bar\")[\"bar\"] assert bar_comparison[\"subtraction\"] == 0 foo_comparison = _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"foo\")[\"foo\"] assert len(foo_comparison.keys()) == 2 assert foo_comparison[\"addition\"] == 2 assert foo_comparison[\"subtraction\"] == 0 assert len(_ScenarioManager._compare(scenario_1, scenario_2).keys()) == 2 with 
pytest.raises(NonExistingScenarioConfig): _ScenarioManager._compare(scenario_3, scenario_3) with pytest.raises(NonExistingComparator): _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"abc\") def test_scenarios_comparison_standalone_mode(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE) scenario_config = Config.configure_scenario( \"Awesome_scenario\", [ Config.configure_task( \"mult_by_2\", mult_by_2, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ], comparators={\"bar\": [subtraction], \"foo\": [subtraction, addition]}, ) _OrchestratorFactory._build_dispatcher() assert scenario_config.comparators is not None scenario_1 = _ScenarioManager._create(scenario_config) scenario_2 = _ScenarioManager._create(scenario_config) with pytest.raises(InsufficientScenarioToCompare): _ScenarioManager._compare(scenario_1, data_node_config_id=\"bar\") scenario_3 = Scenario(\"awesome_scenario_config\", [], {}) with pytest.raises(DifferentScenarioConfigs): _ScenarioManager._compare(scenario_1, scenario_3, data_node_config_id=\"bar\") _ScenarioManager._submit(scenario_1.id) _ScenarioManager._submit(scenario_2.id) bar_comparison = _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"bar\")[\"bar\"] assert_true_after_time(lambda: bar_comparison[\"subtraction\"] == 0) foo_comparison = _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"foo\")[\"foo\"] assert_true_after_time(lambda: len(foo_comparison.keys()) == 2) assert_true_after_time(lambda: foo_comparison[\"addition\"] == 2) assert_true_after_time(lambda: foo_comparison[\"subtraction\"] == 0) assert_true_after_time(lambda: len(_ScenarioManager._compare(scenario_1, scenario_2).keys()) == 2) with pytest.raises(NonExistingScenarioConfig): _ScenarioManager._compare(scenario_3, scenario_3) with pytest.raises(NonExistingComparator): _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id=\"abc\") def test_tags(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() cycle_1 = _CycleManager._create(Frequency.DAILY, name=\"today\", creation_date=datetime.now()) cycle_2 = _CycleManager._create( Frequency.DAILY, name=\"tomorrow\", creation_date=datetime.now() + timedelta(days=1), ) cycle_3 = _CycleManager._create( Frequency.DAILY, name=\"yesterday\", creation_date=datetime.now() + timedelta(days=-1), ) scenario_no_tag = Scenario(\"scenario_no_tag\", [], {}, [], ScenarioId(\"scenario_no_tag\"), cycle=cycle_1) scenario_1_tag = Scenario( \"scenario_1_tag\", [], {}, [], ScenarioId(\"scenario_1_tag\"), cycle=cycle_1, tags={\"fst\"}, ) scenario_2_tags = Scenario( \"scenario_2_tags\", [], {}, [], ScenarioId(\"scenario_2_tags\"), cycle=cycle_2, tags={\"fst\", \"scd\"}, ) # Test has_tag assert len(scenario_no_tag.tags) == 0 assert not scenario_no_tag.has_tag(\"fst\") assert not scenario_no_tag.has_tag(\"scd\") assert len(scenario_1_tag.tags) == 1 assert scenario_1_tag.has_tag(\"fst\") assert not scenario_1_tag.has_tag(\"scd\") assert len(scenario_2_tags.tags) == 2 assert scenario_2_tags.has_tag(\"fst\") assert scenario_2_tags.has_tag(\"scd\") # test get and set serialize/deserialize tags _CycleManager._set(cycle_1) _CycleManager._set(cycle_2) _CycleManager._set(cycle_3) _ScenarioManager._set(scenario_no_tag) _ScenarioManager._set(scenario_1_tag) _ScenarioManager._set(scenario_2_tags) assert 
len(_ScenarioManager._get(ScenarioId(\"scenario_no_tag\")).tags) == 0 assert not _ScenarioManager._get(ScenarioId(\"scenario_no_tag\")).has_tag(\"fst\") assert not _ScenarioManager._get(ScenarioId(\"scenario_no_tag\")).has_tag(\"scd\") assert len(_ScenarioManager._get(ScenarioId(\"scenario_1_tag\")).tags) == 1 assert \"fst\" in _ScenarioManager._get(ScenarioId(\"scenario_1_tag\")).tags assert \"scd\" not in _ScenarioManager._get(ScenarioId(\"scenario_1_tag\")).tags assert len(_ScenarioManager._get(ScenarioId(\"scenario_2_tags\")).tags) == 2 assert \"fst\" in _ScenarioManager._get(ScenarioId(\"scenario_2_tags\")).tags assert \"scd\" in _ScenarioManager._get(ScenarioId(\"scenario_2_tags\")).tags # Test tag & untag _ScenarioManager._tag(scenario_no_tag, \"thd\") # add new tag _ScenarioManager._untag(scenario_1_tag, \"NOT_EXISTING_TAG\") # remove not existing tag does nothing _ScenarioManager._untag(scenario_1_tag, \"fst\") # remove `fst` tag assert len(scenario_no_tag.tags) == 1 assert not scenario_no_tag.has_tag(\"fst\") assert not scenario_no_tag.has_tag(\"scd\") assert scenario_no_tag.has_tag(\"thd\") assert len(scenario_1_tag.tags) == 0 assert not scenario_1_tag.has_tag(\"fst\") assert not scenario_1_tag.has_tag(\"scd\") assert not scenario_1_tag.has_tag(\"thd\") assert len(scenario_2_tags.tags) == 2 assert scenario_2_tags.has_tag(\"fst\") assert scenario_2_tags.has_tag(\"scd\") assert not scenario_2_tags.has_tag(\"thd\") _ScenarioManager._untag(scenario_no_tag, \"thd\") _ScenarioManager._set(scenario_no_tag) _ScenarioManager._tag(scenario_1_tag, \"fst\") _ScenarioManager._set(scenario_1_tag) # test getters assert not _ScenarioManager._get_by_tag(cycle_3, \"fst\") assert not _ScenarioManager._get_by_tag(cycle_3, \"scd\") assert not _ScenarioManager._get_by_tag(cycle_3, \"thd\") assert _ScenarioManager._get_by_tag(cycle_2, \"fst\") == scenario_2_tags assert _ScenarioManager._get_by_tag(cycle_2, \"scd\") == scenario_2_tags assert not _ScenarioManager._get_by_tag(cycle_2, \"thd\") assert _ScenarioManager._get_by_tag(cycle_1, \"fst\") == scenario_1_tag assert not _ScenarioManager._get_by_tag(cycle_1, \"scd\") assert not _ScenarioManager._get_by_tag(cycle_1, \"thd\") assert len(_ScenarioManager._get_all_by_tag(\"NOT_EXISTING\")) == 0 assert scenario_1_tag in _ScenarioManager._get_all_by_tag(\"fst\") assert scenario_2_tags in _ScenarioManager._get_all_by_tag(\"fst\") assert _ScenarioManager._get_all_by_tag(\"scd\") == [scenario_2_tags] assert len(_ScenarioManager._get_all_by_tag(\"thd\")) == 0 # test tag cycle mgt _ScenarioManager._tag( scenario_no_tag, \"fst\" ) # tag sc_no_tag should untag sc_1_tag with same cycle but not sc_2_tags assert not _ScenarioManager._get_by_tag(cycle_3, \"fst\") assert not _ScenarioManager._get_by_tag(cycle_3, \"scd\") assert not _ScenarioManager._get_by_tag(cycle_3, \"thd\") assert _ScenarioManager._get_by_tag(cycle_2, \"fst\") == scenario_2_tags assert _ScenarioManager._get_by_tag(cycle_2, \"scd\") == scenario_2_tags assert not _ScenarioManager._get_by_tag(cycle_2, \"thd\") assert _ScenarioManager._get_by_tag(cycle_1, \"fst\") == scenario_no_tag assert not _ScenarioManager._get_by_tag(cycle_1, \"scd\") assert not _ScenarioManager._get_by_tag(cycle_1, \"thd\") assert len(_ScenarioManager._get_all_by_tag(\"NOT_EXISTING\")) == 0 assert len(_ScenarioManager._get_all_by_tag(\"fst\")) == 2 assert scenario_2_tags in _ScenarioManager._get_all_by_tag(\"fst\") assert scenario_no_tag in _ScenarioManager._get_all_by_tag(\"fst\") assert 
_ScenarioManager._get_all_by_tag(\"scd\") == [scenario_2_tags] assert len(_ScenarioManager._get_all_by_tag(\"thd\")) == 0 def test_authorized_tags(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) scenario = Scenario(\"scenario_1\", [], {\"authorized_tags\": [\"foo\", \"bar\"]}, [], ScenarioId(\"scenario_1\")) scenario_2_cfg = Config.configure_scenario(\"scenario_2\", [], [], Frequency.DAILY, authorized_tags=[\"foo\", \"bar\"]) _OrchestratorFactory._build_dispatcher() scenario_2 = _ScenarioManager._create(scenario_2_cfg) _ScenarioManager._set(scenario) assert len(scenario.tags) == 0 assert len(scenario_2.tags) == 0 with pytest.raises(UnauthorizedTagError): _ScenarioManager._tag(scenario, \"baz\") _ScenarioManager._tag(scenario_2, \"baz\") assert len(scenario.tags) == 0 assert len(scenario_2.tags) == 0 _ScenarioManager._tag(scenario, \"foo\") _ScenarioManager._tag(scenario_2, \"foo\") assert len(scenario.tags) == 1 assert len(scenario_2.tags) == 1 _ScenarioManager._tag(scenario, \"bar\") _ScenarioManager._tag(scenario_2, \"bar\") assert len(scenario.tags) == 2 assert len(scenario_2.tags) == 2 _ScenarioManager._tag(scenario, \"foo\") _ScenarioManager._tag(scenario_2, \"foo\") assert len(scenario.tags) == 2 assert len(scenario_2.tags) == 2 _ScenarioManager._untag(scenario, \"foo\") _ScenarioManager._untag(scenario_2, \"foo\") assert len(scenario.tags) == 1 assert len(scenario_2.tags) == 1 def test_get_scenarios_by_config_id(): scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) scenario_config_3 = Config.configure_scenario(\"s3\", sequence_configs=[]) s_1_1 = _ScenarioManager._create(scenario_config_1) s_1_2 = _ScenarioManager._create(scenario_config_1) s_1_3 = _ScenarioManager._create(scenario_config_1) assert len(_ScenarioManager._get_all()) == 3 s_2_1 = _ScenarioManager._create(scenario_config_2) s_2_2 = _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_all()) == 5 s_3_1 = _ScenarioManager._create(scenario_config_3) assert len(_ScenarioManager._get_all()) == 6 s1_scenarios = _ScenarioManager._get_by_config_id(scenario_config_1.id) assert len(s1_scenarios) == 3 assert sorted([s_1_1.id, s_1_2.id, s_1_3.id]) == sorted([scenario.id for scenario in s1_scenarios]) s2_scenarios = _ScenarioManager._get_by_config_id(scenario_config_2.id) assert len(s2_scenarios) == 2 assert sorted([s_2_1.id, s_2_2.id]) == sorted([scenario.id for scenario in s2_scenarios]) s3_scenarios = _ScenarioManager._get_by_config_id(scenario_config_3.id) assert len(s3_scenarios) == 1 assert sorted([s_3_1.id]) == sorted([scenario.id for scenario in s3_scenarios]) def test_get_scenarios_by_config_id_in_multiple_versions_environment(): scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) _VersionManager._set_experiment_version(\"1.0\") _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_2) _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_by_config_id(scenario_config_1.id)) == 3 assert len(_ScenarioManager._get_by_config_id(scenario_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) 
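# Illustrative helper (hypothetical): as this test shows, _get_by_config_id only returns
# scenarios attached to the currently active version, so per-version counts can be taken
# by switching the experiment version before querying. A minimal sketch, assuming the
# managers already imported in this module:
def count_scenarios_for_version(version, config_id):
    _VersionManager._set_experiment_version(version)
    return len(_ScenarioManager._get_by_config_id(config_id))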
_ScenarioManager._create(scenario_config_2) _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_by_config_id(scenario_config_1.id)) == 3 assert len(_ScenarioManager._get_by_config_id(scenario_config_2.id)) == 2 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import pytest from src.taipy.core.exceptions import ModelNotFound from src.taipy.core.scenario._scenario_fs_repository import _ScenarioFSRepository from src.taipy.core.scenario._scenario_sql_repository import _ScenarioSQLRepository from src.taipy.core.scenario.scenario import Scenario, ScenarioId class TestScenarioFSRepository: @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_save_and_load(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) obj = repository._load(scenario.id) assert isinstance(obj, Scenario) @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_exists(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) assert repository._exists(scenario.id) assert not repository._exists(\"not-existed-scenario\") @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_load_all(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_load_all_with_filters(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) objs = repository._load_all(filters=[{\"id\": \"scenario-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete(self, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) repository._delete(scenario.id) with pytest.raises(ModelNotFound): repository._load(scenario.id) @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_all(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_many(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_delete_by(self, scenario, repo, init_sql_repo): repository = 
repo() # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") scenario._version = f\"{(i+1) // 5}.0\" repository._save(scenario) objs = repository._load_all() assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_search(self, scenario, repo, init_sql_repo): repository = repo() for i in range(10): scenario.id = ScenarioId(f\"scenario-{i}\") repository._save(scenario) assert len(repository._load_all()) == 10 objs = repository._search(\"id\", \"scenario-2\") assert len(objs) == 1 assert isinstance(objs[0], Scenario) objs = repository._search(\"id\", \"scenario-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], Scenario) assert repository._search(\"id\", \"scenario-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_ScenarioFSRepository, _ScenarioSQLRepository]) def test_export(self, tmpdir, scenario, repo, init_sql_repo): repository = repo() repository._save(scenario) repository._export(scenario.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _ScenarioFSRepository else os.path.join(tmpdir.strpath, \"scenario\") assert os.path.exists(os.path.join(dir_path, f\"{scenario.id}.json\")) "} {"text": "from datetime import datetime, timedelta import pytest from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config.job_config import JobConfig from src.taipy.core.cycle._cycle_manager import _CycleManager from src.taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import DeletingPrimaryScenario from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.scenario.scenario_id import ScenarioId from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.sequence._sequence_manager_factory import _SequenceManagerFactory from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.conftest import init_managers def test_set_and_get_scenario(cycle, init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() _OrchestratorFactory._build_dispatcher() scenario_id_1 = ScenarioId(\"scenario_id_1\") scenario_1 = Scenario(\"scenario_name_1\", [], {}, [], scenario_id_1) input_dn_2 = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_dn_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO) additional_dn_2 = InMemoryDataNode(\"zyx\", Scope.SCENARIO) task_name_2 = \"task_2\" task_2 = 
Task(task_name_2, {}, print, [input_dn_2], [output_dn_2], TaskId(\"task_id_2\")) scenario_id_2 = ScenarioId(\"scenario_id_2\") scenario_2 = Scenario( \"scenario_name_2\", [task_2], {}, [additional_dn_2], scenario_id_2, datetime.now(), True, cycle, sequences={\"sequence_2\": {\"tasks\": [task_2]}}, ) additional_dn_3 = InMemoryDataNode(\"baz\", Scope.SCENARIO) task_name_3 = \"task_3\" task_3 = Task(task_name_3, {}, print, id=TaskId(\"task_id_3\")) scenario_3_with_same_id = Scenario( \"scenario_name_3\", [task_3], {}, [additional_dn_3], scenario_id_1, datetime.now(), False, cycle, sequences={\"sequence_3\": {}}, ) # No existing scenario assert len(_ScenarioManager._get_all()) == 0 assert _ScenarioManager._get(scenario_id_1) is None assert _ScenarioManager._get(scenario_1) is None assert _ScenarioManager._get(scenario_id_2) is None assert _ScenarioManager._get(scenario_2) is None # Save one scenario. We expect to have only one scenario stored _ScenarioManager._set(scenario_1) assert len(_ScenarioManager._get_all()) == 1 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2) is None assert _ScenarioManager._get(scenario_2) is None # Save a second scenario. 
Now, we expect to have a total of two scenarios stored _TaskManager._set(task_2) _CycleManager._set(cycle) _ScenarioManager._set(scenario_2) _DataManager._set(additional_dn_2) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id assert _ScenarioManager._get(scenario_id_2).cycle == cycle assert _ScenarioManager._get(scenario_2).cycle == cycle assert _CycleManager._get(cycle.id).id == cycle.id # We save the first scenario again. 
We expect nothing to change _ScenarioManager._set(scenario_1) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 0 assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_1.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 0 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 0 assert len(_ScenarioManager._get(scenario_1).sequences) == 0 assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id assert _CycleManager._get(cycle.id).id == cycle.id # We save a third scenario with same id as the first one. 
# We expect the first scenario to be updated _DataManager._set(additional_dn_3) _TaskManager._set(task_3) _TaskManager._set(scenario_2.tasks[task_name_2]) _ScenarioManager._set(scenario_3_with_same_id) assert len(_ScenarioManager._get_all()) == 2 assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id assert _ScenarioManager._get(scenario_id_1).config_id == scenario_3_with_same_id.config_id assert len(_ScenarioManager._get(scenario_id_1).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_1).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_1).data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_1).sequences) == 1 assert _ScenarioManager._get(scenario_id_1).cycle == cycle assert _ScenarioManager._get(scenario_1).id == scenario_1.id assert _ScenarioManager._get(scenario_1).config_id == scenario_3_with_same_id.config_id assert len(_ScenarioManager._get(scenario_1).tasks) == 1 assert len(_ScenarioManager._get(scenario_1).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_1).data_nodes) == 1 assert len(_ScenarioManager._get(scenario_1).sequences) == 1 assert _ScenarioManager._get(scenario_1).cycle == cycle assert _ScenarioManager._get(scenario_id_2).id == scenario_2.id assert _ScenarioManager._get(scenario_id_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_id_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_id_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_id_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_id_2).sequences) == 1 assert _ScenarioManager._get(scenario_2).id == scenario_2.id assert _ScenarioManager._get(scenario_2).config_id == scenario_2.config_id assert len(_ScenarioManager._get(scenario_2).tasks) == 1 assert len(_ScenarioManager._get(scenario_2).additional_data_nodes) == 1 assert len(_ScenarioManager._get(scenario_2).data_nodes) == 3 assert len(_ScenarioManager._get(scenario_2).sequences) == 1 assert _TaskManager._get(task_2.id).id == task_2.id def test_get_all_on_multiple_versions_environment(init_sql_repo): init_managers() # Create 5 scenarios with 2 versions each # Only version 1.0 has the scenario with config_id = \"config_id_1\" # Only version 2.0 has the scenario with config_id = \"config_id_6\" for version in range(1, 3): for i in range(5): _ScenarioManager._set( Scenario(f\"config_id_{i+version}\", [], {}, ScenarioId(f\"id{i}_v{version}\"), version=f\"{version}.0\") ) _VersionManager._set_experiment_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 5 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"2.0\") assert len(_ScenarioManager._get_all()) == 5 assert 
len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_ScenarioManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_create_scenario_does_not_modify_config(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() creation_date_1 = datetime.now() name_1 = \"name_1\" scenario_config = Config.configure_scenario(\"sc\", None, None, Frequency.DAILY) _OrchestratorFactory._build_dispatcher() assert scenario_config.properties.get(\"name\") is None assert len(scenario_config.properties) == 0 scenario = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) assert len(scenario_config.properties) == 0 assert len(scenario.properties) == 1 assert scenario.properties.get(\"name\") == name_1 assert scenario.name == name_1 scenario.properties[\"foo\"] = \"bar\" _ScenarioManager._set(scenario) assert len(scenario_config.properties) == 0 assert len(scenario.properties) == 2 assert scenario.properties.get(\"foo\") == \"bar\" assert scenario.properties.get(\"name\") == name_1 assert scenario.name == name_1 scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1) assert scenario_2.name is None def test_create_and_delete_scenario(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() creation_date_1 = datetime.now() creation_date_2 = creation_date_1 + timedelta(minutes=10) name_1 = \"name_1\" _ScenarioManager._delete_all() assert len(_ScenarioManager._get_all()) == 0 scenario_config = Config.configure_scenario(\"sc\", None, None, Frequency.DAILY) _OrchestratorFactory._build_dispatcher() scenario_1 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) assert scenario_1.config_id == \"sc\" assert scenario_1.sequences == {} assert scenario_1.tasks == {} assert scenario_1.additional_data_nodes == {} assert scenario_1.data_nodes == {} assert scenario_1.cycle.frequency == Frequency.DAILY assert scenario_1.is_primary assert scenario_1.cycle.creation_date == creation_date_1 assert scenario_1.cycle.start_date.date() == creation_date_1.date() assert scenario_1.cycle.end_date.date() == creation_date_1.date() assert scenario_1.creation_date == creation_date_1 assert scenario_1.name == name_1 assert scenario_1.properties[\"name\"] == name_1 assert scenario_1.tags == set() cycle_id_1 = scenario_1.cycle.id assert _CycleManager._get(cycle_id_1).id == cycle_id_1 _ScenarioManager._delete(scenario_1.id) assert _ScenarioManager._get(scenario_1.id) is None assert _CycleManager._get(cycle_id_1) is None # Recreate scenario_1 scenario_1 = _ScenarioManager._create(scenario_config, creation_date=creation_date_1, name=name_1) scenario_2 = _ScenarioManager._create(scenario_config, creation_date=creation_date_2) assert scenario_2.config_id == \"sc\" assert scenario_2.sequences == {} assert scenario_2.tasks == {} assert scenario_2.additional_data_nodes == {} assert scenario_2.data_nodes == {} assert scenario_2.cycle.frequency == Frequency.DAILY assert not scenario_2.is_primary assert scenario_2.cycle.creation_date == creation_date_1 assert scenario_2.cycle.start_date.date() == creation_date_2.date() assert scenario_2.cycle.end_date.date() == creation_date_2.date() assert scenario_2.properties.get(\"name\") is None assert scenario_2.tags == set() assert scenario_1 != scenario_2 assert scenario_1.cycle == scenario_2.cycle assert len(_ScenarioManager._get_all()) 
== 2 with pytest.raises(DeletingPrimaryScenario): _ScenarioManager._delete( scenario_1.id, ) _ScenarioManager._delete( scenario_2.id, ) assert len(_ScenarioManager._get_all()) == 1 _ScenarioManager._delete(scenario_1.id) assert len(_ScenarioManager._get_all()) == 0 def mult_by_2(nb: int): return nb * 2 def mult_by_3(nb: int): return nb * 3 def mult_by_4(nb: int): return nb * 4 def test_scenario_manager_only_creates_data_node_once(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() # dn_1 ---> mult_by_2 ---> dn_2 ---> mult_by_3 ---> dn_6 # dn_1 ---> mult_by_4 ---> dn_4 dn_config_1 = Config.configure_data_node(\"foo\", \"in_memory\", Scope.GLOBAL, default_data=1) dn_config_2 = Config.configure_data_node(\"bar\", \"in_memory\", Scope.CYCLE, default_data=0) dn_config_6 = Config.configure_data_node(\"baz\", \"in_memory\", Scope.CYCLE, default_data=0) dn_config_4 = Config.configure_data_node(\"qux\", \"in_memory\", Scope.SCENARIO, default_data=0) task_mult_by_2_config = Config.configure_task(\"mult_by_2\", mult_by_2, [dn_config_1], dn_config_2) task_mult_by_3_config = Config.configure_task(\"mult_by_3\", mult_by_3, [dn_config_2], dn_config_6) task_mult_by_4_config = Config.configure_task(\"mult_by_4\", mult_by_4, [dn_config_1], dn_config_4) scenario_config = Config.configure_scenario( \"awesome_scenario\", [task_mult_by_2_config, task_mult_by_3_config, task_mult_by_4_config], None, Frequency.DAILY ) scenario_config.add_sequences( {\"by_6\": [task_mult_by_2_config, task_mult_by_3_config], \"by_4\": [task_mult_by_4_config]} ) _OrchestratorFactory._build_dispatcher() assert len(_DataManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 0 assert len(_SequenceManager._get_all()) == 0 assert len(_ScenarioManager._get_all()) == 0 assert len(_CycleManager._get_all()) == 0 scenario_1 = _ScenarioManager._create(scenario_config) assert len(_DataManager._get_all()) == 4 assert len(_TaskManager._get_all()) == 3 assert len(_SequenceManager._get_all()) == 2 assert len(_ScenarioManager._get_all()) == 1 assert scenario_1.foo.read() == 1 assert scenario_1.bar.read() == 0 assert scenario_1.baz.read() == 0 assert scenario_1.qux.read() == 0 assert scenario_1.by_6._get_sorted_tasks()[0][0].config_id == task_mult_by_2_config.id assert scenario_1.by_6._get_sorted_tasks()[1][0].config_id == task_mult_by_3_config.id assert scenario_1.by_4._get_sorted_tasks()[0][0].config_id == task_mult_by_4_config.id assert scenario_1.tasks.keys() == {task_mult_by_2_config.id, task_mult_by_3_config.id, task_mult_by_4_config.id} scenario_1_sorted_tasks = scenario_1._get_sorted_tasks() expected = [{task_mult_by_2_config.id, task_mult_by_4_config.id}, {task_mult_by_3_config.id}] for i, list_tasks_by_level in enumerate(scenario_1_sorted_tasks): assert set([t.config_id for t in list_tasks_by_level]) == expected[i] assert scenario_1.cycle.frequency == Frequency.DAILY _ScenarioManager._create(scenario_config) assert len(_DataManager._get_all()) == 5 assert len(_TaskManager._get_all()) == 4 assert len(_SequenceManager._get_all()) == 4 assert len(_ScenarioManager._get_all()) == 2 def test_get_scenarios_by_config_id(init_sql_repo): init_managers() scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) scenario_config_3 = Config.configure_scenario(\"s3\", sequence_configs=[]) s_1_1 = _ScenarioManager._create(scenario_config_1) s_1_2 = _ScenarioManager._create(scenario_config_1) s_1_3 = 
_ScenarioManager._create(scenario_config_1) assert len(_ScenarioManager._get_all()) == 3 s_2_1 = _ScenarioManager._create(scenario_config_2) s_2_2 = _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_all()) == 5 s_3_1 = _ScenarioManager._create(scenario_config_3) assert len(_ScenarioManager._get_all()) == 6 s1_scenarios = _ScenarioManager._get_by_config_id(scenario_config_1.id) assert len(s1_scenarios) == 3 assert sorted([s_1_1.id, s_1_2.id, s_1_3.id]) == sorted([scenario.id for scenario in s1_scenarios]) s2_scenarios = _ScenarioManager._get_by_config_id(scenario_config_2.id) assert len(s2_scenarios) == 2 assert sorted([s_2_1.id, s_2_2.id]) == sorted([scenario.id for scenario in s2_scenarios]) s3_scenarios = _ScenarioManager._get_by_config_id(scenario_config_3.id) assert len(s3_scenarios) == 1 assert sorted([s_3_1.id]) == sorted([scenario.id for scenario in s3_scenarios]) def test_get_scenarios_by_config_id_in_multiple_versions_environment(init_sql_repo): init_managers() scenario_config_1 = Config.configure_scenario(\"s1\", sequence_configs=[]) scenario_config_2 = Config.configure_scenario(\"s2\", sequence_configs=[]) _VersionManager._set_experiment_version(\"1.0\") _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_2) _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_by_config_id(scenario_config_1.id)) == 3 assert len(_ScenarioManager._get_by_config_id(scenario_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_1) _ScenarioManager._create(scenario_config_2) _ScenarioManager._create(scenario_config_2) assert len(_ScenarioManager._get_by_config_id(scenario_config_1.id)) == 3 assert len(_ScenarioManager._get_by_config_id(scenario_config_2.id)) == 2 "} {"text": "from datetime import datetime, timedelta from unittest import mock import pytest from src.taipy.core.common._utils import _Subscriber from src.taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory from src.taipy.core.cycle.cycle import Cycle, CycleId from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.in_memory import DataNode, InMemoryDataNode from src.taipy.core.data.pickle import PickleDataNode from src.taipy.core.exceptions.exceptions import SequenceTaskDoesNotExistInScenario from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.scenario.scenario_id import ScenarioId from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task, TaskId from taipy.config import Frequency from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId def test_create_primary_scenario(cycle): scenario = Scenario(\"foo\", set(), {\"key\": \"value\"}, is_primary=True, cycle=cycle) assert scenario.id is not None assert scenario.config_id == \"foo\" assert scenario.tasks == {} assert scenario.additional_data_nodes == {} assert scenario.data_nodes == {} assert scenario.sequences == {} assert scenario.properties == {\"key\": \"value\"} assert scenario.key == 
\"value\" assert scenario.creation_date is not None assert scenario.is_primary assert scenario.cycle == cycle assert scenario.tags == set() assert scenario.get_simple_label() == scenario.config_id with mock.patch(\"src.taipy.core.get\") as get_mck: class MockOwner: label = \"owner_label\" def get_label(self): return self.label get_mck.return_value = MockOwner() assert scenario.get_label() == \"owner_label > \" + scenario.config_id def test_create_scenario_at_time(current_datetime): scenario = Scenario(\"bar\", set(), {}, set(), ScenarioId(\"baz\"), creation_date=current_datetime) assert scenario.id == \"baz\" assert scenario.config_id == \"bar\" assert scenario.tasks == {} assert scenario.additional_data_nodes == {} assert scenario.data_nodes == {} assert scenario.sequences == {} assert scenario.properties == {} assert scenario.creation_date == current_datetime assert not scenario.is_primary assert scenario.cycle is None assert scenario.tags == set() assert scenario.get_simple_label() == scenario.config_id assert scenario.get_label() == scenario.config_id def test_create_scenario_with_task_and_additional_dn_and_sequence(): dn_1 = PickleDataNode(\"xyz\", Scope.SCENARIO) dn_2 = PickleDataNode(\"abc\", Scope.SCENARIO) task = Task(\"qux\", {}, print, [dn_1]) scenario = Scenario(\"quux\", set([task]), {}, set([dn_2]), sequences={\"acb\": {\"tasks\": [task]}}) sequence = scenario.sequences[\"acb\"] assert scenario.id is not None assert scenario.config_id == \"quux\" assert len(scenario.tasks) == 1 assert len(scenario.additional_data_nodes) == 1 assert len(scenario.data_nodes) == 2 assert len(scenario.sequences) == 1 assert scenario.qux == task assert scenario.xyz == dn_1 assert scenario.abc == dn_2 assert scenario.acb == sequence assert scenario.properties == {} assert scenario.tags == set() def test_create_scenario_invalid_config_id(): with pytest.raises(InvalidConfigurationId): Scenario(\"foo bar\", [], {}) def test_create_scenario_and_add_sequences(): input_1 = PickleDataNode(\"input_1\", Scope.SCENARIO) output_1 = PickleDataNode(\"output_1\", Scope.SCENARIO) output_2 = PickleDataNode(\"output_2\", Scope.SCENARIO) additional_dn_1 = PickleDataNode(\"additional_1\", Scope.SCENARIO) additional_dn_2 = PickleDataNode(\"additional_2\", Scope.SCENARIO) task_1 = Task(\"task_1\", {}, print, [input_1], [output_1], TaskId(\"task_id_1\")) task_2 = Task(\"task_2\", {}, print, [output_1], [output_2], TaskId(\"task_id_2\")) data_manager = _DataManagerFactory._build_manager() task_manager = _TaskManagerFactory._build_manager() data_manager._set(input_1) data_manager._set(output_1) data_manager._set(output_2) data_manager._set(additional_dn_1) data_manager._set(additional_dn_2) task_manager._set(task_1) task_manager._set(task_2) scenario = Scenario(\"scenario\", set([task_1]), {}) scenario.sequences = {\"sequence_1\": {\"tasks\": [task_1]}, \"sequence_2\": {\"tasks\": []}} assert scenario.id is not None assert scenario.config_id == \"scenario\" assert len(scenario.tasks) == 1 assert scenario.tasks.keys() == {task_1.config_id} assert len(scenario.additional_data_nodes) == 0 assert scenario.additional_data_nodes == {} assert len(scenario.data_nodes) == 2 assert scenario.data_nodes == { input_1.config_id: input_1, output_1.config_id: output_1, } assert len(scenario.sequences) == 2 assert scenario.sequence_1 == scenario.sequences[\"sequence_1\"] assert scenario.sequence_2 == scenario.sequences[\"sequence_2\"] assert scenario.sequences == {\"sequence_1\": scenario.sequence_1, \"sequence_2\": scenario.sequence_2} 
def test_create_scenario_overlapping_sequences(): input_1 = PickleDataNode(\"input_1\", Scope.SCENARIO) output_1 = PickleDataNode(\"output_1\", Scope.SCENARIO) output_2 = PickleDataNode(\"output_2\", Scope.SCENARIO) additional_dn_1 = PickleDataNode(\"additional_1\", Scope.SCENARIO) additional_dn_2 = PickleDataNode(\"additional_2\", Scope.SCENARIO) task_1 = Task(\"task_1\", {}, print, [input_1], [output_1], TaskId(\"task_id_1\")) task_2 = Task(\"task_2\", {}, print, [output_1], [output_2], TaskId(\"task_id_2\")) data_manager = _DataManagerFactory._build_manager() task_manager = _TaskManagerFactory._build_manager() data_manager._set(input_1) data_manager._set(output_1) data_manager._set(output_2) data_manager._set(additional_dn_1) data_manager._set(additional_dn_2) task_manager._set(task_1) task_manager._set(task_2) scenario = Scenario(\"scenario\", set([task_1, task_2]), {}) scenario.add_sequence(\"sequence_1\", [task_1]) scenario.add_sequence(\"sequence_2\", [task_1, task_2]) assert scenario.id is not None assert scenario.config_id == \"scenario\" assert len(scenario.tasks) == 2 assert scenario.tasks.keys() == {task_1.config_id, task_2.config_id} assert len(scenario.additional_data_nodes) == 0 assert scenario.additional_data_nodes == {} assert len(scenario.data_nodes) == 3 assert scenario.data_nodes == { input_1.config_id: input_1, output_1.config_id: output_1, output_2.config_id: output_2, } sequence_1 = scenario.sequences[\"sequence_1\"] sequence_2 = scenario.sequences[\"sequence_2\"] assert scenario.sequences == {\"sequence_1\": sequence_1, \"sequence_2\": sequence_2} scenario.remove_sequences([\"sequence_2\"]) assert scenario.sequences == {\"sequence_1\": sequence_1} scenario.remove_sequences([\"sequence_1\"]) assert scenario.sequences == {} def test_create_scenario_one_additional_dn(): input_1 = PickleDataNode(\"input_1\", Scope.SCENARIO) input_2 = PickleDataNode(\"input_2\", Scope.SCENARIO) output_1 = PickleDataNode(\"output_1\", Scope.SCENARIO) output_2 = PickleDataNode(\"output_2\", Scope.SCENARIO) additional_dn_1 = PickleDataNode(\"additional_1\", Scope.SCENARIO) additional_dn_2 = PickleDataNode(\"additional_2\", Scope.SCENARIO) task_1 = Task(\"task_1\", {}, print, [input_1], [output_1], TaskId(\"task_id_1\")) task_2 = Task(\"task_2\", {}, print, [input_2], [output_2], TaskId(\"task_id_2\")) data_manager = _DataManagerFactory._build_manager() task_manager = _TaskManagerFactory._build_manager() data_manager._set(input_1) data_manager._set(output_1) data_manager._set(input_2) data_manager._set(output_2) data_manager._set(additional_dn_1) data_manager._set(additional_dn_2) task_manager._set(task_1) task_manager._set(task_2) scenario = Scenario(\"scenario\", set(), {}, set([additional_dn_1])) assert scenario.id is not None assert scenario.config_id == \"scenario\" assert len(scenario.tasks) == 0 assert len(scenario.additional_data_nodes) == 1 assert len(scenario.data_nodes) == 1 assert scenario.tasks == {} assert scenario.additional_data_nodes == {additional_dn_1.config_id: additional_dn_1} assert scenario.data_nodes == {additional_dn_1.config_id: additional_dn_1} def test_create_scenario_wth_additional_dns(): input_1 = PickleDataNode(\"input_1\", Scope.SCENARIO) input_2 = PickleDataNode(\"input_2\", Scope.SCENARIO) output_1 = PickleDataNode(\"output_1\", Scope.SCENARIO) output_2 = PickleDataNode(\"output_2\", Scope.SCENARIO) additional_dn_1 = PickleDataNode(\"additional_1\", Scope.SCENARIO) additional_dn_2 = PickleDataNode(\"additional_2\", Scope.SCENARIO) task_1 = Task(\"task_1\", 
{}, print, [input_1], [output_1], TaskId(\"task_id_1\")) task_2 = Task(\"task_2\", {}, print, [input_2], [output_2], TaskId(\"task_id_2\")) data_manager = _DataManagerFactory._build_manager() task_manager = _TaskManagerFactory._build_manager() data_manager._set(input_1) data_manager._set(output_1) data_manager._set(input_2) data_manager._set(output_2) data_manager._set(additional_dn_1) data_manager._set(additional_dn_2) task_manager._set(task_1) task_manager._set(task_2) scenario = Scenario(\"scenario\", set(), {}, set([additional_dn_1, additional_dn_2])) assert scenario.id is not None assert scenario.config_id == \"scenario\" assert len(scenario.tasks) == 0 assert len(scenario.additional_data_nodes) == 2 assert len(scenario.data_nodes) == 2 assert scenario.tasks == {} assert scenario.additional_data_nodes == { additional_dn_1.config_id: additional_dn_1, additional_dn_2.config_id: additional_dn_2, } assert scenario.data_nodes == { additional_dn_1.config_id: additional_dn_1, additional_dn_2.config_id: additional_dn_2, } scenario_1 = Scenario(\"scenario_1\", set([task_1]), {}, set([additional_dn_1])) assert scenario_1.id is not None assert scenario_1.config_id == \"scenario_1\" assert len(scenario_1.tasks) == 1 assert len(scenario_1.additional_data_nodes) == 1 assert len(scenario_1.data_nodes) == 3 assert scenario_1.tasks.keys() == {task_1.config_id} assert scenario_1.additional_data_nodes == { additional_dn_1.config_id: additional_dn_1, } assert scenario_1.data_nodes == { input_1.config_id: input_1, output_1.config_id: output_1, additional_dn_1.config_id: additional_dn_1, } scenario_2 = Scenario(\"scenario_2\", set([task_1, task_2]), {}, set([additional_dn_1, additional_dn_2])) assert scenario_2.id is not None assert scenario_2.config_id == \"scenario_2\" assert len(scenario_2.tasks) == 2 assert len(scenario_2.additional_data_nodes) == 2 assert len(scenario_2.data_nodes) == 6 assert scenario_2.tasks.keys() == {task_1.config_id, task_2.config_id} assert scenario_2.additional_data_nodes == { additional_dn_1.config_id: additional_dn_1, additional_dn_2.config_id: additional_dn_2, } assert {dn_config_id: dn.id for dn_config_id, dn in scenario_2.data_nodes.items()} == { input_1.config_id: input_1.id, output_1.config_id: output_1.id, input_2.config_id: input_2.id, output_2.config_id: output_2.id, additional_dn_1.config_id: additional_dn_1.id, additional_dn_2.config_id: additional_dn_2.id, } def test_raise_sequence_tasks_not_in_scenario(data_node): task_1 = Task(\"task_1\", {}, print, output=[data_node]) task_2 = Task(\"task_2\", {}, print, input=[data_node]) with pytest.raises(SequenceTaskDoesNotExistInScenario) as err: Scenario(\"scenario\", [], {}, sequences={\"sequence\": {\"tasks\": [task_1]}}, scenario_id=\"SCENARIO_scenario\") assert err.value.args == ([task_1.id], \"sequence\", \"SCENARIO_scenario\") with pytest.raises(SequenceTaskDoesNotExistInScenario) as err: Scenario( \"scenario\", [task_1], {}, sequences={\"sequence\": {\"tasks\": [task_1, task_2]}}, scenario_id=\"SCENARIO_scenario\", ) assert err.value.args == ([task_2.id], \"sequence\", \"SCENARIO_scenario\") Scenario(\"scenario\", [task_1], {}, sequences={\"sequence\": {\"tasks\": [task_1]}}) Scenario( \"scenario\", [task_1, task_2], {}, sequences={\"sequence_1\": {\"tasks\": [task_1]}, \"sequence_2\": {\"tasks\": [task_1, task_2]}}, ) def test_raise_tasks_not_in_scenario_with_add_sequence_api(data_node): task_1 = Task(\"task_1\", {}, print, output=[data_node]) task_2 = Task(\"task_2\", {}, print, input=[data_node]) scenario = 
Scenario(\"scenario\", [task_1], {}) scenario_manager = _ScenarioManagerFactory._build_manager() task_manager = _TaskManagerFactory._build_manager() scenario_manager._set(scenario) task_manager._set(task_1) task_manager._set(task_2) scenario.add_sequences({\"sequence_1\": {}}) with pytest.raises(SequenceTaskDoesNotExistInScenario) as err: scenario.add_sequence(\"sequence_2\", [task_2]) assert err.value.args == ([task_2.id], \"sequence_2\", scenario.id) scenario.add_sequence(\"sequence_3\", [task_1]) with pytest.raises(SequenceTaskDoesNotExistInScenario) as err: scenario.add_sequences({\"sequence_4\": [task_2]}) assert err.value.args == ([task_2.id], \"sequence_4\", scenario.id) with pytest.raises(SequenceTaskDoesNotExistInScenario) as err: scenario.add_sequences({\"sequence_5\": [task_1, task_2]}) assert err.value.args == ([task_2.id], \"sequence_5\", scenario.id) scenario.tasks = [task_1, task_2] scenario.add_sequence(\"sequence_6\", [task_1, task_2]) def test_add_property_to_scenario(): scenario = Scenario(\"foo\", [], {\"key\": \"value\"}) assert scenario.properties == {\"key\": \"value\"} assert scenario.key == \"value\" scenario.properties[\"new_key\"] = \"new_value\" assert scenario.properties == {\"key\": \"value\", \"new_key\": \"new_value\"} assert scenario.key == \"value\" assert scenario.new_key == \"new_value\" def test_add_cycle_to_scenario(cycle): scenario = Scenario(\"foo\", [], {}) assert scenario.cycle is None _CycleManagerFactory._build_manager()._set(cycle) scenario.cycle = cycle assert scenario.cycle == cycle def test_add_and_remove_subscriber(): scenario = Scenario(\"foo\", [], {}) scenario._add_subscriber(print) assert len(scenario.subscribers) == 1 scenario._remove_subscriber(print) assert len(scenario.subscribers) == 0 def test_add_and_remove_tag(): scenario = Scenario(\"foo\", [], {}) assert len(scenario.tags) == 0 scenario._add_tag(\"tag\") assert len(scenario.tags) == 1 scenario._remove_tag(\"tag\") assert len(scenario.tags) == 0 def test_auto_set_and_reload(cycle, current_datetime, task, data_node): scenario_1 = Scenario( \"foo\", set(), {\"name\": \"bar\"}, set(), creation_date=current_datetime, is_primary=True, cycle=None, ) additional_dn = InMemoryDataNode(\"additional_dn\", Scope.SCENARIO) example_date = datetime.fromisoformat(\"2021-11-11T11:11:01.000001\") tmp_cycle = Cycle( Frequency.WEEKLY, {}, creation_date=example_date, start_date=example_date, end_date=example_date, name=\"cc\", id=CycleId(\"tmp_cc_id\"), ) sequence_1_name = \"sequence_1\" sequence_1 = Sequence({}, [], SequenceId(f\"SEQUENCE_{sequence_1_name}_{scenario_1.id}\")) tmp_sequence_name = \"tmp_sequence\" tmp_sequence = Sequence( {}, [], SequenceId(f\"SEQUENCE_{tmp_sequence_name}_{scenario_1.id}\"), ) _TaskManagerFactory._build_manager()._set(task) _DataManagerFactory._build_manager()._set(data_node) _DataManagerFactory._build_manager()._set(additional_dn) _CycleManagerFactory._build_manager()._set(cycle) scenario_manager = _ScenarioManagerFactory._build_manager() cycle_manager = _CycleManagerFactory._build_manager() cycle_manager._set(cycle) cycle_manager._set(tmp_cycle) scenario_manager._set(scenario_1) scenario_2 = scenario_manager._get(scenario_1) assert scenario_1.config_id == \"foo\" assert scenario_2.config_id == \"foo\" # auto set & reload on name attribute assert scenario_1.name == \"bar\" assert scenario_2.name == \"bar\" scenario_1.name = \"zab\" assert scenario_1.name == \"zab\" assert scenario_2.name == \"zab\" scenario_2.name = \"baz\" assert scenario_1.name == \"baz\" assert 
scenario_2.name == \"baz\" # auto set & reload on sequences attribute assert len(scenario_1.sequences) == 0 assert len(scenario_2.sequences) == 0 scenario_1.sequences = {tmp_sequence_name: {}} assert len(scenario_1.sequences) == 1 assert scenario_1.sequences[tmp_sequence_name] == tmp_sequence assert len(scenario_2.sequences) == 1 assert scenario_2.sequences[tmp_sequence_name] == tmp_sequence scenario_2.add_sequences({sequence_1_name: []}) assert len(scenario_1.sequences) == 2 assert scenario_1.sequences == {sequence_1_name: sequence_1, tmp_sequence_name: tmp_sequence} assert len(scenario_2.sequences) == 2 assert scenario_2.sequences == {sequence_1_name: sequence_1, tmp_sequence_name: tmp_sequence} scenario_2.remove_sequences([tmp_sequence_name]) assert len(scenario_1.sequences) == 1 assert scenario_1.sequences == {sequence_1_name: sequence_1} assert len(scenario_2.sequences) == 1 assert scenario_2.sequences == {sequence_1_name: sequence_1} assert len(scenario_1.tasks) == 0 assert len(scenario_1.data_nodes) == 0 scenario_1.tasks = {task} assert len(scenario_1.tasks) == 1 assert scenario_1.tasks[task.config_id] == task assert len(scenario_1.data_nodes) == 2 assert len(scenario_2.tasks) == 1 assert scenario_2.tasks[task.config_id] == task assert len(scenario_2.data_nodes) == 2 assert len(scenario_1.additional_data_nodes) == 0 scenario_1.additional_data_nodes = {additional_dn} assert len(scenario_1.additional_data_nodes) == 1 assert scenario_1.additional_data_nodes[additional_dn.config_id] == additional_dn assert len(scenario_1.data_nodes) == 3 assert len(scenario_2.additional_data_nodes) == 1 assert scenario_2.additional_data_nodes[additional_dn.config_id] == additional_dn assert len(scenario_2.data_nodes) == 3 new_datetime = current_datetime + timedelta(1) new_datetime_1 = current_datetime + timedelta(2) # auto set & reload on name attribute assert scenario_1.creation_date == current_datetime assert scenario_2.creation_date == current_datetime scenario_1.creation_date = new_datetime_1 assert scenario_1.creation_date == new_datetime_1 assert scenario_2.creation_date == new_datetime_1 scenario_2.creation_date = new_datetime assert scenario_1.creation_date == new_datetime assert scenario_2.creation_date == new_datetime # auto set & reload on cycle attribute assert scenario_1.cycle is None assert scenario_2.cycle is None scenario_1.cycle = tmp_cycle assert scenario_1.cycle == tmp_cycle assert scenario_2.cycle == tmp_cycle scenario_2.cycle = cycle assert scenario_1.cycle == cycle assert scenario_2.cycle == cycle # auto set & reload on is_primary attribute assert scenario_1.is_primary assert scenario_2.is_primary scenario_1.is_primary = False assert not scenario_1.is_primary assert not scenario_2.is_primary scenario_2.is_primary = True assert scenario_1.is_primary assert scenario_2.is_primary # auto set & reload on subscribers attribute assert len(scenario_1.subscribers) == 0 assert len(scenario_2.subscribers) == 0 scenario_1.subscribers.append(_Subscriber(print, [])) assert len(scenario_1.subscribers) == 1 assert len(scenario_2.subscribers) == 1 scenario_2.subscribers.append(_Subscriber(print, [])) assert len(scenario_1.subscribers) == 2 assert len(scenario_2.subscribers) == 2 scenario_1.subscribers.clear() assert len(scenario_1.subscribers) == 0 assert len(scenario_2.subscribers) == 0 scenario_1.subscribers.extend([_Subscriber(print, []), _Subscriber(map, [])]) assert len(scenario_1.subscribers) == 2 assert len(scenario_2.subscribers) == 2 scenario_1.subscribers.remove(_Subscriber(print, [])) 
    assert len(scenario_1.subscribers) == 1
    assert len(scenario_2.subscribers) == 1
    scenario_1.subscribers + print + len
    assert len(scenario_1.subscribers) == 3
    assert len(scenario_2.subscribers) == 3
    scenario_1.subscribers = []
    assert len(scenario_1.subscribers) == 0
    assert len(scenario_2.subscribers) == 0

    assert len(scenario_1.tags) == 0
    scenario_1.tags = {\"hi\"}
    assert len(scenario_1.tags) == 1
    assert len(scenario_2.tags) == 1

    # auto set & reload on properties attribute
    assert scenario_1.properties == {\"name\": \"baz\"}
    assert scenario_2.properties == {\"name\": \"baz\"}
    scenario_1._properties[\"qux\"] = 4
    assert scenario_1.properties[\"qux\"] == 4
    assert scenario_2.properties[\"qux\"] == 4
    assert scenario_1.properties == {\"name\": \"baz\", \"qux\": 4}
    assert scenario_2.properties == {\"name\": \"baz\", \"qux\": 4}
    scenario_2._properties[\"qux\"] = 5
    assert scenario_1.properties[\"qux\"] == 5
    assert scenario_2.properties[\"qux\"] == 5
    scenario_1.properties[\"temp_key_1\"] = \"temp_value_1\"
    scenario_1.properties[\"temp_key_2\"] = \"temp_value_2\"
    assert scenario_1.properties == {
        \"name\": \"baz\",
        \"qux\": 5,
        \"temp_key_1\": \"temp_value_1\",
        \"temp_key_2\": \"temp_value_2\",
    }
    assert scenario_2.properties == {
        \"name\": \"baz\",
        \"qux\": 5,
        \"temp_key_1\": \"temp_value_1\",
        \"temp_key_2\": \"temp_value_2\",
    }
    scenario_1.properties.pop(\"temp_key_1\")
    assert \"temp_key_1\" not in scenario_1.properties.keys()
    assert \"temp_key_1\" not in scenario_1.properties.keys()
    assert scenario_1.properties == {
        \"name\": \"baz\",
        \"qux\": 5,
        \"temp_key_2\": \"temp_value_2\",
    }
    assert scenario_2.properties == {
        \"name\": \"baz\",
        \"qux\": 5,
        \"temp_key_2\": \"temp_value_2\",
    }
    scenario_2.properties.pop(\"temp_key_2\")
    assert scenario_1.properties == {\"name\": \"baz\", \"qux\": 5}
    assert scenario_2.properties == {\"name\": \"baz\", \"qux\": 5}
    assert \"temp_key_2\" not in scenario_1.properties.keys()
    assert \"temp_key_2\" not in scenario_2.properties.keys()
    scenario_1.properties[\"temp_key_3\"] = 0
    assert scenario_1.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 0}
    assert scenario_2.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 0}
    scenario_1.properties.update({\"temp_key_3\": 1})
    assert scenario_1.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 1}
    assert scenario_2.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 1}
    scenario_1.properties.update(dict())
    assert scenario_1.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 1}
    assert scenario_2.properties == {\"name\": \"baz\", \"qux\": 5, \"temp_key_3\": 1}
    scenario_1.properties[\"temp_key_4\"] = 0
    scenario_1.properties[\"temp_key_5\"] = 0

    with scenario_1 as scenario:
        assert scenario.config_id == \"foo\"
        assert len(scenario.tasks) == 1
        assert len(scenario.sequences) == 1
        assert scenario.sequences[\"sequence_1\"] == sequence_1
        assert scenario.tasks[task.config_id] == task
        assert len(scenario.additional_data_nodes) == 1
        assert scenario.additional_data_nodes[additional_dn.config_id] == additional_dn
        assert scenario.creation_date == new_datetime
        assert scenario.cycle == cycle
        assert scenario.is_primary
        assert len(scenario.subscribers) == 0
        assert len(scenario.tags) == 1
        assert scenario._is_in_context
        assert scenario.name == \"baz\"
        assert scenario.properties[\"qux\"] == 5
        assert scenario.properties[\"temp_key_3\"] == 1
        assert scenario.properties[\"temp_key_4\"] == 0
        assert scenario.properties[\"temp_key_5\"] == 0

        new_datetime_2 = new_datetime + timedelta(5)
        scenario.config_id = \"foo\"
        scenario.tasks = set()
        scenario.additional_data_nodes = set()
        scenario.remove_sequences([sequence_1_name])
        scenario.creation_date = new_datetime_2
        scenario.cycle = None
        scenario.is_primary = False
        scenario.subscribers = [print]
        scenario.tags = None
        scenario.name = \"qux\"
        scenario.properties[\"qux\"] = 9
        scenario.properties.pop(\"temp_key_3\")
        scenario.properties.pop(\"temp_key_4\")
        scenario.properties.update({\"temp_key_4\": 1})
        scenario.properties.update({\"temp_key_5\": 2})
        scenario.properties.pop(\"temp_key_5\")
        scenario.properties.update(dict())

        assert scenario.config_id == \"foo\"
        assert len(scenario.sequences) == 1
        assert scenario.sequences[sequence_1_name] == sequence_1
        assert len(scenario.tasks) == 1
        assert scenario.tasks[task.config_id] == task
        assert len(scenario.additional_data_nodes) == 1
        assert scenario.additional_data_nodes[additional_dn.config_id] == additional_dn
        assert scenario.creation_date == new_datetime
        assert scenario.cycle == cycle
        assert scenario.is_primary
        assert len(scenario.subscribers) == 0
        assert len(scenario.tags) == 1
        assert scenario._is_in_context
        assert scenario.name == \"baz\"
        assert scenario.properties[\"qux\"] == 5
        assert scenario.properties[\"temp_key_3\"] == 1
        assert scenario.properties[\"temp_key_4\"] == 0
        assert scenario.properties[\"temp_key_5\"] == 0

    assert scenario_1.config_id == \"foo\"
    assert len(scenario_1.sequences) == 0
    assert len(scenario_1.tasks) == 0
    assert len(scenario_1.additional_data_nodes) == 0
    assert scenario_1.tasks == {}
    assert scenario_1.additional_data_nodes == {}
    assert scenario_1.creation_date == new_datetime_2
    assert scenario_1.cycle is None
    assert not scenario_1.is_primary
    assert len(scenario_1.subscribers) == 1
    assert len(scenario_1.tags) == 0
    assert not scenario_1._is_in_context
    assert scenario_1.properties[\"qux\"] == 9
    assert \"temp_key_3\" not in scenario_1.properties.keys()
    assert scenario_1.properties[\"temp_key_4\"] == 1
    assert \"temp_key_5\" not in scenario_1.properties.keys()


def test_is_deletable():
    with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._is_deletable\") as mock_submit:
        scenario = Scenario(\"foo\", [], {})
        scenario.is_deletable()
        mock_submit.assert_called_once_with(scenario)


def test_submit_scenario():
    with mock.patch(\"src.taipy.core.scenario._scenario_manager._ScenarioManager._submit\") as mock_submit:
        scenario = Scenario(\"foo\", [], {})
        scenario.submit(force=False)
        mock_submit.assert_called_once_with(scenario, None, False, False, None)


def test_subscribe_scenario():
    with mock.patch(\"src.taipy.core.subscribe_scenario\") as mock_subscribe:
        scenario = Scenario(\"foo\", [], {})
        scenario.subscribe(None)
        mock_subscribe.assert_called_once_with(None, None, scenario)


def test_unsubscribe_scenario():
    with mock.patch(\"src.taipy.core.unsubscribe_scenario\") as mock_unsubscribe:
        scenario = Scenario(\"foo\", [], {})
        scenario.unsubscribe(None)
        mock_unsubscribe.assert_called_once_with(None, None, scenario)


def test_add_tag_scenario():
    with mock.patch(\"src.taipy.core.tag\") as mock_add_tag:
        scenario = Scenario(\"foo\", [], {})
        scenario.add_tag(\"tag\")
        mock_add_tag.assert_called_once_with(scenario, \"tag\")


def test_remove_tag_scenario():
    with mock.patch(\"src.taipy.core.untag\") as mock_remove_tag:
        scenario = Scenario(\"foo\", [], {})
        scenario.remove_tag(\"tag\")
        mock_remove_tag.assert_called_once_with(scenario, \"tag\")


def test_get_inputs_outputs_intermediate_data_nodes():
    data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\")
    data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\")
data_node_3 = DataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4}, {}, set(), ScenarioId(\"s1\")) # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert scenario.get_inputs() == {data_node_1, data_node_2} assert scenario.get_outputs() == {data_node_6, data_node_7} assert scenario.get_intermediate() == {data_node_3, data_node_4, data_node_5} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4}, {}, set(), ScenarioId(\"s1\")) # s1 --- t2 ---> s5 ------ # | | # |---> t1 ---| -----> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert scenario.get_inputs() == {data_node_1, data_node_2} assert scenario.get_outputs() == {data_node_6, data_node_7} assert scenario.get_intermediate() == {data_node_4, data_node_5} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = DataNode(\"d8\", Scope.SCENARIO, \"s8\") data_node_9 = DataNode(\"d9\", Scope.SCENARIO, \"s9\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"t5\", {}, print, [data_node_8], [data_node_9], TaskId(\"t5\")) task_6 = Task(\"t6\", {}, print, [data_node_7, data_node_9], id=TaskId(\"t6\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4, task_5, task_6}, {}, set(), ScenarioId(\"s1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 ---> t6 # | # s8 -------> t5 -------> s9 ------------------ assert scenario.get_inputs() == {data_node_1, data_node_2, data_node_6, data_node_8} assert scenario.get_outputs() == set() assert scenario.get_intermediate() 
== {data_node_5, data_node_4, data_node_7, data_node_9} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = DataNode(\"hugh\", Scope.SCENARIO, \"s8\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4, data_node_6], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_8], None, TaskId(\"t5\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4, task_5}, {}, set(), ScenarioId(\"sc1\")) # s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 # t2 ---> s5 | # s8 ---> t5 s6 --| assert scenario.get_inputs() == {data_node_1, data_node_2, data_node_8, data_node_6} assert scenario.get_outputs() == {data_node_5, data_node_7} assert scenario.get_intermediate() == {data_node_4} def test_is_ready_to_run(): data_node_1 = PickleDataNode(\"foo\", Scope.SCENARIO, \"s1\", properties={\"default_data\": 1}) data_node_2 = PickleDataNode(\"bar\", Scope.SCENARIO, \"s2\", properties={\"default_data\": 2}) data_node_4 = PickleDataNode(\"qux\", Scope.SCENARIO, \"s4\", properties={\"default_data\": 4}) data_node_5 = PickleDataNode(\"quux\", Scope.SCENARIO, \"s5\", properties={\"default_data\": 5}) data_node_6 = PickleDataNode(\"quuz\", Scope.SCENARIO, \"s6\", properties={\"default_data\": 6}) data_node_7 = PickleDataNode(\"corge\", Scope.SCENARIO, \"s7\", properties={\"default_data\": 7}) data_node_8 = PickleDataNode(\"d8\", Scope.SCENARIO, \"s8\", properties={\"default_data\": 8}) data_node_9 = PickleDataNode(\"d9\", Scope.SCENARIO, \"s9\", properties={\"default_data\": 9}) task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"t5\", {}, print, [data_node_8], [data_node_9], TaskId(\"t5\")) task_6 = Task(\"t6\", {}, print, [data_node_7, data_node_9], id=TaskId(\"t6\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4, task_5, task_6}, {}, set(), ScenarioId(\"s1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 ---> t6 # | # s8 -------> t5 -------> s9 ------------------ assert scenario.get_inputs() == {data_node_1, data_node_2, data_node_6, data_node_8} data_manager = _DataManagerFactory._build_manager() data_manager._delete_all() for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7, data_node_8, data_node_9]: data_manager._set(dn) assert scenario.is_ready_to_run() data_node_1.edit_in_progress = True assert not scenario.is_ready_to_run() data_node_2.edit_in_progress = True assert not scenario.is_ready_to_run() data_node_6.edit_in_progress = True data_node_8.edit_in_progress = True assert not scenario.is_ready_to_run() data_node_1.edit_in_progress = False data_node_2.edit_in_progress = False 
data_node_6.edit_in_progress = False data_node_8.edit_in_progress = False assert scenario.is_ready_to_run() def test_data_nodes_being_edited(): data_node_1 = PickleDataNode(\"foo\", Scope.SCENARIO, \"s1\", properties={\"default_data\": 1}) data_node_2 = PickleDataNode(\"bar\", Scope.SCENARIO, \"s2\", properties={\"default_data\": 2}) data_node_4 = PickleDataNode(\"qux\", Scope.SCENARIO, \"s4\", properties={\"default_data\": 4}) data_node_5 = PickleDataNode(\"quux\", Scope.SCENARIO, \"s5\", properties={\"default_data\": 5}) data_node_6 = PickleDataNode(\"quuz\", Scope.SCENARIO, \"s6\", properties={\"default_data\": 6}) data_node_7 = PickleDataNode(\"corge\", Scope.SCENARIO, \"s7\", properties={\"default_data\": 7}) data_node_8 = PickleDataNode(\"d8\", Scope.SCENARIO, \"s8\", properties={\"default_data\": 8}) data_node_9 = PickleDataNode(\"d9\", Scope.SCENARIO, \"s9\", properties={\"default_data\": 9}) task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"t5\", {}, print, [data_node_8], [data_node_9], TaskId(\"t5\")) task_6 = Task(\"t6\", {}, print, [data_node_7, data_node_9], id=TaskId(\"t6\")) scenario = Scenario(\"scenario\", {task_1, task_2, task_3, task_4, task_5, task_6}, {}, set(), ScenarioId(\"s1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 ---> t6 # | # s8 -------> t5 -------> s9 ------------------ data_manager = _DataManagerFactory._build_manager() for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7, data_node_8, data_node_9]: data_manager._set(dn) assert len(scenario.data_nodes_being_edited()) == 0 assert scenario.data_nodes_being_edited() == set() data_node_1.edit_in_progress = True assert len(scenario.data_nodes_being_edited()) == 1 assert scenario.data_nodes_being_edited() == {data_node_1} data_node_2.edit_in_progress = True data_node_6.edit_in_progress = True data_node_8.edit_in_progress = True assert len(scenario.data_nodes_being_edited()) == 4 assert scenario.data_nodes_being_edited() == {data_node_1, data_node_2, data_node_6, data_node_8} data_node_4.edit_in_progress = True data_node_5.edit_in_progress = True data_node_9.edit_in_progress = True assert len(scenario.data_nodes_being_edited()) == 7 assert scenario.data_nodes_being_edited() == { data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_8, data_node_9, } data_node_1.edit_in_progress = False data_node_2.edit_in_progress = False data_node_6.edit_in_progress = False data_node_8.edit_in_progress = False assert len(scenario.data_nodes_being_edited()) == 3 assert scenario.data_nodes_being_edited() == {data_node_4, data_node_5, data_node_9} data_node_4.edit_in_progress = False data_node_5.edit_in_progress = False data_node_7.edit_in_progress = True assert len(scenario.data_nodes_being_edited()) == 2 assert scenario.data_nodes_being_edited() == {data_node_7, data_node_9} data_node_7.edit_in_progress = False data_node_9.edit_in_progress = False assert len(scenario.data_nodes_being_edited()) == 0 assert scenario.data_nodes_being_edited() == set() def test_get_tasks(): task_1 = Task(\"grault\", {}, print, id=TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, 
print, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, id=TaskId(\"t4\")) scenario_1 = Scenario(\"scenario_1\", {task_1, task_2, task_3, task_4}, {}, set(), ScenarioId(\"s1\")) assert scenario_1.tasks == {\"grault\": task_1, \"garply\": task_2, \"waldo\": task_3, \"fred\": task_4} task_5 = Task(\"wallo\", {}, print, id=TaskId(\"t5\")) scenario_2 = Scenario(\"scenario_2\", {task_1, task_2, task_3, task_4, task_5}, {}, set(), ScenarioId(\"s2\")) assert scenario_2.tasks == {\"grault\": task_1, \"garply\": task_2, \"waldo\": task_3, \"fred\": task_4, \"wallo\": task_5} def test_get_set_of_tasks(): task_1 = Task(\"grault\", {}, print, id=TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, id=TaskId(\"t4\")) scenario_1 = Scenario(\"scenario_1\", {task_1, task_2, task_3, task_4}, {}, set(), ScenarioId(\"s1\")) assert scenario_1._get_set_of_tasks() == {task_1, task_2, task_3, task_4} task_5 = Task(\"wallo\", {}, print, id=TaskId(\"t5\")) scenario_2 = Scenario(\"scenario_2\", {task_1, task_2, task_3, task_4, task_5}, {}, set(), ScenarioId(\"s2\")) assert scenario_2._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} def test_get_sorted_tasks(): def _assert_equal(tasks_a, tasks_b) -> bool: if len(tasks_a) != len(tasks_b): return False for i in range(len(tasks_a)): task_a, task_b = tasks_a[i], tasks_b[i] if isinstance(task_a, list) and isinstance(task_b, list): if not _assert_equal(task_a, task_b): return False elif isinstance(task_a, list) or isinstance(task_b, list): return False else: index_task_b = tasks_b.index(task_a) if any([isinstance(task_b, list) for task_b in tasks_b[i : index_task_b + 1]]): return False return True # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = InMemoryDataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_1 = Scenario(\"scenario_1\", {task_1, task_2, task_3, task_4}, {}, [], ScenarioId(\"s1\")) assert scenario_1.get_inputs() == {data_node_1, data_node_2} assert scenario_1._get_set_of_tasks() == {task_1, task_2, task_3, task_4} _assert_equal(scenario_1._get_sorted_tasks(), [[task_1], [task_2, task_4], [task_3]]) # s1 --- t2 ---> s5 # | | # |---> t1 ---| -----> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = 
Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_2 = Scenario(\"scenario_2\", {task_1, task_2, task_3, task_4}, {}, [], ScenarioId(\"s2\")) assert scenario_2.get_inputs() == {data_node_1, data_node_2} assert scenario_2._get_set_of_tasks() == {task_1, task_2, task_3, task_4} _assert_equal(scenario_2._get_sorted_tasks(), [[task_1, task_2], [task_3, task_4]]) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_3 = Scenario(\"quest\", [task_4, task_2, task_1, task_3], {}, [], scenario_id=ScenarioId(\"s3\")) assert scenario_3.get_inputs() == {data_node_1, data_node_2, data_node_6} assert scenario_3._get_set_of_tasks() == {task_1, task_2, task_3, task_4} assert _assert_equal(scenario_3._get_sorted_tasks(), [[task_2, task_1], [task_4, task_3]]) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 ---> t6 # | # s8 -------> t5 -------> s9 ------------------ data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = InMemoryDataNode(\"d8\", Scope.SCENARIO, \"s8\") data_node_9 = InMemoryDataNode(\"d9\", Scope.SCENARIO, \"s9\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"t5\", {}, print, [data_node_8], [data_node_9], TaskId(\"t5\")) task_6 = Task(\"t6\", {}, print, [data_node_7, data_node_9], id=TaskId(\"t6\")) scenario_4 = Scenario(\"scenario_3\", [task_1, task_2, task_3, task_4, task_5, task_6], {}, [], ScenarioId(\"s4\")) assert scenario_4.get_inputs() == {data_node_1, data_node_2, data_node_6, data_node_8} assert scenario_4._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5, task_6} _assert_equal(scenario_4._get_sorted_tasks(), [[task_1, task_2, task_5], [task_3, task_4], [task_6]]) # s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 # t2 ---> s5 | # s8 ---> t5 s6 --| data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = 
InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = InMemoryDataNode(\"hugh\", Scope.SCENARIO, \"s8\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4, data_node_6], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_8], None, TaskId(\"t5\")) scenario_5 = Scenario(\"scenario_4\", [task_1, task_2, task_3, task_4, task_5], {}, [], ScenarioId(\"s5\")) assert scenario_5.get_inputs() == {data_node_1, data_node_2, data_node_8, data_node_6} assert scenario_5._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} _assert_equal(scenario_5._get_sorted_tasks(), [[task_1, task_2, task_5], [task_3, task_4]]) # p1 s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s5 # p2 t2 ---> s4 ---> t3 # p3 s6 ---> t5 data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, output=[data_node_4], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_6], None, TaskId(\"t5\")) scenario_6 = Scenario(\"quest\", [task_1, task_2, task_3, task_4, task_5], {}, [], ScenarioId(\"s6\")) assert scenario_6.get_inputs() == {data_node_1, data_node_2, data_node_6} assert scenario_6._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} _assert_equal(scenario_6._get_sorted_tasks(), [[task_5, task_2, task_1], [task_4, task_3]]) # p1 s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s5 # p2 t2 ---> s4 ---> t3 # p3 s6 ---> t5 ---> s4 ---> t4 ---> s5 data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, output=[data_node_4], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_6], [data_node_4], None, TaskId(\"t5\")) scenario_7 = Scenario(\"quest\", [task_4, task_1, task_2, task_3, task_5], {}, [], scenario_id=ScenarioId(\"s7\")) assert scenario_7.get_inputs() == {data_node_1, data_node_2, data_node_6} assert scenario_7._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} _assert_equal(scenario_7._get_sorted_tasks(), [[task_5, task_2, task_1], [task_4, task_3]]) # p1 s1 --- # | # 
|---> t1 ---| -----> t3 # | | | # s2 --- ---> s3 ---> t4 ---> s4 # p2 t2 ---> s3 ---> t3 # p3 s5 ---> t5 ---> s3 ---> t4 ---> s4 # p4 s3 ---> t4 ---> s4 data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"qux\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"quux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quuz\", Scope.SCENARIO, \"s5\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_3], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, output=[data_node_3], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_3], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_3], [data_node_4], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_5], [data_node_3], TaskId(\"t5\")) scenario_8 = Scenario(\"quest\", [task_1, task_2, task_3, task_4, task_5], {}, [], scenario_id=ScenarioId(\"s8\")) assert scenario_8.get_inputs() == {data_node_1, data_node_2, data_node_5} assert scenario_8._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} _assert_equal(scenario_8._get_sorted_tasks(), [[task_5, task_2, task_1], [task_3, task_4]]) def test_add_and_remove_sequences(): data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s3\") data_node_4 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s5\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_3], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_3], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_3], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_3], [data_node_4], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_5], [data_node_3], TaskId(\"t5\")) scenario_1 = Scenario(\"quest\", [task_1, task_2, task_3, task_4, task_5], {}, [], scenario_id=ScenarioId(\"s1\")) sequence_1 = Sequence({\"name\": \"sequence_1\"}, [task_1], SequenceId(f\"SEQUENCE_sequence_1_{scenario_1.id}\")) sequence_2 = Sequence({\"name\": \"sequence_2\"}, [task_1, task_2], SequenceId(f\"SEQUENCE_sequence_2_{scenario_1.id}\")) sequence_3 = Sequence( {\"name\": \"sequence_3\"}, [task_1, task_5, task_3], SequenceId(f\"SEQUENCE_sequence_3_{scenario_1.id}\") ) task_manager = _TaskManagerFactory._build_manager() data_manager = _DataManagerFactory._build_manager() scenario_manager = _ScenarioManagerFactory._build_manager() for dn in [data_node_1, data_node_2, data_node_3, data_node_4, data_node_5]: data_manager._set(dn) for t in [task_1, task_2, task_3, task_4, task_5]: task_manager._set(t) scenario_manager._set(scenario_1) assert scenario_1.get_inputs() == {data_node_1, data_node_2, data_node_5} assert scenario_1._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5} assert len(scenario_1.sequences) == 0 scenario_1.sequences = {\"sequence_1\": {\"tasks\": [task_1]}} assert scenario_1.sequences == {\"sequence_1\": sequence_1} scenario_1.add_sequences({\"sequence_2\": [task_1, task_2]}) assert scenario_1.sequences == {\"sequence_1\": sequence_1, \"sequence_2\": sequence_2} scenario_1.remove_sequences([\"sequence_1\"]) assert scenario_1.sequences == {\"sequence_2\": sequence_2} scenario_1.add_sequences({\"sequence_1\": [task_1], \"sequence_3\": [task_1, task_5, task_3]}) assert scenario_1.sequences == { \"sequence_2\": 
sequence_2, \"sequence_1\": sequence_1, \"sequence_3\": sequence_3, } scenario_1.remove_sequences([\"sequence_2\", \"sequence_3\"]) assert scenario_1.sequences == {\"sequence_1\": sequence_1} def test_check_consistency(): data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s3\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = InMemoryDataNode(\"d8\", Scope.SCENARIO, \"s8\") data_node_9 = InMemoryDataNode(\"d9\", Scope.SCENARIO, \"s9\") scenario_0 = Scenario(\"scenario_0\", [], {}) assert scenario_0._is_consistent() task_1 = Task(\"foo\", {}, print, [data_node_1], [data_node_2], TaskId(\"t1\")) scenario_1 = Scenario(\"scenario_1\", [task_1], {}) assert scenario_1._is_consistent() # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_2 = Scenario(\"scenario_2\", {task_1, task_2, task_3, task_4}, {}, [], ScenarioId(\"s1\")) assert scenario_2._is_consistent() # s1 --- t2 ---> s5 # | | # |---> t1 ---| -----> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_3 = Scenario(\"scenario_3\", {task_1, task_2, task_3, task_4}, {}, [], ScenarioId(\"s2\")) assert scenario_3._is_consistent() # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario_4 = Scenario(\"scenario_4\", [task_4, task_2, task_1, task_3], {}, [], scenario_id=ScenarioId(\"s3\")) assert scenario_4._is_consistent() # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 ---> t6 # | # s8 -------> t5 -------> s9 ------------------ task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"t5\", {}, print, [data_node_8], [data_node_9], TaskId(\"t5\")) task_6 = Task(\"t6\", {}, print, [data_node_7, data_node_9], id=TaskId(\"t6\")) scenario_5 = 
Scenario(\"scenario_5\", [task_1, task_2, task_3, task_4, task_5, task_6], {}, [], ScenarioId(\"s4\")) assert scenario_5._is_consistent() # s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 # t2 ---> s5 | # s8 ---> t5 s6 --| task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4, data_node_6], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_8], None, TaskId(\"t5\")) scenario_6 = Scenario(\"scenario_6\", [task_1, task_2, task_3, task_4, task_5], {}, [], ScenarioId(\"s5\")) assert scenario_6._is_consistent() # p1 s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s5 # p2 t2 ---> s4 ---> t3 # p3 s6 ---> t5 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_4], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_6], None, TaskId(\"t5\")) scenario_7 = Scenario(\"scenario_7\", [task_1, task_2, task_3, task_4, task_5], {}, [], ScenarioId(\"s6\")) assert scenario_7._is_consistent() # p1 s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s5 # p2 t2 ---> s4 ---> t3 # p3 s6 ---> t5 ---> s4 ---> t4 ---> s5 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_4], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_5], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_6], [data_node_4], None, TaskId(\"t5\")) scenario_8 = Scenario(\"scenario_8\", [task_4, task_1, task_2, task_3, task_5], {}, [], scenario_id=ScenarioId(\"s7\")) assert scenario_8._is_consistent() # p1 s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s3 ---> t4 ---> s4 # p2 t2 ---> s3 ---> t3 # p3 s5 ---> t5 ---> s3 ---> t4 ---> s4 # p4 s3 ---> t4 ---> s4 task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_3], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_3], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_3], [data_node_4], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_5], [data_node_3], TaskId(\"t5\")) scenario_9 = Scenario(\"scenario_9\", [task_1, task_2, task_3, task_4, task_5], {}, [], scenario_id=ScenarioId(\"s8\")) assert scenario_9._is_consistent() "} {"text": "from src.taipy.core._version._version import _Version from taipy.config.config import Config def test_create_version(): v = _Version(\"foo\", config=Config.configure_data_node(\"dn\")) assert v.id == \"foo\" assert v.config is not None "} {"text": "import multiprocessing from unittest.mock import patch from src.taipy.core import Core, taipy from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from taipy.config.config import Config from tests.core.conftest import init_config from tests.core.utils import 
assert_true_after_time m = multiprocessing.Manager() def twice(a): return a * 2 def triple(a): return a * 3 def migrate_pickle_path(dn): dn.path = \"bar.pkl\" return dn def migrate_skippable_task(task): task.skippable = True return task def migrate_foo_scenario(scenario): scenario.properties[\"foo\"] = \"bar\" return scenario def test_migrate_datanode(): scenario_v1 = submit_v1() init_config() Config.add_migration_function(\"2.0\", \"d1\", migrate_pickle_path) submit_v2() v1 = taipy.get(scenario_v1.id) assert v1.d1.version == \"2.0\" assert v1.d1.path == \"bar.pkl\" def test_migrate_datanode_in_standalone_mode(): scenario_v1 = submit_v1() init_config() Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) Config.add_migration_function(\"2.0\", \"d1\", migrate_pickle_path) scenario_cfg_v2 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg_v2) jobs = _ScenarioManager._submit(scenario_v2) v1 = taipy.get(scenario_v1.id) assert v1.d1.version == \"2.0\" assert v1.d1.path == \"bar.pkl\" assert_true_after_time(jobs[0].is_completed) core.stop() def test_migrate_task(): scenario_v1 = submit_v1() init_config() Config.add_migration_function(\"2.0\", \"my_task\", migrate_skippable_task) submit_v2() v1 = taipy.get(scenario_v1.id) assert v1.my_task.version == \"2.0\" assert v1.my_task.skippable is True def test_migrate_task_in_standalone_mode(): scenario_v1 = submit_v1() init_config() Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) Config.add_migration_function(\"2.0\", \"my_task\", migrate_skippable_task) scenario_cfg_v2 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg_v2) jobs = _ScenarioManager._submit(scenario_v2) v1 = taipy.get(scenario_v1.id) assert v1.my_task.version == \"2.0\" assert v1.my_task.skippable is True assert_true_after_time(jobs[0].is_completed) core.stop() def test_migrate_scenario(): scenario_v1 = submit_v1() init_config() Config.add_migration_function(\"2.0\", \"my_scenario\", migrate_foo_scenario) submit_v2() v1 = taipy.get(scenario_v1.id) assert v1.version == \"2.0\" assert v1.properties[\"foo\"] == \"bar\" def test_migrate_scenario_in_standalone_mode(): scenario_v1 = submit_v1() init_config() Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) Config.add_migration_function(\"2.0\", \"my_scenario\", migrate_foo_scenario) scenario_cfg_v2 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg_v2) jobs = _ScenarioManager._submit(scenario_v2) v1 = taipy.get(scenario_v1.id) assert v1.version == \"2.0\" assert v1.properties[\"foo\"] == \"bar\" assert_true_after_time(jobs[0].is_completed) core.stop() def test_migrate_all_entities(): scenario_v1 = submit_v1() init_config() Config.add_migration_function(\"2.0\", \"d1\", migrate_pickle_path) Config.add_migration_function(\"2.0\", \"my_task\", migrate_skippable_task) Config.add_migration_function(\"2.0\", \"my_scenario\", migrate_foo_scenario) submit_v2() v1 = taipy.get(scenario_v1.id) assert v1.d1.version == \"2.0\" assert v1.my_task.version == \"2.0\" assert v1.d1.path == \"bar.pkl\" assert v1.my_task.skippable is True assert v1.properties[\"foo\"] == \"bar\" def test_migrate_all_entities_in_standalone_mode(): scenario_v1 = submit_v1() 
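# --- Illustrative sketch (not part of the original test module) -----------------
# The migration tests above all register per-config migration functions with
# Config.add_migration_function(target_version, config, migration_fct). Each
# function receives an entity created under an older production version,
# mutates it, and returns it. A minimal, hedged restatement of that pattern;
# the helper names below are hypothetical:
def _sketch_register_migrations():
    from taipy.config.config import Config

    def migrate_output_path(data_node):
        # Point the pickle data node at a new file when migrating to "2.0".
        data_node.path = "migrated.pkl"
        return data_node

    def migrate_make_skippable(task):
        # Mark the task as skippable once migrated to "2.0".
        task.skippable = True
        return task

    Config.add_migration_function("2.0", "d1", migrate_output_path)
    Config.add_migration_function("2.0", "my_task", migrate_make_skippable)
# ---------------------------------------------------------------------------------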
init_config() Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) Config.add_migration_function(\"2.0\", \"my_scenario\", migrate_foo_scenario) scenario_cfg_v2 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg_v2) jobs = _ScenarioManager._submit(scenario_v2) v1 = taipy.get(scenario_v1.id) assert v1.version == \"2.0\" assert v1.properties[\"foo\"] == \"bar\" assert_true_after_time(jobs[0].is_completed) core.stop() def test_migrate_compatible_version(): scenario_cfg = config_scenario_v1() # Production 1.0 with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() scenario_v1 = _ScenarioManager._create(scenario_cfg) _ScenarioManager._submit(scenario_v1) assert scenario_v1.d2.read() == 2 assert len(_DataManager._get_all(version_number=\"all\")) == 2 core.stop() init_config() scenario_cfg = config_scenario_v1() # Production 2.0 is a compatible version with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg) _ScenarioManager._submit(scenario_v2) assert scenario_v2.d2.read() == 2 assert len(_DataManager._get_all(version_number=\"all\")) == 4 core.stop() init_config() # Production 2.1 Config.add_migration_function( target_version=\"2.1\", config=\"d1\", migration_fct=migrate_pickle_path, ) scenario_cfg_v2_1 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() scenario_v2_1 = _ScenarioManager._create(scenario_cfg_v2_1) _ScenarioManager._submit(scenario_v2_1) core.stop() assert scenario_v2_1.d2.read() == 6 assert len(_DataManager._get_all(version_number=\"all\")) == 6 v1 = taipy.get(scenario_v1.id) assert v1.d1.version == \"2.1\" assert v1.d1.path == \"bar.pkl\" v2 = taipy.get(scenario_v2.id) assert v2.d1.version == \"2.1\" assert v2.d1.path == \"bar.pkl\" def submit_v1(): scenario_cfg_v1 = config_scenario_v1() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.0\"]): core = Core() core.run() scenario_v1 = _ScenarioManager._create(scenario_cfg_v1) _ScenarioManager._submit(scenario_v1) core.stop() return scenario_v1 def submit_v2(): scenario_cfg_v2 = config_scenario_v2() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario_v2 = _ScenarioManager._create(scenario_cfg_v2) _ScenarioManager._submit(scenario_v2) core.stop() return scenario_v2 def config_scenario_v1(): dn1 = Config.configure_pickle_data_node(id=\"d1\", default_data=1) dn2 = Config.configure_pickle_data_node(id=\"d2\") task_cfg = Config.configure_task(\"my_task\", twice, dn1, dn2) scenario_cfg = Config.configure_scenario(\"my_scenario\", [task_cfg]) scenario_cfg.add_sequences({\"my_sequence\": [task_cfg]}) return scenario_cfg def config_scenario_v2(): dn1 = Config.configure_pickle_data_node(id=\"d1\", default_data=2) dn2 = Config.configure_pickle_data_node(id=\"d2\") task_cfg = Config.configure_task(\"my_task\", triple, dn1, dn2) scenario_cfg = Config.configure_scenario(\"my_scenario\", [task_cfg]) scenario_cfg.add_sequences({\"my_scenario\": [task_cfg]}) return scenario_cfg "} {"text": "import os import pytest from src.taipy.core._version._version import _Version from src.taipy.core._version._version_fs_repository import _VersionFSRepository from src.taipy.core._version._version_sql_repository import _VersionSQLRepository from src.taipy.core.exceptions import ModelNotFound class 
TestVersionFSRepository: @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_save_and_load(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) obj = repository._load(_version.id) assert isinstance(obj, _Version) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_exists(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) assert repository._exists(_version.id) assert not repository._exists(\"not-existed-version\") @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_load_all(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_load_all_with_filters(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" _version.name = f\"_version_{i}\" repository._save(_version) objs = repository._load_all(filters=[{\"id\": \"_version_2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete(self, _version, repo, init_sql_repo): repository = repo() repository._save(_version) repository._delete(_version.id) with pytest.raises(ModelNotFound): repository._load(_version.id) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete_all(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_delete_many(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" repository._save(_version) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_search(self, _version, repo, init_sql_repo): repository = repo() for i in range(10): _version.id = f\"_version_{i}\" _version.name = f\"_version_{i}\" repository._save(_version) assert len(repository._load_all()) == 10 objs = repository._search(\"id\", \"_version_2\") assert len(objs) == 1 assert isinstance(objs[0], _Version) @pytest.mark.parametrize(\"repo\", [_VersionFSRepository, _VersionSQLRepository]) def test_export(self, tmpdir, _version, repo, init_sql_repo): repository = repo() repository._save(_version) repository._export(_version.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _VersionFSRepository else os.path.join(tmpdir.strpath, \"version\") assert os.path.exists(os.path.join(dir_path, f\"{_version.id}.json\")) "} {"text": "from time import sleep from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._cli._version_cli import _VersionCLI from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from 
src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.conftest import init_config def test_delete_version(caplog): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.1\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.1\"]): core = Core() core.run() core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() core.stop() all_versions = [version.id for version in _VersionManager._get_all()] production_version = _VersionManager._get_production_versions() assert len(all_versions) == 5 assert len(production_version) == 2 assert \"1.0\" in all_versions assert \"1.1\" in all_versions and \"1.1\" in production_version assert \"2.0\" in all_versions assert \"2.1\" in all_versions and \"2.1\" in production_version _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete\", \"1.0\"]): _VersionCLI.parse_arguments() assert \"Successfully delete version 1.0.\" in caplog.text all_versions = [version.id for version in _VersionManager._get_all()] assert len(all_versions) == 4 assert \"1.0\" not in all_versions # Test delete a non-existed version with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete\", \"non_exist_version\"]): _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text # Test delete production version will change the version from production to experiment with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete-production\", \"1.1\"]): _VersionCLI.parse_arguments() assert \"Successfully delete version 1.1 from the production version list.\" in caplog.text all_versions = [version.id for version in _VersionManager._get_all()] production_version = _VersionManager._get_production_versions() assert len(all_versions) == 4 assert \"1.1\" in all_versions and \"1.1\" not in production_version # Test delete a non-existed production version with pytest.raises(SystemExit) as e: with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete-production\", \"non_exist_version\"]): _VersionCLI.parse_arguments() assert str(e.value) == \"Version 'non_exist_version' is not a production version.\" def test_list_versions(capsys): with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() core.stop() sleep(0.05) 
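# --- Illustrative sketch (not part of the original test module) -----------------
# Every CLI assertion in this module follows the same shape: build the argument
# parser, patch sys.argv with a "manage-versions" sub-command, and expect the
# CLI to exit via SystemExit. A hedged, minimal helper showing that pattern
# (the helper itself is hypothetical; the _VersionCLI calls are the ones used
# throughout these tests):
def _sketch_run_manage_versions(*cli_args):
    from unittest.mock import patch

    import pytest

    from src.taipy.core._version._cli._version_cli import _VersionCLI

    _VersionCLI.create_parser()
    with pytest.raises(SystemExit):
        with patch("sys.argv", ["prog", "manage-versions", *cli_args]):
            _VersionCLI.parse_arguments()
# e.g. _sketch_run_manage_versions("--list") or ("--delete", "1.0")
# ---------------------------------------------------------------------------------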
with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() core.stop() _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--list\"]): _VersionCLI.parse_arguments() out, _ = capsys.readouterr() version_list = str(out).strip().split(\"\\n\") assert len(version_list) == 6 # 5 versions with the header assert all(column in version_list[0] for column in [\"Version number\", \"Mode\", \"Creation date\"]) assert all(column in version_list[1] for column in [\"2.1\", \"Production\", \"latest\"]) assert all(column in version_list[2] for column in [\"2.0\", \"Experiment\"]) and \"latest\" not in version_list[2] assert all(column in version_list[3] for column in [\"1.1\", \"Production\"]) and \"latest\" not in version_list[3] assert all(column in version_list[4] for column in [\"1.0\", \"Experiment\"]) and \"latest\" not in version_list[4] assert \"Development\" in version_list[5] and \"latest\" not in version_list[5] def test_rename_version(caplog): scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() dev_ver = _VersionManager._get_development_version() _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"non_exist_version\", \"1.1\"]): # This should raise an exception since version \"non_exist_version\" does not exist _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"1.0\", \"2.0\"]): # This should raise an exception since 2.0 already exists _VersionCLI.parse_arguments() assert \"Version name '2.0' is already used.\" in caplog.text _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"1.0\", \"1.1\"]): _VersionCLI.parse_arguments() assert _VersionManager._get(\"1.0\") is None assert [version.id for version in _VersionManager._get_all()].sort() == [dev_ver, \"1.1\", \"2.0\"].sort() # All entities are assigned to the new version assert len(_DataManager._get_all(\"1.1\")) == 2 assert len(_TaskManager._get_all(\"1.1\")) == 1 assert len(_SequenceManager._get_all(\"1.1\")) == 1 assert len(_ScenarioManager._get_all(\"1.1\")) == 1 assert len(_JobManager._get_all(\"1.1\")) == 1 _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"2.0\", \"2.1\"]): _VersionCLI.parse_arguments() assert 
_VersionManager._get(\"2.0\") is None assert [version.id for version in _VersionManager._get_all()].sort() == [dev_ver, \"1.1\", \"2.1\"].sort() assert _VersionManager._get_production_versions() == [\"2.1\"] # All entities are assigned to the new version assert len(_DataManager._get_all(\"2.1\")) == 2 assert len(_TaskManager._get_all(\"2.1\")) == 1 assert len(_SequenceManager._get_all(\"2.1\")) == 1 assert len(_ScenarioManager._get_all(\"2.1\")) == 1 assert len(_JobManager._get_all(\"2.1\")) == 1 def test_compare_version_config(caplog): scenario_config_1 = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_1) _ScenarioManager._submit(scenario) core.stop() init_config() scenario_config_2 = config_scenario() Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"bar.csv\") with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_2) _ScenarioManager._submit(scenario) core.stop() _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"non_exist_version\", \"2.0\"]): # This should raise an exception since version \"non_exist_version\" does not exist _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"non_exist_version\"]): # This should raise an exception since 2.0 already exists _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"1.0\"]): _VersionCLI.parse_arguments() assert \"There is no difference between version 1.0 Configuration and version 1.0 Configuration.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"2.0\"]): _VersionCLI.parse_arguments() expected_message = \"\"\"Differences between version 1.0 Configuration and version 2.0 Configuration: \\tDATA_NODE \"d2\" has attribute \"default_path\" modified: foo.csv -> bar.csv\"\"\" assert expected_message in caplog.text def twice(a): return a * 2 def config_scenario(): data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) scenario_config.add_sequences({\"my_sequence\": [task_config]}) return scenario_config "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import pytest from src.taipy.core._version._version import _Version from src.taipy.core._version._version_manager import _VersionManager from taipy.config.config import Config def test_save_and_get_version_entity(tmpdir): _VersionManager._repository.base_path = tmpdir assert len(_VersionManager._get_all()) == 0 version = _Version(id=\"foo\", config=Config._applied_config) _VersionManager._get_or_create(id=\"foo\", force=False) version_1 = _VersionManager._get(version.id) assert version_1.id == version.id assert Config._serializer._str(version_1.config) == Config._serializer._str(version.config) assert len(_VersionManager._get_all()) == 1 assert _VersionManager._get(version.id) == version "} {"text": "from time import sleep from unittest.mock import patch import pytest from src.taipy.core import Core from src.taipy.core._version._cli._version_cli import _VersionCLI from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.task._task_manager import _TaskManager from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.conftest import init_config def test_delete_version(caplog, init_sql_repo): _ScenarioManagerFactory._build_manager() scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.1\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.1\"]): core = Core() core.run() core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() core.stop() all_versions = [version.id for version in _VersionManager._get_all()] production_version = _VersionManager._get_production_versions() assert len(all_versions) == 5 assert len(production_version) == 2 assert \"1.0\" in all_versions assert \"1.1\" in all_versions and \"1.1\" in production_version assert \"2.0\" in all_versions assert \"2.1\" in all_versions and \"2.1\" in production_version _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete\", \"1.0\"]): _VersionCLI.parse_arguments() assert \"Successfully delete version 1.0.\" in caplog.text all_versions = [version.id for version in _VersionManager._get_all()] assert len(all_versions) == 4 assert \"1.0\" not 
in all_versions # Test delete a non-existed version with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete\", \"non_exist_version\"]): _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text # Test delete production version will change the version from production to experiment with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete-production\", \"1.1\"]): _VersionCLI.parse_arguments() assert \"Successfully delete version 1.1 from the production version list.\" in caplog.text all_versions = [version.id for version in _VersionManager._get_all()] production_version = _VersionManager._get_production_versions() assert len(all_versions) == 4 assert \"1.1\" in all_versions and \"1.1\" not in production_version # Test delete a non-existed production version with pytest.raises(SystemExit) as e: with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--delete-production\", \"non_exist_version\"]): _VersionCLI.parse_arguments() assert str(e.value) == \"Version 'non_exist_version' is not a production version.\" def test_list_versions(capsys, init_sql_repo): _ScenarioManagerFactory._build_manager() with patch(\"sys.argv\", [\"prog\", \"--development\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--production\", \"1.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.1\"]): core = Core() core.run() core.stop() sleep(0.05) with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.1\"]): core = Core() core.run() core.stop() _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--list\"]): _VersionCLI.parse_arguments() out, _ = capsys.readouterr() version_list = str(out).strip().split(\"\\n\") assert len(version_list) == 6 # 5 versions with the header assert all(column in version_list[0] for column in [\"Version number\", \"Mode\", \"Creation date\"]) assert all(column in version_list[1] for column in [\"2.1\", \"Production\", \"latest\"]) assert all(column in version_list[2] for column in [\"2.0\", \"Experiment\"]) and \"latest\" not in version_list[2] assert all(column in version_list[3] for column in [\"1.1\", \"Production\"]) and \"latest\" not in version_list[3] assert all(column in version_list[4] for column in [\"1.0\", \"Experiment\"]) and \"latest\" not in version_list[4] assert \"Development\" in version_list[5] and \"latest\" not in version_list[5] def test_rename_version(caplog, init_sql_repo): _ScenarioManagerFactory._build_manager() scenario_config = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() with patch(\"sys.argv\", [\"prog\", \"--production\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config) _ScenarioManager._submit(scenario) core.stop() dev_ver = _VersionManager._get_development_version() _VersionCLI.create_parser() with pytest.raises(SystemExit): 
with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"non_exist_version\", \"1.1\"]): # This should raise an exception since version \"non_exist_version\" does not exist _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"1.0\", \"2.0\"]): # This should raise an exception since 2.0 already exists _VersionCLI.parse_arguments() assert \"Version name '2.0' is already used.\" in caplog.text _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"1.0\", \"1.1\"]): _VersionCLI.parse_arguments() assert _VersionManager._get(\"1.0\") is None assert [version.id for version in _VersionManager._get_all()].sort() == [dev_ver, \"1.1\", \"2.0\"].sort() # All entities are assigned to the new version assert len(_DataManager._get_all(\"1.1\")) == 2 assert len(_TaskManager._get_all(\"1.1\")) == 1 assert len(_SequenceManager._get_all(\"1.1\")) == 0 assert len(_ScenarioManager._get_all(\"1.1\")) == 1 assert len(_JobManager._get_all(\"1.1\")) == 1 _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--rename\", \"2.0\", \"2.1\"]): _VersionCLI.parse_arguments() assert _VersionManager._get(\"2.0\") is None assert [version.id for version in _VersionManager._get_all()].sort() == [dev_ver, \"1.1\", \"2.1\"].sort() assert _VersionManager._get_production_versions() == [\"2.1\"] # All entities are assigned to the new version assert len(_DataManager._get_all(\"2.1\")) == 2 assert len(_TaskManager._get_all(\"2.1\")) == 1 assert len(_SequenceManager._get_all(\"2.1\")) == 0 assert len(_ScenarioManager._get_all(\"2.1\")) == 1 assert len(_JobManager._get_all(\"2.1\")) == 1 def test_compare_version_config(caplog, init_sql_repo): _ScenarioManagerFactory._build_manager() scenario_config_1 = config_scenario() with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"1.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_1) _ScenarioManager._submit(scenario) core.stop() init_config() Config.configure_core(repository_type=\"sql\", repository_properties={\"db_location\": init_sql_repo}) _ScenarioManagerFactory._build_manager() scenario_config_2 = config_scenario() Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"bar.csv\") with patch(\"sys.argv\", [\"prog\", \"--experiment\", \"2.0\"]): core = Core() core.run() scenario = _ScenarioManager._create(scenario_config_2) _ScenarioManager._submit(scenario) core.stop() _VersionCLI.create_parser() with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"non_exist_version\", \"2.0\"]): # This should raise an exception since version \"non_exist_version\" does not exist _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"non_exist_version\"]): # This should raise an exception since 2.0 already exists _VersionCLI.parse_arguments() assert \"Version 'non_exist_version' does not exist.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"1.0\"]): _VersionCLI.parse_arguments() assert \"There is no 
difference between version 1.0 Configuration and version 1.0 Configuration.\" in caplog.text with pytest.raises(SystemExit): with patch(\"sys.argv\", [\"prog\", \"manage-versions\", \"--compare-config\", \"1.0\", \"2.0\"]): _VersionCLI.parse_arguments() expected_message = \"\"\"Differences between version 1.0 Configuration and version 2.0 Configuration: \\tDATA_NODE \"d2\" has attribute \"default_path\" modified: foo.csv -> bar.csv\"\"\" assert expected_message in caplog.text def twice(a): return a * 2 def config_scenario(): data_node_1_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", default_data=\"abc\", scope=Scope.SCENARIO ) data_node_2_config = Config.configure_data_node(id=\"d2\", storage_type=\"csv\", default_path=\"foo.csv\") task_config = Config.configure_task(\"my_task\", twice, data_node_1_config, data_node_2_config) scenario_config = Config.configure_scenario(\"my_scenario\", [task_config], frequency=Frequency.DAILY) return scenario_config "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import multiprocessing import random import string from concurrent.futures import ProcessPoolExecutor from datetime import datetime, timedelta from functools import partial from time import sleep import pytest from src.taipy.core import taipy from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.pickle import PickleDataNode from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.submission._submission_manager import _SubmissionManager from src.taipy.core.submission.submission_status import SubmissionStatus from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from taipy.config import Config from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked from tests.core.utils import assert_true_after_time # ################################ USER FUNCTIONS ################################## def multiply(nb1: float, nb2: float): sleep(0.1) return nb1 * nb2 def lock_multiply(lock, nb1: float, nb2: float): with lock: return multiply(nb1, nb2) def mult_by_2(n): return n * 2 def nothing(): return True def concat(a, b): return a + b def _error(): raise Exception # ################################ TEST METHODS ################################## def test_submit_task(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) before_creation = datetime.now() sleep(0.1) task = _create_task(multiply) output_dn_id = task.output[f\"{task.config_id}_output0\"].id _OrchestratorFactory._build_dispatcher() assert _DataManager._get(output_dn_id).last_edit_date > before_creation assert 
_DataManager._get(output_dn_id).job_ids == [] assert _DataManager._get(output_dn_id).is_ready_for_reading before_submission_creation = datetime.now() sleep(0.1) job = _Orchestrator.submit_task(task) sleep(0.1) after_submission_creation = datetime.now() assert _DataManager._get(output_dn_id).read() == 42 assert _DataManager._get(output_dn_id).last_edit_date > before_submission_creation assert _DataManager._get(output_dn_id).last_edit_date < after_submission_creation assert _DataManager._get(output_dn_id).job_ids == [job.id] assert _DataManager._get(output_dn_id).is_ready_for_reading assert job.is_completed() assert _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED def test_submit_sequence_generate_unique_submit_id(): dn_1 = PickleDataNode(\"dn_config_id_1\", Scope.SCENARIO) dn_2 = PickleDataNode(\"dn_config_id_2\", Scope.SCENARIO) task_1 = Task(\"task_config_id_1\", {}, print, [dn_1]) task_2 = Task(\"task_config_id_2\", {}, print, [dn_1], [dn_2]) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_1) _TaskManager._set(task_2) scenario = Scenario(\"scenario\", [task_1, task_2], {}, sequences={\"sequence\": {\"tasks\": [task_1, task_2]}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] jobs_1 = taipy.submit(sequence) jobs_2 = taipy.submit(sequence) assert len(jobs_1) == 2 assert len(jobs_2) == 2 submit_ids_1 = [job.submit_id for job in jobs_1] submit_ids_2 = [job.submit_id for job in jobs_2] assert len(set(submit_ids_1)) == 1 assert len(set(submit_ids_2)) == 1 assert set(submit_ids_1) != set(submit_ids_2) def test_submit_scenario_generate_unique_submit_id(): dn_1 = PickleDataNode(\"dn_config_id_1\", Scope.SCENARIO) dn_2 = PickleDataNode(\"dn_config_id_2\", Scope.SCENARIO) dn_3 = PickleDataNode(\"dn_config_id_3\", Scope.SCENARIO) task_1 = Task(\"task_config_id_1\", {}, print, [dn_1]) task_2 = Task(\"task_config_id_2\", {}, print, [dn_2]) task_3 = Task(\"task_config_id_3\", {}, print, [dn_3]) scenario = Scenario(\"scenario_config_id\", [task_1, task_2, task_3], {}) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _ScenarioManager._set(scenario) jobs_1 = taipy.submit(scenario) jobs_2 = taipy.submit(scenario) assert len(jobs_1) == 3 assert len(jobs_2) == 3 def test_submit_entity_store_entity_id_in_job(): dn_1 = PickleDataNode(\"dn_config_id_1\", Scope.SCENARIO) dn_2 = PickleDataNode(\"dn_config_id_2\", Scope.SCENARIO) dn_3 = PickleDataNode(\"dn_config_id_3\", Scope.SCENARIO) task_1 = Task(\"task_config_id_1\", {}, print, [dn_1]) task_2 = Task(\"task_config_id_2\", {}, print, [dn_2]) task_3 = Task(\"task_config_id_3\", {}, print, [dn_3]) scenario = Scenario(\"scenario_config_id\", [task_1, task_2, task_3], {}) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _ScenarioManager._set(scenario) jobs_1 = taipy.submit(scenario) assert all(job.submit_entity_id == scenario.id for job in jobs_1) job_1 = taipy.submit(task_1) assert job_1.submit_entity_id == task_1.id def test_submit_task_that_return_multiple_outputs(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) def return_2tuple(nb1, nb2): return multiply(nb1, nb2), multiply(nb1, nb2) / 2 def return_list(nb1, nb2): return [multiply(nb1, nb2), multiply(nb1, nb2) / 2] with_tuple = _create_task(return_2tuple, 2) with_list = _create_task(return_list, 2) _OrchestratorFactory._build_dispatcher() 
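# --- Illustrative sketch (not part of the original test module) -----------------
# The development-mode tests here all follow the same submit flow: configure the
# job execution mode, build the dispatcher, register the entities with their
# managers, then submit the task and inspect the resulting job and submission
# status. A hedged, minimal version of that flow, relying on the names already
# imported at the top of this module (the "sketch_*" config ids are hypothetical):
def _sketch_submit_one_task():
    dn_in = PickleDataNode("sketch_in", Scope.SCENARIO, properties={"default_data": 21})
    dn_out = PickleDataNode("sketch_out", Scope.SCENARIO)
    task = Task("sketch_task", {}, mult_by_2, input=[dn_in], output=[dn_out])

    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
    _OrchestratorFactory._build_dispatcher()
    _DataManager._set(dn_in)
    _DataManager._set(dn_out)
    _TaskManager._set(task)

    job = _Orchestrator.submit_task(task)  # development mode runs synchronously
    assert job.is_completed()
    assert _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED
    assert dn_out.read() == 42  # mult_by_2(21)
# ---------------------------------------------------------------------------------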
_Orchestrator.submit_task(with_tuple) _Orchestrator.submit_task(with_list) assert ( with_tuple.output[f\"{with_tuple.config_id}_output0\"].read() == with_list.output[f\"{with_list.config_id}_output0\"].read() == 42 ) assert ( with_tuple.output[f\"{with_tuple.config_id}_output1\"].read() == with_list.output[f\"{with_list.config_id}_output1\"].read() == 21 ) def test_submit_task_returns_single_iterable_output(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) def return_2tuple(nb1, nb2): return multiply(nb1, nb2), multiply(nb1, nb2) / 2 def return_list(nb1, nb2): return [multiply(nb1, nb2), multiply(nb1, nb2) / 2] task_with_tuple = _create_task(return_2tuple, 1) task_with_list = _create_task(return_list, 1) _OrchestratorFactory._build_dispatcher() _Orchestrator.submit_task(task_with_tuple) assert task_with_tuple.output[f\"{task_with_tuple.config_id}_output0\"].read() == (42, 21) assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 _Orchestrator.submit_task(task_with_list) assert task_with_list.output[f\"{task_with_list.config_id}_output0\"].read() == [42, 21] assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 def test_data_node_not_written_due_to_wrong_result_nb(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) def return_2tuple(): return lambda nb1, nb2: (multiply(nb1, nb2), multiply(nb1, nb2) / 2) task = _create_task(return_2tuple(), 3) _OrchestratorFactory._build_dispatcher() job = _Orchestrator.submit_task(task) assert task.output[f\"{task.config_id}_output0\"].read() == 0 assert job.is_failed() assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 assert _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED def test_scenario_only_submit_same_task_once(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() dn_0 = PickleDataNode(\"dn_config_0\", Scope.SCENARIO, properties={\"default_data\": 0}) dn_1 = PickleDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dn_2 = PickleDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) task_1 = Task(\"task_config_1\", {}, print, input=[dn_0], output=[dn_1], id=\"task_1\") task_2 = Task(\"task_config_2\", {}, print, input=[dn_1], id=\"task_2\") task_3 = Task(\"task_config_3\", {}, print, input=[dn_1], output=[dn_2], id=\"task_3\") scenario_1 = Scenario( \"scenario_config_1\", [task_1, task_2, task_3], {}, \"scenario_1\", sequences={\"sequence_1\": {\"tasks\": [task_1, task_2]}, \"sequence_2\": {\"tasks\": [task_1, task_3]}}, ) sequence_1 = scenario_1.sequences[\"sequence_1\"] sequence_2 = scenario_1.sequences[\"sequence_2\"] jobs = _Orchestrator.submit(scenario_1) assert len(jobs) == 3 assert all([job.is_completed() for job in jobs]) assert all(not _Orchestrator._is_blocked(job) for job in jobs) assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED jobs = _Orchestrator.submit(sequence_1) assert len(jobs) == 2 assert all([job.is_completed() for job in jobs]) assert all(not _Orchestrator._is_blocked(job) for job in jobs) assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED jobs = _Orchestrator.submit(sequence_2) assert len(jobs) == 2 assert all([job.is_completed() for job in jobs]) assert all(not _Orchestrator._is_blocked(job) for job in jobs) assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED def 
test_update_status_fail_job(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() dn_0 = PickleDataNode(\"dn_config_0\", Scope.SCENARIO, properties={\"default_data\": 0}) dn_1 = PickleDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dn_2 = PickleDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) task_0 = Task(\"task_config_0\", {}, _error, output=[dn_0], id=\"task_0\") task_1 = Task(\"task_config_1\", {}, print, input=[dn_0], output=[dn_1], id=\"task_1\") task_2 = Task(\"task_config_2\", {}, print, input=[dn_1], id=\"task_2\") task_3 = Task(\"task_config_3\", {}, print, input=[dn_2], id=\"task_3\") scenario_1 = Scenario(\"scenario_config_1\", [task_0, task_1, task_2, task_3], {}, \"scenario_1\") scenario_2 = Scenario(\"scenario_config_2\", [task_0, task_1, task_2, task_3], {}, \"scenario_2\") _DataManager._set(dn_0) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_0) _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _ScenarioManager._set(scenario_1) _ScenarioManager._set(scenario_2) job = _Orchestrator.submit_task(task_0) assert job.is_failed() assert _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED jobs = _Orchestrator.submit(scenario_1) tasks_jobs = {job._task.id: job for job in jobs} assert tasks_jobs[\"task_0\"].is_failed() assert all([job.is_abandoned() for job in [tasks_jobs[\"task_1\"], tasks_jobs[\"task_2\"]]]) assert tasks_jobs[\"task_3\"].is_completed() assert all(not _Orchestrator._is_blocked(job) for job in jobs) assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.FAILED jobs = _Orchestrator.submit(scenario_2) tasks_jobs = {job._task.id: job for job in jobs} assert tasks_jobs[\"task_0\"].is_failed() assert all([job.is_abandoned() for job in [tasks_jobs[\"task_1\"], tasks_jobs[\"task_2\"]]]) assert tasks_jobs[\"task_3\"].is_completed() assert all(not _Orchestrator._is_blocked(job) for job in jobs) assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.FAILED def test_update_status_fail_job_in_parallel_one_job(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() dn = PickleDataNode(\"dn_config_0\", Scope.SCENARIO, properties={\"default_data\": 0}) task = Task(\"task_config_0\", {}, _error, output=[dn], id=\"task_0\") _DataManager._set(dn) _TaskManager._set(task) job = _Orchestrator.submit_task(task) assert_true_after_time(job.is_failed) assert_true_after_time(lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED) def test_update_status_fail_job_in_parallel_one_sequence(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() dn_0 = PickleDataNode(\"dn_config_0\", Scope.SCENARIO, properties={\"default_data\": 0}) dn_1 = PickleDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dn_2 = PickleDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) task_0 = Task(\"task_config_0\", {}, _error, output=[dn_0], id=\"task_0\") task_1 = Task(\"task_config_1\", {}, print, input=[dn_0], output=[dn_1], id=\"task_1\") task_2 = Task(\"task_config_2\", {}, print, input=[dn_1], id=\"task_2\") task_3 = Task(\"task_config_3\", {}, print, input=[dn_2], id=\"task_3\") sc = Scenario( \"scenario_config_1\", 
set([task_0, task_1, task_2, task_3]), {}, set(), \"scenario_1\", sequences={\"sequence_1\": {\"tasks\": [task_0, task_1, task_2]}}, ) _DataManager._set(dn_0) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_0) _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _ScenarioManager._set(sc) jobs = _Orchestrator.submit(sc.sequences[\"sequence_1\"]) tasks_jobs = {job._task.id: job for job in jobs} assert_true_after_time(tasks_jobs[\"task_0\"].is_failed) assert_true_after_time(lambda: all([job.is_abandoned() for job in [tasks_jobs[\"task_1\"], tasks_jobs[\"task_2\"]]])) assert_true_after_time(lambda: all(not _Orchestrator._is_blocked(job) for job in jobs)) submit_id = jobs[0].submit_id submission = _SubmissionManager._get(submit_id) assert_true_after_time(lambda: submission.submission_status == SubmissionStatus.FAILED) def test_update_status_fail_job_in_parallel_one_scenario(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() dn_0 = PickleDataNode(\"dn_config_0\", Scope.SCENARIO, properties={\"default_data\": 0}) dn_1 = PickleDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dn_2 = PickleDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) task_0 = Task(\"task_config_0\", {}, _error, output=[dn_0], id=\"task_0\") task_1 = Task(\"task_config_1\", {}, print, input=[dn_0], output=[dn_1], id=\"task_1\") task_2 = Task(\"task_config_2\", {}, print, input=[dn_1], id=\"task_2\") task_3 = Task(\"task_config_3\", {}, print, input=[dn_2], id=\"task_3\") sc = Scenario(\"scenario_config_1\", set([task_0, task_1, task_2, task_3]), {}, set(), \"scenario_1\") _DataManager._set(dn_0) _DataManager._set(dn_1) _DataManager._set(dn_2) _TaskManager._set(task_0) _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _ScenarioManager._set(sc) jobs = _Orchestrator.submit(sc) tasks_jobs = {job._task.id: job for job in jobs} assert_true_after_time(tasks_jobs[\"task_0\"].is_failed) assert_true_after_time(tasks_jobs[\"task_3\"].is_completed) assert_true_after_time(lambda: all([job.is_abandoned() for job in [tasks_jobs[\"task_1\"], tasks_jobs[\"task_2\"]]])) assert_true_after_time(lambda: all(not _Orchestrator._is_blocked(job) for job in jobs)) submit_id = jobs[0].submit_id submission = _SubmissionManager._get(submit_id) assert_true_after_time(lambda: submission.submission_status == SubmissionStatus.FAILED) def test_submit_task_in_parallel(): m = multiprocessing.Manager() lock = m.Lock() Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) task = _create_task(partial(lock_multiply, lock)) _OrchestratorFactory._build_dispatcher() with lock: assert task.output[f\"{task.config_id}_output0\"].read() == 0 job = _Orchestrator.submit_task(task) assert_true_after_time(job.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time( lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(lambda: task.output[f\"{task.config_id}_output0\"].read() == 42) assert_true_after_time(job.is_completed) assert_true_after_time( lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED ) assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 def test_submit_sequence_in_parallel(): m = multiprocessing.Manager() lock = m.Lock() 
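# These standalone-mode tests pair two helpers defined elsewhere in this suite: `lock_multiply`, whose
# worker-side call appears to block on the multiprocessing lock held by the test before computing its
# result, and `assert_true_after_time` (imported from tests.core.utils), which presumably polls a
# predicate until it holds or a timeout expires. A minimal sketch of that polling idea, given here as an
# assumption for illustration rather than the actual helper implementation:
#
#     def assert_true_after_time(predicate, time=120):  # hypothetical default timeout
#         start = datetime.now()
#         while (datetime.now() - start).seconds < time:
#             if predicate():
#                 return
#             sleep(0.1)
#         assert predicate()
#
# Holding the lock below therefore keeps the dispatched job observable in the RUNNING state.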
Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) task = _create_task(partial(lock_multiply, lock)) sequence = Sequence({}, [task], \"sequence_id\") _OrchestratorFactory._build_dispatcher() with lock: assert task.output[f\"{task.config_id}_output0\"].read() == 0 job = _Orchestrator.submit(sequence)[0] assert_true_after_time(job.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time( lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(lambda: task.output[f\"{task.config_id}_output0\"].read() == 42) assert_true_after_time(job.is_completed) assert_true_after_time( lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED ) assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 def test_submit_scenario_in_parallel(): m = multiprocessing.Manager() lock = m.Lock() Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) task = _create_task(partial(lock_multiply, lock)) scenario = Scenario(\"scenario_config\", [task], {}, [], \"scenario_id\") _OrchestratorFactory._build_dispatcher() with lock: assert task.output[f\"{task.config_id}_output0\"].read() == 0 job = _Orchestrator.submit(scenario)[0] assert_true_after_time(job.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time( lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(lambda: task.output[f\"{task.config_id}_output0\"].read() == 42) assert_true_after_time(job.is_completed) assert_true_after_time( lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED ) assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0 def sleep_fct(seconds): sleep(seconds) def sleep_and_raise_error_fct(seconds): sleep(seconds) raise Exception def test_submit_task_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep, sleep_period)) job = _Orchestrator.submit_task(task, wait=True) assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_completed) assert_true_after_time( lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED ) def test_submit_sequence_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep, sleep_period)) sequence = Sequence({}, [task], \"sequence_id\") job = _Orchestrator.submit(sequence, wait=True)[0] assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_completed) assert_true_after_time( lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED ) def test_submit_scenario_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep, sleep_period)) scenario = 
Scenario(\"scenario_config\", [task], {}) job = _Orchestrator.submit(scenario, wait=True)[0] assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_completed) assert_true_after_time( lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED ) def test_submit_fail_task_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1.0 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep_and_raise_error_fct, sleep_period)) job = _Orchestrator.submit_task(task, wait=True) assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_failed) assert_true_after_time(lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED) def test_submit_fail_sequence_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1.0 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep_and_raise_error_fct, sleep_period)) sequence = Sequence({}, [task], \"sequence_id\") job = _Orchestrator.submit(sequence, wait=True)[0] assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_failed) assert_true_after_time(lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED) def test_submit_fail_scenario_synchronously_in_parallel(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() sleep_period = 1.0 start_time = datetime.now() task = Task(\"sleep_task\", {}, function=partial(sleep_and_raise_error_fct, sleep_period)) scenario = Scenario(\"scenario_config\", [task], {}) job = _Orchestrator.submit(scenario, wait=True)[0] assert (datetime.now() - start_time).seconds >= sleep_period assert_true_after_time(job.is_failed) assert_true_after_time(lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED) def test_submit_task_synchronously_in_parallel_with_timeout(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() task_duration = 2 timeout_duration = task_duration - 1 task = Task(\"sleep_task\", {}, function=partial(sleep, task_duration)) start_time = datetime.now() job = _Orchestrator.submit_task(task, wait=True, timeout=timeout_duration) end_time = datetime.now() assert timeout_duration <= (end_time - start_time).seconds assert_true_after_time(job.is_completed) assert_true_after_time( lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED ) def test_submit_task_multithreading_multiple_task(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() task_1 = _create_task(partial(lock_multiply, lock_1)) task_2 = _create_task(partial(lock_multiply, lock_2)) _OrchestratorFactory._build_dispatcher() with lock_1: with lock_2: job_1 = _Orchestrator.submit_task(task_1) job_2 = _Orchestrator.submit_task(task_2) assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert task_2.output[f\"{task_2.config_id}_output0\"].read() == 0 assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_running) 
assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time( lambda: _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(lambda: task_2.output[f\"{task_2.config_id}_output0\"].read() == 42) assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert_true_after_time(job_2.is_completed) assert_true_after_time(job_1.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time( lambda: _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.COMPLETED ) assert_true_after_time(lambda: task_1.output[f\"{task_1.config_id}_output0\"].read() == 42) assert_true_after_time(job_1.is_completed) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED ) assert job_2.is_completed() assert _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.COMPLETED def test_submit_sequence_multithreading_multiple_task(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() task_1 = _create_task(partial(lock_multiply, lock_1)) task_2 = _create_task(partial(lock_multiply, lock_2)) sequence = Sequence({}, [task_1, task_2], \"sequence_id\") _OrchestratorFactory._build_dispatcher() with lock_1: with lock_2: tasks_jobs = {job._task.id: job for job in _Orchestrator.submit(sequence)} job_1 = tasks_jobs[task_1.id] job_2 = tasks_jobs[task_2.id] assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert task_2.output[f\"{task_2.config_id}_output0\"].read() == 0 assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(lambda: task_2.output[f\"{task_2.config_id}_output0\"].read() == 42) assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert_true_after_time(job_2.is_completed) assert_true_after_time(job_1.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(lambda: task_1.output[f\"{task_1.config_id}_output0\"].read() == 42) assert_true_after_time(job_1.is_completed) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED ) assert job_2.is_completed() assert _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.COMPLETED def test_submit_scenario_multithreading_multiple_task(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = 
m.Lock() task_1 = _create_task(partial(lock_multiply, lock_1)) task_2 = _create_task(partial(lock_multiply, lock_2)) scenario = Scenario(\"scenario_config\", [task_1, task_2], {}) _OrchestratorFactory._build_dispatcher() with lock_1: with lock_2: tasks_jobs = {job._task.id: job for job in _Orchestrator.submit(scenario)} job_1 = tasks_jobs[task_1.id] job_2 = tasks_jobs[task_2.id] assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert task_2.output[f\"{task_2.config_id}_output0\"].read() == 0 assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(lambda: task_2.output[f\"{task_2.config_id}_output0\"].read() == 42) assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert_true_after_time(job_2.is_completed) assert_true_after_time(job_1.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(lambda: task_1.output[f\"{task_1.config_id}_output0\"].read() == 42) assert_true_after_time(job_1.is_completed) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) assert_true_after_time(job_2.is_completed) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED ) def test_submit_task_multithreading_multiple_task_in_sync_way_to_check_job_status(): # TODO Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_0 = m.Lock() lock_1 = m.Lock() lock_2 = m.Lock() task_0 = _create_task(partial(lock_multiply, lock_0)) task_1 = _create_task(partial(lock_multiply, lock_1)) task_2 = _create_task(partial(lock_multiply, lock_2)) _OrchestratorFactory._build_dispatcher() with lock_0: job_0 = _Orchestrator.submit_task(task_0) assert_true_after_time(job_0.is_running) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time( lambda: _SubmissionManager._get(job_0.submit_id).submission_status == SubmissionStatus.RUNNING ) with lock_1: with lock_2: assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert task_2.output[f\"{task_2.config_id}_output0\"].read() == 0 job_2 = _Orchestrator.submit_task(task_2) job_1 = _Orchestrator.submit_task(task_1) assert_true_after_time(job_0.is_running) assert_true_after_time(job_1.is_pending) assert_true_after_time(job_2.is_running) assert_true_after_time( lambda: _SubmissionManager._get(job_0.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.PENDING ) assert_true_after_time( lambda: _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2) assert_true_after_time(lambda: task_2.output[f\"{task_2.config_id}_output0\"].read() == 42) assert task_1.output[f\"{task_1.config_id}_output0\"].read() == 0 assert_true_after_time(job_0.is_running) assert_true_after_time(job_1.is_running) 
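# lock_2 has been released by now: task_2 finishes (its output reads 42), job_2 completes, and the
# freed worker picks up the previously pending job_1, while job_0 still occupies the other worker.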
assert_true_after_time(job_2.is_completed) assert_true_after_time( lambda: _SubmissionManager._get(job_0.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time( lambda: _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.COMPLETED ) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2) assert_true_after_time(lambda: task_1.output[f\"{task_1.config_id}_output0\"].read() == 42) assert task_0.output[f\"{task_0.config_id}_output0\"].read() == 0 assert_true_after_time(job_0.is_running) assert_true_after_time(job_1.is_completed) assert_true_after_time( lambda: _SubmissionManager._get(job_0.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED ) assert job_2.is_completed() assert _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.COMPLETED assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) assert task_0.output[f\"{task_0.config_id}_output0\"].read() == 42 assert job_0.is_completed() assert job_1.is_completed() assert job_2.is_completed() assert _SubmissionManager._get(job_0.submit_id).submission_status == SubmissionStatus.COMPLETED assert _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED assert _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.COMPLETED def test_blocked_task(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() foo_cfg = Config.configure_data_node(\"foo\", default_data=1) bar_cfg = Config.configure_data_node(\"bar\") baz_cfg = Config.configure_data_node(\"baz\") _OrchestratorFactory._build_dispatcher() dns = _DataManager._bulk_get_or_create([foo_cfg, bar_cfg, baz_cfg]) foo = dns[foo_cfg] bar = dns[bar_cfg] baz = dns[baz_cfg] task_1 = Task(\"by_2\", {}, partial(lock_multiply, lock_1, 2), [foo], [bar]) task_2 = Task(\"by_3\", {}, partial(lock_multiply, lock_2, 3), [bar], [baz]) assert task_1.foo.is_ready_for_reading # foo is ready assert not task_1.bar.is_ready_for_reading # But bar is not ready assert not task_2.baz.is_ready_for_reading # neither does baz assert len(_Orchestrator.blocked_jobs) == 0 job_2 = _Orchestrator.submit_task(task_2) # job 2 is submitted first assert job_2.is_blocked() # since bar is not is_valid the job 2 is blocked assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) assert _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.BLOCKED assert len(_Orchestrator.blocked_jobs) == 1 with lock_2: with lock_1: job_1 = _Orchestrator.submit_task( task_1, ) # job 1 is submitted and locked assert_true_after_time(job_1.is_running) # so it is still running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert not _DataManager._get(task_1.bar.id).is_ready_for_reading # And bar still not ready assert_true_after_time(job_2.is_blocked) # the job_2 remains blocked assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) 
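# job_2 was added to _Orchestrator.blocked_jobs when it was submitted with an unready input (bar); it
# can only be dispatched once job_1 writes bar, so its submission status stays BLOCKED for as long as
# lock_1 keeps job_1 running.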
assert_true_after_time( lambda: _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.BLOCKED ) assert_true_after_time(job_1.is_completed) # job1 unlocked and can complete assert _DataManager._get(task_1.bar.id).is_ready_for_reading # bar becomes ready assert _DataManager._get(task_1.bar.id).read() == 2 # the data is computed and written assert_true_after_time(job_2.is_running) # And job 2 can start running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert len(_Orchestrator.blocked_jobs) == 0 assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED ) assert_true_after_time( lambda: _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(job_2.is_completed) # job 2 unlocked so it can complete assert _DataManager._get(task_2.baz.id).is_ready_for_reading # baz becomes ready assert _DataManager._get(task_2.baz.id).read() == 6 # the data is computed and written assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) assert _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED assert_true_after_time( lambda: _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.COMPLETED ) def test_blocked_sequence(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() foo_cfg = Config.configure_data_node(\"foo\", default_data=1) bar_cfg = Config.configure_data_node(\"bar\") baz_cfg = Config.configure_data_node(\"baz\") _OrchestratorFactory._build_dispatcher() dns = _DataManager._bulk_get_or_create([foo_cfg, bar_cfg, baz_cfg]) foo = dns[foo_cfg] bar = dns[bar_cfg] baz = dns[baz_cfg] task_1 = Task(\"by_2\", {}, partial(lock_multiply, lock_1, 2), [foo], [bar]) task_2 = Task(\"by_3\", {}, partial(lock_multiply, lock_2, 3), [bar], [baz]) sequence = Sequence({}, [task_1, task_2], \"sequence_id\") assert task_1.foo.is_ready_for_reading # foo is ready assert not task_1.bar.is_ready_for_reading # But bar is not ready assert not task_2.baz.is_ready_for_reading # neither does baz assert len(_Orchestrator.blocked_jobs) == 0 with lock_2: with lock_1: jobs = _Orchestrator.submit(sequence) # sequence is submitted tasks_jobs = {job._task.id: job for job in jobs} job_1, job_2 = tasks_jobs[task_1.id], tasks_jobs[task_2.id] assert_true_after_time(job_1.is_running) # job 1 is submitted and locked so it is still running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert not _DataManager._get(task_1.bar.id).is_ready_for_reading # And bar still not ready assert_true_after_time(job_2.is_blocked) # the job_2 remains blocked assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(job_1.is_completed) # job1 unlocked and can complete assert _DataManager._get(task_1.bar.id).is_ready_for_reading # bar becomes ready assert _DataManager._get(task_1.bar.id).read() == 2 # the data is computed and written assert_true_after_time(job_2.is_running) # And job 2 can start running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert len(_Orchestrator.blocked_jobs) == 0 assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == 
SubmissionStatus.RUNNING ) assert_true_after_time(job_2.is_completed) # job 2 unlocked so it can complete assert _DataManager._get(task_2.baz.id).is_ready_for_reading # baz becomes ready assert _DataManager._get(task_2.baz.id).read() == 6 # the data is computed and written assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED ) def test_blocked_scenario(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock_1 = m.Lock() lock_2 = m.Lock() foo_cfg = Config.configure_data_node(\"foo\", default_data=1) bar_cfg = Config.configure_data_node(\"bar\") baz_cfg = Config.configure_data_node(\"baz\") _OrchestratorFactory._build_dispatcher() dns = _DataManager._bulk_get_or_create([foo_cfg, bar_cfg, baz_cfg]) foo = dns[foo_cfg] bar = dns[bar_cfg] baz = dns[baz_cfg] task_1 = Task(\"by_2\", {}, partial(lock_multiply, lock_1, 2), [foo], [bar]) task_2 = Task(\"by_3\", {}, partial(lock_multiply, lock_2, 3), [bar], [baz]) scenario = Scenario(\"scenario_config\", [task_1, task_2], {}) assert task_1.foo.is_ready_for_reading # foo is ready assert not task_1.bar.is_ready_for_reading # But bar is not ready assert not task_2.baz.is_ready_for_reading # neither does baz assert len(_Orchestrator.blocked_jobs) == 0 with lock_2: with lock_1: jobs = _Orchestrator.submit(scenario) # scenario is submitted tasks_jobs = {job._task.id: job for job in jobs} job_1, job_2 = tasks_jobs[task_1.id], tasks_jobs[task_2.id] assert_true_after_time(job_1.is_running) # job 1 is submitted and locked so it is still running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert not _DataManager._get(task_1.bar.id).is_ready_for_reading # And bar still not ready assert_true_after_time(job_2.is_blocked) # the job_2 remains blocked assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(job_1.is_completed) # job1 unlocked and can complete assert _DataManager._get(task_1.bar.id).is_ready_for_reading # bar becomes ready assert _DataManager._get(task_1.bar.id).read() == 2 # the data is computed and written assert_true_after_time(job_2.is_running) # And job 2 can start running assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1) assert len(_Orchestrator.blocked_jobs) == 0 assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING ) assert_true_after_time(job_2.is_completed) # job 2 unlocked so it can complete assert _DataManager._get(task_2.baz.id).is_ready_for_reading # baz becomes ready assert _DataManager._get(task_2.baz.id).read() == 6 # the data is computed and written assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0) assert_true_after_time( lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED ) def test_task_orchestrator_create_synchronous_dispatcher(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() assert _OrchestratorFactory._dispatcher._nb_available_workers == 1 def test_task_orchestrator_create_standalone_dispatcher(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=3) 
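# In development mode the dispatcher runs jobs synchronously with a single worker (checked just above);
# in standalone mode it is expected to wrap a ProcessPoolExecutor sized by max_nb_of_workers, which the
# next assertions verify for three workers.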
_OrchestratorFactory._build_dispatcher() assert isinstance(_OrchestratorFactory._dispatcher._executor, ProcessPoolExecutor) assert _OrchestratorFactory._dispatcher._nb_available_workers == 3 def modified_config_task(n): from taipy.config import Config assert_true_after_time(lambda: Config.core.storage_folder == \".my_data/\") assert_true_after_time(lambda: Config.core.custom_property == \"custom_property\") return n * 2 def test_can_exec_task_with_modified_config(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) Config.configure_core(storage_folder=\".my_data/\", custom_property=\"custom_property\") dn_input_config = Config.configure_data_node(\"input\", \"pickle\", scope=Scope.SCENARIO, default_data=1) dn_output_config = Config.configure_data_node(\"output\", \"pickle\") task_config = Config.configure_task(\"task_config\", modified_config_task, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) jobs = scenario.submit() assert_true_after_time(jobs[0].is_finished, time=120) assert_true_after_time( jobs[0].is_completed ) # If the job is completed, that means the asserts in the task are successful assert_true_after_time( lambda: _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED ) def update_config_task(n): from taipy.config import Config # The exception will be saved to logger, and there is no way to check for it, # so it will be checked here with pytest.raises(ConfigurationUpdateBlocked): Config.core.storage_folder = \".new_storage_folder/\" with pytest.raises(ConfigurationUpdateBlocked): Config.core.properties = {\"custom_property\": \"new_custom_property\"} Config.core.storage_folder = \".new_storage_folder/\" Config.core.properties = {\"custom_property\": \"new_custom_property\"} return n * 2 def test_cannot_exec_task_that_update_config(): \"\"\" _ConfigBlocker singleton is not passed to the subprocesses. That means in each subprocess, the config update will not be blocked. After rebuilding a new Config in each subprocess, the Config should be blocked. 
\"\"\" Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) dn_input_config = Config.configure_data_node(\"input\", \"pickle\", scope=Scope.SCENARIO, default_data=1) dn_output_config = Config.configure_data_node(\"output\", \"pickle\") task_config = Config.configure_task(\"task_config\", update_config_task, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) jobs = scenario.submit() # The job should fail due to an exception is raised assert_true_after_time(jobs[0].is_failed) assert_true_after_time( lambda: _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.FAILED ) def test_can_execute_task_with_development_mode(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"input\", \"pickle\", scope=Scope.SCENARIO, default_data=1) dn_output_config = Config.configure_data_node(\"output\", \"pickle\") task_config = Config.configure_task(\"task_config\", mult_by_2, dn_input_config, dn_output_config) scenario_config = Config.configure_scenario(\"scenario_config\", [task_config]) _OrchestratorFactory._build_dispatcher() scenario = _ScenarioManager._create(scenario_config) scenario.submit() while scenario.output.edit_in_progress: sleep(1) assert 2 == scenario.output.read() def test_need_to_run_no_output(): hello_cfg = Config.configure_data_node(\"hello\", default_data=\"Hello \") world_cfg = Config.configure_data_node(\"world\", default_data=\"world !\") task_cfg = Config.configure_task(\"name\", input=[hello_cfg, world_cfg], function=concat, output=[]) task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) def test_need_to_run_task_not_skippable(): hello_cfg = Config.configure_data_node(\"hello\", default_data=\"Hello \") world_cfg = Config.configure_data_node(\"world\", default_data=\"world !\") hello_world_cfg = Config.configure_data_node(\"hello_world\") task_cfg = Config.configure_task( \"name\", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=False ) task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) def test_need_to_run_skippable_task_no_input(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) hello_world_cfg = Config.configure_data_node(\"hello_world\") task_cfg = Config.configure_task(\"name\", input=[], function=nothing, output=[hello_world_cfg], skippable=True) _OrchestratorFactory._build_dispatcher() task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) _Orchestrator.submit_task(task) assert not _OrchestratorFactory._dispatcher._needs_to_run(task) def test_need_to_run_skippable_task_no_validity_period_on_output(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) hello_cfg = Config.configure_data_node(\"hello\", default_data=\"Hello \") world_cfg = Config.configure_data_node(\"world\", default_data=\"world !\") hello_world_cfg = Config.configure_data_node(\"hello_world\") task_cfg = Config.configure_task( \"name\", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=True ) _OrchestratorFactory._build_dispatcher() task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) _Orchestrator.submit_task(task) assert not 
_OrchestratorFactory._dispatcher._needs_to_run(task) def test_need_to_run_skippable_task_with_validity_period_is_valid_on_output(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) hello_cfg = Config.configure_data_node(\"hello\", default_data=\"Hello \") world_cfg = Config.configure_data_node(\"world\", default_data=\"world !\") hello_world_cfg = Config.configure_data_node(\"hello_world\", validity_days=1) task_cfg = Config.configure_task( \"name\", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=True ) _OrchestratorFactory._build_dispatcher() task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) job = _Orchestrator.submit_task(task) assert not _OrchestratorFactory._dispatcher._needs_to_run(task) job_skipped = _Orchestrator.submit_task(task) assert job.is_completed() assert job.is_finished() assert job_skipped.is_skipped() assert job_skipped.is_finished() def test_need_to_run_skippable_task_with_validity_period_obsolete_on_output(): hello_cfg = Config.configure_data_node(\"hello\", default_data=\"Hello \") world_cfg = Config.configure_data_node(\"world\", default_data=\"world !\") hello_world_cfg = Config.configure_data_node(\"hello_world\", validity_days=1) task_cfg = Config.configure_task( \"name\", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=True ) task = _create_task_from_config(task_cfg) assert _OrchestratorFactory._dispatcher._needs_to_run(task) _Orchestrator.submit_task(task) output = task.hello_world output._last_edit_date = datetime.now() - timedelta(days=1, minutes=30) _DataManager()._set(output) assert _OrchestratorFactory._dispatcher._needs_to_run(task) # ################################ UTIL METHODS ################################## def _create_task(function, nb_outputs=1): output_dn_config_id = \"\".join(random.choice(string.ascii_lowercase) for _ in range(10)) dn_input_configs = [ Config.configure_data_node(\"input1\", \"pickle\", Scope.SCENARIO, default_data=21), Config.configure_data_node(\"input2\", \"pickle\", Scope.SCENARIO, default_data=2), ] dn_output_configs = [ Config.configure_data_node(f\"{output_dn_config_id}_output{i}\", \"pickle\", Scope.SCENARIO, default_data=0) for i in range(nb_outputs) ] input_dn = _DataManager._bulk_get_or_create(dn_input_configs).values() output_dn = _DataManager._bulk_get_or_create(dn_output_configs).values() return Task( output_dn_config_id, {}, function=function, input=input_dn, output=output_dn, ) def _create_task_from_config(task_cfg): return _TaskManager()._bulk_get_or_create([task_cfg])[0] "} {"text": "from unittest import mock import pytest from src.taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _JobDispatcher, _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.exceptions.exceptions import OrchestratorNotBuilt from taipy.config import Config def test_build_orchestrator(): _OrchestratorFactory._orchestrator = None _OrchestratorFactory._dispatcher = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is None orchestrator = _OrchestratorFactory._build_orchestrator() assert orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator == _Orchestrator dispatcher = _OrchestratorFactory._build_dispatcher() assert 
isinstance(dispatcher, _JobDispatcher) assert isinstance(_OrchestratorFactory._dispatcher, _JobDispatcher) _OrchestratorFactory._orchestrator = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is not None with mock.patch( \"src.taipy.core._orchestrator._orchestrator_factory._OrchestratorFactory._build_dispatcher\" ) as build_dispatcher, mock.patch( \"src.taipy.core._orchestrator._orchestrator._Orchestrator.initialize\" ) as initialize: orchestrator = _OrchestratorFactory._build_orchestrator() assert orchestrator == _Orchestrator assert _OrchestratorFactory._orchestrator == _Orchestrator build_dispatcher.assert_not_called() initialize.assert_called_once() def test_build_development_dispatcher(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._orchestrator = None _OrchestratorFactory._dispatcher = None assert _OrchestratorFactory._orchestrator is None assert _OrchestratorFactory._dispatcher is None with pytest.raises(OrchestratorNotBuilt): _OrchestratorFactory._build_dispatcher() _OrchestratorFactory._build_orchestrator() assert _OrchestratorFactory._orchestrator is not None assert _OrchestratorFactory._dispatcher is None _OrchestratorFactory._build_dispatcher() assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) def test_build_standalone_dispatcher(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher) assert not isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 2 _OrchestratorFactory._dispatcher._nb_available_workers = 1 _OrchestratorFactory._build_dispatcher(force_restart=False) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 1 _OrchestratorFactory._build_dispatcher(force_restart=True) assert _OrchestratorFactory._dispatcher.is_running() assert _OrchestratorFactory._dispatcher._nb_available_workers == 2 "} {"text": "import multiprocessing from concurrent.futures import ProcessPoolExecutor from functools import partial from unittest import mock from unittest.mock import MagicMock from pytest import raises from src.taipy.core import DataNodeId, JobId, TaskId from src.taipy.core._orchestrator._dispatcher._development_job_dispatcher import _DevelopmentJobDispatcher from src.taipy.core._orchestrator._dispatcher._standalone_job_dispatcher import _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.job.job import Job from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.task.task import Task from taipy.config.config import Config from tests.core.utils import assert_true_after_time def execute(lock): with lock: ... 
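# `execute` simply waits until the test releases the lock before returning, which lets
# test_can_execute_2_workers below keep both dispatched jobs busy while it probes _can_execute().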
return None def _error(): raise RuntimeError(\"Something bad has happened\") def test_build_development_job_dispatcher(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() dispatcher = _OrchestratorFactory._dispatcher assert isinstance(dispatcher, _DevelopmentJobDispatcher) assert dispatcher._nb_available_workers == 1 with raises(NotImplementedError): assert dispatcher.start() assert dispatcher.is_running() with raises(NotImplementedError): dispatcher.stop() def test_build_standalone_job_dispatcher(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) _OrchestratorFactory._build_dispatcher() dispatcher = _OrchestratorFactory._dispatcher assert not isinstance(dispatcher, _DevelopmentJobDispatcher) assert isinstance(dispatcher, _StandaloneJobDispatcher) assert isinstance(dispatcher._executor, ProcessPoolExecutor) assert dispatcher._nb_available_workers == 2 assert_true_after_time(dispatcher.is_running) dispatcher.stop() dispatcher.join() assert_true_after_time(lambda: not dispatcher.is_running()) def test_can_execute_2_workers(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock = m.Lock() task_id = TaskId(\"task_id1\") output = list(_DataManager._bulk_get_or_create([Config.configure_data_node(\"input1\", default_data=21)]).values()) _OrchestratorFactory._build_dispatcher() task = Task( config_id=\"name\", properties={}, input=[], function=partial(execute, lock), output=output, id=task_id, ) job_id = JobId(\"id1\") job = Job(job_id, task, \"submit_id\", task.id) dispatcher = _StandaloneJobDispatcher(_OrchestratorFactory._orchestrator) with lock: assert dispatcher._can_execute() dispatcher._dispatch(job) assert dispatcher._can_execute() dispatcher._dispatch(job) assert not dispatcher._can_execute() assert_true_after_time(lambda: dispatcher._can_execute()) def test_can_execute_synchronous(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") task = Task(config_id=\"name\", properties={}, input=[], function=print, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX) job_id = JobId(\"id1\") job = Job(job_id, task, submission.id, task.id) dispatcher = _OrchestratorFactory._dispatcher assert dispatcher._can_execute() dispatcher._dispatch(job) assert dispatcher._can_execute() def test_exception_in_user_function(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") job_id = JobId(\"id1\") task = Task(config_id=\"name\", properties={}, input=[], function=_error, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX) job = Job(job_id, task, submission.id, task.id) dispatcher = _OrchestratorFactory._dispatcher dispatcher._dispatch(job) assert job.is_failed() assert 'RuntimeError(\"Something bad has happened\")' in str(job.stacktrace[0]) def test_exception_in_writing_data(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() task_id = TaskId(\"task_id1\") job_id = JobId(\"id1\") output = MagicMock() output.id = DataNodeId(\"output_id\") output.config_id = \"my_raising_datanode\" output._is_in_cache = False output.write.side_effect = ValueError() task = Task(config_id=\"name\", properties={}, input=[], 
function=print, output=[output], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX) job = Job(job_id, task, submission.id, task.id) dispatcher = _OrchestratorFactory._dispatcher with mock.patch(\"src.taipy.core.data._data_manager._DataManager._get\") as get: get.return_value = output dispatcher._dispatch(job) assert job.is_failed() assert \"node\" in job.stacktrace[0] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from unittest import mock import pytest from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.csv import CSVDataNode from src.taipy.core.data.data_node import DataNode from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import InvalidConfigurationId @pytest.fixture def output(): return [DataNode(\"name_1\"), DataNode(\"name_2\"), DataNode(\"name_3\")] @pytest.fixture def output_config(): return [DataNodeConfig(\"name_1\"), DataNodeConfig(\"name_2\"), DataNodeConfig(\"name_3\")] @pytest.fixture def input(): return [DataNode(\"input_name_1\"), DataNode(\"input_name_2\"), DataNode(\"input_name_3\")] @pytest.fixture def input_config(): return [DataNodeConfig(\"input_name_1\"), DataNodeConfig(\"input_name_2\"), DataNodeConfig(\"input_name_3\")] def test_create_task(): name = \"name_1\" task = Task(name, {}, print, [], []) assert f\"TASK_{name}_\" in task.id assert task.config_id == \"name_1\" with pytest.raises(InvalidConfigurationId): Task(\"foo bar\", {}, print, [], []) path = \"my/csv/path\" foo_dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": True}) task = Task(\"name_1\", {}, print, [foo_dn], []) assert task.config_id == \"name_1\" assert task.id is not None assert task.owner_id is None assert task.parent_ids == set() assert task.foo == foo_dn assert task.foo.path == path with pytest.raises(AttributeError): task.bar task = Task(\"name_1\", {}, print, [foo_dn], [], parent_ids={\"parent_id\"}) assert task.parent_ids == {\"parent_id\"} path = \"my/csv/path\" abc_dn = InMemoryDataNode(\"name_1ea\", Scope.SCENARIO, properties={\"path\": path}) task = Task(\"name_1ea\", {}, print, [abc_dn], [], owner_id=\"owner_id\", parent_ids={\"parent_id_1\", \"parent_id_2\"}) assert task.config_id == \"name_1ea\" assert task.id is not None assert task.owner_id == \"owner_id\" assert task.parent_ids == {\"parent_id_1\", \"parent_id_2\"} assert task.name_1ea == abc_dn assert task.name_1ea.path == path with pytest.raises(AttributeError): task.bar with mock.patch(\"src.taipy.core.get\") as get_mck: class MockOwner: label = \"owner_label\" def get_label(self): return self.label get_mck.return_value = MockOwner() assert task.get_label() == \"owner_label > \" + task.config_id assert 
task.get_simple_label() == task.config_id def test_can_not_change_task_output(output): task = Task(\"name_1\", {}, print, output=output) with pytest.raises(Exception): task.output = {} assert list(task.output.values()) == output output.append(output[0]) assert list(task.output.values()) != output def test_can_not_change_task_input(input): task = Task(\"name_1\", {}, print, input=input) with pytest.raises(Exception): task.input = {} assert list(task.input.values()) == input input.append(input[0]) assert list(task.input.values()) != input def test_can_not_change_task_config_output(output_config): task_config = Config.configure_task(\"name_1\", print, [], output=output_config) assert task_config.output_configs == output_config with pytest.raises(Exception): task_config.output_configs = [] output_config.append(output_config[0]) assert task_config._output != output_config def test_can_not_update_task_output_values(output_config): data_node_cfg = Config.configure_data_node(\"data_node_cfg\") task_config = Config.configure_task(\"name_1\", print, [], output=output_config) task_config.output_configs.append(data_node_cfg) assert task_config.output_configs == output_config task_config.output_configs[0] = data_node_cfg assert task_config.output_configs[0] != data_node_cfg def test_can_not_update_task_input_values(input_config): data_node_config = DataNodeConfig(\"data_node\") task_config = Config.configure_task(\"name_1\", print, input=input_config, output=[]) task_config.input_configs.append(data_node_config) assert task_config.input_configs == input_config task_config.input_configs[0] = data_node_config assert task_config.input_configs[0] != data_node_config def mock_func(): pass def test_auto_set_and_reload(data_node): task_1 = Task( config_id=\"foo\", properties={}, function=print, input=None, output=None, owner_id=None, skippable=False ) _DataManager._set(data_node) _TaskManager._set(task_1) task_2 = _TaskManager._get(task_1) # auto set & reload on function attribute assert task_1.function == print assert task_2.function == print task_1.function = sum assert task_1.function == sum assert task_2.function == sum task_2.function = mock_func assert task_1.function == mock_func assert task_2.function == mock_func # auto set & reload on skippable attribute assert not task_1.skippable assert not task_2.skippable task_1.skippable = True assert task_1.skippable assert task_2.skippable task_2.skippable = False assert not task_1.skippable assert not task_2.skippable # auto set & reload on parent_ids attribute (set() object does not have auto set yet) assert task_1.parent_ids == set() assert task_2.parent_ids == set() task_1._parent_ids.update([\"sc2\"]) _TaskManager._set(task_1) assert task_1.parent_ids == {\"sc2\"} assert task_2.parent_ids == {\"sc2\"} task_2._parent_ids.clear() task_2._parent_ids.update([\"sc1\"]) _TaskManager._set(task_2) assert task_1.parent_ids == {\"sc1\"} assert task_2.parent_ids == {\"sc1\"} # auto set & reload on properties attribute assert task_1.properties == {} assert task_2.properties == {} task_1._properties[\"qux\"] = 4 assert task_1.properties[\"qux\"] == 4 assert task_2.properties[\"qux\"] == 4 assert task_1.properties == {\"qux\": 4} assert task_2.properties == {\"qux\": 4} task_2._properties[\"qux\"] = 5 assert task_1.properties[\"qux\"] == 5 assert task_2.properties[\"qux\"] == 5 task_1.properties[\"temp_key_1\"] = \"temp_value_1\" task_1.properties[\"temp_key_2\"] = \"temp_value_2\" assert task_1.properties == { \"qux\": 5, \"temp_key_1\": \"temp_value_1\", 
\"temp_key_2\": \"temp_value_2\", } assert task_2.properties == { \"qux\": 5, \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } task_1.properties.pop(\"temp_key_1\") assert \"temp_key_1\" not in task_1.properties.keys() assert \"temp_key_1\" not in task_1.properties.keys() assert task_1.properties == { \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } assert task_2.properties == { \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } task_2.properties.pop(\"temp_key_2\") assert task_1.properties == {\"qux\": 5} assert task_2.properties == {\"qux\": 5} assert \"temp_key_2\" not in task_1.properties.keys() assert \"temp_key_2\" not in task_2.properties.keys() task_1.properties[\"temp_key_3\"] = 0 assert task_1.properties == {\"qux\": 5, \"temp_key_3\": 0} assert task_2.properties == {\"qux\": 5, \"temp_key_3\": 0} task_1.properties.update({\"temp_key_3\": 1}) assert task_1.properties == {\"qux\": 5, \"temp_key_3\": 1} assert task_2.properties == {\"qux\": 5, \"temp_key_3\": 1} task_1.properties.update(dict()) assert task_1.properties == {\"qux\": 5, \"temp_key_3\": 1} assert task_2.properties == {\"qux\": 5, \"temp_key_3\": 1} task_1.properties[\"temp_key_4\"] = 0 task_1.properties[\"temp_key_5\"] = 0 with task_1 as task: assert task.config_id == \"foo\" assert task.owner_id is None assert task.function == mock_func assert not task.skippable assert task._is_in_context assert task.properties[\"qux\"] == 5 assert task.properties[\"temp_key_3\"] == 1 assert task.properties[\"temp_key_4\"] == 0 assert task.properties[\"temp_key_5\"] == 0 task.function = print task.skippable = True task.properties[\"qux\"] = 9 task.properties.pop(\"temp_key_3\") task.properties.pop(\"temp_key_4\") task.properties.update({\"temp_key_4\": 1}) task.properties.update({\"temp_key_5\": 2}) task.properties.pop(\"temp_key_5\") task.properties.update(dict()) assert task.config_id == \"foo\" assert task.owner_id is None assert task.function == mock_func assert not task.skippable assert task._is_in_context assert task.properties[\"qux\"] == 5 assert task.properties[\"temp_key_3\"] == 1 assert task.properties[\"temp_key_4\"] == 0 assert task.properties[\"temp_key_5\"] == 0 assert task_1.config_id == \"foo\" assert task_1.owner_id is None assert task_1.function == print assert task.skippable assert not task_1._is_in_context assert task_1.properties[\"qux\"] == 9 assert \"temp_key_3\" not in task_1.properties.keys() assert task_1.properties[\"temp_key_4\"] == 1 assert \"temp_key_5\" not in task_1.properties.keys() def test_get_parents(task): with mock.patch(\"src.taipy.core.get_parents\") as mck: task.get_parents() mck.assert_called_once_with(task) def test_submit_task(task: Task): with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._submit\") as mock_submit: task.submit([], True) mock_submit.assert_called_once_with(task, [], True, False, None) "} {"text": "import os import pytest from src.taipy.core.data._data_sql_repository import _DataSQLRepository from src.taipy.core.exceptions import ModelNotFound from src.taipy.core.task._task_fs_repository import _TaskFSRepository from src.taipy.core.task._task_sql_repository import _TaskSQLRepository from src.taipy.core.task.task import Task, TaskId class TestTaskFSRepository: @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_save_and_load(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) repository._save(task) 
obj = repository._load(task.id) assert isinstance(obj, Task) @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_exists(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) repository._save(task) assert repository._exists(task.id) assert not repository._exists(\"not-existed-task\") @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_load_all(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) for i in range(10): task.id = TaskId(f\"task-{i}\") repository._save(task) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_load_all_with_filters(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) for i in range(10): task.id = TaskId(f\"task-{i}\") task.owner_id = f\"owner-{i}\" repository._save(task) objs = repository._load_all(filters=[{\"owner_id\": \"owner-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_delete(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) repository._save(task) repository._delete(task.id) with pytest.raises(ModelNotFound): repository._load(task.id) @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_delete_all(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) for i in range(10): task.id = TaskId(f\"task-{i}\") repository._save(task) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_delete_many(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) for i in range(10): task.id = TaskId(f\"task-{i}\") repository._save(task) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_delete_by(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): task.id = TaskId(f\"task-{i}\") task._version = f\"{(i+1) // 5}.0\" repository._save(task) objs = repository._load_all() assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_search(self, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node], version=\"random_version_number\") for i in range(10): task.id = TaskId(f\"task-{i}\") task.owner_id = f\"owner-{i}\" 
repository._save(task) assert len(repository._load_all()) == 10 objs = repository._search(\"owner_id\", \"owner-2\") assert len(objs) == 1 assert isinstance(objs[0], Task) objs = repository._search(\"owner_id\", \"owner-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], Task) assert repository._search(\"owner_id\", \"owner-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_TaskFSRepository, _TaskSQLRepository]) def test_export(self, tmpdir, data_node, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) repository._save(task) repository._export(task.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _TaskFSRepository else os.path.join(tmpdir.strpath, \"task\") assert os.path.exists(os.path.join(dir_path, f\"{task.id}.json\")) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import uuid from unittest import mock import pytest from src.taipy.core import taipy from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import ModelNotFound, NonExistingTask from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.scope import Scope from taipy.config.config import Config def test_create_and_save(): input_configs = [Config.configure_data_node(\"my_input\", \"in_memory\")] output_configs = Config.configure_data_node(\"my_output\", \"in_memory\") task_config = Config.configure_task(\"foo\", print, input_configs, output_configs) task = _create_task_from_config(task_config) assert task.id is not None assert task.config_id == \"foo\" assert len(task.input) == 1 assert len(_DataManager._get_all()) == 2 assert task.my_input.id is not None assert task.my_input.config_id == \"my_input\" assert task.my_output.id is not None assert task.my_output.config_id == \"my_output\" assert task.function == print assert task.parent_ids == set() task_retrieved_from_manager = _TaskManager._get(task.id) assert task_retrieved_from_manager.id == task.id assert task_retrieved_from_manager.config_id == task.config_id assert len(task_retrieved_from_manager.input) == len(task.input) assert task_retrieved_from_manager.my_input.id is not None assert task_retrieved_from_manager.my_input.config_id == task.my_input.config_id assert task_retrieved_from_manager.my_output.id is not None assert task_retrieved_from_manager.my_output.config_id == task.my_output.config_id assert task_retrieved_from_manager.function == task.function assert task_retrieved_from_manager.parent_ids == set() def 
test_do_not_recreate_existing_data_node(): input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO) output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) _DataManager._create_and_set(input_config, \"scenario_id\", \"task_id\") assert len(_DataManager._get_all()) == 1 task_config = Config.configure_task(\"foo\", print, input_config, output_config) _create_task_from_config(task_config, scenario_id=\"scenario_id\") assert len(_DataManager._get_all()) == 2 def test_assign_task_as_parent_of_datanode(): dn_config_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.SCENARIO) dn_config_3 = Config.configure_data_node(\"dn_3\", \"in_memory\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"task_1\", print, dn_config_1, dn_config_2) task_config_2 = Config.configure_task(\"task_2\", print, dn_config_2, dn_config_3) tasks = _TaskManager._bulk_get_or_create([task_config_1, task_config_2], \"cycle_id\", \"scenario_id\") assert len(_DataManager._get_all()) == 3 assert len(_TaskManager._get_all()) == 2 assert len(tasks) == 2 dns = {dn.config_id: dn for dn in _DataManager._get_all()} assert dns[\"dn_1\"].parent_ids == {tasks[0].id} assert dns[\"dn_2\"].parent_ids == set([tasks[0].id, tasks[1].id]) assert dns[\"dn_3\"].parent_ids == {tasks[1].id} def test_do_not_recreate_existing_task(): input_config_scope_scenario = Config.configure_data_node(\"my_input_1\", \"in_memory\", Scope.SCENARIO) output_config_scope_scenario = Config.configure_data_node(\"my_output_1\", \"in_memory\", Scope.SCENARIO) task_config_1 = Config.configure_task(\"bar\", print, input_config_scope_scenario, output_config_scope_scenario) # task_config_1 scope is Scenario task_1 = _create_task_from_config(task_config_1) assert len(_TaskManager._get_all()) == 1 task_2 = _create_task_from_config(task_config_1) # Do not create. It already exists for None scenario assert len(_TaskManager._get_all()) == 1 assert task_1.id == task_2.id task_3 = _create_task_from_config(task_config_1, None, None) # Do not create. It already exists for None scenario assert len(_TaskManager._get_all()) == 1 assert task_1.id == task_2.id assert task_2.id == task_3.id task_4 = _create_task_from_config(task_config_1, None, \"scenario_1\") # Create even if sequence is the same. assert len(_TaskManager._get_all()) == 2 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id task_5 = _create_task_from_config( task_config_1, None, \"scenario_1\" ) # Do not create. 
It already exists for scenario_1 assert len(_TaskManager._get_all()) == 2 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id assert task_4.id == task_5.id task_6 = _create_task_from_config(task_config_1, None, \"scenario_2\") assert len(_TaskManager._get_all()) == 3 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id assert task_4.id == task_5.id assert task_5.id != task_6.id assert task_3.id != task_6.id input_config_scope_cycle = Config.configure_data_node(\"my_input_2\", \"in_memory\", Scope.CYCLE) output_config_scope_cycle = Config.configure_data_node(\"my_output_2\", \"in_memory\", Scope.CYCLE) task_config_2 = Config.configure_task(\"xyz\", print, input_config_scope_cycle, output_config_scope_cycle) # task_config_2 scope is Cycle task_7 = _create_task_from_config(task_config_2) assert len(_TaskManager._get_all()) == 4 task_8 = _create_task_from_config(task_config_2) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id task_9 = _create_task_from_config(task_config_2, None, None) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id task_10 = _create_task_from_config( task_config_2, None, \"scenario\" ) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id task_11 = _create_task_from_config( task_config_2, None, \"scenario\" ) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id task_12 = _create_task_from_config(task_config_2, \"cycle\", None) assert len(_TaskManager._get_all()) == 5 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id assert task_11.id != task_12.id task_13 = _create_task_from_config(task_config_2, \"cycle\", None) assert len(_TaskManager._get_all()) == 5 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id assert task_11.id != task_12.id assert task_12.id == task_13.id def test_set_and_get_task(): task_id_1 = TaskId(\"id1\") first_task = Task(\"name_1\", {}, print, [], [], task_id_1) task_id_2 = TaskId(\"id2\") second_task = Task(\"name_2\", {}, print, [], [], task_id_2) third_task_with_same_id_as_first_task = Task(\"name_is_not_1_anymore\", {}, print, [], [], task_id_1) # No task at initialization assert len(_TaskManager._get_all()) == 0 assert _TaskManager._get(task_id_1) is None assert _TaskManager._get(first_task) is None assert _TaskManager._get(task_id_2) is None assert _TaskManager._get(second_task) is None # Save one task. We expect to have only one task stored _TaskManager._set(first_task) assert len(_TaskManager._get_all()) == 1 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2) is None assert _TaskManager._get(second_task) is None # Save a second task. 
Now, we expect to have a total of two tasks stored _TaskManager._set(second_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id # We save the first task again. We expect nothing to change _TaskManager._set(first_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id # We save a third task with same id as the first one. # We expect the first task to be updated _TaskManager._set(third_task_with_same_id_as_first_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == third_task_with_same_id_as_first_task.id assert _TaskManager._get(task_id_1).config_id == third_task_with_same_id_as_first_task.config_id assert _TaskManager._get(first_task).id == third_task_with_same_id_as_first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id def test_get_all_on_multiple_versions_environment(): # Create 5 tasks with 2 versions each # Only version 1.0 has the task with config_id = \"config_id_1\" # Only version 2.0 has the task with config_id = \"config_id_6\" for version in range(1, 3): for i in range(5): _TaskManager._set( Task( f\"config_id_{i+version}\", {}, print, [], [], id=TaskId(f\"id{i}_v{version}\"), version=f\"{version}.0\" ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"1.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"2.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_ensure_conservation_of_order_of_data_nodes_on_task_creation(): embedded_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.SCENARIO) embedded_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.SCENARIO) embedded_3 = Config.configure_data_node(\"a_dn_3\", \"in_memory\", scope=Scope.SCENARIO) embedded_4 = Config.configure_data_node(\"dn_4\", \"in_memory\", scope=Scope.SCENARIO) embedded_5 = Config.configure_data_node(\"dn_5\", \"in_memory\", scope=Scope.SCENARIO) input = [embedded_1, embedded_2, embedded_3] output = [embedded_4, embedded_5] task_config_1 = Config.configure_task(\"name_1\", 
print, input, output) task_config_2 = Config.configure_task(\"name_2\", print, input, output) task_1, task_2 = _TaskManager._bulk_get_or_create([task_config_1, task_config_2]) assert [i.config_id for i in task_1.input.values()] == [embedded_1.id, embedded_2.id, embedded_3.id] assert [o.config_id for o in task_1.output.values()] == [embedded_4.id, embedded_5.id] assert [i.config_id for i in task_2.input.values()] == [embedded_1.id, embedded_2.id, embedded_3.id] assert [o.config_id for o in task_2.output.values()] == [embedded_4.id, embedded_5.id] def test_delete_raise_exception(): dn_input_config_1 = Config.configure_data_node( \"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\" ) dn_output_config_1 = Config.configure_data_node(\"my_output_1\", \"in_memory\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_input_config_1, dn_output_config_1) task_1 = _create_task_from_config(task_config_1) _TaskManager._delete(task_1.id) with pytest.raises(ModelNotFound): _TaskManager._delete(task_1.id) def test_hard_delete(): dn_input_config_1 = Config.configure_data_node( \"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\" ) dn_output_config_1 = Config.configure_data_node(\"my_output_1\", \"in_memory\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_input_config_1, dn_output_config_1) task_1 = _create_task_from_config(task_config_1) assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 _TaskManager._hard_delete(task_1.id) assert len(_TaskManager._get_all()) == 0 assert len(_DataManager._get_all()) == 2 def test_is_submittable(): assert len(_TaskManager._get_all()) == 0 dn_config = Config.configure_in_memory_data_node(\"dn\", 10) task_config = Config.configure_task(\"task\", print, [dn_config]) task = _TaskManager._bulk_get_or_create([task_config])[0] assert len(_TaskManager._get_all()) == 1 assert _TaskManager._is_submittable(task) assert _TaskManager._is_submittable(task.id) assert not _TaskManager._is_submittable(\"Task_temp\") task.input[\"dn\"].edit_in_progress = True assert not _TaskManager._is_submittable(task) assert not _TaskManager._is_submittable(task.id) task.input[\"dn\"].edit_in_progress = False assert _TaskManager._is_submittable(task) assert _TaskManager._is_submittable(task.id) def test_submit_task(): data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") task_1 = Task( \"grault\", {}, print, [data_node_1], [data_node_2], TaskId(\"t1\"), ) class MockOrchestrator(_Orchestrator): submit_calls = [] submit_ids = [] def submit_task(self, task, callbacks=None, force=False, wait=False, timeout=None): submit_id = f\"SUBMISSION_{str(uuid.uuid4())}\" self.submit_calls.append(task) self.submit_ids.append(submit_id) return None with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._orchestrator\", new=MockOrchestrator): # Task does not exist, we expect an exception with pytest.raises(NonExistingTask): _TaskManager._submit(task_1) with pytest.raises(NonExistingTask): _TaskManager._submit(task_1.id) _TaskManager._set(task_1) _TaskManager._submit(task_1) call_ids = [call.id for call in MockOrchestrator.submit_calls] assert call_ids == [task_1.id] assert len(MockOrchestrator.submit_ids) == 1 _TaskManager._submit(task_1) assert len(MockOrchestrator.submit_ids) == 2 assert len(MockOrchestrator.submit_ids) == len(set(MockOrchestrator.submit_ids)) _TaskManager._submit(task_1) assert 
len(MockOrchestrator.submit_ids) == 3 assert len(MockOrchestrator.submit_ids) == len(set(MockOrchestrator.submit_ids)) def my_print(a, b): print(a + b) def test_submit_task_with_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_path=\"wrong_path.pickle\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_manager = _TaskManagerFactory._build_manager() tasks = task_manager._bulk_get_or_create([task_cfg]) task = tasks[0] taipy.submit(task) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in task.input.values() ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in task.output.values() ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_submit_task_with_one_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"pickle_file_path\", default_data=\"value\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_manager = _TaskManagerFactory._build_manager() tasks = task_manager._bulk_get_or_create([task_cfg]) task = tasks[0] taipy.submit(task) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [task.input[\"wrong_csv_file_path\"]] ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. 
Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in [task.input[\"pickle_file_path\"], task.output[\"wrong_parquet_file_path\"]] ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_get_tasks_by_config_id(): dn_config = Config.configure_data_node(\"dn\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"t1\", print, dn_config) task_config_2 = Config.configure_task(\"t2\", print, dn_config) task_config_3 = Config.configure_task(\"t3\", print, dn_config) t_1_1 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] t_1_2 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] t_1_3 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] assert len(_TaskManager._get_all()) == 3 t_2_1 = _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] t_2_2 = _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_all()) == 5 t_3_1 = _TaskManager._bulk_get_or_create([task_config_3], scenario_id=\"scenario_6\")[0] assert len(_TaskManager._get_all()) == 6 t1_tasks = _TaskManager._get_by_config_id(task_config_1.id) assert len(t1_tasks) == 3 assert sorted([t_1_1.id, t_1_2.id, t_1_3.id]) == sorted([task.id for task in t1_tasks]) t2_tasks = _TaskManager._get_by_config_id(task_config_2.id) assert len(t2_tasks) == 2 assert sorted([t_2_1.id, t_2_2.id]) == sorted([task.id for task in t2_tasks]) t3_tasks = _TaskManager._get_by_config_id(task_config_3.id) assert len(t3_tasks) == 1 assert sorted([t_3_1.id]) == sorted([task.id for task in t3_tasks]) def test_get_scenarios_by_config_id_in_multiple_versions_environment(): dn_config = Config.configure_data_node(\"dn\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"t1\", print, dn_config) task_config_2 = Config.configure_task(\"t2\", print, dn_config) _VersionManager._set_experiment_version(\"1.0\") _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_by_config_id(task_config_1.id)) == 3 assert len(_TaskManager._get_by_config_id(task_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_by_config_id(task_config_1.id)) == 3 assert len(_TaskManager._get_by_config_id(task_config_2.id)) == 2 def _create_task_from_config(task_config, *args, **kwargs): return _TaskManager._bulk_get_or_create([task_config], *args, **kwargs)[0] "} {"text": "from src.taipy.core.data import InMemoryDataNode from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.task._task_model import _TaskModel from 
taipy.config.common.scope import Scope def test_none_properties_attribute_compatible(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"parent_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"output_id\"], \"version\": \"latest\", \"skippable\": False, } ) assert len(model.properties) == 0 def test_skippable_compatibility_with_non_existing_output(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"output_id\"], \"version\": \"latest\", \"skippable\": False, } ) assert not model.skippable def test_skippable_compatibility_with_no_output(): model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [], \"version\": \"latest\", \"skippable\": False, } ) assert not model.skippable def test_skippable_compatibility_with_one_output(): manager = _DataManagerFactory._build_manager() manager._set(InMemoryDataNode(\"cfg_id\", Scope.SCENARIO, id=\"dn_id\")) model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"dn_id\"], \"version\": \"latest\", \"skippable\": True, } ) assert model.skippable def test_skippable_compatibility_with_many_outputs(): manager = _DataManagerFactory._build_manager() manager._set(InMemoryDataNode(\"cfg_id\", Scope.SCENARIO, id=\"dn_id\")) manager._set(InMemoryDataNode(\"cfg_id_2\", Scope.SCENARIO, id=\"dn_2_id\")) model = _TaskModel.from_dict( { \"id\": \"id\", \"config_id\": \"config_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id\"], \"input_ids\": [\"input_id\"], \"function_name\": \"function_name\", \"function_module\": \"function_module\", \"output_ids\": [\"dn_id\", \"dn_2_id\"], \"version\": \"latest\", \"skippable\": True, } ) assert model.skippable "} {"text": "import uuid from unittest import mock import pytest from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import ModelNotFound, NonExistingTask from src.taipy.core.job._job_manager_factory import _JobManagerFactory from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.scope import Scope from taipy.config.config import Config def init_managers(): _JobManagerFactory._build_manager()._delete_all() _TaskManagerFactory._build_manager()._delete_all() _DataManagerFactory._build_manager()._delete_all() def test_create_and_save(init_sql_repo): init_managers() input_configs = 
[Config.configure_data_node(\"my_input\", \"in_memory\")] output_configs = Config.configure_data_node(\"my_output\", \"in_memory\") task_config = Config.configure_task(\"foo\", print, input_configs, output_configs) task = _create_task_from_config(task_config) assert task.id is not None assert task.config_id == \"foo\" assert len(task.input) == 1 assert len(_DataManager._get_all()) == 2 assert task.my_input.id is not None assert task.my_input.config_id == \"my_input\" assert task.my_output.id is not None assert task.my_output.config_id == \"my_output\" assert task.function == print assert task.parent_ids == set() task_retrieved_from_manager = _TaskManager._get(task.id) assert task_retrieved_from_manager.id == task.id assert task_retrieved_from_manager.config_id == task.config_id assert len(task_retrieved_from_manager.input) == len(task.input) assert task_retrieved_from_manager.my_input.id is not None assert task_retrieved_from_manager.my_input.config_id == task.my_input.config_id assert task_retrieved_from_manager.my_output.id is not None assert task_retrieved_from_manager.my_output.config_id == task.my_output.config_id assert task_retrieved_from_manager.function == task.function assert task_retrieved_from_manager.parent_ids == set() def test_do_not_recreate_existing_data_node(init_sql_repo): init_managers() input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO) output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) _DataManager._create_and_set(input_config, \"scenario_id\", \"task_id\") assert len(_DataManager._get_all()) == 1 task_config = Config.configure_task(\"foo\", print, input_config, output_config) _create_task_from_config(task_config, scenario_id=\"scenario_id\") assert len(_DataManager._get_all()) == 2 def test_do_not_recreate_existing_task(init_sql_repo): init_managers() assert len(_TaskManager._get_all()) == 0 input_config_scope_scenario = Config.configure_data_node(\"my_input_1\", \"in_memory\", Scope.SCENARIO) output_config_scope_scenario = Config.configure_data_node(\"my_output_1\", \"in_memory\", Scope.SCENARIO) task_config_1 = Config.configure_task(\"bar\", print, input_config_scope_scenario, output_config_scope_scenario) # task_config_1 scope is Scenario task_1 = _create_task_from_config(task_config_1) assert len(_TaskManager._get_all()) == 1 task_2 = _create_task_from_config(task_config_1) # Do not create. It already exists for None scenario assert len(_TaskManager._get_all()) == 1 assert task_1.id == task_2.id task_3 = _create_task_from_config(task_config_1, None, None) # Do not create. It already exists for None scenario assert len(_TaskManager._get_all()) == 1 assert task_1.id == task_2.id assert task_2.id == task_3.id task_4 = _create_task_from_config(task_config_1, None, \"scenario_1\") # Create even if sequence is the same. assert len(_TaskManager._get_all()) == 2 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id task_5 = _create_task_from_config( task_config_1, None, \"scenario_1\" ) # Do not create. 
It already exists for scenario_1 assert len(_TaskManager._get_all()) == 2 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id assert task_4.id == task_5.id task_6 = _create_task_from_config(task_config_1, None, \"scenario_2\") assert len(_TaskManager._get_all()) == 3 assert task_1.id == task_2.id assert task_2.id == task_3.id assert task_3.id != task_4.id assert task_4.id == task_5.id assert task_5.id != task_6.id assert task_3.id != task_6.id input_config_scope_cycle = Config.configure_data_node(\"my_input_2\", \"in_memory\", Scope.CYCLE) output_config_scope_cycle = Config.configure_data_node(\"my_output_2\", \"in_memory\", Scope.CYCLE) task_config_2 = Config.configure_task(\"xyz\", print, input_config_scope_cycle, output_config_scope_cycle) # task_config_2 scope is Cycle task_7 = _create_task_from_config(task_config_2) assert len(_TaskManager._get_all()) == 4 task_8 = _create_task_from_config(task_config_2) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id task_9 = _create_task_from_config(task_config_2, None, None) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id task_10 = _create_task_from_config( task_config_2, None, \"scenario\" ) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id task_11 = _create_task_from_config( task_config_2, None, \"scenario\" ) # Do not create. It already exists for None cycle assert len(_TaskManager._get_all()) == 4 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id task_12 = _create_task_from_config(task_config_2, \"cycle\", None) assert len(_TaskManager._get_all()) == 5 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id assert task_11.id != task_12.id task_13 = _create_task_from_config(task_config_2, \"cycle\", None) assert len(_TaskManager._get_all()) == 5 assert task_7.id == task_8.id assert task_8.id == task_9.id assert task_9.id == task_10.id assert task_10.id == task_11.id assert task_11.id != task_12.id assert task_12.id == task_13.id def test_set_and_get_task(init_sql_repo): init_managers() task_id_1 = TaskId(\"id1\") first_task = Task(\"name_1\", {}, print, [], [], task_id_1) task_id_2 = TaskId(\"id2\") second_task = Task(\"name_2\", {}, print, [], [], task_id_2) third_task_with_same_id_as_first_task = Task(\"name_is_not_1_anymore\", {}, print, [], [], task_id_1) # No task at initialization assert len(_TaskManager._get_all()) == 0 assert _TaskManager._get(task_id_1) is None assert _TaskManager._get(first_task) is None assert _TaskManager._get(task_id_2) is None assert _TaskManager._get(second_task) is None # Save one task. We expect to have only one task stored _TaskManager._set(first_task) assert len(_TaskManager._get_all()) == 1 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2) is None assert _TaskManager._get(second_task) is None # Save a second task. 
Now, we expect to have a total of two tasks stored _TaskManager._set(second_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id # We save the first task again. We expect nothing to change _TaskManager._set(first_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == first_task.id assert _TaskManager._get(first_task).id == first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id # We save a third task with same id as the first one. # We expect the first task to be updated _TaskManager._set(third_task_with_same_id_as_first_task) assert len(_TaskManager._get_all()) == 2 assert _TaskManager._get(task_id_1).id == third_task_with_same_id_as_first_task.id assert _TaskManager._get(task_id_1).config_id == third_task_with_same_id_as_first_task.config_id assert _TaskManager._get(first_task).id == third_task_with_same_id_as_first_task.id assert _TaskManager._get(task_id_2).id == second_task.id assert _TaskManager._get(second_task).id == second_task.id def test_get_all_on_multiple_versions_environment(init_sql_repo): Config.configure_global_app(repository_type=\"sql\") init_managers() # Create 5 tasks with 2 versions each # Only version 1.0 has the task with config_id = \"config_id_1\" # Only version 2.0 has the task with config_id = \"config_id_6\" for version in range(1, 3): for i in range(5): _TaskManager._set( Task( f\"config_id_{i+version}\", {}, print, [], [], id=TaskId(f\"id{i}_v{version}\"), version=f\"{version}.0\" ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"1.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"2.0\") assert len(_TaskManager._get_all()) == 5 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_TaskManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_ensure_conservation_of_order_of_data_nodes_on_task_creation(init_sql_repo): init_managers() embedded_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.SCENARIO) embedded_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.SCENARIO) embedded_3 = Config.configure_data_node(\"a_dn_3\", \"in_memory\", scope=Scope.SCENARIO) embedded_4 = Config.configure_data_node(\"dn_4\", \"in_memory\", scope=Scope.SCENARIO) embedded_5 = Config.configure_data_node(\"dn_5\", \"in_memory\", scope=Scope.SCENARIO) input = [embedded_1, 
embedded_2, embedded_3] output = [embedded_4, embedded_5] task_config_1 = Config.configure_task(\"name_1\", print, input, output) task_config_2 = Config.configure_task(\"name_2\", print, input, output) task_1, task_2 = _TaskManager._bulk_get_or_create([task_config_1, task_config_2]) assert [i.config_id for i in task_1.input.values()] == [embedded_1.id, embedded_2.id, embedded_3.id] assert [o.config_id for o in task_1.output.values()] == [embedded_4.id, embedded_5.id] assert [i.config_id for i in task_2.input.values()] == [embedded_1.id, embedded_2.id, embedded_3.id] assert [o.config_id for o in task_2.output.values()] == [embedded_4.id, embedded_5.id] def test_delete_raise_exception(init_sql_repo): init_managers() dn_input_config_1 = Config.configure_data_node( \"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\" ) dn_output_config_1 = Config.configure_data_node(\"my_output_1\", \"in_memory\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_input_config_1, dn_output_config_1) task_1 = _create_task_from_config(task_config_1) _TaskManager._delete(task_1.id) with pytest.raises(ModelNotFound): _TaskManager._delete(task_1.id) def test_hard_delete(init_sql_repo): init_managers() dn_input_config_1 = Config.configure_data_node( \"my_input_1\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\" ) dn_output_config_1 = Config.configure_data_node(\"my_output_1\", \"in_memory\") task_config_1 = Config.configure_task(\"task_config_1\", print, dn_input_config_1, dn_output_config_1) task_1 = _create_task_from_config(task_config_1) assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 _TaskManager._hard_delete(task_1.id) assert len(_TaskManager._get_all()) == 0 assert len(_DataManager._get_all()) == 2 def test_submit_task(): data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") task_1 = Task( \"grault\", {}, print, [data_node_1], [data_node_2], TaskId(\"t1\"), ) class MockOrchestrator(_Orchestrator): submit_calls = [] submit_ids = [] def submit_task(self, task, callbacks=None, force=False, wait=False, timeout=None): submit_id = f\"SUBMISSION_{str(uuid.uuid4())}\" self.submit_calls.append(task) self.submit_ids.append(submit_id) return None with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._orchestrator\", new=MockOrchestrator): # Task does not exist, we expect an exception with pytest.raises(NonExistingTask): _TaskManager._submit(task_1) with pytest.raises(NonExistingTask): _TaskManager._submit(task_1.id) _TaskManager._set(task_1) _TaskManager._submit(task_1) call_ids = [call.id for call in MockOrchestrator.submit_calls] assert call_ids == [task_1.id] assert len(MockOrchestrator.submit_ids) == 1 _TaskManager._submit(task_1) assert len(MockOrchestrator.submit_ids) == 2 assert len(MockOrchestrator.submit_ids) == len(set(MockOrchestrator.submit_ids)) _TaskManager._submit(task_1) assert len(MockOrchestrator.submit_ids) == 3 assert len(MockOrchestrator.submit_ids) == len(set(MockOrchestrator.submit_ids)) def test_get_tasks_by_config_id(init_sql_repo): init_managers() dn_config = Config.configure_data_node(\"dn\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"t1\", print, dn_config) task_config_2 = Config.configure_task(\"t2\", print, dn_config) task_config_3 = Config.configure_task(\"t3\", print, dn_config) t_1_1 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] t_1_2 = 
_TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] t_1_3 = _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] assert len(_TaskManager._get_all()) == 3 t_2_1 = _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] t_2_2 = _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_all()) == 5 t_3_1 = _TaskManager._bulk_get_or_create([task_config_3], scenario_id=\"scenario_6\")[0] assert len(_TaskManager._get_all()) == 6 t1_tasks = _TaskManager._get_by_config_id(task_config_1.id) assert len(t1_tasks) == 3 assert sorted([t_1_1.id, t_1_2.id, t_1_3.id]) == sorted([task.id for task in t1_tasks]) t2_tasks = _TaskManager._get_by_config_id(task_config_2.id) assert len(t2_tasks) == 2 assert sorted([t_2_1.id, t_2_2.id]) == sorted([task.id for task in t2_tasks]) t3_tasks = _TaskManager._get_by_config_id(task_config_3.id) assert len(t3_tasks) == 1 assert sorted([t_3_1.id]) == sorted([task.id for task in t3_tasks]) def test_get_scenarios_by_config_id_in_multiple_versions_environment(init_sql_repo): init_managers() dn_config = Config.configure_data_node(\"dn\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"t1\", print, dn_config) task_config_2 = Config.configure_task(\"t2\", print, dn_config) _VersionManager._set_experiment_version(\"1.0\") _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_by_config_id(task_config_1.id)) == 3 assert len(_TaskManager._get_by_config_id(task_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_1\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_2\")[0] _TaskManager._bulk_get_or_create([task_config_1], scenario_id=\"scenario_3\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_4\")[0] _TaskManager._bulk_get_or_create([task_config_2], scenario_id=\"scenario_5\")[0] assert len(_TaskManager._get_by_config_id(task_config_1.id)) == 3 assert len(_TaskManager._get_by_config_id(task_config_2.id)) == 2 def _create_task_from_config(task_config, *args, **kwargs): return _TaskManager._bulk_get_or_create([task_config], *args, **kwargs)[0] "} {"text": "from datetime import timedelta from time import sleep from typing import Union from unittest import mock from unittest.mock import MagicMock import pytest from src.taipy.core import JobId, Sequence, SequenceId, TaskId from src.taipy.core._orchestrator._dispatcher._development_job_dispatcher import _DevelopmentJobDispatcher from src.taipy.core._orchestrator._dispatcher._standalone_job_dispatcher import _StandaloneJobDispatcher from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.job.job import Job from src.taipy.core.job.status import Status from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.submission._submission_manager_factory 
import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from taipy.config.common.scope import Scope from taipy.config.config import Config @pytest.fixture def task_id(): return TaskId(\"task_id1\") @pytest.fixture def task(task_id): return Task(config_id=\"name\", properties={}, function=print, input=[], output=[], id=task_id) @pytest.fixture def job_id(): return JobId(\"id1\") @pytest.fixture(scope=\"class\") def scenario(): return Scenario( \"scenario_config\", [], {}, [], \"SCENARIO_scenario_config\", version=\"random_version_number\", ) @pytest.fixture def job(task, job_id): return Job(job_id, task, \"submit_id\", \"SCENARIO_scenario_config\") @pytest.fixture def replace_in_memory_write_fct(): default_write = InMemoryDataNode.write InMemoryDataNode.write = _error yield InMemoryDataNode.write = default_write def _foo(): return 42 def _error(): raise RuntimeError(\"Something bad has happened\") def test_create_job(scenario, task, job): from src.taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory _ScenarioManagerFactory._build_manager()._set(scenario) assert job.id == \"id1\" assert task in job assert job.is_submitted() assert job.submit_id is not None assert job.submit_entity_id == \"SCENARIO_scenario_config\" assert job.submit_entity == scenario with mock.patch(\"src.taipy.core.get\") as get_mck: get_mck.return_value = task assert job.get_label() == \"name > \" + job.id assert job.get_simple_label() == job.id def test_comparison(task): job_id_1 = JobId(\"id1\") job_id_2 = JobId(\"id2\") job_1 = Job(job_id_1, task, \"submit_id\", \"scenario_entity_id\") sleep(0.01) # Comparison is based on time, precision on Windows is not enough important job_2 = Job(job_id_2, task, \"submit_id\", \"scenario_entity_id\") assert job_1 < job_2 assert job_2 > job_1 assert job_1 <= job_2 assert job_1 <= job_1 assert job_2 >= job_1 assert job_1 >= job_1 assert job_1 == job_1 assert job_1 != job_2 def test_status_job(task): submission = _SubmissionManagerFactory._build_manager()._create(task.id, task._ID_PREFIX) job = Job(\"job_id\", task, submission.id, \"SCENARIO_scenario_config\") submission.jobs = [job] assert job.is_submitted() assert job.is_skipped() is False assert job.is_pending() is False assert job.is_blocked() is False assert job.is_canceled() is False assert job.is_failed() is False assert job.is_completed() is False assert job.is_running() is False job.canceled() assert job.is_canceled() job.failed() assert job.is_failed() job.running() assert job.is_running() job.completed() assert job.is_completed() job.pending() assert job.is_pending() job.blocked() assert job.is_blocked() job.skipped() assert job.is_skipped() def test_notification_job(task): subscribe = MagicMock() submission = _SubmissionManagerFactory._build_manager()._create(task.id, task._ID_PREFIX) job = Job(\"job_id\", task, submission.id, \"SCENARIO_scenario_config\") submission.jobs = [job] job._on_status_change(subscribe) job.running() subscribe.assert_called_once_with(job) subscribe.reset_mock() job.completed() subscribe.assert_called_once_with(job) subscribe.reset_mock() job.skipped() subscribe.assert_called_once_with(job) def test_handle_exception_in_user_function(task_id, job_id): task = Task(config_id=\"name\", properties={}, input=[], function=_error, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task.id, task._ID_PREFIX) job = 
Job(job_id, task, submission.id, \"scenario_entity_id\") submission.jobs = [job] _dispatch(task, job) job = _JobManager._get(job_id) assert job.is_failed() assert 'raise RuntimeError(\"Something bad has happened\")' in str(job.stacktrace[0]) def test_handle_exception_in_input_data_node(task_id, job_id): data_node = InMemoryDataNode(\"data_node\", scope=Scope.SCENARIO) task = Task(config_id=\"name\", properties={}, input=[data_node], function=print, output=[], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task.id, task._ID_PREFIX) job = Job(job_id, task, submission.id, \"scenario_entity_id\") submission.jobs = [job] _dispatch(task, job) job = _JobManager._get(job_id) assert job.is_failed() assert \"taipy.core.exceptions.exceptions.NoData\" in str(job.stacktrace[0]) def test_handle_exception_in_ouptut_data_node(replace_in_memory_write_fct, task_id, job_id): data_node = InMemoryDataNode(\"data_node\", scope=Scope.SCENARIO) task = Task(config_id=\"name\", properties={}, input=[], function=_foo, output=[data_node], id=task_id) submission = _SubmissionManagerFactory._build_manager()._create(task.id, task._ID_PREFIX) job = Job(job_id, task, submission.id, \"scenario_entity_id\") submission.jobs = [job] _dispatch(task, job) job = _JobManager._get(job_id) assert job.is_failed() assert \"taipy.core.exceptions.exceptions.DataNodeWritingError\" in str(job.stacktrace[0]) def test_auto_set_and_reload(current_datetime, job_id): task_1 = Task(config_id=\"name_1\", properties={}, function=_foo, id=TaskId(\"task_1\")) task_2 = Task(config_id=\"name_2\", properties={}, function=_foo, id=TaskId(\"task_2\")) submission = _SubmissionManagerFactory._build_manager()._create(task_1.id, task_1._ID_PREFIX) job_1 = Job(job_id, task_1, submission.id, \"scenario_entity_id\") submission.jobs = [job_1] _TaskManager._set(task_1) _TaskManager._set(task_2) _JobManager._set(job_1) job_2 = _JobManager._get(job_1, \"submit_id_2\") # auto set & reload on task attribute assert job_1.task.id == task_1.id assert job_2.task.id == task_1.id job_1.task = task_2 assert job_1.task.id == task_2.id assert job_2.task.id == task_2.id job_2.task = task_1 assert job_1.task.id == task_1.id assert job_2.task.id == task_1.id # auto set & reload on force attribute assert not job_1.force assert not job_2.force job_1.force = True assert job_1.force assert job_2.force job_2.force = False assert not job_1.force assert not job_2.force # auto set & reload on status attribute assert job_1.status == Status.SUBMITTED assert job_2.status == Status.SUBMITTED job_1.status = Status.CANCELED assert job_1.status == Status.CANCELED assert job_2.status == Status.CANCELED job_2.status = Status.BLOCKED assert job_1.status == Status.BLOCKED assert job_2.status == Status.BLOCKED # auto set & reload on creation_date attribute new_datetime = current_datetime + timedelta(1) new_datetime_1 = current_datetime + timedelta(1) job_1.creation_date = new_datetime_1 assert job_1.creation_date == new_datetime_1 assert job_2.creation_date == new_datetime_1 job_2.creation_date = new_datetime assert job_1.creation_date == new_datetime assert job_2.creation_date == new_datetime with job_1 as job: assert job.task.id == task_1.id assert not job.force assert job.status == Status.BLOCKED assert job.creation_date == new_datetime assert job._is_in_context new_datetime_2 = new_datetime + timedelta(1) job.task = task_2 job.force = True job.status = Status.COMPLETED job.creation_date = new_datetime_2 assert job.task.id == task_1.id assert not job.force assert 
job.status == Status.BLOCKED assert job.creation_date == new_datetime assert job._is_in_context assert job_1.task.id == task_2.id assert job_1.force assert job_1.status == Status.COMPLETED assert job_1.creation_date == new_datetime_2 assert not job_1._is_in_context def _dispatch(task: Task, job: Job, mode=JobConfig._DEVELOPMENT_MODE): Config.configure_job_executions(mode=mode) _OrchestratorFactory._build_dispatcher() _TaskManager._set(task) _JobManager._set(job) dispatcher: Union[_StandaloneJobDispatcher, _DevelopmentJobDispatcher] = _StandaloneJobDispatcher( _OrchestratorFactory._orchestrator ) if mode == JobConfig._DEVELOPMENT_MODE: dispatcher = _DevelopmentJobDispatcher(_OrchestratorFactory._orchestrator) dispatcher._dispatch(job) def test_is_deletable(): with mock.patch(\"src.taipy.core.job._job_manager._JobManager._is_deletable\") as mock_submit: task = Task(config_id=\"name_1\", properties={}, function=_foo, id=TaskId(\"task_1\")) job = Job(\"job_id\", task, \"submit_id_1\", \"scenario_entity_id\") job.is_deletable() mock_submit.assert_called_once_with(job) "} {"text": "import multiprocessing import random import string from functools import partial from time import sleep from unittest import mock import pytest from src.taipy.core._orchestrator._dispatcher._job_dispatcher import _JobDispatcher from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import JobNotDeletedException from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.job.job_id import JobId from src.taipy.core.job.status import Status from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils import assert_true_after_time def multiply(nb1: float, nb2: float): return nb1 * nb2 def lock_multiply(lock, nb1: float, nb2: float): with lock: return multiply(1 or nb1, 2 or nb2) def test_create_jobs(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task = _create_task(multiply, name=\"get_job\") _OrchestratorFactory._build_dispatcher() job_1 = _JobManager._create(task, [print], \"submit_id\", \"scenario_id\", True) assert _JobManager._get(job_1.id) == job_1 assert job_1.is_submitted() assert task.config_id in job_1.id assert job_1.task.id == task.id assert job_1.submit_id == \"submit_id\" assert job_1.submit_entity_id == \"scenario_id\" assert job_1.force job_2 = _JobManager._create(task, [print], \"submit_id_1\", \"scenario_id\", False) assert _JobManager._get(job_2.id) == job_2 assert job_2.is_submitted() assert task.config_id in job_2.id assert job_2.task.id == task.id assert job_2.submit_id == \"submit_id_1\" assert job_2.submit_entity_id == \"scenario_id\" assert not job_2.force def test_get_job(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task = _create_task(multiply, name=\"get_job\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get(job_1.id) == job_1 assert 
_JobManager._get(job_1.id).submit_entity_id == task.id job_2 = _OrchestratorFactory._orchestrator.submit_task(task) assert job_1 != job_2 assert _JobManager._get(job_1.id).id == job_1.id assert _JobManager._get(job_2.id).id == job_2.id assert _JobManager._get(job_2.id).submit_entity_id == task.id def test_get_latest_job(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task = _create_task(multiply, name=\"get_latest_job\") task_2 = _create_task(multiply, name=\"get_latest_job_2\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get_latest(task) == job_1 assert _JobManager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important job_2 = _OrchestratorFactory._orchestrator.submit_task(task_2) assert _JobManager._get_latest(task).id == job_1.id assert _JobManager._get_latest(task_2).id == job_2.id sleep(0.01) # Comparison is based on time, precision on Windows is not enough important job_1_bis = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get_latest(task).id == job_1_bis.id assert _JobManager._get_latest(task_2).id == job_2.id def test_get_job_unknown(): assert _JobManager._get(JobId(\"Unknown\")) is None def test_get_jobs(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task = _create_task(multiply, name=\"get_all_jobs\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) job_2 = _OrchestratorFactory._orchestrator.submit_task(task) assert {job.id for job in _JobManager._get_all()} == {job_1.id, job_2.id} def test_delete_job(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task = _create_task(multiply, name=\"delete_job\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) job_2 = _OrchestratorFactory._orchestrator.submit_task(task) _JobManager._delete(job_1) assert [job.id for job in _JobManager._get_all()] == [job_2.id] assert _JobManager._get(job_1.id) is None m = multiprocessing.Manager() lock = m.Lock() def inner_lock_multiply(nb1: float, nb2: float): with lock: return multiply(1 or nb1, 2 or nb2) def test_raise_when_trying_to_delete_unfinished_job(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock = m.Lock() dnm = _DataManagerFactory._build_manager() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dnm._set(dn_1) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dnm._set(dn_2) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO) dnm._set(dn_3) task = Task( \"task_config_1\", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id=\"raise_when_delete_unfinished\" ) _OrchestratorFactory._build_dispatcher() with lock: job = _OrchestratorFactory._orchestrator.submit_task(task) assert_true_after_time(lambda: len(_JobDispatcher._dispatched_processes) == 1) assert_true_after_time(job.is_running) with pytest.raises(JobNotDeletedException): _JobManager._delete(job) with pytest.raises(JobNotDeletedException): _JobManager._delete(job, force=False) assert_true_after_time(job.is_completed) _JobManager._delete(job) def test_force_deleting_unfinished_job(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock = m.Lock() dnm = 
_DataManagerFactory._build_manager() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dnm._set(dn_1) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dnm._set(dn_2) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO) dnm._set(dn_3) task = Task( \"task_config_1\", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id=\"force_deleting_unfinished_job\" ) _OrchestratorFactory._build_dispatcher() with lock: job = _OrchestratorFactory._orchestrator.submit_task(task) assert_true_after_time(job.is_running) with pytest.raises(JobNotDeletedException): _JobManager._delete(job, force=False) _JobManager._delete(job, force=True) assert _JobManager._get(job.id) is None def test_cancel_single_job(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) task = _create_task(multiply, name=\"cancel_single_job\") _OrchestratorFactory._build_dispatcher() assert_true_after_time(_OrchestratorFactory._dispatcher.is_running) _OrchestratorFactory._dispatcher.stop() assert_true_after_time(lambda: not _OrchestratorFactory._dispatcher.is_running()) job = _OrchestratorFactory._orchestrator.submit_task(task) assert_true_after_time(job.is_pending) assert_true_after_time(lambda: len(_JobDispatcher._dispatched_processes) == 0) _JobManager._cancel(job.id) assert_true_after_time(job.is_canceled) assert_true_after_time(job.is_canceled) @mock.patch( \"src.taipy.core._orchestrator._orchestrator._Orchestrator._orchestrate_job_to_run_or_block\", return_value=\"orchestrated_job\", ) @mock.patch(\"src.taipy.core._orchestrator._orchestrator._Orchestrator._cancel_jobs\") def test_cancel_canceled_abandoned_failed_jobs(cancel_jobs, orchestrated_job): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) task = _create_task(multiply, name=\"test_cancel_canceled_abandoned_failed_jobs\") _OrchestratorFactory._build_dispatcher() assert_true_after_time(_OrchestratorFactory._dispatcher.is_running) _OrchestratorFactory._dispatcher.stop() assert_true_after_time(lambda: not _OrchestratorFactory._dispatcher.is_running()) job = _OrchestratorFactory._orchestrator.submit_task(task) job.canceled() assert job.is_canceled() _JobManager._cancel(job) cancel_jobs.assert_not_called() assert job.is_canceled() job = _OrchestratorFactory._orchestrator.submit_task(task) job.failed() assert job.is_failed() _JobManager._cancel(job) cancel_jobs.assert_not_called() assert job.is_failed() job = _OrchestratorFactory._orchestrator.submit_task(task) job.abandoned() assert job.is_abandoned() _JobManager._cancel(job) cancel_jobs.assert_not_called() assert job.is_abandoned() @mock.patch( \"src.taipy.core._orchestrator._orchestrator._Orchestrator._orchestrate_job_to_run_or_block\", return_value=\"orchestrated_job\", ) @mock.patch(\"src.taipy.core.job.job.Job.canceled\") def test_cancel_completed_skipped_jobs(cancel_jobs, orchestrated_job): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) task = _create_task(multiply, name=\"cancel_single_job\") _OrchestratorFactory._build_dispatcher() assert_true_after_time(_OrchestratorFactory._dispatcher.is_running) _OrchestratorFactory._dispatcher.stop() assert_true_after_time(lambda: not _OrchestratorFactory._dispatcher.is_running()) job = _OrchestratorFactory._orchestrator.submit_task(task) job.completed() assert job.is_completed() cancel_jobs.assert_not_called() _JobManager._cancel(job) assert job.is_completed() job = 
_OrchestratorFactory._orchestrator.submit_task(task) job.failed() assert job.is_failed() cancel_jobs.assert_not_called() _JobManager._cancel(job) assert job.is_failed() job = _OrchestratorFactory._orchestrator.submit_task(task) job.skipped() assert job.is_skipped() cancel_jobs.assert_not_called() _JobManager._cancel(job) assert job.is_skipped() def test_cancel_single_running_job(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) m = multiprocessing.Manager() lock = m.Lock() dnm = _DataManagerFactory._build_manager() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dnm._set(dn_1) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dnm._set(dn_2) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO) dnm._set(dn_3) task = Task(\"task_config_1\", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id=\"cancel_single_job\") _OrchestratorFactory._build_dispatcher() assert_true_after_time(_OrchestratorFactory._dispatcher.is_running) assert_true_after_time(lambda: _OrchestratorFactory._dispatcher._nb_available_workers == 2) with lock: job = _OrchestratorFactory._orchestrator.submit_task(task) assert_true_after_time(lambda: len(_JobDispatcher._dispatched_processes) == 1) assert_true_after_time(lambda: _OrchestratorFactory._dispatcher._nb_available_workers == 1) assert_true_after_time(job.is_running) _JobManager._cancel(job) assert_true_after_time(job.is_running) assert_true_after_time(lambda: len(_JobDispatcher._dispatched_processes) == 0) assert_true_after_time(lambda: _OrchestratorFactory._dispatcher._nb_available_workers == 2) assert_true_after_time(job.is_completed) def test_cancel_subsequent_jobs(): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1) _OrchestratorFactory._build_dispatcher() orchestrator = _OrchestratorFactory._orchestrator submission_manager = _SubmissionManagerFactory._build_manager() lock_0 = m.Lock() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO, properties={\"default_data\": 3}) dn_4 = InMemoryDataNode(\"dn_config_4\", Scope.SCENARIO, properties={\"default_data\": 4}) task_1 = Task(\"task_config_1\", {}, partial(lock_multiply, lock_0), [dn_1, dn_2], [dn_3], id=\"task_1\") task_2 = Task(\"task_config_2\", {}, multiply, [dn_1, dn_3], [dn_4], id=\"task_2\") task_3 = Task(\"task_config_3\", {}, print, [dn_4], id=\"task_3\") # Can't get tasks under 1 scenario due to partial not serializable submission_1 = submission_manager._create(\"scenario_id\", Scenario._ID_PREFIX) submission_2 = submission_manager._create(\"scenario_id\", Scenario._ID_PREFIX) _DataManager._set(dn_1) _DataManager._set(dn_2) _DataManager._set(dn_3) _DataManager._set(dn_4) with lock_0: job_1 = orchestrator._lock_dn_output_and_create_job( task_1, submit_id=submission_1.id, submit_entity_id=submission_1.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_1]) job_2 = orchestrator._lock_dn_output_and_create_job( task_2, submit_id=submission_1.id, submit_entity_id=submission_1.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_2]) job_3 = orchestrator._lock_dn_output_and_create_job( task_3, submit_id=submission_1.id, submit_entity_id=submission_1.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_3]) submission_1.jobs = [job_1, job_2, 
job_3] assert_true_after_time(lambda: _OrchestratorFactory._orchestrator.jobs_to_run.qsize() == 0) assert_true_after_time(lambda: len(_OrchestratorFactory._orchestrator.blocked_jobs) == 2) assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_blocked) assert_true_after_time(job_3.is_blocked) job_4 = _OrchestratorFactory._orchestrator._lock_dn_output_and_create_job( task_1, submit_id=submission_2.id, submit_entity_id=submission_2.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_4]) job_5 = _OrchestratorFactory._orchestrator._lock_dn_output_and_create_job( task_2, submit_id=submission_2.id, submit_entity_id=submission_2.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_5]) job_6 = _OrchestratorFactory._orchestrator._lock_dn_output_and_create_job( task_3, submit_id=submission_2.id, submit_entity_id=submission_2.entity_id ) orchestrator._orchestrate_job_to_run_or_block([job_6]) submission_2.jobs = [job_4, job_5, job_6] assert_true_after_time(job_4.is_pending) assert_true_after_time(job_5.is_blocked) assert_true_after_time(job_6.is_blocked) assert _OrchestratorFactory._orchestrator.jobs_to_run.qsize() == 1 assert len(_OrchestratorFactory._orchestrator.blocked_jobs) == 4 _JobManager._cancel(job_4) assert_true_after_time(job_4.is_canceled) assert_true_after_time(job_5.is_abandoned) assert_true_after_time(job_6.is_abandoned) assert _OrchestratorFactory._orchestrator.jobs_to_run.qsize() == 0 assert len(_OrchestratorFactory._orchestrator.blocked_jobs) == 2 _JobManager._cancel(job_1) assert_true_after_time(job_1.is_running) assert_true_after_time(job_2.is_abandoned) assert_true_after_time(job_3.is_abandoned) assert_true_after_time(job_1.is_completed) assert_true_after_time(job_2.is_abandoned) assert_true_after_time(job_3.is_abandoned) assert_true_after_time(job_4.is_canceled) assert_true_after_time(job_5.is_abandoned) assert_true_after_time(job_6.is_abandoned) assert_true_after_time( lambda: all( not _OrchestratorFactory._orchestrator._is_blocked(job) for job in [job_1, job_2, job_3, job_4, job_5, job_6] ) ) assert_true_after_time(lambda: _OrchestratorFactory._orchestrator.jobs_to_run.qsize() == 0) def test_is_deletable(): assert len(_JobManager._get_all()) == 0 task = _create_task(print, 0, \"task\") job = _OrchestratorFactory._orchestrator.submit_task(task) assert job.is_completed() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.abandoned() assert job.is_abandoned() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.canceled() assert job.is_canceled() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.failed() assert job.is_failed() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.skipped() assert job.is_skipped() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.blocked() assert job.is_blocked() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.running() assert job.is_running() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.pending() assert job.is_pending() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.status = Status.SUBMITTED assert job.is_submitted() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) def _create_task(function, nb_outputs=1, name=None): input1_dn_config = Config.configure_data_node(\"input1\", \"pickle\", 
Scope.SCENARIO, default_data=21) input2_dn_config = Config.configure_data_node(\"input2\", \"pickle\", Scope.SCENARIO, default_data=2) output_dn_configs = [ Config.configure_data_node(f\"output{i}\", \"pickle\", Scope.SCENARIO, default_data=0) for i in range(nb_outputs) ] _DataManager._bulk_get_or_create({cfg for cfg in output_dn_configs}) name = name or \"\".join(random.choice(string.ascii_lowercase) for _ in range(10)) task_config = Config.configure_task( id=name, function=function, input=[input1_dn_config, input2_dn_config], output=output_dn_configs, ) return _TaskManager._bulk_get_or_create([task_config])[0] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import multiprocessing import random import string from functools import partial from time import sleep import pytest from src.taipy.core import Task from src.taipy.core._orchestrator._dispatcher._job_dispatcher import _JobDispatcher from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._repository.db._sql_connection import _build_connection, _SQLConnection from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data import InMemoryDataNode from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.exceptions.exceptions import JobNotDeletedException from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.job._job_manager_factory import _JobManagerFactory from src.taipy.core.job.job_id import JobId from src.taipy.core.job.status import Status from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils import assert_true_after_time def multiply(nb1: float, nb2: float): return nb1 * nb2 def lock_multiply(lock, nb1: float, nb2: float): with lock: return multiply(nb1 or 1, nb2 or 2) def init_managers(): _TaskManagerFactory._build_manager()._delete_all() _DataManagerFactory._build_manager()._delete_all() _JobManagerFactory._build_manager()._delete_all() def test_create_jobs(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() task = _create_task(multiply, name=\"get_job\") _OrchestratorFactory._build_dispatcher() job_1 = _JobManager._create(task, [print], \"submit_id\", \"secnario_id\", True) assert _JobManager._get(job_1.id) == job_1 assert job_1.is_submitted() assert task.config_id in job_1.id assert job_1.task.id == task.id assert job_1.submit_id == \"submit_id\" assert job_1.submit_entity_id == \"secnario_id\" assert job_1.force job_2 = _JobManager._create(task, [print], \"submit_id_1\", \"secnario_id\", False) assert _JobManager._get(job_2.id) == job_2 assert job_2.is_submitted() assert task.config_id in job_2.id assert job_2.task.id == task.id assert job_2.submit_id == \"submit_id_1\" assert job_2.submit_entity_id == 
\"secnario_id\" assert not job_2.force def test_get_job(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() task = _create_task(multiply, name=\"get_job\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get(job_1.id) == job_1 assert _JobManager._get(job_1.id).submit_entity_id == task.id job_2 = _OrchestratorFactory._orchestrator.submit_task(task) assert job_1 != job_2 assert _JobManager._get(job_1.id).id == job_1.id assert _JobManager._get(job_2.id).id == job_2.id assert _JobManager._get(job_2.id).submit_entity_id == task.id def test_get_latest_job(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() task = _create_task(multiply, name=\"get_latest_job\") task_2 = _create_task(multiply, name=\"get_latest_job_2\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get_latest(task) == job_1 assert _JobManager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important job_2 = _OrchestratorFactory._orchestrator.submit_task(task_2) assert _JobManager._get_latest(task).id == job_1.id assert _JobManager._get_latest(task_2).id == job_2.id sleep(0.01) # Comparison is based on time, precision on Windows is not enough important job_1_bis = _OrchestratorFactory._orchestrator.submit_task(task) assert _JobManager._get_latest(task).id == job_1_bis.id assert _JobManager._get_latest(task_2).id == job_2.id def test_get_job_unknown(init_sql_repo): init_managers() assert _JobManager._get(JobId(\"Unknown\")) is None def test_get_jobs(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() task = _create_task(multiply, name=\"get_all_jobs\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) job_2 = _OrchestratorFactory._orchestrator.submit_task(task) assert {job.id for job in _JobManager._get_all()} == {job_1.id, job_2.id} def test_delete_job(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() task = _create_task(multiply, name=\"delete_job\") _OrchestratorFactory._build_dispatcher() job_1 = _OrchestratorFactory._orchestrator.submit_task(task) job_2 = _OrchestratorFactory._orchestrator.submit_task(task) _JobManager._delete(job_1) assert [job.id for job in _JobManager._get_all()] == [job_2.id] assert _JobManager._get(job_1.id) is None def test_raise_when_trying_to_delete_unfinished_job(init_sql_repo): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) init_managers() m = multiprocessing.Manager() lock = m.Lock() dnm = _DataManagerFactory._build_manager() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dnm._set(dn_1) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dnm._set(dn_2) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO) dnm._set(dn_3) task = Task(\"task_cfg\", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id=\"raise_when_delete_unfinished\") _OrchestratorFactory._build_dispatcher() with lock: job = _OrchestratorFactory._orchestrator.submit_task(task) assert_true_after_time(lambda: len(_JobDispatcher._dispatched_processes) == 1) assert_true_after_time(job.is_running) with pytest.raises(JobNotDeletedException): _JobManager._delete(job) with 
pytest.raises(JobNotDeletedException): _JobManager._delete(job, force=False) assert_true_after_time(job.is_completed) _JobManager._delete(job) def test_force_deleting_unfinished_job(init_sql_repo): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) init_managers() m = multiprocessing.Manager() lock = m.Lock() dnm = _DataManagerFactory._build_manager() dn_1 = InMemoryDataNode(\"dn_config_1\", Scope.SCENARIO, properties={\"default_data\": 1}) dnm._set(dn_1) dn_2 = InMemoryDataNode(\"dn_config_2\", Scope.SCENARIO, properties={\"default_data\": 2}) dnm._set(dn_2) dn_3 = InMemoryDataNode(\"dn_config_3\", Scope.SCENARIO) dnm._set(dn_3) task_1 = Task( \"task_config_1\", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id=\"delete_force_unfinished_job\" ) reference_last_edit_date = dn_3.last_edit_date _OrchestratorFactory._build_dispatcher() with lock: job = _OrchestratorFactory._orchestrator.submit_task(task_1) assert_true_after_time(job.is_running) with pytest.raises(JobNotDeletedException): _JobManager._delete(job, force=False) _JobManager._delete(job, force=True) assert _JobManager._get(job.id) is None assert_true_after_time(lambda: reference_last_edit_date != dn_3.last_edit_date) def test_is_deletable(init_sql_repo): init_managers() assert len(_JobManager._get_all()) == 0 task = _create_task(print, 0, \"task\") job = _OrchestratorFactory._orchestrator.submit_task(task) assert job.is_completed() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.abandoned() assert job.is_abandoned() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.canceled() assert job.is_canceled() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.failed() assert job.is_failed() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.skipped() assert job.is_skipped() assert _JobManager._is_deletable(job) assert _JobManager._is_deletable(job.id) job.blocked() assert job.is_blocked() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.running() assert job.is_running() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.pending() assert job.is_pending() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) job.status = Status.SUBMITTED assert job.is_submitted() assert not _JobManager._is_deletable(job) assert not _JobManager._is_deletable(job.id) def _create_task(function, nb_outputs=1, name=None): input1_dn_config = Config.configure_data_node(\"input1\", scope=Scope.SCENARIO, default_data=21) input2_dn_config = Config.configure_data_node(\"input2\", scope=Scope.SCENARIO, default_data=2) output_dn_configs = [ Config.configure_data_node(f\"output{i}\", scope=Scope.SCENARIO, default_data=0) for i in range(nb_outputs) ] _DataManager._bulk_get_or_create({cfg for cfg in output_dn_configs}) name = name or \"\".join(random.choice(string.ascii_lowercase) for _ in range(10)) task_config = Config.configure_task( id=name, function=function, input=[input1_dn_config, input2_dn_config], output=output_dn_configs, ) return _TaskManager._bulk_get_or_create([task_config])[0] "} {"text": "import os import pytest from src.taipy.core.data._data_sql_repository import _DataSQLRepository from src.taipy.core.exceptions import ModelNotFound from src.taipy.core.job._job_fs_repository import _JobFSRepository from src.taipy.core.job._job_sql_repository import _JobSQLRepository from 
src.taipy.core.job.job import Job, JobId, Task from src.taipy.core.task._task_sql_repository import _TaskSQLRepository class TestJobRepository: @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_save_and_load(self, data_node, job, repo, init_sql_repo): _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task repository = repo() repository._save(job) obj = repository._load(job.id) assert isinstance(obj, Job) @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_exists(self, data_node, job, repo, init_sql_repo): _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task repository = repo() repository._save(job) assert repository._exists(job.id) assert not repository._exists(\"not-existed-job\") @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_load_all(self, data_node, job, repo, init_sql_repo): _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task repository = repo() for i in range(10): job.id = JobId(f\"job-{i}\") repository._save(job) jobs = repository._load_all() assert len(jobs) == 10 @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_load_all_with_filters(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task for i in range(10): job.id = JobId(f\"job-{i}\") repository._save(job) objs = repository._load_all(filters=[{\"id\": \"job-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_delete(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task repository._save(job) repository._delete(job.id) with pytest.raises(ModelNotFound): repository._load(job.id) @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_delete_all(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task for i in range(10): job.id = JobId(f\"job-{i}\") repository._save(job) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_delete_many(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task for i in range(10): job.id = JobId(f\"job-{i}\") repository._save(job) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_delete_by(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", 
{}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): job.id = JobId(f\"job-{i}\") job._version = f\"{(i+1) // 5}.0\" repository._save(job) objs = repository._load_all() assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_search(self, data_node, job, repo, init_sql_repo): repository = repo() _DataSQLRepository()._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskSQLRepository()._save(task) job._task = task for i in range(10): job.id = JobId(f\"job-{i}\") repository._save(job) assert len(repository._load_all()) == 10 objs = repository._search(\"id\", \"job-2\") assert len(objs) == 1 assert isinstance(objs[0], Job) objs = repository._search(\"id\", \"job-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], Job) assert repository._search(\"id\", \"job-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_JobFSRepository, _JobSQLRepository]) def test_export(self, tmpdir, job, repo, init_sql_repo): repository = repo() repository._save(job) repository._export(job.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _JobFSRepository else os.path.join(tmpdir.strpath, \"job\") assert os.path.exists(os.path.join(dir_path, f\"{job.id}.json\")) "} {"text": "from unittest import mock import pytest from src.taipy.core.common._utils import _Subscriber from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.data_node import DataNode from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.data.pickle import PickleDataNode from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task, TaskId from taipy.config.common.scope import Scope def test_create_sequence(): input = InMemoryDataNode(\"foo\", Scope.SCENARIO) output = InMemoryDataNode(\"bar\", Scope.SCENARIO) task = Task(\"baz\", {}, print, [input], [output], TaskId(\"task_id\")) sequence = Sequence({\"description\": \"description\"}, [task], sequence_id=SequenceId(\"name_1\")) assert sequence.id == \"name_1\" assert sequence.owner_id is None assert sequence.description == \"description\" assert sequence.foo == input assert sequence.bar == output assert sequence.baz.id == task.id assert sequence.tasks == {task.config_id: task} assert sequence.data_nodes == {\"foo\": input, \"bar\": output} assert sequence.parent_ids == set() with pytest.raises(AttributeError): sequence.qux assert sequence.get_label() == sequence.id assert sequence.get_simple_label() == sequence.id input_1 = InMemoryDataNode(\"input\", Scope.SCENARIO) output_1 = InMemoryDataNode(\"output\", Scope.SCENARIO) task_1 = Task(\"task_1\", {}, print, [input_1], [output_1], TaskId(\"task_id_1\")) sequence_1 = Sequence( {\"description\": \"description\"}, [task_1], owner_id=\"owner_id\", parent_ids={\"scenario_id\"}, sequence_id=SequenceId(\"name_1\"), ) assert sequence_1.id == \"name_1\" assert 
sequence_1.owner_id == \"owner_id\" assert sequence_1.description == \"description\" assert sequence_1.input == input_1 assert sequence_1.output == output_1 assert sequence_1.task_1 == task_1 assert sequence_1.tasks == {task_1.config_id: task_1} assert sequence_1.data_nodes == {\"input\": input_1, \"output\": output_1} assert sequence_1.parent_ids == {\"scenario_id\"} assert sequence_1.id is not None with mock.patch(\"src.taipy.core.get\") as get_mck: class MockOwner: label = \"owner_label\" def get_label(self): return self.label get_mck.return_value = MockOwner() assert sequence_1.get_label() == \"owner_label > \" + sequence_1.id assert sequence_1.get_simple_label() == sequence_1.id sequence_2 = Sequence( {\"description\": \"description\", \"name\": \"Name\"}, [task, task_1], owner_id=\"owner_id\", parent_ids={\"parent_id_1\", \"parent_id_2\"}, sequence_id=SequenceId(\"name_2\"), ) assert sequence_2.owner_id == \"owner_id\" assert sequence_2.id == \"name_2\" assert sequence_2.description == \"description\" assert sequence_2.tasks == {task.config_id: task, task_1.config_id: task_1} assert sequence_2.data_nodes == {\"foo\": input, \"bar\": output, \"input\": input_1, \"output\": output_1} assert sequence_2.parent_ids == {\"parent_id_1\", \"parent_id_2\"} with mock.patch(\"src.taipy.core.get\") as get_mck: class MockOwner: label = \"owner_label\" def get_label(self): return self.label get_mck.return_value = MockOwner() assert sequence_2.get_label() == \"owner_label > \" + sequence_2.name assert sequence_2.get_simple_label() == sequence_2.name def test_check_consistency(): sequence_1 = Sequence({}, [], \"name_1\") assert sequence_1._is_consistent() input_2 = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO) task_2 = Task(\"tfoo\", {}, print, [input_2], [output_2], TaskId(\"task_id_2\")) sequence_2 = Sequence({}, [task_2], \"name_2\") assert sequence_2._is_consistent() data_node_3 = InMemoryDataNode(\"foo\", Scope.SCENARIO) task_3 = Task(\"tfoo\", {}, print, [data_node_3], [data_node_3], TaskId(\"task_id_3\")) sequence_3 = Sequence({}, [task_3], \"name_3\") assert not sequence_3._is_consistent() # Not a dag input_4 = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_4 = InMemoryDataNode(\"bar\", Scope.SCENARIO) task_4_1 = Task(\"tfoo\", {}, print, [input_4], [output_4], TaskId(\"task_id_4_1\")) task_4_2 = Task(\"tbar\", {}, print, [output_4], [input_4], TaskId(\"task_id_4_2\")) sequence_4 = Sequence({}, [task_4_1, task_4_2], \"name_4\") assert not sequence_4._is_consistent() # Not a Dag class FakeDataNode: config_id = \"config_id_of_a_fake_dn\" input_6 = DataNode(\"foo\", Scope.SCENARIO, \"input_id_5\") output_6 = DataNode(\"bar\", Scope.SCENARIO, \"output_id_5\") task_6_1 = Task(\"tfoo\", {}, print, [input_6], [output_6], TaskId(\"task_id_5_1\")) task_6_2 = Task(\"tbar\", {}, print, [output_6], [FakeDataNode()], TaskId(\"task_id_5_2\")) sequence_6 = Sequence({}, [task_6_1, task_6_2], \"name_5\") assert not sequence_6._is_consistent() # Not a DataNode intermediate_7 = DataNode(\"foo\", Scope.SCENARIO, \"intermediate_id_7\") output_7 = DataNode(\"bar\", Scope.SCENARIO, \"output_id_7\") task_7_1 = Task(\"tfoo\", {}, print, [], [intermediate_7], TaskId(\"task_id_7_1\")) task_7_2 = Task(\"tbar\", {}, print, [intermediate_7], [output_7], TaskId(\"task_id_7_2\")) sequence_7 = Sequence({}, [task_7_1, task_7_2], \"name_7\") assert sequence_7._is_consistent() input_8 = DataNode(\"foo\", Scope.SCENARIO, \"output_id_8\") intermediate_8 = DataNode(\"bar\", 
Scope.SCENARIO, \"intermediate_id_8\") task_8_1 = Task(\"tfoo\", {}, print, [input_8], [intermediate_8], TaskId(\"task_id_8_1\")) task_8_2 = Task(\"tbar\", {}, print, [intermediate_8], [], TaskId(\"task_id_8_2\")) sequence_8 = Sequence({}, [task_8_1, task_8_2], \"name_8\") assert sequence_8._is_consistent() input_9_1 = DataNode(\"foo\", Scope.SCENARIO, \"input_id_9_1\") output_9_1 = DataNode(\"bar\", Scope.SCENARIO, \"output_id_9_1\") input_9_2 = DataNode(\"baz\", Scope.SCENARIO, \"input_id_9_2\") output_9_2 = DataNode(\"qux\", Scope.SCENARIO, \"output_id_9_2\") task_9_1 = Task(\"tfoo\", {}, print, [input_9_1], [output_9_1], TaskId(\"task_id_9_1\")) task_9_2 = Task(\"tbar\", {}, print, [input_9_2], [output_9_2], TaskId(\"task_id_9_2\")) sequence_9 = Sequence({}, [task_9_1, task_9_2], \"name_9\") assert not sequence_9._is_consistent() # Not connected input_10_1 = DataNode(\"foo\", Scope.SCENARIO, \"output_id_10_1\") intermediate_10_1 = DataNode(\"bar\", Scope.SCENARIO, \"intermediate_id_10_1\") intermediate_10_2 = DataNode(\"baz\", Scope.SCENARIO, \"intermediate_id_10_2\") output_10 = DataNode(\"qux\", Scope.SCENARIO, \"output_id_10\") post_10 = DataNode(\"quux\", Scope.SCENARIO, \"post_id_10\") task_10_1 = Task(\"tfoo\", {}, print, [input_10_1], [intermediate_10_1], TaskId(\"task_id_10_1\")) task_10_2 = Task(\"tbar\", {}, print, [], [intermediate_10_2], TaskId(\"task_id_10_2\")) task_10_3 = Task(\"tbaz\", {}, print, [intermediate_10_1, intermediate_10_2], [output_10], TaskId(\"task_id_10_3\")) task_10_4 = Task(\"tqux\", {}, print, [output_10], [post_10], TaskId(\"task_id_10_4\")) task_10_5 = Task(\"tquux\", {}, print, [output_10], [], TaskId(\"task_id_10_5\")) sequence_10 = Sequence({}, [task_10_1, task_10_2, task_10_3, task_10_4, task_10_5], \"name_10\") assert sequence_10._is_consistent() def test_get_sorted_tasks(): def assert_equal(tasks_a, tasks_b) -> bool: if len(tasks_a) != len(tasks_b): return False for i in range(len(tasks_a)): task_a, task_b = tasks_a[i], tasks_b[i] if isinstance(task_a, list) and isinstance(task_b, list): if not assert_equal(task_a, task_b): return False elif isinstance(task_a, list) or isinstance(task_b, list): return False else: index_task_b = tasks_b.index(task_a) if any([isinstance(task_b, list) for task_b in tasks_b[i : index_task_b + 1]]): return False return True data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert assert_equal(sequence._get_sorted_tasks(), [[task_1], [task_2, task_4], [task_3]]) data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = 
DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- t2 ---> s5 ------ # | | # |---> t1 ---| -----> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert assert_equal(sequence._get_sorted_tasks(), [[task_2, task_1], [task_4, task_3]]) data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert assert_equal(sequence._get_sorted_tasks(), [[task_2, task_1], [task_4, task_3]]) data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert assert_equal(sequence._get_sorted_tasks(), [[task_2, task_1], [task_4, task_3]]) data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = DataNode(\"hugh\", Scope.SCENARIO, \"s8\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_8], None, TaskId(\"t5\")) sequence = 
Sequence({}, [task_5, task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 # t2 ---> s5 # s8 ---> t5 assert assert_equal(sequence._get_sorted_tasks(), [[task_5, task_2, task_1], [task_4, task_3]]) def test_get_inputs(): data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = DataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- ---> s3 ---> t2 ---> s5 ---- # | | | # |---> t1 ---| -------------------------> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert sequence.get_inputs() == {data_node_1, data_node_2} assert sequence.get_outputs() == {data_node_6, data_node_7} assert sequence.get_intermediate() == {data_node_3, data_node_4, data_node_5} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, None, [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- t2 ---> s5 ------ # | | # |---> t1 ---| -----> t3 ---> s6 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert sequence.get_inputs() == {data_node_1, data_node_2} assert sequence.get_outputs() == {data_node_6, data_node_7} assert sequence.get_intermediate() == {data_node_4, data_node_5} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 assert sequence.get_inputs() == {data_node_1, data_node_2, data_node_6} assert sequence.get_outputs() == {data_node_7} assert sequence.get_intermediate() == 
{data_node_4, data_node_5} data_node_1 = DataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = DataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_4 = DataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = DataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = DataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = DataNode(\"corge\", Scope.SCENARIO, \"s7\") data_node_8 = DataNode(\"hugh\", Scope.SCENARIO, \"s8\") task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, output=[data_node_5], id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_4], None, id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4, data_node_6], [data_node_7], TaskId(\"t4\")) task_5 = Task(\"bob\", {}, print, [data_node_8], None, TaskId(\"t5\")) sequence = Sequence({}, [task_5, task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- # | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 # t2 ---> s5 | # s8 ---> t5 s6 --| assert sequence.get_inputs() == {data_node_1, data_node_2, data_node_8, data_node_6} assert sequence.get_outputs() == {data_node_5, data_node_7} assert sequence.get_intermediate() == {data_node_4} def test_is_ready_to_run(): data_node_1 = PickleDataNode(\"foo\", Scope.SCENARIO, \"s1\", properties={\"default_data\": 1}) data_node_2 = PickleDataNode(\"bar\", Scope.SCENARIO, \"s2\", properties={\"default_data\": 2}) data_node_4 = PickleDataNode(\"qux\", Scope.SCENARIO, \"s4\", properties={\"default_data\": 4}) data_node_5 = PickleDataNode(\"quux\", Scope.SCENARIO, \"s5\", properties={\"default_data\": 5}) data_node_6 = PickleDataNode(\"quuz\", Scope.SCENARIO, \"s6\", properties={\"default_data\": 6}) data_node_7 = PickleDataNode(\"corge\", Scope.SCENARIO, \"s7\", properties={\"default_data\": 7}) task_1 = Task(\"grault\", {}, print, [data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 data_manager = _DataManagerFactory._build_manager() for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7]: data_manager._set(dn) assert sequence.is_ready_to_run() data_node_1.edit_in_progress = True assert not sequence.is_ready_to_run() data_node_2.edit_in_progress = True data_node_6.edit_in_progress = True assert not sequence.is_ready_to_run() data_node_1.edit_in_progress = False data_node_2.edit_in_progress = False data_node_6.edit_in_progress = False assert sequence.is_ready_to_run() def test_data_nodes_being_edited(): data_node_1 = PickleDataNode(\"foo\", Scope.SCENARIO, \"s1\", properties={\"default_data\": 1}) data_node_2 = PickleDataNode(\"bar\", Scope.SCENARIO, \"s2\", properties={\"default_data\": 2}) data_node_4 = PickleDataNode(\"qux\", Scope.SCENARIO, \"s4\", properties={\"default_data\": 4}) data_node_5 = PickleDataNode(\"quux\", Scope.SCENARIO, \"s5\", properties={\"default_data\": 5}) data_node_6 = PickleDataNode(\"quuz\", Scope.SCENARIO, \"s6\", properties={\"default_data\": 6}) data_node_7 = PickleDataNode(\"corge\", Scope.SCENARIO, \"s7\", properties={\"default_data\": 7}) task_1 = Task(\"grault\", {}, print, 
[data_node_1, data_node_2], [data_node_4], TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, [data_node_6], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], id=TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) sequence = Sequence({}, [task_4, task_2, task_1, task_3], SequenceId(\"p1\")) # s1 --- s6 ---> t2 ---> s5 # | | # |---> t1 ---| -----> t3 # | | | # s2 --- ---> s4 ---> t4 ---> s7 data_manager = _DataManagerFactory._build_manager() for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7]: data_manager._set(dn) assert len(sequence.data_nodes_being_edited()) == 0 assert sequence.data_nodes_being_edited() == set() data_node_1.edit_in_progress = True assert len(sequence.data_nodes_being_edited()) == 1 assert sequence.data_nodes_being_edited() == {data_node_1} data_node_2.edit_in_progress = True data_node_6.edit_in_progress = True assert len(sequence.data_nodes_being_edited()) == 3 assert sequence.data_nodes_being_edited() == {data_node_1, data_node_2, data_node_6} data_node_4.edit_in_progress = True data_node_5.edit_in_progress = True assert len(sequence.data_nodes_being_edited()) == 5 assert sequence.data_nodes_being_edited() == {data_node_1, data_node_2, data_node_4, data_node_5, data_node_6} data_node_1.edit_in_progress = False data_node_2.edit_in_progress = False data_node_6.edit_in_progress = False assert len(sequence.data_nodes_being_edited()) == 2 assert sequence.data_nodes_being_edited() == {data_node_4, data_node_5} data_node_4.edit_in_progress = False data_node_5.edit_in_progress = False data_node_7.edit_in_progress = True assert len(sequence.data_nodes_being_edited()) == 1 assert sequence.data_nodes_being_edited() == {data_node_7} data_node_7.edit_in_progress = False assert len(sequence.data_nodes_being_edited()) == 0 assert sequence.data_nodes_being_edited() == set() def test_get_tasks(): task_1 = Task(\"grault\", {}, print, id=TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, id=TaskId(\"t3\")) sequence_1 = Sequence({}, [task_1, task_2, task_3], SequenceId(\"p1\")) assert sequence_1.tasks == {\"grault\": task_1, \"garply\": task_2, \"waldo\": task_3} def test_get_set_of_tasks(): task_1 = Task(\"grault\", {}, print, id=TaskId(\"t1\")) task_2 = Task(\"garply\", {}, print, id=TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, id=TaskId(\"t3\")) sequence_1 = Sequence({}, [task_1, task_2, task_3], SequenceId(\"p1\")) assert sequence_1._get_set_of_tasks() == {task_1, task_2, task_3} def test_auto_set_and_reload(task): tmp_task = Task(\"tmp_task_config_id\", {}, print, list(task.output.values()), [], TaskId(\"tmp_task_id\")) scenario = Scenario(\"scenario\", [task, tmp_task], {}, sequences={\"foo\": {}}) _TaskManager._set(task) _TaskManager._set(tmp_task) _ScenarioManager._set(scenario) sequence_1 = scenario.sequences[\"foo\"] sequence_2 = _SequenceManager._get(sequence_1) # auto set & reload on tasks attribute assert len(sequence_1.tasks) == 0 assert len(sequence_2.tasks) == 0 sequence_1.tasks = [tmp_task] assert len(sequence_1.tasks) == 1 assert sequence_1.tasks[tmp_task.config_id].id == tmp_task.id assert len(sequence_2.tasks) == 1 assert sequence_2.tasks[tmp_task.config_id].id == tmp_task.id sequence_2.tasks = [task] assert len(sequence_1.tasks) == 1 assert sequence_1.tasks[task.config_id].id == task.id assert len(sequence_2.tasks) == 1 assert sequence_2.tasks[task.config_id].id == task.id 
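# Note: the assertions above exercise Taipy's "auto set & reload" behavior: sequence_1 and
# sequence_2 are two handles on the same persisted Sequence entity, so an attribute written
# through one handle is immediately visible through the other. A minimal sketch of the same
# idea, assuming a scenario that already owns a sequence named "foo" (the "owner" property
# key is purely illustrative):
#     s_a = scenario.sequences["foo"]
#     s_b = _SequenceManager._get(s_a)
#     s_a.properties["owner"] = "alice"
#     assert s_b.properties["owner"] == "alice"  # change is reflected through the other handle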
assert sequence_1.owner_id == scenario.id assert sequence_2.owner_id == scenario.id # auto set & reload on subscribers attribute assert len(sequence_1.subscribers) == 0 assert len(sequence_2.subscribers) == 0 sequence_1.subscribers.append(print) assert len(sequence_1.subscribers) == 1 assert len(sequence_2.subscribers) == 1 sequence_2.subscribers.append(print) assert len(sequence_1.subscribers) == 2 assert len(sequence_2.subscribers) == 2 sequence_1.subscribers.clear() assert len(sequence_1.subscribers) == 0 assert len(sequence_2.subscribers) == 0 sequence_1.subscribers.extend([print, map]) assert len(sequence_1.subscribers) == 2 assert len(sequence_2.subscribers) == 2 sequence_1.subscribers.remove(_Subscriber(print, [])) assert len(sequence_1.subscribers) == 1 assert len(sequence_2.subscribers) == 1 sequence_2.subscribers.clear() assert len(sequence_1.subscribers) == 0 assert len(sequence_2.subscribers) == 0 sequence_1.subscribers + print + len assert len(sequence_1.subscribers) == 2 assert len(sequence_2.subscribers) == 2 sequence_1.subscribers = [] assert len(sequence_1.subscribers) == 0 assert len(sequence_2.subscribers) == 0 # auto set & reload on properties attribute assert sequence_1.properties == {\"name\": \"foo\"} assert sequence_2.properties == {\"name\": \"foo\"} sequence_1.properties[\"qux\"] = 4 assert sequence_1.properties[\"qux\"] == 4 assert sequence_2.properties[\"qux\"] == 4 sequence_2.properties[\"qux\"] = 5 assert sequence_1.properties[\"qux\"] == 5 assert sequence_2.properties[\"qux\"] == 5 sequence_1.properties[\"temp_key_1\"] = \"temp_value_1\" sequence_1.properties[\"temp_key_2\"] = \"temp_value_2\" assert sequence_1.properties == { \"qux\": 5, \"name\": \"foo\", \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } assert sequence_2.properties == { \"qux\": 5, \"name\": \"foo\", \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } sequence_1.properties.pop(\"temp_key_1\") assert \"temp_key_1\" not in sequence_1.properties.keys() assert \"temp_key_1\" not in sequence_1.properties.keys() assert sequence_1.properties == { \"qux\": 5, \"name\": \"foo\", \"temp_key_2\": \"temp_value_2\", } assert sequence_2.properties == { \"qux\": 5, \"name\": \"foo\", \"temp_key_2\": \"temp_value_2\", } sequence_2.properties.pop(\"temp_key_2\") assert sequence_1.properties == {\"name\": \"foo\", \"qux\": 5} assert sequence_2.properties == {\"name\": \"foo\", \"qux\": 5} assert \"temp_key_2\" not in sequence_1.properties.keys() assert \"temp_key_2\" not in sequence_2.properties.keys() sequence_1.properties[\"temp_key_3\"] = 0 assert sequence_1.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 0} assert sequence_2.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 0} sequence_1.properties.update({\"temp_key_3\": 1}) assert sequence_1.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 1} assert sequence_2.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 1} sequence_1.properties.update(dict()) assert sequence_1.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 1} assert sequence_2.properties == {\"name\": \"foo\", \"qux\": 5, \"temp_key_3\": 1} sequence_1.properties[\"temp_key_4\"] = 0 sequence_1.properties[\"temp_key_5\"] = 0 with sequence_1 as sequence: assert len(sequence.tasks) == 1 assert sequence.tasks[task.config_id].id == task.id assert len(sequence.subscribers) == 0 assert sequence._is_in_context assert sequence.properties[\"qux\"] == 5 assert sequence.properties[\"temp_key_3\"] == 1 
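# Note: the `with sequence_1 as sequence:` block entered above relies on the entity's
# context-manager protocol: while `_is_in_context` is True, attribute writes are buffered and
# reads keep returning the values loaded when the context was entered; the buffered changes are
# only persisted (and therefore visible on sequence_1) once the block exits. The assertions that
# follow check exactly that: in-context reads still see the pre-write values, while the
# post-context reads see the new ones.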
assert sequence.properties[\"temp_key_4\"] == 0 assert sequence.properties[\"temp_key_5\"] == 0 sequence.tasks = [] sequence.subscribers = [print] sequence.properties[\"qux\"] = 9 sequence.properties.pop(\"temp_key_3\") sequence.properties.pop(\"temp_key_4\") sequence.properties.update({\"temp_key_4\": 1}) sequence.properties.update({\"temp_key_5\": 2}) sequence.properties.pop(\"temp_key_5\") sequence.properties.update(dict()) assert len(sequence.tasks) == 1 assert sequence.tasks[task.config_id].id == task.id assert len(sequence.subscribers) == 0 assert sequence._is_in_context assert sequence.properties[\"qux\"] == 5 assert sequence.properties[\"temp_key_3\"] == 1 assert sequence.properties[\"temp_key_4\"] == 0 assert sequence.properties[\"temp_key_5\"] == 0 assert len(sequence_1.tasks) == 0 assert len(sequence_1.subscribers) == 1 assert not sequence_1._is_in_context assert sequence_1.properties[\"qux\"] == 9 assert \"temp_key_3\" not in sequence_1.properties.keys() assert sequence_1.properties[\"temp_key_4\"] == 1 assert \"temp_key_5\" not in sequence_1.properties.keys() def test_get_parents(sequence): with mock.patch(\"src.taipy.core.get_parents\") as mck: sequence.get_parents() mck.assert_called_once_with(sequence) def test_subscribe_sequence(): with mock.patch(\"src.taipy.core.subscribe_sequence\") as mck: sequence = Sequence({}, [], \"id\") sequence.subscribe(None) mck.assert_called_once_with(None, None, sequence) def test_unsubscribe_sequence(): with mock.patch(\"src.taipy.core.unsubscribe_sequence\") as mck: sequence = Sequence({}, [], \"id\") sequence.unsubscribe(None) mck.assert_called_once_with(None, None, sequence) def test_submit_sequence(): with mock.patch(\"src.taipy.core.sequence._sequence_manager._SequenceManager._submit\") as mck: sequence = Sequence({}, [], \"id\") sequence.submit(None, False) mck.assert_called_once_with(sequence, None, False, False, None) "} {"text": "from src.taipy.core.sequence._sequence_converter import _SequenceConverter from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.task.task import Task def test_entity_to_model(sequence): sequence_model_1 = _SequenceConverter._entity_to_model(sequence) expected_sequence_model_1 = { \"id\": \"sequence_id\", \"owner_id\": \"owner_id\", \"parent_ids\": [\"parent_id_1\", \"parent_id_2\"], \"properties\": {}, \"tasks\": [], \"subscribers\": [], \"version\": \"random_version_number\", } sequence_model_1[\"parent_ids\"] = sorted(sequence_model_1[\"parent_ids\"]) assert sequence_model_1 == expected_sequence_model_1 task_1 = Task(\"task_1\", {}, print) task_2 = Task(\"task_2\", {}, print) sequence_2 = Sequence( {\"name\": \"sequence_2\"}, [task_1, task_2], \"SEQUENCE_sq_1_SCENARIO_sc\", \"SCENARIO_sc\", [\"SCENARIO_sc\"], [], \"random_version\", ) sequence_model_2 = _SequenceConverter._entity_to_model(sequence_2) expected_sequence_model_2 = { \"id\": \"SEQUENCE_sq_1_SCENARIO_sc\", \"owner_id\": \"SCENARIO_sc\", \"parent_ids\": [\"SCENARIO_sc\"], \"properties\": {\"name\": \"sequence_2\"}, \"tasks\": [task_1.id, task_2.id], \"subscribers\": [], \"version\": \"random_version\", } assert sequence_model_2 == expected_sequence_model_2 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pytest from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.sequence._sequence_manager import _SequenceManager from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.conftest import init_managers def test_set_and_get_sequence(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() _OrchestratorFactory._build_dispatcher() input_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO) task = Task(\"task\", {}, print, [input_dn], [output_dn], TaskId(\"task_id\")) scenario = Scenario(\"scenario\", {task}, {}, set()) _ScenarioManager._set(scenario) sequence_name_1 = \"p1\" sequence_id_1 = SequenceId(f\"SEQUENCE_{sequence_name_1}_{scenario.id}\") sequence_name_2 = \"p2\" sequence_id_2 = SequenceId(f\"SEQUENCE_{sequence_name_2}_{scenario.id}\") # No existing Sequence assert _SequenceManager._get(sequence_id_1) is None assert _SequenceManager._get(sequence_id_2) is None scenario.add_sequences({sequence_name_1: []}) sequence_1 = scenario.sequences[sequence_name_1] # Save one sequence. We expect to have only one sequence stored _SequenceManager._set(sequence_1) assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2) is None # Save a second sequence. Now, we expect to have a total of two sequences stored _TaskManager._set(task) scenario.add_sequences({sequence_name_2: [task]}) sequence_2 = scenario.sequences[sequence_name_2] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 # We save the first sequence again. 
We expect nothing to change scenario.add_sequences({sequence_name_1: {}}) sequence_1 = scenario.sequences[sequence_name_1] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 # We save a third sequence with same id as the first one. # We expect the first sequence to be updated scenario.add_sequences({sequence_name_1: [task]}) sequence_3 = scenario.sequences[sequence_name_1] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert _SequenceManager._get(sequence_id_1).id == sequence_3.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 1 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 1 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 assert _TaskManager._get(task.id).id == task.id def test_get_all_on_multiple_versions_environment(init_sql_repo): init_managers() # Create 5 sequences from Scenario with 2 versions each for version in range(1, 3): for i in range(5): _ScenarioManager._set( Scenario( f\"config_id_{i+version}\", [], {}, [], f\"SCENARIO_id_{i}_v{version}\", version=f\"{version}.0\", sequences={\"sequence\": {}}, ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 1 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) _VersionManager._set_experiment_version(\"2.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 1 ) _VersionManager._set_development_version(\"1.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 1 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 0 ) _VersionManager._set_development_version(\"2.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 1 ) def mult_by_two(nb: int): return nb * 2 def mult_by_3(nb: int): return nb * 3 def test_get_or_create_data(init_sql_repo): # only create intermediate data node once Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() dn_config_1 = Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1) dn_config_2 = 
Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0) dn_config_6 = Config.configure_data_node(\"baz\", \"in_memory\", Scope.SCENARIO, default_data=0) task_config_mult_by_two = Config.configure_task(\"mult_by_two\", mult_by_two, [dn_config_1], dn_config_2) task_config_mult_by_3 = Config.configure_task(\"mult_by_3\", mult_by_3, [dn_config_2], dn_config_6) # dn_1 ---> mult_by_two ---> dn_2 ---> mult_by_3 ---> dn_6 scenario_config = Config.configure_scenario(\"scenario\", [task_config_mult_by_two, task_config_mult_by_3]) _OrchestratorFactory._build_dispatcher() assert len(_DataManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 0 scenario = _ScenarioManager._create(scenario_config) scenario.add_sequences({\"by_6\": list(scenario.tasks.values())}) sequence = scenario.sequences[\"by_6\"] assert sequence.name == \"by_6\" assert len(_DataManager._get_all()) == 3 assert len(_TaskManager._get_all()) == 2 assert len(sequence._get_sorted_tasks()) == 2 assert sequence.foo.read() == 1 assert sequence.bar.read() == 0 assert sequence.baz.read() == 0 assert sequence._get_sorted_tasks()[0][0].config_id == task_config_mult_by_two.id assert sequence._get_sorted_tasks()[1][0].config_id == task_config_mult_by_3.id _SequenceManager._submit(sequence.id) assert sequence.foo.read() == 1 assert sequence.bar.read() == 2 assert sequence.baz.read() == 6 sequence.foo.write(\"new data value\") assert sequence.foo.read() == \"new data value\" assert sequence.bar.read() == 2 assert sequence.baz.read() == 6 sequence.bar.write(7) assert sequence.foo.read() == \"new data value\" assert sequence.bar.read() == 7 assert sequence.baz.read() == 6 with pytest.raises(AttributeError): sequence.WRONG.write(7) def test_hard_delete_one_single_sequence_with_scenario_data_nodes(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create([task_config]) scenario = Scenario(\"scenario\", set(tasks), {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] sequence.submit() assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 _SequenceManager._hard_delete(sequence.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 def test_hard_delete_one_single_sequence_with_cycle_data_nodes(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.CYCLE) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create([task_config]) scenario = 
Scenario(\"scenario\", tasks, {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] sequence.submit() assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 _SequenceManager._hard_delete(sequence.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 def test_hard_delete_shared_entities(init_sql_repo): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) init_managers() input_dn = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") intermediate_dn = Config.configure_data_node(\"my_inter\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") output_dn = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") task_1 = Config.configure_task(\"task_1\", print, input_dn, intermediate_dn) task_2 = Config.configure_task(\"task_2\", print, intermediate_dn, output_dn) _OrchestratorFactory._build_dispatcher() tasks_scenario_1 = _TaskManager._bulk_get_or_create([task_1, task_2], scenario_id=\"scenario_id_1\") tasks_scenario_2 = _TaskManager._bulk_get_or_create([task_1, task_2], scenario_id=\"scenario_id_2\") scenario_1 = Scenario(\"scenario_1\", tasks_scenario_1, {}, sequences={\"sequence\": {\"tasks\": tasks_scenario_1}}) scenario_2 = Scenario(\"scenario_2\", tasks_scenario_2, {}, sequences={\"sequence\": {\"tasks\": tasks_scenario_2}}) _ScenarioManager._set(scenario_1) _ScenarioManager._set(scenario_2) sequence_1 = scenario_1.sequences[\"sequence\"] sequence_2 = scenario_2.sequences[\"sequence\"] _SequenceManager._submit(sequence_1.id) _SequenceManager._submit(sequence_2.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 3 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 4 _SequenceManager._hard_delete(sequence_1.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 3 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 4 "} {"text": "import json from pathlib import Path from typing import Callable, Iterable, Optional from unittest import mock from unittest.mock import ANY import pytest from src.taipy.core._orchestrator._orchestrator import _Orchestrator from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.common import _utils from src.taipy.core.common._utils import _Subscriber from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import ( InvalidSequenceId, ModelNotFound, NonExistingSequence, NonExistingTask, SequenceBelongsToNonExistingScenario, ) from src.taipy.core.job._job_manager import _JobManager from src.taipy.core.scenario._scenario_manager import _ScenarioManager from src.taipy.core.scenario.scenario import Scenario from src.taipy.core.sequence._sequence_manager import _SequenceManager from 
src.taipy.core.sequence._sequence_manager_factory import _SequenceManagerFactory from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.sequence.sequence_id import SequenceId from src.taipy.core.task._task_manager import _TaskManager from src.taipy.core.task.task import Task from src.taipy.core.task.task_id import TaskId from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils.NotifyMock import NotifyMock def test_breakdown_sequence_id(): with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"scenario_id\") with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"sequence_id\") with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"SEQUENCE_sequence_id\") with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"SCENARIO_scenario_id\") with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"sequence_SCENARIO_scenario_id\") with pytest.raises(InvalidSequenceId): _SequenceManager._breakdown_sequence_id(\"SEQUENCE_sequence_scenario_id\") sequence_name, scenario_id = _SequenceManager._breakdown_sequence_id(\"SEQUENCE_sequence_SCENARIO_scenario\") assert sequence_name == \"sequence\" and scenario_id == \"SCENARIO_scenario\" sequence_name, scenario_id = _SequenceManager._breakdown_sequence_id(\"SEQUENCEsequenceSCENARIO_scenario\") assert sequence_name == \"sequence\" and scenario_id == \"SCENARIO_scenario\" def test_raise_sequence_does_not_belong_to_scenario(): with pytest.raises(SequenceBelongsToNonExistingScenario): sequence = Sequence({\"name\": \"sequence_name\"}, [], \"SEQUENCE_sequence_name_SCENARIO_scenario_id\") _SequenceManager._set(sequence) def __init(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() input_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO) output_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO) task = Task(\"task\", {}, print, [input_dn], [output_dn], TaskId(\"task_id\")) scenario = Scenario(\"scenario\", set([task]), {}, set()) _ScenarioManager._set(scenario) return scenario, task def test_set_and_get_sequence_no_existing_sequence(): scenario, task = __init() sequence_name_1 = \"p1\" sequence_id_1 = SequenceId(f\"SEQUENCE_{sequence_name_1}_{scenario.id}\") sequence_name_2 = \"p2\" sequence_id_2 = SequenceId(f\"SEQUENCE_{sequence_name_2}_{scenario.id}\") assert _SequenceManager._get(sequence_id_1) is None assert _SequenceManager._get(sequence_id_2) is None assert _SequenceManager._get(\"sequence\") is None def test_set_and_get(): scenario, task = __init() sequence_name_1 = \"p1\" sequence_id_1 = SequenceId(f\"SEQUENCE_{sequence_name_1}_{scenario.id}\") sequence_name_2 = \"p2\" sequence_id_2 = SequenceId(f\"SEQUENCE_{sequence_name_2}_{scenario.id}\") scenario.add_sequences({sequence_name_1: []}) sequence_1 = scenario.sequences[sequence_name_1] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2) is None # Save a second sequence. 
Now, we expect to have a total of two sequences stored _TaskManager._set(task) scenario.add_sequences({sequence_name_2: [task]}) sequence_2 = scenario.sequences[sequence_name_2] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 assert _TaskManager._get(task.id).id == task.id # We save the first sequence again. We expect nothing to change scenario.add_sequence(sequence_name_1, []) sequence_1 = scenario.sequences[sequence_name_1] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 0 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 0 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 assert _TaskManager._get(task.id).id == task.id # We save a third sequence with same name as the first one. # We expect the first sequence to be updated scenario.add_sequences({sequence_name_1: [task]}) sequence_3 = scenario.sequences[sequence_name_1] assert _SequenceManager._get(sequence_id_1).id == sequence_1.id assert _SequenceManager._get(sequence_id_1).id == sequence_3.id assert len(_SequenceManager._get(sequence_id_1).tasks) == 1 assert _SequenceManager._get(sequence_1).id == sequence_1.id assert len(_SequenceManager._get(sequence_1).tasks) == 1 assert _SequenceManager._get(sequence_id_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_id_2).tasks) == 1 assert _SequenceManager._get(sequence_2).id == sequence_2.id assert len(_SequenceManager._get(sequence_2).tasks) == 1 assert _TaskManager._get(task.id).id == task.id def test_get_all_on_multiple_versions_environment(): # Create 5 sequences from Scenario with 2 versions each for version in range(1, 3): for i in range(5): _ScenarioManager._set( Scenario( f\"config_id_{i+version}\", [], {}, [], f\"SCENARIO_id_{i}_v{version}\", version=f\"{version}.0\", sequences={\"sequence\": {}}, ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 1 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) _VersionManager._set_experiment_version(\"2.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 1 ) _VersionManager._set_development_version(\"1.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 1 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"1.0\", \"id\": 
\"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 0 ) _VersionManager._set_development_version(\"2.0\") assert len(_SequenceManager._get_all()) == 5 assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v1\"}])) == 0 ) assert ( len(_SequenceManager._get_all_by(filters=[{\"version\": \"2.0\", \"id\": \"SEQUENCE_sequence_SCENARIO_id_1_v2\"}])) == 1 ) def test_is_submittable(): dn = InMemoryDataNode(\"dn\", Scope.SCENARIO, properties={\"default_data\": 10}) task = Task(\"task\", {}, print, [dn]) scenario = Scenario(\"scenario\", set([task]), {}, set()) _ScenarioManager._set(scenario) scenario.add_sequences({\"sequence\": list([task])}) sequence = scenario.sequences[\"sequence\"] assert len(_SequenceManager._get_all()) == 1 assert _SequenceManager._is_submittable(sequence) assert _SequenceManager._is_submittable(sequence.id) assert not _SequenceManager._is_submittable(\"Sequence_temp\") assert not _SequenceManager._is_submittable(\"SEQUENCE_temp_SCENARIO_scenario\") scenario.dn.edit_in_progress = True assert not _SequenceManager._is_submittable(sequence) assert not _SequenceManager._is_submittable(sequence.id) scenario.dn.edit_in_progress = False assert _SequenceManager._is_submittable(sequence) assert _SequenceManager._is_submittable(sequence.id) def test_submit(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() data_node_1 = InMemoryDataNode(\"foo\", Scope.SCENARIO, \"s1\") data_node_2 = InMemoryDataNode(\"bar\", Scope.SCENARIO, \"s2\") data_node_3 = InMemoryDataNode(\"baz\", Scope.SCENARIO, \"s3\") data_node_4 = InMemoryDataNode(\"qux\", Scope.SCENARIO, \"s4\") data_node_5 = InMemoryDataNode(\"quux\", Scope.SCENARIO, \"s5\") data_node_6 = InMemoryDataNode(\"quuz\", Scope.SCENARIO, \"s6\") data_node_7 = InMemoryDataNode(\"corge\", Scope.SCENARIO, \"s7\") task_1 = Task( \"grault\", {}, print, [data_node_1, data_node_2], [data_node_3, data_node_4], TaskId(\"t1\"), ) task_2 = Task(\"garply\", {}, print, [data_node_3], [data_node_5], TaskId(\"t2\")) task_3 = Task(\"waldo\", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId(\"t3\")) task_4 = Task(\"fred\", {}, print, [data_node_4], [data_node_7], TaskId(\"t4\")) scenario = Scenario(\"sce\", {task_1, task_2, task_3, task_4}, {}) sequence_name = \"sequence\" sequence_id = Sequence._new_id(sequence_name, scenario.id) class MockOrchestrator(_Orchestrator): submit_calls = [] @classmethod def _lock_dn_output_and_create_job( cls, task: Task, submit_id: str, submit_entity_id: str, callbacks: Optional[Iterable[Callable]] = None, force: bool = False, ): cls.submit_calls.append(task) return super()._lock_dn_output_and_create_job(task, submit_id, submit_entity_id, callbacks, force) with mock.patch(\"src.taipy.core.task._task_manager._TaskManager._orchestrator\", new=MockOrchestrator): # sequence does not exists. We expect an exception to be raised with pytest.raises(NonExistingSequence): _SequenceManager._submit(sequence_id) _ScenarioManager._set(scenario) scenario.add_sequences({sequence_name: [task_4, task_2, task_1, task_3]}) # sequence, and tasks does exist. 
We expect the tasks to be submitted # in a specific order _TaskManager._set(task_1) _TaskManager._set(task_2) _TaskManager._set(task_3) _TaskManager._set(task_4) sequence = scenario.sequences[sequence_name] _SequenceManager._submit(sequence.id) calls_ids = [t.id for t in _TaskManager._orchestrator().submit_calls] tasks_ids = [task_1.id, task_2.id, task_4.id, task_3.id] assert calls_ids == tasks_ids _SequenceManager._submit(sequence) calls_ids = [t.id for t in _TaskManager._orchestrator().submit_calls] tasks_ids = tasks_ids * 2 assert set(calls_ids) == set(tasks_ids) def test_assign_sequence_as_parent_of_task(): dn_config_1 = Config.configure_data_node(\"dn_1\", \"in_memory\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", \"in_memory\", scope=Scope.SCENARIO) dn_config_3 = Config.configure_data_node(\"dn_3\", \"in_memory\", scope=Scope.SCENARIO) task_config_1 = Config.configure_task(\"task_1\", print, [dn_config_1], [dn_config_2]) task_config_2 = Config.configure_task(\"task_2\", print, [dn_config_2], [dn_config_3]) task_config_3 = Config.configure_task(\"task_3\", print, [dn_config_2], [dn_config_3]) tasks = _TaskManager._bulk_get_or_create([task_config_1, task_config_2, task_config_3], \"scenario_id\") sequence_1 = _SequenceManager._create(\"sequence_1\", [tasks[0], tasks[1]], scenario_id=\"scenario_id\") sequence_2 = _SequenceManager._create(\"sequence_2\", [tasks[0], tasks[2]], scenario_id=\"scenario_id\") tasks_1 = list(sequence_1.tasks.values()) tasks_2 = list(sequence_2.tasks.values()) assert len(tasks_1) == 2 assert len(tasks_2) == 2 assert tasks_1[0].parent_ids == {sequence_1.id, sequence_2.id} assert tasks_2[0].parent_ids == {sequence_1.id, sequence_2.id} assert tasks_1[1].parent_ids == {sequence_1.id} assert tasks_2[1].parent_ids == {sequence_2.id} g = 0 def mock_function_no_input_no_output(): global g g += 1 def mock_function_one_input_no_output(inp): global g g += inp def mock_function_no_input_one_output(): global g return g def test_submit_sequence_from_tasks_with_one_or_no_input_output(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) _OrchestratorFactory._build_dispatcher() # test no input and no output Task task_no_input_no_output = Task(\"task_no_input_no_output\", {}, mock_function_no_input_no_output) scenario_1 = Scenario(\"scenario_1\", {task_no_input_no_output}, {}) _TaskManager._set(task_no_input_no_output) _ScenarioManager._set(scenario_1) scenario_1.add_sequences({\"my_sequence_1\": [task_no_input_no_output]}) sequence_1 = scenario_1.sequences[\"my_sequence_1\"] assert len(sequence_1._get_sorted_tasks()) == 1 _SequenceManager._submit(sequence_1) assert g == 1 # test one input and no output Task data_node_input = InMemoryDataNode(\"input_dn\", Scope.SCENARIO, properties={\"default_data\": 2}) task_one_input_no_output = Task( \"task_one_input_no_output\", {}, mock_function_one_input_no_output, input=[data_node_input] ) scenario_2 = Scenario(\"scenario_2\", {task_one_input_no_output}, {}) _DataManager._set(data_node_input) data_node_input.unlock_edit() _TaskManager._set(task_one_input_no_output) _ScenarioManager._set(scenario_2) scenario_2.add_sequences({\"my_sequence_2\": [task_one_input_no_output]}) sequence_2 = scenario_2.sequences[\"my_sequence_2\"] assert len(sequence_2._get_sorted_tasks()) == 1 _SequenceManager._submit(sequence_2) assert g == 3 # test no input and one output Task data_node_output = InMemoryDataNode(\"output_dn\", Scope.SCENARIO, properties={\"default_data\": None}) task_no_input_one_output = Task( 
\"task_no_input_one_output\", {}, mock_function_no_input_one_output, output=[data_node_output] ) scenario_3 = Scenario(\"scenario_3\", {task_no_input_one_output}, {}) _DataManager._set(data_node_output) assert data_node_output.read() is None _TaskManager._set(task_no_input_one_output) _ScenarioManager._set(scenario_3) scenario_3.add_sequences({\"my_sequence_3\": [task_no_input_one_output]}) sequence_3 = scenario_3.sequences[\"my_sequence_3\"] assert len(sequence_2._get_sorted_tasks()) == 1 _SequenceManager._submit(sequence_3) assert data_node_output.read() == 3 def mult_by_two(nb: int): return nb * 2 def mult_by_3(nb: int): return nb * 3 def test_get_or_create_data(): # only create intermediate data node once Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_config_1 = Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1) dn_config_2 = Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0) dn_config_6 = Config.configure_data_node(\"baz\", \"in_memory\", Scope.SCENARIO, default_data=0) task_config_mult_by_two = Config.configure_task(\"mult_by_two\", mult_by_two, [dn_config_1], dn_config_2) task_config_mult_by_3 = Config.configure_task(\"mult_by_3\", mult_by_3, [dn_config_2], dn_config_6) # dn_1 ---> mult_by_two ---> dn_2 ---> mult_by_3 ---> dn_6 scenario_config = Config.configure_scenario(\"scenario\", [task_config_mult_by_two, task_config_mult_by_3]) _OrchestratorFactory._build_dispatcher() assert len(_DataManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 0 scenario = _ScenarioManager._create(scenario_config) scenario.add_sequences({\"by_6\": list(scenario.tasks.values())}) sequence = scenario.sequences[\"by_6\"] assert sequence.name == \"by_6\" assert len(_DataManager._get_all()) == 3 assert len(_TaskManager._get_all()) == 2 assert len(sequence._get_sorted_tasks()) == 2 assert sequence.foo.read() == 1 assert sequence.bar.read() == 0 assert sequence.baz.read() == 0 assert sequence._get_sorted_tasks()[0][0].config_id == task_config_mult_by_two.id assert sequence._get_sorted_tasks()[1][0].config_id == task_config_mult_by_3.id _SequenceManager._submit(sequence.id) assert sequence.foo.read() == 1 assert sequence.bar.read() == 2 assert sequence.baz.read() == 6 sequence.foo.write(\"new data value\") assert sequence.foo.read() == \"new data value\" assert sequence.bar.read() == 2 assert sequence.baz.read() == 6 sequence.bar.write(7) assert sequence.foo.read() == \"new data value\" assert sequence.bar.read() == 7 assert sequence.baz.read() == 6 with pytest.raises(AttributeError): sequence.WRONG.write(7) def notify1(*args, **kwargs): ... def notify2(*args, **kwargs): ... def notify_multi_param(*args, **kwargs): ... 
def test_sequence_notification_subscribe(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) task_configs = [ Config.configure_task( \"mult_by_two\", mult_by_two, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ] _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create(task_configs=task_configs) scenario = Scenario(\"scenario\", set(tasks), {}, sequences={\"by_1\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"by_1\"] notify_1 = NotifyMock(sequence) notify_1.__name__ = \"notify_1\" notify_1.__module__ = \"notify_1\" notify_2 = NotifyMock(sequence) notify_2.__name__ = \"notify_2\" notify_2.__module__ = \"notify_2\" # Mocking this because NotifyMock is a class that does not loads correctly when getting the sequence # from the storage. mocker.patch.object(_utils, \"_load_fct\", side_effect=[notify_1, notify_1, notify_2, notify_2, notify_2, notify_2]) # test subscription callback = mock.MagicMock() _SequenceManager._submit(sequence.id, [callback]) callback.assert_called() # test sequence subscribe notification _SequenceManager._subscribe(callback=notify_1, sequence=sequence) _SequenceManager._submit(sequence.id) notify_1.assert_called_3_times() notify_1.reset() # test sequence unsubscribe notification # test subscribe notification only on new job _SequenceManager._unsubscribe(callback=notify_1, sequence=sequence) _SequenceManager._subscribe(callback=notify_2, sequence=sequence) _SequenceManager._submit(sequence) notify_1.assert_not_called() notify_2.assert_called_3_times() def test_sequence_notification_subscribe_multi_param(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) task_configs = [ Config.configure_task( \"mult_by_two\", mult_by_two, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ] _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create(task_configs) scenario = Scenario(\"scenario\", set(tasks), {}, sequences={\"by_6\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"by_6\"] notify = mocker.Mock() # test sequence subscribe notification _SequenceManager._subscribe(callback=notify, params=[\"foobar\", 123, 1.2], sequence=sequence) mocker.patch.object(_SequenceManager, \"_get\", return_value=sequence) _SequenceManager._submit(sequence.id) # as the callback is called with Sequence/Scenario and Job objects # we can assert that is called with params plus a sequence object that we know # of and a job object that is represented by ANY in this case notify.assert_called_with(\"foobar\", 123, 1.2, sequence, ANY) def test_sequence_notification_unsubscribe(mocker): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) mocker.patch(\"src.taipy.core._entity._reload._Reloader._reload\", side_effect=lambda m, o: o) task_configs = [ Config.configure_task( \"mult_by_two\", mult_by_two, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ] 
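# Added note: the steps below register notify_1, immediately unsubscribe it, register
# notify_2, and submit the sequence; only notify_2 should remain attached, and
# unsubscribing notify_1 a second time is expected to raise a ValueError.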
_OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create(task_configs) scenario = Scenario(\"scenario\", set(tasks), {}, sequences={\"by_6\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"by_6\"] notify_1 = notify1 notify_2 = notify2 _SequenceManager._subscribe(callback=notify_1, sequence=sequence) _SequenceManager._unsubscribe(callback=notify_1, sequence=sequence) _SequenceManager._subscribe(callback=notify_2, sequence=sequence) _SequenceManager._submit(sequence.id) with pytest.raises(ValueError): _SequenceManager._unsubscribe(callback=notify_1, sequence=sequence) _SequenceManager._unsubscribe(callback=notify_2, sequence=sequence) def test_sequence_notification_unsubscribe_multi_param(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task_configs = [ Config.configure_task( \"mult_by_two\", mult_by_two, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ] _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create(task_configs) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"by_6\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"by_6\"] _SequenceManager._subscribe(callback=notify_multi_param, params=[\"foobar\", 123, 0], sequence=sequence) _SequenceManager._subscribe(callback=notify_multi_param, params=[\"foobar\", 123, 1], sequence=sequence) _SequenceManager._subscribe(callback=notify_multi_param, params=[\"foobar\", 123, 2], sequence=sequence) assert len(sequence.subscribers) == 3 sequence.unsubscribe(notify_multi_param) assert len(sequence.subscribers) == 2 assert _Subscriber(notify_multi_param, [\"foobar\", 123, 0]) not in sequence.subscribers sequence.unsubscribe(notify_multi_param, [\"foobar\", 123, 2]) assert len(sequence.subscribers) == 1 assert _Subscriber(notify_multi_param, [\"foobar\", 123, 2]) not in sequence.subscribers with pytest.raises(ValueError): sequence.unsubscribe(notify_multi_param, [\"foobar\", 123, 10000]) def test_sequence_notification_subscribe_all(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) task_configs = [ Config.configure_task( \"mult_by_two\", mult_by_two, [Config.configure_data_node(\"foo\", \"in_memory\", Scope.SCENARIO, default_data=1)], Config.configure_data_node(\"bar\", \"in_memory\", Scope.SCENARIO, default_data=0), ) ] _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create(task_configs) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"by_6\": {\"tasks\": tasks}, \"other_sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"by_6\"] other_sequence = scenario.sequences[\"other_sequence\"] notify_1 = NotifyMock(sequence) _SequenceManager._subscribe(notify_1) assert len(_SequenceManager._get(sequence.id).subscribers) == 1 assert len(_SequenceManager._get(other_sequence.id).subscribers) == 1 def test_delete(): sequence_id = \"SEQUENCE_sequence_SCENARIO_scenario_id_1\" with pytest.raises(ModelNotFound): _SequenceManager._delete(sequence_id) scenario_1 = Scenario(\"scenario_1\", [], {}, scenario_id=\"SCENARIO_scenario_id_1\") scenario_2 = Scenario(\"scenario_2\", [], {}, scenario_id=\"SCENARIO_scenario_id_2\") _ScenarioManager._set(scenario_1) _ScenarioManager._set(scenario_2) with pytest.raises(ModelNotFound): _SequenceManager._delete(sequence_id) scenario_1.add_sequences({\"sequence\": 
{}}) assert len(_SequenceManager._get_all()) == 1 _SequenceManager._delete(sequence_id) assert len(_SequenceManager._get_all()) == 0 scenario_1.add_sequences({\"sequence\": {}, \"sequence_1\": {}}) assert len(_SequenceManager._get_all()) == 2 _SequenceManager._delete(sequence_id) assert len(_SequenceManager._get_all()) == 1 scenario_1.add_sequences({\"sequence_1\": {}, \"sequence_2\": {}, \"sequence_3\": {}}) scenario_2.add_sequences({\"sequence_1_2\": {}, \"sequence_2_2\": {}}) assert len(_SequenceManager._get_all()) == 5 _SequenceManager._delete_all() assert len(_SequenceManager._get_all()) == 0 scenario_1.add_sequences({\"sequence_1\": {}, \"sequence_2\": {}, \"sequence_3\": {}, \"sequence_4\": {}}) scenario_2.add_sequences({\"sequence_1_2\": {}, \"sequence_2_2\": {}}) assert len(_SequenceManager._get_all()) == 6 _SequenceManager._delete_many( [ \"SEQUENCE_sequence_1_SCENARIO_scenario_id_1\", \"SEQUENCE_sequence_2_SCENARIO_scenario_id_1\", \"SEQUENCE_sequence_1_2_SCENARIO_scenario_id_2\", ] ) assert len(_SequenceManager._get_all()) == 3 with pytest.raises(ModelNotFound): _SequenceManager._delete_many( [\"SEQUENCE_sequence_1_SCENARIO_scenario_id_1\", \"SEQUENCE_sequence_2_SCENARIO_scenario_id_1\"] ) def test_delete_version(): scenario_1_0 = Scenario( \"scenario_config\", [], {}, scenario_id=\"SCENARIO_id_1_v1_0\", version=\"1.0\", sequences={\"sequence_1\": {}, \"sequence_2\": {}}, ) scenario_1_1 = Scenario( \"scenario_config\", [], {}, scenario_id=\"SCENARIO_id_1_v1_1\", version=\"1.1\", sequences={\"sequence_1\": {}, \"sequence_2\": {}}, ) _ScenarioManager._set(scenario_1_0) _ScenarioManager._set(scenario_1_1) _VersionManager._set_experiment_version(\"1.1\") assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 2 _VersionManager._set_experiment_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 2 _SequenceManager._delete_by_version(\"1.0\") assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 assert len(scenario_1_0.sequences) == 0 assert len(scenario_1_1.sequences) == 2 _VersionManager._set_experiment_version(\"1.1\") assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 2 assert len(scenario_1_0.sequences) == 0 assert len(scenario_1_1.sequences) == 2 _SequenceManager._delete_by_version(\"1.1\") assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 def test_exists(): scenario = Scenario(\"scenario\", [], {}, scenario_id=\"SCENARIO_scenario\", sequences={\"sequence\": {}}) _ScenarioManager._set(scenario) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert not _SequenceManager._exists(\"SEQUENCE_sequence_not_exist_SCENARIO_scenario\") assert not _SequenceManager._exists(\"SEQUENCE_sequence_SCENARIO_scenario_id\") assert _SequenceManager._exists(\"SEQUENCE_sequence_SCENARIO_scenario\") assert _SequenceManager._exists(scenario.sequences[\"sequence\"]) def test_export(tmpdir_factory): path = tmpdir_factory.mktemp(\"data\") task = Task(\"task\", {}, print, id=TaskId(\"task_id\")) scenario = Scenario( \"scenario\", set([task]), {}, set(), version=\"1.0\", sequences={\"sequence_1\": {}, \"sequence_2\": {\"tasks\": [task], \"properties\": {\"xyz\": \"acb\"}}}, ) _TaskManager._set(task) _ScenarioManager._set(scenario) sequence_1 = scenario.sequences[\"sequence_1\"] sequence_2 = scenario.sequences[\"sequence_2\"] _SequenceManager._export(sequence_1.id, 
Path(path)) export_sequence_json_file_path = f\"{path}/sequences/{sequence_1.id}.json\" with open(export_sequence_json_file_path, \"rb\") as f: sequence_json_file = json.load(f) expected_json = { \"id\": sequence_1.id, \"owner_id\": scenario.id, \"parent_ids\": [scenario.id], \"name\": \"sequence_1\", \"tasks\": [], \"properties\": {}, \"subscribers\": [], } assert expected_json == sequence_json_file _SequenceManager._export(sequence_2.id, Path(path)) export_sequence_json_file_path = f\"{path}/sequences/{sequence_2.id}.json\" with open(export_sequence_json_file_path, \"rb\") as f: sequence_json_file = json.load(f) expected_json = { \"id\": sequence_2.id, \"owner_id\": scenario.id, \"parent_ids\": [scenario.id], \"name\": \"sequence_2\", \"tasks\": [task.id], \"properties\": {\"xyz\": \"acb\"}, \"subscribers\": [], } assert expected_json == sequence_json_file def test_hard_delete_one_single_sequence_with_scenario_data_nodes(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.SCENARIO) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create([task_config]) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] sequence.submit() assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 _SequenceManager._hard_delete(sequence.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 def test_hard_delete_one_single_sequence_with_cycle_data_nodes(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) dn_input_config = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.CYCLE, default_data=\"testing\") dn_output_config = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.CYCLE) task_config = Config.configure_task(\"task_config\", print, dn_input_config, dn_output_config) _OrchestratorFactory._build_dispatcher() tasks = _TaskManager._bulk_get_or_create([task_config]) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] sequence.submit() assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 _SequenceManager._hard_delete(sequence.id) assert len(_ScenarioManager._get_all()) == 1 assert len(_SequenceManager._get_all()) == 0 assert len(_TaskManager._get_all()) == 1 assert len(_DataManager._get_all()) == 2 assert len(_JobManager._get_all()) == 1 def test_hard_delete_shared_entities(): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) input_dn = Config.configure_data_node(\"my_input\", \"in_memory\", scope=Scope.SCENARIO, default_data=\"testing\") intermediate_dn = Config.configure_data_node(\"my_inter\", \"in_memory\", 
scope=Scope.GLOBAL, default_data=\"testing\") output_dn = Config.configure_data_node(\"my_output\", \"in_memory\", scope=Scope.GLOBAL, default_data=\"testing\") task_1 = Config.configure_task(\"task_1\", print, input_dn, intermediate_dn) task_2 = Config.configure_task(\"task_2\", print, intermediate_dn, output_dn) _OrchestratorFactory._build_dispatcher() tasks_scenario_1 = _TaskManager._bulk_get_or_create([task_1, task_2], scenario_id=\"scenario_id_1\") tasks_scenario_2 = _TaskManager._bulk_get_or_create([task_1, task_2], scenario_id=\"scenario_id_2\") scenario_1 = Scenario(\"scenario_1\", tasks_scenario_1, {}, sequences={\"sequence\": {\"tasks\": tasks_scenario_1}}) scenario_2 = Scenario(\"scenario_2\", tasks_scenario_2, {}, sequences={\"sequence\": {\"tasks\": tasks_scenario_2}}) _ScenarioManager._set(scenario_1) _ScenarioManager._set(scenario_2) sequence_1 = scenario_1.sequences[\"sequence\"] sequence_2 = scenario_2.sequences[\"sequence\"] _SequenceManager._submit(sequence_1.id) _SequenceManager._submit(sequence_2.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 2 assert len(_TaskManager._get_all()) == 3 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 4 _SequenceManager._hard_delete(sequence_1.id) assert len(_ScenarioManager._get_all()) == 2 assert len(_SequenceManager._get_all()) == 1 assert len(_TaskManager._get_all()) == 3 assert len(_DataManager._get_all()) == 4 assert len(_JobManager._get_all()) == 4 def my_print(a, b): print(a + b) def test_submit_task_with_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_path=\"wrong_path.pickle\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") json_dn_cfg = Config.configure_parquet_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_2_cfg = Config.configure_task(\"task2\", my_print, [csv_dn_cfg, parquet_dn_cfg], json_dn_cfg) tasks = _TaskManager._bulk_get_or_create([task_cfg, task_2_cfg]) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] pip_manager = _SequenceManagerFactory._build_manager() pip_manager._submit(sequence) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in sequence.get_inputs() ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. 
Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in sequence.data_nodes.values() if input_dn not in sequence.get_inputs() ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) def test_submit_task_with_one_input_dn_wrong_file_path(caplog): csv_dn_cfg = Config.configure_csv_data_node(\"wrong_csv_file_path\", default_path=\"wrong_path.csv\") pickle_dn_cfg = Config.configure_pickle_data_node(\"wrong_pickle_file_path\", default_data=\"value\") parquet_dn_cfg = Config.configure_parquet_data_node(\"wrong_parquet_file_path\", default_path=\"wrong_path.parquet\") json_dn_cfg = Config.configure_parquet_data_node(\"wrong_json_file_path\", default_path=\"wrong_path.json\") task_cfg = Config.configure_task(\"task\", my_print, [csv_dn_cfg, pickle_dn_cfg], parquet_dn_cfg) task_2_cfg = Config.configure_task(\"task2\", my_print, [csv_dn_cfg, parquet_dn_cfg], json_dn_cfg) tasks = _TaskManager._bulk_get_or_create([task_cfg, task_2_cfg]) scenario = Scenario(\"scenario\", tasks, {}, sequences={\"sequence\": {\"tasks\": tasks}}) _ScenarioManager._set(scenario) sequence = scenario.sequences[\"sequence\"] pip_manager = _SequenceManagerFactory._build_manager() pip_manager._submit(sequence) stdout = caplog.text expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in sequence.get_inputs() if input_dn.config_id == \"wrong_csv_file_path\" ] not_expected_outputs = [ f\"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong \" f\"path : {input_dn.path} \" for input_dn in sequence.data_nodes.values() if input_dn.config_id != \"wrong_csv_file_path\" ] assert all([expected_output in stdout for expected_output in expected_outputs]) assert all([expected_output not in stdout for expected_output in not_expected_outputs]) "} {"text": "import datetime import json import os import pathlib from dataclasses import dataclass from enum import Enum from time import sleep import numpy as np import pandas as pd import pytest from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.json import JSONDataNode from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.exceptions.exceptions import NoData from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import InvalidConfigurationId @pytest.fixture(scope=\"function\", autouse=True) def cleanup(): yield path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.json\") if os.path.isfile(path): os.remove(path) class MyCustomObject: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class MyCustomObject2: def __init__(self, id, boolean, text): self.id = id self.boolean = boolean self.text = text class MyEnum(Enum): A = 1 B = 2 C = 3 @dataclass class CustomDataclass: integer: int string: str class MyCustomEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, MyCustomObject): return {\"__type__\": \"MyCustomObject\", \"id\": o.id, \"integer\": o.integer, \"text\": o.text} return super().default(self, o) class MyCustomDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): super().__init__(object_hook=self.object_hook, 
*args, **kwargs) def object_hook(self, o): if o.get(\"__type__\") == \"MyCustomObject\": return MyCustomObject(o[\"id\"], o[\"integer\"], o[\"text\"]) else: return o class TestJSONDataNode: def test_create(self): path = \"data/node/path\" dn = JSONDataNode(\"foo_bar\", Scope.SCENARIO, properties={\"default_path\": path, \"name\": \"super name\"}) assert isinstance(dn, JSONDataNode) assert dn.storage_type() == \"json\" assert dn.config_id == \"foo_bar\" assert dn.name == \"super name\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.last_edit_date is None assert dn.job_ids == [] assert not dn.is_ready_for_reading assert dn.path == path with pytest.raises(InvalidConfigurationId): dn = JSONDataNode( \"foo bar\", Scope.SCENARIO, properties={\"default_path\": path, \"has_header\": False, \"name\": \"super name\"} ) def test_get_user_properties(self, json_file): dn_1 = JSONDataNode(\"dn_1\", Scope.SCENARIO, properties={\"path\": json_file}) assert dn_1._get_user_properties() == {} dn_2 = JSONDataNode( \"dn_2\", Scope.SCENARIO, properties={ \"default_data\": \"foo\", \"default_path\": json_file, \"encoder\": MyCustomEncoder, \"decoder\": MyCustomDecoder, \"foo\": \"bar\", }, ) # default_data, default_path, path, encoder, decoder are filtered out assert dn_2._get_user_properties() == {\"foo\": \"bar\"} def test_new_json_data_node_with_existing_file_is_ready_for_reading(self): not_ready_dn_cfg = Config.configure_data_node( \"not_ready_data_node_config_id\", \"json\", default_path=\"NOT_EXISTING.json\" ) not_ready_dn = _DataManager._bulk_get_or_create([not_ready_dn_cfg])[not_ready_dn_cfg] assert not not_ready_dn.is_ready_for_reading assert not_ready_dn.path == \"NOT_EXISTING.json\" path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_list.json\") ready_dn_cfg = Config.configure_data_node(\"ready_data_node_config_id\", \"json\", default_path=path) ready_dn = _DataManager._bulk_get_or_create([ready_dn_cfg])[ready_dn_cfg] assert ready_dn.is_ready_for_reading def test_read_non_existing_json(self): not_existing_json = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": \"WRONG.json\"}) with pytest.raises(NoData): assert not_existing_json.read() is None not_existing_json.read_or_raise() def test_read(self): path_1 = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_list.json\") dn_1 = JSONDataNode(\"bar\", Scope.SCENARIO, properties={\"default_path\": path_1}) data_1 = dn_1.read() assert isinstance(data_1, list) assert len(data_1) == 4 path_2 = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_dict.json\") dn_2 = JSONDataNode(\"bar\", Scope.SCENARIO, properties={\"default_path\": path_2}) data_2 = dn_2.read() assert isinstance(data_2, dict) assert data_2[\"id\"] == \"1\" path_3 = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_int.json\") dn_3 = JSONDataNode(\"bar\", Scope.SCENARIO, properties={\"default_path\": path_3}) data_3 = dn_3.read() assert isinstance(data_3, int) assert data_3 == 1 path_4 = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_null.json\") dn_4 = JSONDataNode(\"bar\", Scope.SCENARIO, properties={\"default_path\": path_4}) data_4 = dn_4.read() assert data_4 is None def test_read_invalid_json(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/invalid.json.txt\") dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": 
path}) with pytest.raises(ValueError): dn.read() def test_append_to_list(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) original_data = json_dn.read() # Append a dictionary append_data_1 = {\"a\": 1, \"b\": 2, \"c\": 3} json_dn.append(append_data_1) assert json_dn.read() == original_data + [append_data_1] # Append a list of dictionaries append_data_data_2 = [{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}] json_dn.append(append_data_data_2) assert json_dn.read() == original_data + [append_data_1] + append_data_data_2 def test_append_to_a_dictionary(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) original_data = {\"a\": 1, \"b\": 2, \"c\": 3} json_dn.write(original_data) # Append another dictionary append_data_1 = {\"d\": 1, \"e\": 2, \"f\": 3} json_dn.append(append_data_1) assert json_dn.read() == {**original_data, **append_data_1} # Append an overlap dictionary append_data_data_2 = {\"a\": 10, \"b\": 20, \"g\": 30} json_dn.append(append_data_data_2) assert json_dn.read() == {**original_data, **append_data_1, **append_data_data_2} def test_write(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) data = {\"a\": 1, \"b\": 2, \"c\": 3} json_dn.write(data) assert np.array_equal(json_dn.read(), data) def test_write_with_different_encoding(self, json_file): data = {\"\u2265a\": 1, \"b\": 2} utf8_dn = JSONDataNode(\"utf8_dn\", Scope.SCENARIO, properties={\"default_path\": json_file}) utf16_dn = JSONDataNode( \"utf16_dn\", Scope.SCENARIO, properties={\"default_path\": json_file, \"encoding\": \"utf-16\"} ) # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding utf8_dn.write(data) assert np.array_equal(utf8_dn.read(), data) with pytest.raises(UnicodeError): utf16_dn.read() # If a file is written with utf-16 encoding, it can only be read with utf-16, not utf-8 encoding utf16_dn.write(data) assert np.array_equal(utf16_dn.read(), data) with pytest.raises(UnicodeError): utf8_dn.read() def test_write_non_serializable(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) data = {\"a\": 1, \"b\": json_dn} with pytest.raises(TypeError): json_dn.write(data) def test_write_date(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) now = datetime.datetime.now() data = {\"date\": now} json_dn.write(data) read_data = json_dn.read() assert read_data[\"date\"] == now def test_write_enum(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) data = [MyEnum.A, MyEnum.B, MyEnum.C] json_dn.write(data) read_data = json_dn.read() assert read_data == [MyEnum.A, MyEnum.B, MyEnum.C] def test_write_dataclass(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) json_dn.write(CustomDataclass(integer=1, string=\"foo\")) read_data = json_dn.read() assert read_data.integer == 1 assert read_data.string == \"foo\" def test_write_custom_encoder(self, json_file): json_dn = JSONDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": json_file, \"encoder\": MyCustomEncoder} ) data = [MyCustomObject(\"1\", 1, \"abc\"), 100] json_dn.write(data) read_data = json_dn.read() assert read_data[0][\"__type__\"] == \"MyCustomObject\" assert read_data[0][\"id\"] == \"1\" assert 
read_data[0][\"integer\"] == 1 assert read_data[0][\"text\"] == \"abc\" assert read_data[1] == 100 def test_read_write_custom_encoder_decoder(self, json_file): json_dn = JSONDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": json_file, \"encoder\": MyCustomEncoder, \"decoder\": MyCustomDecoder}, ) data = [MyCustomObject(\"1\", 1, \"abc\"), 100] json_dn.write(data) read_data = json_dn.read() assert isinstance(read_data[0], MyCustomObject) assert read_data[0].id == \"1\" assert read_data[0].integer == 1 assert read_data[0].text == \"abc\" assert read_data[1] == 100 def test_filter(self, json_file): json_dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": json_file}) json_dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, {\"KWARGS_KEY\": \"KWARGS_VALUE\"}, ] ) assert len(json_dn.filter((\"foo\", 1, Operator.EQUAL))) == 3 assert len(json_dn.filter((\"foo\", 1, Operator.NOT_EQUAL))) == 3 assert len(json_dn.filter((\"bar\", 2, Operator.EQUAL))) == 3 assert len(json_dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)) == 4 assert json_dn[0] == {\"foo\": 1, \"bar\": 1} assert json_dn[2] == {\"foo\": 1} assert json_dn[:2] == [{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}] @pytest.mark.parametrize( [\"properties\", \"exists\"], [ ({}, False), ({\"default_data\": {\"foo\": \"bar\"}}, True), ], ) def test_create_with_default_data(self, properties, exists): dn = JSONDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) assert os.path.exists(dn.path) is exists def test_set_path(self): dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": \"foo.json\"}) assert dn.path == \"foo.json\" dn.path = \"bar.json\" assert dn.path == \"bar.json\" def test_read_write_after_modify_path(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/json/example_dict.json\") new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.json\") dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path}) read_data = dn.read() assert read_data is not None dn.path = new_path with pytest.raises(FileNotFoundError): dn.read() dn.write({\"other\": \"stuff\"}) assert dn.read() == {\"other\": \"stuff\"} def test_get_system_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.json\")) pd.DataFrame([]).to_json(temp_file_path) dn = JSONDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path}) dn.write([1, 2, 3]) previous_edit_date = dn.last_edit_date sleep(0.1) pd.DataFrame([4, 5, 6]).to_json(temp_file_path) new_edit_date = datetime.datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert previous_edit_date < dn.last_edit_date assert new_edit_date == dn.last_edit_date sleep(0.1) dn.write([1, 2, 3]) assert new_edit_date < dn.last_edit_date os.unlink(temp_file_path) "} {"text": "from importlib import util from unittest.mock import patch import modin.pandas as modin_pd import numpy as np import pandas as pd import pytest from modin.pandas.test.utils import df_equals from pandas.testing import assert_frame_equal from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.data.sql import SQLDataNode from src.taipy.core.exceptions.exceptions import MissingAppendQueryBuilder, MissingRequiredProperty from 
taipy.config.common.scope import Scope class MyCustomObject: def __init__(self, foo=None, bar=None, *args, **kwargs): self.foo = foo self.bar = bar self.args = args self.kwargs = kwargs def my_write_query_builder_with_pandas(data: pd.DataFrame): insert_data = data.to_dict(\"records\") return [\"DELETE FROM example\", (\"INSERT INTO example VALUES (:foo, :bar)\", insert_data)] def my_write_query_builder_with_modin(data: modin_pd.DataFrame): insert_data = data.to_dict(\"records\") return [\"DELETE FROM example\", (\"INSERT INTO example VALUES (:foo, :bar)\", insert_data)] def my_append_query_builder_with_pandas(data: pd.DataFrame): insert_data = data.to_dict(\"records\") return [(\"INSERT INTO example VALUES (:foo, :bar)\", insert_data)] def my_append_query_builder_with_modin(data: modin_pd.DataFrame): insert_data = data.to_dict(\"records\") return [(\"INSERT INTO example VALUES (:foo, :bar)\", insert_data)] def single_write_query_builder(data): return \"DELETE FROM example\" class TestSQLDataNode: __pandas_properties = [ { \"db_name\": \"taipy.sqlite3\", \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", \"other\": \"value\", }, }, ] __modin_properties = [ { \"db_name\": \"taipy.sqlite3\", \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_modin, \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", \"other\": \"value\", }, }, ] if util.find_spec(\"pyodbc\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mssql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mssql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_modin, \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) if util.find_spec(\"pymysql\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mysql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mysql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_modin, \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) if util.find_spec(\"psycopg2\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"postgresql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"postgresql\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_modin, \"exposed_type\": \"modin\", 
\"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @pytest.mark.parametrize(\"modin_properties\", __modin_properties) def test_create(self, pandas_properties, modin_properties): dn = SQLDataNode( \"foo_bar\", Scope.SCENARIO, properties=pandas_properties, ) assert isinstance(dn, SQLDataNode) assert dn.storage_type() == \"sql\" assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.exposed_type == \"pandas\" assert dn.read_query == \"SELECT * FROM example\" assert dn.write_query_builder == my_write_query_builder_with_pandas dn = SQLDataNode( \"foo_bar\", Scope.SCENARIO, properties=modin_properties, ) assert isinstance(dn, SQLDataNode) assert dn.storage_type() == \"sql\" assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.exposed_type == \"modin\" assert dn.read_query == \"SELECT * FROM example\" assert dn.write_query_builder == my_write_query_builder_with_modin @pytest.mark.parametrize(\"properties\", __pandas_properties + __modin_properties) def test_get_user_properties(self, properties): custom_properties = properties.copy() custom_properties[\"foo\"] = \"bar\" dn = SQLDataNode( \"foo_bar\", Scope.SCENARIO, properties=custom_properties, ) assert dn._get_user_properties() == {\"foo\": \"bar\"} @pytest.mark.parametrize( \"properties\", [ {}, {\"db_username\": \"foo\"}, {\"db_username\": \"foo\", \"db_password\": \"foo\"}, {\"db_username\": \"foo\", \"db_password\": \"foo\", \"db_name\": \"foo\"}, {\"engine\": \"sqlite\"}, {\"engine\": \"mssql\", \"db_name\": \"foo\"}, {\"engine\": \"mysql\", \"db_username\": \"foo\"}, {\"engine\": \"postgresql\", \"db_username\": \"foo\", \"db_password\": \"foo\"}, ], ) def test_create_with_missing_parameters(self, properties): with pytest.raises(MissingRequiredProperty): SQLDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(MissingRequiredProperty): SQLDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @pytest.mark.parametrize(\"modin_properties\", __modin_properties) def test_write_query_builder(self, pandas_properties, modin_properties): custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLDataNode(\"foo_bar\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: # mock connection execute dn.write(pd.DataFrame({\"foo\": [1, 2, 3], \"bar\": [4, 5, 6]})) assert len(engine_mock.mock_calls[4].args) == 1 assert engine_mock.mock_calls[4].args[0].text == \"DELETE FROM example\" assert len(engine_mock.mock_calls[5].args) == 2 assert engine_mock.mock_calls[5].args[0].text == \"INSERT INTO example VALUES (:foo, :bar)\" assert engine_mock.mock_calls[5].args[1] == [ {\"foo\": 1, \"bar\": 4}, {\"foo\": 2, \"bar\": 5}, {\"foo\": 3, \"bar\": 6}, ] custom_properties[\"write_query_builder\"] = single_write_query_builder dn = SQLDataNode(\"foo_bar\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: # mock connection execute dn.write(pd.DataFrame({\"foo\": [1, 2, 3], \"bar\": [4, 5, 6]})) assert len(engine_mock.mock_calls[4].args) == 1 assert 
engine_mock.mock_calls[4].args[0].text == \"DELETE FROM example\" custom_properties = modin_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLDataNode(\"foo_bar\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: # mock connection execute dn.write(modin_pd.DataFrame({\"foo\": [1, 2, 3], \"bar\": [4, 5, 6]})) assert len(engine_mock.mock_calls[4].args) == 1 assert engine_mock.mock_calls[4].args[0].text == \"DELETE FROM example\" assert len(engine_mock.mock_calls[5].args) == 2 assert engine_mock.mock_calls[5].args[0].text == \"INSERT INTO example VALUES (:foo, :bar)\" assert engine_mock.mock_calls[5].args[1] == [ {\"foo\": 1, \"bar\": 4}, {\"foo\": 2, \"bar\": 5}, {\"foo\": 3, \"bar\": 6}, ] custom_properties[\"write_query_builder\"] = single_write_query_builder dn = SQLDataNode(\"foo_bar\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: # mock connection execute dn.write(modin_pd.DataFrame({\"foo\": [1, 2, 3], \"bar\": [4, 5, 6]})) assert len(engine_mock.mock_calls[4].args) == 1 assert engine_mock.mock_calls[4].args[0].text == \"DELETE FROM example\" @pytest.mark.parametrize( \"tmp_sqlite_path\", [ \"tmp_sqlite_db_file_path\", \"tmp_sqlite_sqlite3_file_path\", ], ) def test_sqlite_read_file_with_different_extension(self, tmp_sqlite_path, request): tmp_sqlite_path = request.getfixturevalue(tmp_sqlite_path) folder_path, db_name, file_extension = tmp_sqlite_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * from example\", \"write_query_builder\": single_write_query_builder, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, } dn = SQLDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) data = dn.read() assert data.equals(pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}])) def test_sqlite_append_pandas(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"append_query_builder\": my_append_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, } dn = SQLDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) original_data = pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]) data = dn.read() assert_frame_equal(data, original_data) append_data_1 = pd.DataFrame([{\"foo\": 5, \"bar\": 6}, {\"foo\": 7, \"bar\": 8}]) dn.append(append_data_1) assert_frame_equal(dn.read(), pd.concat([original_data, append_data_1]).reset_index(drop=True)) def test_sqlite_append_modin(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"append_query_builder\": my_append_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"modin\", } dn = SQLDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) original_data = modin_pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]) data = dn.read() df_equals(data, original_data) append_data_1 = modin_pd.DataFrame([{\"foo\": 5, \"bar\": 6}, 
{\"foo\": 7, \"bar\": 8}]) dn.append(append_data_1) df_equals(dn.read(), modin_pd.concat([original_data, append_data_1]).reset_index(drop=True)) def test_sqlite_append_without_append_query_builder(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, } dn = SQLDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) with pytest.raises(MissingAppendQueryBuilder): dn.append(pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}])) def test_filter_pandas_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"pandas\", } dn = SQLDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, 2, 2])) assert dn[\"bar\"].equals(pd.Series([1, 2, 3, 1, 2, 3])) assert dn[:2].equals(pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = pd.DataFrame([{\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, ] ) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_modin_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_modin, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"modin\", } dn = SQLDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, 
{\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, 2, 2])) assert dn[\"bar\"].equals(pd.Series([1, 2, 3, 1, 2, 3])) assert dn[:2].equals(modin_pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = modin_pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = modin_pd.DataFrame([{\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = modin_pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, ] ) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_numpy_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"numpy\", } dn = SQLDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert np.array_equal(dn[0], np.array([1, 1])) assert np.array_equal(dn[1], np.array([1, 2])) assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]])) # Test filter data assert np.array_equal(dn.filter((\"foo\", 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn.filter((\"foo\", 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn.filter((\"bar\", 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]])) assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]])) assert np.array_equal( dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR), np.array([[1, 1], [1, 2], [2, 1], [2, 2]]), ) assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]])) def test_filter_does_not_read_all_entities(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"read_query\": \"SELECT * 
FROM example\", \"write_query_builder\": my_write_query_builder_with_pandas, \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"numpy\", } dn = SQLDataNode(\"foo\", Scope.SCENARIO, properties=properties) # SQLDataNode.filter() should not call the MongoCollectionDataNode._read() method with patch.object(SQLDataNode, \"_read\") as read_mock: dn.filter((\"foo\", 1, Operator.EQUAL)) dn.filter((\"bar\", 2, Operator.NOT_EQUAL)) dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) assert read_mock[\"_read\"].call_count == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. import os import pathlib import pytest from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.csv import CSVDataNode from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.data.pickle import PickleDataNode from src.taipy.core.exceptions.exceptions import InvalidDataNodeType, ModelNotFound from taipy.config.common.scope import Scope from taipy.config.config import Config from tests.core.utils.named_temporary_file import NamedTemporaryFile def file_exists(file_path: str) -> bool: return os.path.exists(file_path) class TestDataManager: def test_create_data_node_and_modify_properties_does_not_modify_config(self): dn_config = Config.configure_data_node(id=\"name\", foo=\"bar\") dn = _DataManager._create_and_set(dn_config, None, None) assert dn_config.properties.get(\"foo\") == \"bar\" assert dn_config.properties.get(\"baz\") is None dn.properties[\"baz\"] = \"qux\" _DataManager._set(dn) assert dn_config.properties.get(\"foo\") == \"bar\" assert dn_config.properties.get(\"baz\") is None assert dn.properties.get(\"foo\") == \"bar\" assert dn.properties.get(\"baz\") == \"qux\" def test_create_data_node_with_name_provided(self): dn_config = Config.configure_data_node(id=\"dn\", foo=\"bar\", name=\"acb\") dn = _DataManager._create_and_set(dn_config, None, None) assert dn.name == \"acb\" def test_create_and_get_csv_data_node(self): # Test we can instantiate a CsvDataNode from DataNodeConfig with : # - a csv type # - a default scenario scope # - No owner_id csv_dn_config = Config.configure_data_node(id=\"foo\", storage_type=\"csv\", path=\"bar\", has_header=True) csv_dn = _DataManager._create_and_set(csv_dn_config, None, None) assert isinstance(csv_dn, CSVDataNode) assert isinstance(_DataManager._get(csv_dn.id), CSVDataNode) assert _DataManager._exists(csv_dn.id) assert _DataManager._get(csv_dn.id) is not None assert _DataManager._get(csv_dn.id).id == csv_dn.id assert _DataManager._get(csv_dn.id).config_id == \"foo\" assert _DataManager._get(csv_dn.id).config_id == csv_dn.config_id assert _DataManager._get(csv_dn.id).scope == Scope.SCENARIO assert _DataManager._get(csv_dn.id).scope == csv_dn.scope 
assert _DataManager._get(csv_dn.id).owner_id is None assert _DataManager._get(csv_dn.id).owner_id == csv_dn.owner_id assert _DataManager._get(csv_dn.id).parent_ids == set() assert _DataManager._get(csv_dn.id).parent_ids == csv_dn.parent_ids assert _DataManager._get(csv_dn.id).last_edit_date is None assert _DataManager._get(csv_dn.id).last_edit_date == csv_dn.last_edit_date assert _DataManager._get(csv_dn.id).job_ids == [] assert _DataManager._get(csv_dn.id).job_ids == csv_dn.job_ids assert not _DataManager._get(csv_dn.id).is_ready_for_reading assert _DataManager._get(csv_dn.id).is_ready_for_reading == csv_dn.is_ready_for_reading assert len(_DataManager._get(csv_dn.id).properties) == 4 assert _DataManager._get(csv_dn.id).properties.get(\"path\") == \"bar\" assert _DataManager._get(csv_dn.id).properties.get(\"encoding\") == \"utf-8\" assert _DataManager._get(csv_dn.id).properties.get(\"has_header\") is True assert _DataManager._get(csv_dn.id).properties.get(\"exposed_type\") == \"pandas\" assert _DataManager._get(csv_dn.id).properties == csv_dn.properties assert _DataManager._get(csv_dn.id).edit_in_progress is False assert _DataManager._get(csv_dn.id)._editor_id is None assert _DataManager._get(csv_dn.id)._editor_expiration_date is None assert _DataManager._get(csv_dn) is not None assert _DataManager._get(csv_dn).id == csv_dn.id assert _DataManager._get(csv_dn).config_id == \"foo\" assert _DataManager._get(csv_dn).config_id == csv_dn.config_id assert _DataManager._get(csv_dn).scope == Scope.SCENARIO assert _DataManager._get(csv_dn).scope == csv_dn.scope assert _DataManager._get(csv_dn).owner_id is None assert _DataManager._get(csv_dn).owner_id == csv_dn.owner_id assert _DataManager._get(csv_dn).parent_ids == set() assert _DataManager._get(csv_dn).parent_ids == csv_dn.parent_ids assert _DataManager._get(csv_dn).last_edit_date is None assert _DataManager._get(csv_dn).last_edit_date == csv_dn.last_edit_date assert _DataManager._get(csv_dn).job_ids == [] assert _DataManager._get(csv_dn).job_ids == csv_dn.job_ids assert not _DataManager._get(csv_dn).is_ready_for_reading assert _DataManager._get(csv_dn).is_ready_for_reading == csv_dn.is_ready_for_reading assert len(_DataManager._get(csv_dn).properties) == 4 assert _DataManager._get(csv_dn).properties.get(\"path\") == \"bar\" assert _DataManager._get(csv_dn).properties.get(\"encoding\") == \"utf-8\" assert _DataManager._get(csv_dn).properties.get(\"has_header\") is True assert _DataManager._get(csv_dn.id).properties.get(\"exposed_type\") == \"pandas\" assert _DataManager._get(csv_dn).properties == csv_dn.properties assert _DataManager._get(csv_dn.id).edit_in_progress is False assert _DataManager._get(csv_dn.id)._editor_id is None assert _DataManager._get(csv_dn.id)._editor_expiration_date is None def test_edit_and_get_data_node(self): config = Config.configure_pickle_data_node(id=\"foo\") dn = _DataManager._create_and_set(config, None, None) assert _DataManager._get(dn.id).last_edit_date is None assert len(_DataManager._get(dn.id).properties) == 1 assert _DataManager._get(dn.id).properties.get(\"is_generated\") assert not _DataManager._get(dn.id).edit_in_progress assert _DataManager._get(dn.id)._editor_id is None assert _DataManager._get(dn.id)._editor_expiration_date is None dn.lock_edit(\"foo\") assert _DataManager._get(dn.id).last_edit_date is None assert len(_DataManager._get(dn.id).properties) == 1 assert _DataManager._get(dn.id).properties.get(\"is_generated\") assert _DataManager._get(dn.id).edit_in_progress assert 
_DataManager._get(dn.id).editor_id == \"foo\" assert _DataManager._get(dn.id).editor_expiration_date is not None dn.unlock_edit(\"foo\") assert _DataManager._get(dn.id).last_edit_date is None assert len(_DataManager._get(dn.id).properties) == 1 assert _DataManager._get(dn.id).properties.get(\"is_generated\") assert not _DataManager._get(dn.id).edit_in_progress assert _DataManager._get(dn.id).editor_id is None assert _DataManager._get(dn.id).editor_expiration_date is None def test_create_and_get_in_memory_data_node(self): # Test we can instantiate an InMemoryDataNode from DataNodeConfig with : # - an in_memory type # - a scenario scope # - an owner id # - some default data in_memory_dn_config = Config.configure_data_node( id=\"baz\", storage_type=\"in_memory\", scope=Scope.SCENARIO, default_data=\"qux\", other_data=\"foo\" ) in_mem_dn = _DataManager._create_and_set(in_memory_dn_config, \"Scenario_id\", {\"task_id\"}) assert isinstance(in_mem_dn, InMemoryDataNode) assert isinstance(_DataManager._get(in_mem_dn.id), InMemoryDataNode) assert _DataManager._exists(in_mem_dn.id) assert _DataManager._get(in_mem_dn.id) is not None assert _DataManager._get(in_mem_dn.id).id == in_mem_dn.id assert _DataManager._get(in_mem_dn.id).config_id == \"baz\" assert _DataManager._get(in_mem_dn.id).config_id == in_mem_dn.config_id assert _DataManager._get(in_mem_dn.id).scope == Scope.SCENARIO assert _DataManager._get(in_mem_dn.id).scope == in_mem_dn.scope assert _DataManager._get(in_mem_dn.id).owner_id == \"Scenario_id\" assert _DataManager._get(in_mem_dn.id).owner_id == in_mem_dn.owner_id assert _DataManager._get(in_mem_dn.id).parent_ids == {\"task_id\"} assert _DataManager._get(in_mem_dn.id).parent_ids == in_mem_dn.parent_ids assert _DataManager._get(in_mem_dn.id).last_edit_date is not None assert _DataManager._get(in_mem_dn.id).last_edit_date == in_mem_dn.last_edit_date assert _DataManager._get(in_mem_dn.id).job_ids == [] assert _DataManager._get(in_mem_dn.id).job_ids == in_mem_dn.job_ids assert _DataManager._get(in_mem_dn.id).is_ready_for_reading assert _DataManager._get(in_mem_dn.id).is_ready_for_reading == in_mem_dn.is_ready_for_reading assert len(_DataManager._get(in_mem_dn.id).properties) == 1 assert _DataManager._get(in_mem_dn.id).properties.get(\"other_data\") == \"foo\" assert _DataManager._get(in_mem_dn.id).properties == in_mem_dn.properties assert _DataManager._get(in_mem_dn) is not None assert _DataManager._get(in_mem_dn).id == in_mem_dn.id assert _DataManager._get(in_mem_dn).config_id == \"baz\" assert _DataManager._get(in_mem_dn).config_id == in_mem_dn.config_id assert _DataManager._get(in_mem_dn).scope == Scope.SCENARIO assert _DataManager._get(in_mem_dn).scope == in_mem_dn.scope assert _DataManager._get(in_mem_dn).owner_id == \"Scenario_id\" assert _DataManager._get(in_mem_dn).owner_id == in_mem_dn.owner_id assert _DataManager._get(in_mem_dn).parent_ids == {\"task_id\"} assert _DataManager._get(in_mem_dn).parent_ids == in_mem_dn.parent_ids assert _DataManager._get(in_mem_dn).last_edit_date is not None assert _DataManager._get(in_mem_dn).last_edit_date == in_mem_dn.last_edit_date assert _DataManager._get(in_mem_dn).job_ids == [] assert _DataManager._get(in_mem_dn).job_ids == in_mem_dn.job_ids assert _DataManager._get(in_mem_dn).is_ready_for_reading assert _DataManager._get(in_mem_dn).is_ready_for_reading == in_mem_dn.is_ready_for_reading assert len(_DataManager._get(in_mem_dn).properties) == 1 assert _DataManager._get(in_mem_dn).properties.get(\"other_data\") == \"foo\" assert 
_DataManager._get(in_mem_dn).properties == in_mem_dn.properties def test_create_and_get_pickle_data_node(self): # Test we can instantiate a PickleDataNode from DataNodeConfig with : # - an in_memory type # - a business cycle scope # - No owner id # - no default data dn_config = Config.configure_data_node(id=\"plop\", storage_type=\"pickle\", scope=Scope.CYCLE) pickle_dn = _DataManager._create_and_set(dn_config, None, {\"task_id_1\", \"task_id_2\"}) assert isinstance(pickle_dn, PickleDataNode) assert isinstance(_DataManager._get(pickle_dn.id), PickleDataNode) assert _DataManager._exists(pickle_dn.id) assert _DataManager._get(pickle_dn.id) is not None assert _DataManager._get(pickle_dn.id).id == pickle_dn.id assert _DataManager._get(pickle_dn.id).config_id == \"plop\" assert _DataManager._get(pickle_dn.id).config_id == pickle_dn.config_id assert _DataManager._get(pickle_dn.id).scope == Scope.CYCLE assert _DataManager._get(pickle_dn.id).scope == pickle_dn.scope assert _DataManager._get(pickle_dn.id).owner_id is None assert _DataManager._get(pickle_dn.id).owner_id == pickle_dn.owner_id assert _DataManager._get(pickle_dn.id).parent_ids == {\"task_id_1\", \"task_id_2\"} assert _DataManager._get(pickle_dn.id).parent_ids == pickle_dn.parent_ids assert _DataManager._get(pickle_dn.id).last_edit_date is None assert _DataManager._get(pickle_dn.id).last_edit_date == pickle_dn.last_edit_date assert _DataManager._get(pickle_dn.id).job_ids == [] assert _DataManager._get(pickle_dn.id).job_ids == pickle_dn.job_ids assert not _DataManager._get(pickle_dn.id).is_ready_for_reading assert _DataManager._get(pickle_dn.id).is_ready_for_reading == pickle_dn.is_ready_for_reading assert len(_DataManager._get(pickle_dn.id).properties) == 1 assert _DataManager._get(pickle_dn.id).properties == pickle_dn.properties assert _DataManager._get(pickle_dn) is not None assert _DataManager._get(pickle_dn).id == pickle_dn.id assert _DataManager._get(pickle_dn).config_id == \"plop\" assert _DataManager._get(pickle_dn).config_id == pickle_dn.config_id assert _DataManager._get(pickle_dn).scope == Scope.CYCLE assert _DataManager._get(pickle_dn).scope == pickle_dn.scope assert _DataManager._get(pickle_dn).owner_id is None assert _DataManager._get(pickle_dn).owner_id == pickle_dn.owner_id assert _DataManager._get(pickle_dn).parent_ids == {\"task_id_1\", \"task_id_2\"} assert _DataManager._get(pickle_dn).parent_ids == pickle_dn.parent_ids assert _DataManager._get(pickle_dn).last_edit_date is None assert _DataManager._get(pickle_dn).last_edit_date == pickle_dn.last_edit_date assert _DataManager._get(pickle_dn).job_ids == [] assert _DataManager._get(pickle_dn).job_ids == pickle_dn.job_ids assert not _DataManager._get(pickle_dn).is_ready_for_reading assert _DataManager._get(pickle_dn).is_ready_for_reading == pickle_dn.is_ready_for_reading assert len(_DataManager._get(pickle_dn).properties) == 1 assert _DataManager._get(pickle_dn).properties == pickle_dn.properties def test_create_raises_exception_with_wrong_type(self): wrong_type_dn_config = DataNodeConfig(id=\"foo\", storage_type=\"bar\", scope=DataNodeConfig._DEFAULT_SCOPE) with pytest.raises(InvalidDataNodeType): _DataManager._create_and_set(wrong_type_dn_config, None, None) def test_create_from_same_config_generates_new_data_node_and_new_id(self): dn_config = Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") dn = _DataManager._create_and_set(dn_config, None, None) dn_2 = _DataManager._create_and_set(dn_config, None, None) assert dn_2.id != dn.id def 
test_create_uses_overridden_attributes_in_config_file(self): Config.override(os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/config.toml\")) csv_dn_cfg = Config.configure_data_node(id=\"foo\", storage_type=\"csv\", path=\"bar\", has_header=True) csv_dn = _DataManager._create_and_set(csv_dn_cfg, None, None) assert csv_dn.config_id == \"foo\" assert isinstance(csv_dn, CSVDataNode) assert csv_dn._path == \"path_from_config_file\" assert csv_dn.has_header csv_dn_cfg = Config.configure_data_node(id=\"baz\", storage_type=\"csv\", path=\"bar\", has_header=True) csv_dn = _DataManager._create_and_set(csv_dn_cfg, None, None) assert csv_dn.config_id == \"baz\" assert isinstance(csv_dn, CSVDataNode) assert csv_dn._path == \"bar\" assert csv_dn.has_header def test_get_if_not_exists(self): with pytest.raises(ModelNotFound): _DataManager._repository._load(\"test_data_node_2\") def test_get_all(self): assert len(_DataManager._get_all()) == 0 dn_config_1 = Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") _DataManager._create_and_set(dn_config_1, None, None) assert len(_DataManager._get_all()) == 1 dn_config_2 = Config.configure_data_node(id=\"baz\", storage_type=\"in_memory\") _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_all()) == 3 assert len([dn for dn in _DataManager._get_all() if dn.config_id == \"foo\"]) == 1 assert len([dn for dn in _DataManager._get_all() if dn.config_id == \"baz\"]) == 2 def test_get_all_on_multiple_versions_environment(self): # Create 5 data nodes with 2 versions each # Only version 1.0 has the data node with config_id = \"config_id_1\" # Only version 2.0 has the data node with config_id = \"config_id_6\" for version in range(1, 3): for i in range(5): _DataManager._set( InMemoryDataNode( f\"config_id_{i + version}\", Scope.SCENARIO, id=DataNodeId(f\"id{i}_v{version}\"), version=f\"{version}.0\", ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"1.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"2.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_set(self): dn = InMemoryDataNode( \"config_id\", Scope.SCENARIO, id=DataNodeId(\"id\"), owner_id=None, parent_ids={\"task_id_1\"}, last_edit_date=None, edits=[], edit_in_progress=False, properties={\"foo\": \"bar\"}, ) assert len(_DataManager._get_all()) == 0 assert not _DataManager._exists(dn.id) _DataManager._set(dn) assert 
len(_DataManager._get_all()) == 1 assert _DataManager._exists(dn.id) # changing data node attribute dn.config_id = \"foo\" assert dn.config_id == \"foo\" _DataManager._set(dn) assert len(_DataManager._get_all()) == 1 assert dn.config_id == \"foo\" assert _DataManager._get(dn.id).config_id == \"foo\" def test_delete(self): dn_1 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_1\") dn_2 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_2\") dn_3 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_3\") assert len(_DataManager._get_all()) == 0 _DataManager._set(dn_1) _DataManager._set(dn_2) _DataManager._set(dn_3) assert len(_DataManager._get_all()) == 3 assert all(_DataManager._exists(dn.id) for dn in [dn_1, dn_2, dn_3]) _DataManager._delete(dn_1.id) assert len(_DataManager._get_all()) == 2 assert _DataManager._get(dn_2.id).id == dn_2.id assert _DataManager._get(dn_3.id).id == dn_3.id assert _DataManager._get(dn_1.id) is None assert all(_DataManager._exists(dn.id) for dn in [dn_2, dn_3]) assert not _DataManager._exists(dn_1.id) _DataManager._delete_all() assert len(_DataManager._get_all()) == 0 assert not any(_DataManager._exists(dn.id) for dn in [dn_2, dn_3]) def test_get_or_create(self): def _get_or_create_dn(config, *args): return _DataManager._bulk_get_or_create([config], *args)[config] _DataManager._delete_all() global_dn_config = Config.configure_data_node( id=\"test_data_node\", storage_type=\"in_memory\", scope=Scope.GLOBAL, data=\"In memory Data Node\" ) cycle_dn_config = Config.configure_data_node( id=\"test_data_node1\", storage_type=\"in_memory\", scope=Scope.CYCLE, data=\"In memory Data Node\" ) scenario_dn_config = Config.configure_data_node( id=\"test_data_node2\", storage_type=\"in_memory\", scope=Scope.SCENARIO, data=\"In memory scenario\" ) assert len(_DataManager._get_all()) == 0 global_dn = _get_or_create_dn(global_dn_config, None, None) assert len(_DataManager._get_all()) == 1 global_dn_bis = _get_or_create_dn(global_dn_config, None) assert len(_DataManager._get_all()) == 1 assert global_dn.id == global_dn_bis.id scenario_dn = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 scenario_dn_bis = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 assert scenario_dn.id == scenario_dn_bis.id scenario_dn_ter = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 assert scenario_dn.id == scenario_dn_bis.id assert scenario_dn_bis.id == scenario_dn_ter.id scenario_dn_quater = _get_or_create_dn(scenario_dn_config, None, \"scenario_id_2\") assert len(_DataManager._get_all()) == 3 assert scenario_dn.id == scenario_dn_bis.id assert scenario_dn_bis.id == scenario_dn_ter.id assert scenario_dn_ter.id != scenario_dn_quater.id assert len(_DataManager._get_all()) == 3 cycle_dn = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 cycle_dn_1 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_1.id cycle_dn_2 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id\") assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_2.id cycle_dn_3 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_3.id cycle_dn_4 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id\") assert 
len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_4.id cycle_dn_5 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id_2\") assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_5.id assert cycle_dn_1.id == cycle_dn_2.id assert cycle_dn_2.id == cycle_dn_3.id assert cycle_dn_3.id == cycle_dn_4.id assert cycle_dn_4.id == cycle_dn_5.id def test_ensure_persistence_of_data_node(self): dm = _DataManager() dm._delete_all() dn_config_1 = Config.configure_data_node( id=\"data_node_1\", storage_type=\"in_memory\", data=\"In memory sequence 2\" ) dn_config_2 = Config.configure_data_node( id=\"data_node_2\", storage_type=\"in_memory\", data=\"In memory sequence 2\" ) dm._bulk_get_or_create([dn_config_1, dn_config_2]) assert len(dm._get_all()) == 2 # Delete the DataManager to ensure it's get from the storage system del dm dm = _DataManager() dm._bulk_get_or_create([dn_config_1]) assert len(dm._get_all()) == 2 dm._delete_all() def test_clean_generated_pickle_files(self, pickle_file_path): user_pickle_dn_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", path=pickle_file_path, default_data=\"d\" ) generated_pickle_dn_1_config = Config.configure_data_node(id=\"d2\", storage_type=\"pickle\", default_data=\"d\") generated_pickle_dn_2_config = Config.configure_data_node(id=\"d3\", storage_type=\"pickle\", default_data=\"d\") dns = _DataManager._bulk_get_or_create( [user_pickle_dn_config, generated_pickle_dn_1_config, generated_pickle_dn_2_config] ) user_pickle_dn = dns[user_pickle_dn_config] generated_pickle_dn_1 = dns[generated_pickle_dn_1_config] generated_pickle_dn_2 = dns[generated_pickle_dn_2_config] _DataManager._clean_pickle_file(user_pickle_dn.id) assert file_exists(user_pickle_dn.path) _DataManager._clean_pickle_files([generated_pickle_dn_1, generated_pickle_dn_2]) assert not file_exists(generated_pickle_dn_1.path) assert not file_exists(generated_pickle_dn_2.path) def test_delete_does_clean_generated_pickle_files(self, pickle_file_path): user_pickle_dn_config = Config.configure_data_node( id=\"d1\", storage_type=\"pickle\", path=pickle_file_path, default_data=\"d\" ) generated_pickle_dn_config_1 = Config.configure_data_node(id=\"d2\", storage_type=\"pickle\", default_data=\"d\") generated_pickle_dn_config_2 = Config.configure_data_node(id=\"d3\", storage_type=\"pickle\", default_data=\"d\") generated_pickle_dn_config_3 = Config.configure_data_node(id=\"d4\", storage_type=\"pickle\", default_data=\"d\") dns = _DataManager._bulk_get_or_create( [ user_pickle_dn_config, generated_pickle_dn_config_1, generated_pickle_dn_config_2, generated_pickle_dn_config_3, ] ) user_pickle_dn = dns[user_pickle_dn_config] generated_pickle_dn_1 = dns[generated_pickle_dn_config_1] generated_pickle_dn_2 = dns[generated_pickle_dn_config_2] generated_pickle_dn_3 = dns[generated_pickle_dn_config_3] _DataManager._delete(user_pickle_dn.id) assert file_exists(user_pickle_dn.path) _DataManager._delete_many([generated_pickle_dn_1.id, generated_pickle_dn_2.id]) assert not file_exists(generated_pickle_dn_1.path) assert not file_exists(generated_pickle_dn_2.path) _DataManager._delete_all() assert not file_exists(generated_pickle_dn_3.path) def test_create_dn_from_loaded_config_no_scope(self): file_config = NamedTemporaryFile( \"\"\" [TAIPY] [DATA_NODE.a] default_data = \"4:int\" [DATA_NODE.b] [TASK.t] function = \"math.sqrt:function\" inputs = [ \"a:SECTION\",] outputs = [ \"b:SECTION\",] skippable = \"False:bool\" [SCENARIO.s] tasks = [ \"t:SECTION\",] 
sequences.s_sequence = [ \"t:SECTION\",] [SCENARIO.s.comparators] \"\"\" ) from src.taipy import core as tp Config.override(file_config.filename) tp.create_scenario(Config.scenarios[\"s\"]) tp.create_scenario(Config.scenarios[\"s\"]) assert len(tp.get_data_nodes()) == 4 def test_create_dn_from_loaded_config_no_storage_type(self): file_config = NamedTemporaryFile( \"\"\" [TAIPY] [DATA_NODE.input] scope = \"SCENARIO:SCOPE\" default_data = \"21:int\" [DATA_NODE.output] storage_type = \"in_memory\" scope = \"SCENARIO:SCOPE\" [TASK.double] inputs = [ \"input:SECTION\",] function = \"math.sqrt:function\" outputs = [ \"output:SECTION\",] skippable = \"False:bool\" [SCENARIO.my_scenario] tasks = [ \"double:SECTION\",] sequences.my_sequence = [ \"double:SECTION\",] [SCENARIO.my_scenario.comparators] \"\"\" ) from src.taipy import core as tp Config.override(file_config.filename) scenario = tp.create_scenario(Config.scenarios[\"my_scenario\"]) assert isinstance(scenario.input, PickleDataNode) assert isinstance(scenario.output, InMemoryDataNode) def test_create_dn_from_loaded_config_modified_default_config(self): file_config = NamedTemporaryFile( \"\"\" [TAIPY] [DATA_NODE.input] scope = \"SCENARIO:SCOPE\" default_path=\"fake/path.csv\" [DATA_NODE.output] storage_type = \"in_memory\" scope = \"SCENARIO:SCOPE\" [TASK.double] inputs = [ \"input:SECTION\",] function = \"math.sqrt:function\" outputs = [ \"output:SECTION\",] skippable = \"False:bool\" [SCENARIO.my_scenario] tasks = [ \"double:SECTION\",] sequences.my_sequence = [ \"double:SECTION\",] [SCENARIO.my_scenario.comparators] \"\"\" ) from src.taipy import core as tp Config.set_default_data_node_configuration(storage_type=\"csv\") Config.override(file_config.filename) scenario = tp.create_scenario(Config.scenarios[\"my_scenario\"]) assert isinstance(scenario.input, CSVDataNode) assert isinstance(scenario.output, InMemoryDataNode) def test_get_tasks_by_config_id(self): dn_config_1 = Config.configure_data_node(\"dn_1\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", scope=Scope.SCENARIO) dn_config_3 = Config.configure_data_node(\"dn_3\", scope=Scope.SCENARIO) dn_1_1 = _DataManager._create_and_set(dn_config_1, None, None) dn_1_2 = _DataManager._create_and_set(dn_config_1, None, None) dn_1_3 = _DataManager._create_and_set(dn_config_1, None, None) assert len(_DataManager._get_all()) == 3 dn_2_1 = _DataManager._create_and_set(dn_config_2, None, None) dn_2_2 = _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_all()) == 5 dn_3_1 = _DataManager._create_and_set(dn_config_3, None, None) assert len(_DataManager._get_all()) == 6 dn_1_datanodes = _DataManager._get_by_config_id(dn_config_1.id) assert len(dn_1_datanodes) == 3 assert sorted([dn_1_1.id, dn_1_2.id, dn_1_3.id]) == sorted([sequence.id for sequence in dn_1_datanodes]) dn_2_datanodes = _DataManager._get_by_config_id(dn_config_2.id) assert len(dn_2_datanodes) == 2 assert sorted([dn_2_1.id, dn_2_2.id]) == sorted([sequence.id for sequence in dn_2_datanodes]) dn_3_datanodes = _DataManager._get_by_config_id(dn_config_3.id) assert len(dn_3_datanodes) == 1 assert sorted([dn_3_1.id]) == sorted([sequence.id for sequence in dn_3_datanodes]) def test_get_data_nodes_by_config_id_in_multiple_versions_environment(self): dn_config_1 = Config.configure_data_node(\"dn_1\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", scope=Scope.SCENARIO) _VersionManager._set_experiment_version(\"1.0\") _DataManager._create_and_set(dn_config_1, None, 
None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_by_config_id(dn_config_1.id)) == 3 assert len(_DataManager._get_by_config_id(dn_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_by_config_id(dn_config_1.id)) == 3 assert len(_DataManager._get_by_config_id(dn_config_2.id)) == 2 "} {"text": "from typing import Dict, List import numpy as np import pandas as pd import pytest from src.taipy.core.data.operator import JoinOperator, Operator from .utils import ( CustomClass, FakeCustomDataNode, FakeDataframeDataNode, FakeDataNode, FakeListDataNode, FakeMultiSheetExcelCustomDataNode, FakeMultiSheetExcelDataFrameDataNode, FakeNumpyarrayDataNode, ) def test_filter_pandas_exposed_type(default_data_frame): dn = FakeDataNode(\"fake_dn\") dn.write(\"Any data\") with pytest.raises(NotImplementedError): dn.filter(((\"any\", 0, Operator.EQUAL)), JoinOperator.OR) with pytest.raises(NotImplementedError): dn.filter(((\"any\", 0, Operator.NOT_EQUAL)), JoinOperator.OR) with pytest.raises(NotImplementedError): dn.filter(((\"any\", 0, Operator.LESS_THAN)), JoinOperator.AND) with pytest.raises(NotImplementedError): dn.filter(((\"any\", 0, Operator.LESS_OR_EQUAL)), JoinOperator.AND) with pytest.raises(NotImplementedError): dn.filter(((\"any\", 0, Operator.GREATER_THAN))) with pytest.raises(NotImplementedError): dn.filter((\"any\", 0, Operator.GREATER_OR_EQUAL)) df_dn = FakeDataframeDataNode(\"fake_dataframe_dn\", default_data_frame) COLUMN_NAME_1 = \"a\" COLUMN_NAME_2 = \"b\" assert len(df_dn.filter((COLUMN_NAME_1, 1, Operator.EQUAL))) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] == 1] ) assert len(df_dn.filter((COLUMN_NAME_1, 1, Operator.NOT_EQUAL))) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] != 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.EQUAL)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] == 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.NOT_EQUAL)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] != 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.LESS_THAN)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] < 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.LESS_OR_EQUAL)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] <= 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.GREATER_THAN)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] > 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, 1, Operator.GREATER_OR_EQUAL)])) == len( default_data_frame[default_data_frame[COLUMN_NAME_1] >= 1] ) assert len(df_dn.filter([(COLUMN_NAME_1, -1000, Operator.LESS_OR_EQUAL)])) == 0 assert len(df_dn.filter([(COLUMN_NAME_1, 1000, Operator.GREATER_OR_EQUAL)])) == 0 assert len(df_dn.filter([(COLUMN_NAME_1, 4, Operator.EQUAL), (COLUMN_NAME_1, 5, Operator.EQUAL)])) == len( default_data_frame[(default_data_frame[COLUMN_NAME_1] == 4) & (default_data_frame[COLUMN_NAME_1] == 5)] ) assert len( df_dn.filter([(COLUMN_NAME_1, 4, Operator.EQUAL), (COLUMN_NAME_2, 5, Operator.EQUAL)], 
JoinOperator.OR) ) == len(default_data_frame[(default_data_frame[COLUMN_NAME_1] == 4) | (default_data_frame[COLUMN_NAME_2] == 5)]) assert len( df_dn.filter( [(COLUMN_NAME_1, 1, Operator.GREATER_THAN), (COLUMN_NAME_2, 3, Operator.GREATER_THAN)], JoinOperator.AND ) ) == len(default_data_frame[(default_data_frame[COLUMN_NAME_1] > 1) & (default_data_frame[COLUMN_NAME_2] > 3)]) assert len( df_dn.filter( [(COLUMN_NAME_1, 2, Operator.GREATER_THAN), (COLUMN_NAME_1, 3, Operator.GREATER_THAN)], JoinOperator.OR ) ) == len(default_data_frame[(default_data_frame[COLUMN_NAME_1] > 2) | (default_data_frame[COLUMN_NAME_1] > 3)]) assert len( df_dn.filter( [(COLUMN_NAME_1, 10, Operator.GREATER_THAN), (COLUMN_NAME_1, -10, Operator.LESS_THAN)], JoinOperator.AND ) ) == len(default_data_frame[(default_data_frame[COLUMN_NAME_1] > 10) | (default_data_frame[COLUMN_NAME_1] < -10)]) assert len( df_dn.filter( [(COLUMN_NAME_1, 10, Operator.GREATER_THAN), (COLUMN_NAME_1, -10, Operator.LESS_THAN)], JoinOperator.OR ) ) == len(default_data_frame[(default_data_frame[COLUMN_NAME_1] > 10) | (default_data_frame[COLUMN_NAME_1] < -10)]) def test_filter_list(): list_dn = FakeListDataNode(\"fake_list_dn\") KEY_NAME = \"value\" assert len(list_dn.filter((KEY_NAME, 4, Operator.EQUAL))) == 1 assert len(list_dn.filter((KEY_NAME, 4, Operator.NOT_EQUAL))) == 9 assert len(list_dn.filter([(KEY_NAME, 4, Operator.EQUAL)])) == 1 assert len(list_dn.filter([(KEY_NAME, 4, Operator.NOT_EQUAL)])) == 9 assert len(list_dn.filter([(KEY_NAME, 4, Operator.LESS_THAN)])) == 4 assert len(list_dn.filter([(KEY_NAME, 4, Operator.LESS_OR_EQUAL)])) == 5 assert len(list_dn.filter([(KEY_NAME, 4, Operator.GREATER_THAN)])) == 5 assert len(list_dn.filter([(KEY_NAME, 4, Operator.GREATER_OR_EQUAL)])) == 6 assert len(list_dn.filter([(KEY_NAME, -1000, Operator.LESS_OR_EQUAL)])) == 0 assert len(list_dn.filter([(KEY_NAME, 1000, Operator.GREATER_OR_EQUAL)])) == 0 assert len(list_dn.filter([(KEY_NAME, 4, Operator.EQUAL), (KEY_NAME, 5, Operator.EQUAL)])) == 0 assert len(list_dn.filter([(KEY_NAME, 4, Operator.EQUAL), (KEY_NAME, 5, Operator.EQUAL)], JoinOperator.OR)) == 2 assert len(list_dn.filter([(KEY_NAME, 4, Operator.EQUAL), (KEY_NAME, 11, Operator.EQUAL)], JoinOperator.AND)) == 0 assert len(list_dn.filter([(KEY_NAME, 4, Operator.EQUAL), (KEY_NAME, 11, Operator.EQUAL)], JoinOperator.OR)) == 1 assert ( len(list_dn.filter([(KEY_NAME, -10, Operator.LESS_OR_EQUAL), (KEY_NAME, 11, Operator.GREATER_OR_EQUAL)])) == 0 ) assert ( len( list_dn.filter( [ (KEY_NAME, 4, Operator.GREATER_OR_EQUAL), (KEY_NAME, 6, Operator.GREATER_OR_EQUAL), ], JoinOperator.AND, ) ) == 4 ) assert ( len( list_dn.filter( [ (KEY_NAME, 4, Operator.GREATER_OR_EQUAL), (KEY_NAME, 6, Operator.GREATER_OR_EQUAL), (KEY_NAME, 11, Operator.EQUAL), ], JoinOperator.AND, ) ) == 0 ) assert ( len( list_dn.filter( [ (KEY_NAME, 4, Operator.GREATER_OR_EQUAL), (KEY_NAME, 6, Operator.GREATER_OR_EQUAL), (KEY_NAME, 11, Operator.EQUAL), ], JoinOperator.OR, ) ) == 6 ) def test_filter_numpy_exposed_type(default_data_frame): default_array = default_data_frame.to_numpy() df_dn = FakeNumpyarrayDataNode(\"fake_dataframe_dn\", default_array) assert len(df_dn.filter((0, 1, Operator.EQUAL))) == len(default_array[default_array[:, 0] == 1]) assert len(df_dn.filter((0, 1, Operator.NOT_EQUAL))) == len(default_array[default_array[:, 0] != 1]) assert len(df_dn.filter([(0, 1, Operator.EQUAL)])) == len(default_array[default_array[:, 0] == 1]) assert len(df_dn.filter([(0, 1, Operator.NOT_EQUAL)])) == len(default_array[default_array[:, 0] != 1]) 
assert len(df_dn.filter([(0, 1, Operator.LESS_THAN)])) == len(default_array[default_array[:, 0] < 1]) assert len(df_dn.filter([(0, 1, Operator.LESS_OR_EQUAL)])) == len(default_array[default_array[:, 0] <= 1]) assert len(df_dn.filter([(0, 1, Operator.GREATER_THAN)])) == len(default_array[default_array[:, 0] > 1]) assert len(df_dn.filter([(0, 1, Operator.GREATER_OR_EQUAL)])) == len(default_array[default_array[:, 0] >= 1]) assert len(df_dn.filter([(0, -1000, Operator.LESS_OR_EQUAL)])) == 0 assert len(df_dn.filter([(0, 1000, Operator.GREATER_OR_EQUAL)])) == 0 assert len(df_dn.filter([(0, 4, Operator.EQUAL), (0, 5, Operator.EQUAL)])) == len( default_array[(default_array[:, 0] == 4) & (default_array[:, 0] == 5)] ) assert len(df_dn.filter([(0, 4, Operator.EQUAL), (1, 5, Operator.EQUAL)], JoinOperator.OR)) == len( default_array[(default_array[:, 0] == 4) | (default_array[:, 1] == 5)] ) assert len(df_dn.filter([(0, 1, Operator.GREATER_THAN), (1, 3, Operator.GREATER_THAN)], JoinOperator.AND)) == len( default_array[(default_array[:, 0] > 1) & (default_array[:, 1] > 3)] ) assert len(df_dn.filter([(0, 2, Operator.GREATER_THAN), (0, 3, Operator.GREATER_THAN)], JoinOperator.OR)) == len( default_array[(default_array[:, 0] > 2) | (default_array[:, 0] > 3)] ) assert len(df_dn.filter([(0, 10, Operator.GREATER_THAN), (0, -10, Operator.LESS_THAN)], JoinOperator.AND)) == len( default_array[(default_array[:, 0] > 10) | (default_array[:, 0] < -10)] ) assert len(df_dn.filter([(0, 10, Operator.GREATER_THAN), (0, -10, Operator.LESS_THAN)], JoinOperator.OR)) == len( default_array[(default_array[:, 0] > 10) | (default_array[:, 0] < -10)] ) def test_filter_by_get_item(default_data_frame): # get item for DataFrame data_type default_data_frame[1] = [100, 100] df_dn = FakeDataframeDataNode(\"fake_dataframe_dn\", default_data_frame) filtered_df_dn = df_dn[\"a\"] assert isinstance(filtered_df_dn, pd.Series) assert len(filtered_df_dn) == len(default_data_frame[\"a\"]) assert filtered_df_dn.to_dict() == default_data_frame[\"a\"].to_dict() filtered_df_dn = df_dn[1] assert isinstance(filtered_df_dn, pd.Series) assert len(filtered_df_dn) == len(default_data_frame[1]) assert filtered_df_dn.to_dict() == default_data_frame[1].to_dict() filtered_df_dn = df_dn[0:2] assert isinstance(filtered_df_dn, pd.DataFrame) assert filtered_df_dn.shape == default_data_frame[0:2].shape assert len(filtered_df_dn) == 2 bool_df = default_data_frame.copy(deep=True) > 4 filtered_df_dn = df_dn[bool_df] assert isinstance(filtered_df_dn, pd.DataFrame) bool_1d_index = [True, False] filtered_df_dn = df_dn[bool_1d_index] assert isinstance(filtered_df_dn, pd.DataFrame) assert filtered_df_dn.to_dict() == default_data_frame[bool_1d_index].to_dict() assert len(filtered_df_dn) == 1 filtered_df_dn = df_dn[[\"a\", \"b\"]] assert isinstance(filtered_df_dn, pd.DataFrame) assert filtered_df_dn.shape == default_data_frame[[\"a\", \"b\"]].shape assert filtered_df_dn.to_dict() == default_data_frame[[\"a\", \"b\"]].to_dict() # get item for custom data_type custom_dn = FakeCustomDataNode(\"fake_custom_dn\") filtered_custom_dn = custom_dn[\"a\"] assert isinstance(filtered_custom_dn, List) assert len(filtered_custom_dn) == 10 assert filtered_custom_dn == [i for i in range(10)] filtered_custom_dn = custom_dn[0:5] assert isinstance(filtered_custom_dn, List) assert all([isinstance(x, CustomClass) for x in filtered_custom_dn]) assert len(filtered_custom_dn) == 5 bool_1d_index = [True if i < 5 else False for i in range(10)] filtered_custom_dn = custom_dn[bool_1d_index] assert 
isinstance(filtered_custom_dn, List) assert len(filtered_custom_dn) == 5 assert filtered_custom_dn == custom_dn._read()[:5] filtered_custom_dn = custom_dn[[\"a\", \"b\"]] assert isinstance(filtered_custom_dn, List) assert all([isinstance(x, Dict) for x in filtered_custom_dn]) assert len(filtered_custom_dn) == 10 assert filtered_custom_dn == [{\"a\": i, \"b\": i * 2} for i in range(10)] # get item for Multi-sheet Excel data_type multi_sheet_excel_df_dn = FakeMultiSheetExcelDataFrameDataNode(\"fake_multi_sheet_excel_df_dn\", default_data_frame) filtered_multi_sheet_excel_df_dn = multi_sheet_excel_df_dn[\"Sheet1\"] assert isinstance(filtered_multi_sheet_excel_df_dn, pd.DataFrame) assert len(filtered_multi_sheet_excel_df_dn) == len(default_data_frame) assert np.array_equal(filtered_multi_sheet_excel_df_dn.to_numpy(), default_data_frame.to_numpy()) multi_sheet_excel_custom_dn = FakeMultiSheetExcelCustomDataNode(\"fake_multi_sheet_excel_df_dn\") filtered_multi_sheet_excel_custom_dn = multi_sheet_excel_custom_dn[\"Sheet1\"] assert isinstance(filtered_multi_sheet_excel_custom_dn, List) assert len(filtered_multi_sheet_excel_custom_dn) == 10 expected_value = [CustomClass(i, i * 2) for i in range(10)] assert all( [ expected.a == filtered.a and expected.b == filtered.b for expected, filtered in zip(expected_value, filtered_multi_sheet_excel_custom_dn) ] ) "} {"text": "import os import pathlib from datetime import datetime from importlib import util from time import sleep import modin.pandas as modin_pd import numpy as np import pandas as pd import pytest from modin.pandas.test.utils import df_equals from pandas.testing import assert_frame_equal from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.data.parquet import ParquetDataNode from src.taipy.core.exceptions.exceptions import ( InvalidExposedType, NoData, UnknownCompressionAlgorithm, UnknownParquetEngine, ) from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import InvalidConfigurationId @pytest.fixture(scope=\"function\", autouse=True) def cleanup(): yield path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.parquet\") if os.path.isfile(path): os.remove(path) class MyCustomObject: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class MyOtherCustomObject: def __init__(self, id, sentence): self.id = id self.sentence = sentence def create_custom_class(**kwargs): return MyOtherCustomObject(id=kwargs[\"id\"], sentence=kwargs[\"text\"]) class TestParquetDataNode: __engine = [\"pyarrow\"] if util.find_spec(\"fastparquet\"): __engine.append(\"fastparquet\") def test_create(self): path = \"data/node/path\" compression = \"snappy\" dn = ParquetDataNode( \"foo_bar\", Scope.SCENARIO, properties={\"path\": path, \"compression\": compression, \"name\": \"super name\"} ) assert isinstance(dn, ParquetDataNode) assert dn.storage_type() == \"parquet\" assert dn.config_id == \"foo_bar\" assert dn.name == \"super name\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.last_edit_date is None assert dn.job_ids == [] assert not dn.is_ready_for_reading assert dn.path == path assert dn.exposed_type == \"pandas\" assert dn.compression == \"snappy\" assert dn.engine == \"pyarrow\" with pytest.raises(InvalidConfigurationId): dn = 
ParquetDataNode(\"foo bar\", Scope.SCENARIO, properties={\"path\": path, \"name\": \"super name\"}) def test_get_user_properties(self, parquet_file_path): dn_1 = ParquetDataNode(\"dn_1\", Scope.SCENARIO, properties={\"path\": parquet_file_path}) assert dn_1._get_user_properties() == {} dn_2 = ParquetDataNode( \"dn_2\", Scope.SCENARIO, properties={ \"exposed_type\": \"numpy\", \"default_data\": \"foo\", \"default_path\": parquet_file_path, \"engine\": \"pyarrow\", \"compression\": \"snappy\", \"read_kwargs\": {\"columns\": [\"a\", \"b\"]}, \"write_kwargs\": {\"index\": False}, \"foo\": \"bar\", }, ) # exposed_type, default_data, default_path, path, engine, compression, read_kwargs, write_kwargs # are filtered out assert dn_2._get_user_properties() == {\"foo\": \"bar\"} def test_new_parquet_data_node_with_existing_file_is_ready_for_reading(self, parquet_file_path): not_ready_dn_cfg = Config.configure_data_node( \"not_ready_data_node_config_id\", \"parquet\", path=\"NOT_EXISTING.parquet\" ) not_ready_dn = _DataManager._bulk_get_or_create([not_ready_dn_cfg])[not_ready_dn_cfg] assert not not_ready_dn.is_ready_for_reading ready_dn_cfg = Config.configure_data_node(\"ready_data_node_config_id\", \"parquet\", path=parquet_file_path) ready_dn = _DataManager._bulk_get_or_create([ready_dn_cfg])[ready_dn_cfg] assert ready_dn.is_ready_for_reading @pytest.mark.parametrize( [\"properties\", \"exists\"], [ ({}, False), ({\"default_data\": {\"a\": [\"foo\", \"bar\"]}}, True), ], ) def test_create_with_default_data(self, properties, exists): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) assert os.path.exists(dn.path) is exists @pytest.mark.parametrize(\"engine\", __engine) def test_read_file(self, engine, parquet_file_path): not_existing_parquet = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": \"nonexistent.parquet\", \"engine\": engine} ) with pytest.raises(NoData): assert not_existing_parquet.read() is None not_existing_parquet.read_or_raise() df = pd.read_parquet(parquet_file_path) # Create ParquetDataNode without exposed_type (Default is pandas.DataFrame) parquet_data_node_as_pandas = ParquetDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"engine\": engine} ) data_pandas = parquet_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 2 assert data_pandas.equals(df) assert np.array_equal(data_pandas.to_numpy(), df.to_numpy()) # Create ParquetDataNode with modin exposed_type parquet_data_node_as_modin = ParquetDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"modin\", \"engine\": engine} ) data_modin = parquet_data_node_as_modin.read() assert isinstance(data_modin, modin_pd.DataFrame) assert len(data_modin) == 2 assert data_modin.equals(df) assert np.array_equal(data_modin.to_numpy(), df.to_numpy()) # Create ParquetDataNode with numpy exposed_type parquet_data_node_as_numpy = ParquetDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"numpy\", \"engine\": engine} ) data_numpy = parquet_data_node_as_numpy.read() assert isinstance(data_numpy, np.ndarray) assert len(data_numpy) == 2 assert np.array_equal(data_numpy, df.to_numpy()) @pytest.mark.parametrize(\"engine\", __engine) def test_read_folder(self, engine): parquet_folder_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/parquet_example\") df = pd.read_parquet(parquet_folder_path) parquet_data_node_as_pandas 
= ParquetDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": parquet_folder_path, \"engine\": engine} ) data_pandas = parquet_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 5 assert data_pandas.equals(df) assert np.array_equal(data_pandas.to_numpy(), df.to_numpy()) def test_set_path(self): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": \"foo.parquet\"}) assert dn.path == \"foo.parquet\" dn.path = \"bar.parquet\" assert dn.path == \"bar.parquet\" @pytest.mark.parametrize(\"engine\", __engine) def test_read_write_after_modify_path(self, engine): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.parquet\") new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.parquet\") dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"engine\": engine}) read_data = dn.read() assert read_data is not None dn.path = new_path with pytest.raises(FileNotFoundError): dn.read() dn.write(read_data) assert dn.read().equals(read_data) def test_read_custom_exposed_type(self): example_parquet_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.parquet\") dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": example_parquet_path, \"exposed_type\": MyCustomObject} ) assert all([isinstance(obj, MyCustomObject) for obj in dn.read()]) dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": example_parquet_path, \"exposed_type\": create_custom_class} ) assert all([isinstance(obj, MyOtherCustomObject) for obj in dn.read()]) def test_raise_error_unknown_parquet_engine(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.parquet\") with pytest.raises(UnknownParquetEngine): ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"engine\": \"foo\"}) def test_raise_error_unknown_compression_algorithm(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.parquet\") with pytest.raises(UnknownCompressionAlgorithm): ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"compression\": \"foo\"}) def test_raise_error_invalid_exposed_type(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.parquet\") with pytest.raises(InvalidExposedType): ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"foo\"}) def test_read_empty_data(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.parquet\")) empty_df = pd.DataFrame([]) empty_df.to_parquet(temp_file_path) # Pandas dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"pandas\"}) assert dn.read().equals(empty_df) # Numpy dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"numpy\"}) assert np.array_equal(dn.read(), empty_df.to_numpy()) # Custom dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": MyCustomObject}) assert dn.read() == [] def test_get_system_file_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.parquet\")) pd.DataFrame([]).to_parquet(temp_file_path) dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"pandas\"}) dn.write(pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})) 
previous_edit_date = dn.last_edit_date sleep(0.1) pd.DataFrame(pd.DataFrame(data={\"col1\": [5, 6], \"col2\": [7, 8]})).to_parquet(temp_file_path) new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert previous_edit_date < dn.last_edit_date assert new_edit_date == dn.last_edit_date sleep(0.1) dn.write(pd.DataFrame(data={\"col1\": [9, 10], \"col2\": [10, 12]})) assert new_edit_date < dn.last_edit_date os.unlink(temp_file_path) def test_get_system_folder_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_folder_path = tmpdir_factory.mktemp(\"data\").strpath temp_file_path = os.path.join(temp_folder_path, \"temp.parquet\") pd.DataFrame([]).to_parquet(temp_file_path) dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_folder_path}) initial_edit_date = dn.last_edit_date # Sleep so that the file can be created successfully on Ubuntu sleep(0.1) pd.DataFrame(pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})).to_parquet(temp_file_path) first_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert dn.last_edit_date > initial_edit_date assert dn.last_edit_date == first_edit_date sleep(0.1) pd.DataFrame(pd.DataFrame(data={\"col1\": [5, 6], \"col2\": [7, 8]})).to_parquet(temp_file_path) second_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert dn.last_edit_date > first_edit_date assert dn.last_edit_date == second_edit_date os.unlink(temp_file_path) @pytest.mark.skipif(not util.find_spec(\"fastparquet\"), reason=\"Append parquet requires fastparquet to be installed\") @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ], ) def test_append_pandas(self, parquet_file_path, default_data_frame, content): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": parquet_file_path}) assert_frame_equal(dn.read(), default_data_frame) dn.append(content) assert_frame_equal( dn.read(), pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index(drop=True), ) @pytest.mark.skipif(not util.find_spec(\"fastparquet\"), reason=\"Append parquet requires fastparquet to be installed\") @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ], ) def test_append_modin(self, parquet_file_path, default_data_frame, content): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"modin\"}) df_equals(dn.read(), modin_pd.DataFrame(default_data_frame)) dn.append(content) df_equals( dn.read(), modin_pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index( drop=True ), ) @pytest.mark.parametrize( \"data\", [ [{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), modin_pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), ], ) def test_write_to_disk(self, tmpdir_factory, data): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.parquet\")) dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path}) dn.write(data) assert pathlib.Path(temp_file_path).exists() assert isinstance(dn.read(), pd.DataFrame) def 
test_filter_pandas_exposed_type(self, parquet_file_path): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"pandas\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_modin_exposed_type(self, parquet_file_path): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"modin\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(modin_pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = modin_pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) 
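# Filtering on the bar column follows the same pattern: dn.filter() and boolean-mask
# indexing should return the same rows. The row where bar is missing (NaN) does not
# match the equality, so only the three rows with bar == 2 are expected.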
filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = modin_pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_numpy_exposed_type(self, parquet_file_path): dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": parquet_file_path, \"exposed_type\": \"numpy\"}) dn.write( [ [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], ] ) # Test datanode indexing and slicing assert np.array_equal(dn[0], np.array([1, 1])) assert np.array_equal(dn[1], np.array([1, 2])) assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]])) # Test filter data assert np.array_equal(dn.filter((0, 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn.filter((0, 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn.filter((1, 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]])) assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]])) assert np.array_equal( dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR), np.array([[1, 1], [1, 2], [2, 1], [2, 2]]), ) assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]])) @pytest.mark.parametrize(\"engine\", __engine) def test_pandas_parquet_config_kwargs(self, engine, tmpdir_factory): read_kwargs = {\"filters\": [(\"integer\", \"<\", 10)], \"columns\": [\"integer\"]} temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.parquet\")) dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"engine\": engine, \"read_kwargs\": read_kwargs} ) df = pd.read_csv(os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\")) dn.write(df) assert set(pd.read_parquet(temp_file_path).columns) == {\"id\", \"integer\", \"text\"} assert set(dn.read().columns) == set(read_kwargs[\"columns\"]) # !!! filter doesn't work with `fastparquet` without partition_cols if engine == \"pyarrow\": assert len(dn.read()) != len(df) assert len(dn.read()) == 2 @pytest.mark.parametrize(\"engine\", __engine) def test_kwarg_precedence(self, engine, tmpdir_factory, default_data_frame): # Precedence: # 1. Class read/write methods # 2. Defined in read_kwargs and write_kwargs, in properties # 3. 
Defined top-level in properties temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.parquet\")) temp_file_2_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp_2.parquet\")) df = default_data_frame.copy(deep=True) # Write # 3 comp3 = \"snappy\" dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"engine\": engine, \"compression\": comp3} ) dn.write(df) df.to_parquet(path=temp_file_2_path, compression=comp3, engine=engine) with open(temp_file_2_path, \"rb\") as tf: with pathlib.Path(temp_file_path).open(\"rb\") as f: assert f.read() == tf.read() # 3 and 2 comp2 = \"gzip\" dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={ \"path\": temp_file_path, \"engine\": engine, \"compression\": comp3, \"write_kwargs\": {\"compression\": comp2}, }, ) dn.write(df) df.to_parquet(path=temp_file_2_path, compression=comp2, engine=engine) with open(temp_file_2_path, \"rb\") as tf: with pathlib.Path(temp_file_path).open(\"rb\") as f: assert f.read() == tf.read() # 3, 2 and 1 comp1 = \"brotli\" dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={ \"path\": temp_file_path, \"engine\": engine, \"compression\": comp3, \"write_kwargs\": {\"compression\": comp2}, }, ) dn.write_with_kwargs(df, compression=comp1) df.to_parquet(path=temp_file_2_path, compression=comp1, engine=engine) with open(temp_file_2_path, \"rb\") as tf: with pathlib.Path(temp_file_path).open(\"rb\") as f: assert f.read() == tf.read() # Read df.to_parquet(temp_file_path, engine=engine) # 2 cols2 = [\"a\", \"b\"] dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"engine\": engine, \"read_kwargs\": {\"columns\": cols2}}, ) assert set(dn.read().columns) == set(cols2) # 1 cols1 = [\"a\"] dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"engine\": engine, \"read_kwargs\": {\"columns\": cols2}}, ) assert set(dn.read_with_kwargs(columns=cols1).columns) == set(cols1) def test_partition_cols(self, tmpdir_factory, default_data_frame: pd.DataFrame): temp_dir_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp_dir\")) write_kwargs = {\"partition_cols\": [\"a\", \"b\"]} dn = ParquetDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": temp_dir_path, \"write_kwargs\": write_kwargs} ) # type: ignore dn.write(default_data_frame) assert pathlib.Path(temp_dir_path).is_dir() # dtypes change during round-trip with partition_cols pd.testing.assert_frame_equal( dn.read().sort_index(axis=1), default_data_frame.sort_index(axis=1), check_dtype=False, check_categorical=False, ) def test_read_with_kwargs_never_written(self): path = \"data/node/path\" dn = ParquetDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path}) assert dn.read_with_kwargs() is None "} {"text": "from importlib import util from unittest.mock import patch import modin.pandas as modin_pd import numpy as np import pandas as pd import pytest from modin.pandas.test.utils import df_equals from pandas.testing import assert_frame_equal from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.data.sql_table import SQLTableDataNode from src.taipy.core.exceptions.exceptions import InvalidExposedType, MissingRequiredProperty from taipy.config.common.scope import Scope class MyCustomObject: def __init__(self, foo=None, bar=None, *args, **kwargs): self.foo = foo self.bar = bar self.args = args self.kwargs = kwargs class TestSQLTableDataNode: __pandas_properties = [ { \"db_name\": 
\"taipy\", \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", \"other\": \"value\", }, }, ] __modin_properties = [ { \"db_name\": \"taipy\", \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", \"other\": \"value\", }, }, ] if util.find_spec(\"pyodbc\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mssql\", \"table_name\": \"example\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mssql\", \"table_name\": \"example\", \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) if util.find_spec(\"pymysql\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mysql\", \"table_name\": \"example\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"mysql\", \"table_name\": \"example\", \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) if util.find_spec(\"psycopg2\"): __pandas_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"postgresql\", \"table_name\": \"example\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) __modin_properties.append( { \"db_username\": \"sa\", \"db_password\": \"Passw0rd\", \"db_name\": \"taipy\", \"db_engine\": \"postgresql\", \"table_name\": \"example\", \"exposed_type\": \"modin\", \"db_extra_args\": { \"TrustServerCertificate\": \"yes\", }, }, ) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @pytest.mark.parametrize(\"modin_properties\", __modin_properties) def test_create(self, pandas_properties, modin_properties): dn = SQLTableDataNode( \"foo_bar\", Scope.SCENARIO, properties=pandas_properties, ) assert isinstance(dn, SQLTableDataNode) assert dn.storage_type() == \"sql_table\" assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.exposed_type == \"pandas\" assert dn.table_name == \"example\" assert dn._get_base_read_query() == \"SELECT * FROM example\" dn = SQLTableDataNode( \"foo_bar\", Scope.SCENARIO, properties=modin_properties, ) assert isinstance(dn, SQLTableDataNode) assert dn.storage_type() == \"sql_table\" assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.exposed_type == \"modin\" assert dn.table_name == \"example\" assert dn._get_base_read_query() == \"SELECT * FROM example\" @pytest.mark.parametrize(\"properties\", __pandas_properties) def test_get_user_properties(self, properties): custom_properties = properties.copy() custom_properties[\"foo\"] = \"bar\" dn = SQLTableDataNode( \"foo_bar\", Scope.SCENARIO, properties=custom_properties, ) assert dn._get_user_properties() == {\"foo\": \"bar\"} @pytest.mark.parametrize( \"properties\", [ {}, {\"db_username\": \"foo\"}, {\"db_username\": \"foo\", \"db_password\": \"foo\"}, 
{\"db_username\": \"foo\", \"db_password\": \"foo\", \"db_name\": \"foo\"}, ], ) def test_create_with_missing_parameters(self, properties): with pytest.raises(MissingRequiredProperty): SQLTableDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(MissingRequiredProperty): SQLTableDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) @patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._read_as\", return_value=\"custom\") @patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._read_as_pandas_dataframe\", return_value=\"pandas\") @patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._read_as_modin_dataframe\", return_value=\"modin\") @patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._read_as_numpy\", return_value=\"numpy\") @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @pytest.mark.parametrize(\"modin_properties\", __modin_properties) def test_read( self, mock_read_as, mock_read_as_pandas_dataframe, mock_read_as_modin_dataframe, mock_read_as_numpy, pandas_properties, modin_properties, ): custom_properties = pandas_properties.copy() # Create SQLTableDataNode without exposed_type (Default is pandas.DataFrame) sql_data_node_as_pandas = SQLTableDataNode( \"foo\", Scope.SCENARIO, properties=pandas_properties, ) assert sql_data_node_as_pandas.read() == \"pandas\" custom_properties.pop(\"db_extra_args\") custom_properties[\"exposed_type\"] = MyCustomObject # Create the same SQLTableDataNode but with custom exposed_type sql_data_node_as_custom_object = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) assert sql_data_node_as_custom_object.read() == \"custom\" # Create the same SQLDataSource but with numpy exposed_type custom_properties[\"exposed_type\"] = \"numpy\" sql_data_source_as_numpy_object = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) assert sql_data_source_as_numpy_object.read() == \"numpy\" # Create the same SQLDataSource but with modin exposed_type sql_data_source_as_modin_object = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=modin_properties) assert sql_data_source_as_modin_object.properties[\"exposed_type\"] == \"modin\" assert sql_data_source_as_modin_object.read() == \"modin\" @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) def test_read_as(self, pandas_properties): custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") custom_properties[\"exposed_type\"] = MyCustomObject sql_data_node = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.return_value = [ {\"foo\": \"baz\", \"bar\": \"qux\"}, {\"foo\": \"quux\", \"bar\": \"quuz\"}, {\"foo\": \"corge\"}, {\"bar\": \"grault\"}, {\"KWARGS_KEY\": \"KWARGS_VALUE\"}, {}, ] data = sql_data_node._read_as() assert isinstance(data, list) assert isinstance(data[0], MyCustomObject) assert isinstance(data[1], MyCustomObject) assert isinstance(data[2], MyCustomObject) assert isinstance(data[3], MyCustomObject) assert isinstance(data[4], MyCustomObject) assert isinstance(data[5], MyCustomObject) assert data[0].foo == \"baz\" assert data[0].bar == \"qux\" assert data[1].foo == \"quux\" assert data[1].bar == \"quuz\" assert data[2].foo == \"corge\" assert data[2].bar is None assert data[3].foo is None assert data[3].bar == \"grault\" assert data[4].foo is None assert data[4].bar is None assert 
data[4].kwargs[\"KWARGS_KEY\"] == \"KWARGS_VALUE\" assert data[5].foo is None assert data[5].bar is None assert len(data[5].args) == 0 assert len(data[5].kwargs) == 0 with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock: cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.return_value = [] data_2 = sql_data_node._read_as() assert isinstance(data_2, list) assert len(data_2) == 0 @pytest.mark.parametrize( \"data,written_data,called_func\", [ ([{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}], [{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}], \"__insert_dicts\"), ({\"a\": 1, \"b\": 2}, [{\"a\": 1, \"b\": 2}], \"__insert_dicts\"), ([(1, 2), (3, 4)], [(1, 2), (3, 4)], \"__insert_tuples\"), ([[1, 2], [3, 4]], [[1, 2], [3, 4]], \"__insert_tuples\"), ((1, 2), [(1, 2)], \"__insert_tuples\"), ([1, 2, 3, 4], [(1,), (2,), (3,), (4,)], \"__insert_tuples\"), (\"foo\", [(\"foo\",)], \"__insert_tuples\"), (None, [(None,)], \"__insert_tuples\"), (np.array([1, 2, 3, 4]), [(1,), (2,), (3,), (4,)], \"__insert_tuples\"), (np.array([np.array([1, 2]), np.array([3, 4])]), [[1, 2], [3, 4]], \"__insert_tuples\"), ], ) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) def test_write_1(self, data, written_data, called_func, pandas_properties): custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock, patch( \"src.taipy.core.data.sql_table.SQLTableDataNode._create_table\" ) as create_table_mock: cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.side_effect = None with patch(f\"src.taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode{called_func}\") as mck: dn.write(data) mck.assert_called_once_with(written_data, create_table_mock.return_value, cursor_mock, True) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) def test_raise_error_invalid_exposed_type(self, pandas_properties): custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") custom_properties[\"exposed_type\"] = \"foo\" with pytest.raises(InvalidExposedType): SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @pytest.mark.parametrize(\"modin_properties\", __modin_properties) def test_write_dataframe(self, pandas_properties, modin_properties): # test write pandas dataframe custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [5, 6, 7, 8]}) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock, patch( \"src.taipy.core.data.sql_table.SQLTableDataNode._create_table\" ): cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.side_effect = None with patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode__insert_dataframe\") as mck: dn.write(df) assert mck.call_args[0][0].equals(df) # test write modin dataframe custom_properties = modin_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) df = modin_pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [5, 6, 7, 8]}) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock, patch( 
\"src.taipy.core.data.sql_table.SQLTableDataNode._create_table\" ): cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.side_effect = None with patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode__insert_dataframe\") as mck: dn.write(df) assert mck.call_args[0][0].equals(df) @pytest.mark.parametrize( \"data\", [ [], np.array([]), ], ) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) def test_write_empty_list(self, data, pandas_properties): custom_properties = pandas_properties.copy() custom_properties.pop(\"db_extra_args\") dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock, patch( \"src.taipy.core.data.sql_table.SQLTableDataNode._create_table\" ) as create_table_mock: cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.side_effect = None with patch(\"src.taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode__delete_all_rows\") as mck: dn.write(data) mck.assert_called_once_with(create_table_mock.return_value, cursor_mock, True) @pytest.mark.parametrize(\"pandas_properties\", __pandas_properties) @patch(\"pandas.read_sql_query\") def test_engine_cache(self, _, pandas_properties): dn = SQLTableDataNode( \"foo\", Scope.SCENARIO, properties=pandas_properties, ) assert dn._engine is None with patch(\"sqlalchemy.engine.Engine.connect\") as engine_mock, patch( \"src.taipy.core.data.sql_table.SQLTableDataNode._create_table\" ): cursor_mock = engine_mock.return_value.__enter__.return_value cursor_mock.execute.side_effect = None dn.read() assert dn._engine is not None dn.db_username = \"foo\" assert dn._engine is None dn.write(1) assert dn._engine is not None dn.some_random_attribute_that_does_not_related_to_engine = \"foo\" assert dn._engine is not None @pytest.mark.parametrize( \"tmp_sqlite_path\", [ \"tmp_sqlite_db_file_path\", \"tmp_sqlite_sqlite3_file_path\", ], ) def test_sqlite_read_file_with_different_extension(self, tmp_sqlite_path, request): tmp_sqlite_path = request.getfixturevalue(tmp_sqlite_path) folder_path, db_name, file_extension = tmp_sqlite_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, } dn = SQLTableDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) data = dn.read() assert data.equals(pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}])) def test_sqlite_append_pandas(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, } dn = SQLTableDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) original_data = pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]) data = dn.read() assert_frame_equal(data, original_data) append_data_1 = pd.DataFrame([{\"foo\": 5, \"bar\": 6}, {\"foo\": 7, \"bar\": 8}]) dn.append(append_data_1) assert_frame_equal(dn.read(), pd.concat([original_data, append_data_1]).reset_index(drop=True)) def test_sqlite_append_modin(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, 
\"sqlite_file_extension\": file_extension, \"exposed_type\": \"modin\", } dn = SQLTableDataNode(\"sqlite_dn\", Scope.SCENARIO, properties=properties) original_data = modin_pd.DataFrame([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]) data = dn.read() df_equals(data, original_data) append_data_1 = modin_pd.DataFrame([{\"foo\": 5, \"bar\": 6}, {\"foo\": 7, \"bar\": 8}]) dn.append(append_data_1) df_equals(dn.read(), modin_pd.concat([original_data, append_data_1]).reset_index(drop=True)) def test_filter_pandas_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"pandas\", } dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, 2, 2])) assert dn[\"bar\"].equals(pd.Series([1, 2, 3, 1, 2, 3])) assert dn[:2].equals(pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = pd.DataFrame([{\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, ] ) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_modin_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"modin\", } dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, 2, 2])) assert dn[\"bar\"].equals(pd.Series([1, 2, 3, 1, 2, 3])) assert dn[:2].equals(modin_pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) 
filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = modin_pd.DataFrame([{\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = modin_pd.DataFrame([{\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = modin_pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, ] ) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_numpy_exposed_type(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"numpy\", } dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=properties) dn.write( pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1, \"bar\": 3}, {\"foo\": 2, \"bar\": 1}, {\"foo\": 2, \"bar\": 2}, {\"foo\": 2, \"bar\": 3}, ] ) ) # Test datanode indexing and slicing assert np.array_equal(dn[0], np.array([1, 1])) assert np.array_equal(dn[1], np.array([1, 2])) assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]])) # Test filter data assert np.array_equal(dn.filter((\"foo\", 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn.filter((\"foo\", 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn.filter((\"bar\", 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]])) assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]])) assert np.array_equal( dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR), np.array([[1, 1], [1, 2], [2, 1], [2, 2]]), ) assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]])) def test_filter_does_not_read_all_entities(self, tmp_sqlite_sqlite3_file_path): folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path properties = { \"db_engine\": \"sqlite\", \"table_name\": \"example\", \"db_name\": db_name, \"sqlite_folder_path\": folder_path, \"sqlite_file_extension\": file_extension, \"exposed_type\": \"numpy\", } dn = SQLTableDataNode(\"foo\", Scope.SCENARIO, properties=properties) # SQLTableDataNode.filter() should not call the MongoCollectionDataNode._read() method with patch.object(SQLTableDataNode, \"_read\") as read_mock: dn.filter((\"foo\", 1, Operator.EQUAL)) dn.filter((\"bar\", 2, Operator.NOT_EQUAL)) dn.filter([(\"bar\", 1, 
Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) assert read_mock[\"_read\"].call_count == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os from datetime import datetime, timedelta from time import sleep from unittest import mock import pytest import src.taipy.core as tp from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory from src.taipy.core.config.job_config import JobConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.data_node import DataNode from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import DataNodeIsBeingEdited, NoData from src.taipy.core.job.job_id import JobId from taipy.config import Config from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId from .utils import FakeDataNode def funct_a_b(input: str): print(\"task_a_b\") return \"B\" def funct_b_c(input: str): print(\"task_b_c\") return \"C\" def funct_b_d(input: str): print(\"task_b_d\") return \"D\" class TestDataNode: def test_create_with_default_values(self): dn = DataNode(\"foo_bar\") assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.name is None assert dn.owner_id is None assert dn.parent_ids == set() assert dn.last_edit_date is None assert dn.job_ids == [] assert not dn.is_ready_for_reading assert len(dn.properties) == 0 def test_create(self): a_date = datetime.now() dn = DataNode( \"foo_bar\", Scope.SCENARIO, DataNodeId(\"an_id\"), \"a_scenario_id\", {\"a_parent_id\"}, a_date, [dict(job_id=\"a_job_id\")], edit_in_progress=False, prop=\"erty\", name=\"a name\", ) assert dn.config_id == \"foo_bar\" assert dn.scope == Scope.SCENARIO assert dn.id == \"an_id\" assert dn.name == \"a name\" assert dn.owner_id == \"a_scenario_id\" assert dn.parent_ids == {\"a_parent_id\"} assert dn.last_edit_date == a_date assert dn.job_ids == [\"a_job_id\"] assert dn.is_ready_for_reading assert len(dn.properties) == 2 assert dn.properties == {\"prop\": \"erty\", \"name\": \"a name\"} with pytest.raises(InvalidConfigurationId): DataNode(\"foo bar\") def test_read_write(self): dn = FakeDataNode(\"foo_bar\") with pytest.raises(NoData): assert dn.read() is None dn.read_or_raise() assert dn.write_has_been_called == 0 assert dn.read_has_been_called == 0 assert not dn.is_ready_for_reading assert dn.last_edit_date is None assert dn.job_ids == [] assert dn.edits == [] dn.write(\"Any data\") assert dn.write_has_been_called == 1 assert dn.read_has_been_called == 0 assert dn.last_edit_date is not None first_edition = dn.last_edit_date assert dn.is_ready_for_reading assert dn.job_ids == [] assert len(dn.edits) == 1 assert dn.get_last_edit()[\"timestamp\"] == dn.last_edit_date sleep(0.1) dn.write(\"Any other data\", job_id := JobId(\"a_job_id\")) assert dn.write_has_been_called == 2 assert dn.read_has_been_called == 0 second_edition 
= dn.last_edit_date assert first_edition < second_edition assert dn.is_ready_for_reading assert dn.job_ids == [job_id] assert len(dn.edits) == 2 assert dn.get_last_edit()[\"timestamp\"] == dn.last_edit_date dn.read() assert dn.write_has_been_called == 2 assert dn.read_has_been_called == 1 second_edition = dn.last_edit_date assert first_edition < second_edition assert dn.is_ready_for_reading assert dn.job_ids == [job_id] def test_lock_initialization(self): dn = InMemoryDataNode(\"dn\", Scope.SCENARIO) assert not dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None def test_locked_dn_unlockable_only_by_same_editor(self): dn = InMemoryDataNode(\"dn\", Scope.SCENARIO) dn.lock_edit(\"user_1\") assert dn.edit_in_progress assert dn._editor_id == \"user_1\" assert dn._editor_expiration_date is not None with pytest.raises(DataNodeIsBeingEdited): dn.lock_edit(\"user_2\") with pytest.raises(DataNodeIsBeingEdited): dn.unlock_edit(\"user_2\") dn.unlock_edit(\"user_1\") assert not dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None def test_none_editor_can_lock_a_locked_dn(self): dn = InMemoryDataNode(\"dn\", Scope.SCENARIO) dn.lock_edit(\"user\") assert dn.edit_in_progress assert dn._editor_id == \"user\" assert dn._editor_expiration_date is not None dn.lock_edit() assert dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None def test_none_editor_can_unlock_a_locked_dn(self): dn = InMemoryDataNode(\"dn\", Scope.SCENARIO) dn.lock_edit(\"user\") assert dn.edit_in_progress assert dn._editor_id == \"user\" assert dn._editor_expiration_date is not None dn.unlock_edit() assert not dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None dn.lock_edit() assert dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None dn.unlock_edit() assert not dn.edit_in_progress assert dn._editor_id is None assert dn._editor_expiration_date is None def test_ready_for_reading(self): dn = InMemoryDataNode(\"foo_bar\", Scope.CYCLE) assert dn.last_edit_date is None assert not dn.is_ready_for_reading assert dn.job_ids == [] dn.lock_edit() assert dn.last_edit_date is None assert not dn.is_ready_for_reading assert dn.job_ids == [] dn.unlock_edit() assert dn.last_edit_date is None assert not dn.is_ready_for_reading assert dn.job_ids == [] dn.lock_edit() assert dn.last_edit_date is None assert not dn.is_ready_for_reading assert dn.job_ids == [] dn.write(\"toto\", job_id := JobId(\"a_job_id\")) assert dn.last_edit_date is not None assert dn.is_ready_for_reading assert dn.job_ids == [job_id] def test_is_valid_no_validity_period(self): # Test Never been writen dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"id\"), \"name\", \"owner_id\") assert not dn.is_valid # test has been writen dn.write(\"My data\") assert dn.is_valid def test_is_valid_with_30_min_validity_period(self): # Test Never been writen dn = InMemoryDataNode( \"foo\", Scope.SCENARIO, DataNodeId(\"id\"), \"name\", \"owner_id\", validity_period=timedelta(minutes=30) ) assert dn.is_valid is False # Has been writen less than 30 minutes ago dn.write(\"My data\") assert dn.is_valid is True # Has been writen more than 30 minutes ago dn.last_edit_date = datetime.now() + timedelta(days=-1) assert dn.is_valid is False def test_is_valid_with_5_days_validity_period(self): # Test Never been writen dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, validity_period=timedelta(days=5)) assert dn.is_valid 
is False # Has been writen less than 30 minutes ago dn.write(\"My data\") assert dn.is_valid is True # Has been writen more than 30 minutes ago dn._last_edit_date = datetime.now() - timedelta(days=6) _DataManager()._set(dn) assert dn.is_valid is False def test_is_up_to_date(self, current_datetime): dn_confg_1 = Config.configure_in_memory_data_node(\"dn_1\") dn_confg_2 = Config.configure_in_memory_data_node(\"dn_2\") dn_confg_3 = Config.configure_in_memory_data_node(\"dn_3\", scope=Scope.GLOBAL) task_config_1 = Config.configure_task(\"t1\", print, [dn_confg_1], [dn_confg_2]) task_config_2 = Config.configure_task(\"t2\", print, [dn_confg_2], [dn_confg_3]) scenario_config = Config.configure_scenario(\"sc\", [task_config_1, task_config_2]) scenario_1 = tp.create_scenario(scenario_config) assert len(_DataManager._get_all()) == 3 dn_1_1 = scenario_1.data_nodes[\"dn_1\"] dn_2_1 = scenario_1.data_nodes[\"dn_2\"] dn_3_1 = scenario_1.data_nodes[\"dn_3\"] assert dn_1_1.last_edit_date is None assert dn_2_1.last_edit_date is None assert dn_3_1.last_edit_date is None dn_1_1.last_edit_date = current_datetime + timedelta(1) dn_2_1.last_edit_date = current_datetime + timedelta(2) dn_3_1.last_edit_date = current_datetime + timedelta(3) assert dn_1_1.is_up_to_date assert dn_2_1.is_up_to_date assert dn_3_1.is_up_to_date dn_2_1.last_edit_date = current_datetime + timedelta(4) assert dn_1_1.is_up_to_date assert dn_2_1.is_up_to_date assert not dn_3_1.is_up_to_date dn_1_1.last_edit_date = current_datetime + timedelta(5) assert dn_1_1.is_up_to_date assert not dn_2_1.is_up_to_date assert not dn_3_1.is_up_to_date dn_1_1.last_edit_date = current_datetime + timedelta(1) dn_2_1.last_edit_date = current_datetime + timedelta(2) dn_3_1.last_edit_date = current_datetime + timedelta(3) def test_is_up_to_date_across_scenarios(self, current_datetime): dn_confg_1 = Config.configure_in_memory_data_node(\"dn_1\", scope=Scope.SCENARIO) dn_confg_2 = Config.configure_in_memory_data_node(\"dn_2\", scope=Scope.SCENARIO) dn_confg_3 = Config.configure_in_memory_data_node(\"dn_3\", scope=Scope.GLOBAL) task_config_1 = Config.configure_task(\"t1\", print, [dn_confg_1], [dn_confg_2]) task_config_2 = Config.configure_task(\"t2\", print, [dn_confg_2], [dn_confg_3]) scenario_config = Config.configure_scenario(\"sc\", [task_config_1, task_config_2]) scenario_1 = tp.create_scenario(scenario_config) scenario_2 = tp.create_scenario(scenario_config) assert len(_DataManager._get_all()) == 5 dn_1_1 = scenario_1.data_nodes[\"dn_1\"] dn_2_1 = scenario_1.data_nodes[\"dn_2\"] dn_1_2 = scenario_2.data_nodes[\"dn_1\"] dn_2_2 = scenario_2.data_nodes[\"dn_2\"] dn_3 = scenario_1.data_nodes[\"dn_3\"] assert dn_3 == scenario_2.data_nodes[\"dn_3\"] assert dn_1_1.last_edit_date is None assert dn_2_1.last_edit_date is None assert dn_1_2.last_edit_date is None assert dn_2_2.last_edit_date is None assert dn_3.last_edit_date is None dn_1_1.last_edit_date = current_datetime + timedelta(1) dn_2_1.last_edit_date = current_datetime + timedelta(2) dn_1_2.last_edit_date = current_datetime + timedelta(3) dn_2_2.last_edit_date = current_datetime + timedelta(4) dn_3.last_edit_date = current_datetime + timedelta(5) assert dn_1_1.is_up_to_date assert dn_2_1.is_up_to_date assert dn_1_2.is_up_to_date assert dn_2_2.is_up_to_date assert dn_3.is_up_to_date dn_2_1.last_edit_date = current_datetime + timedelta(6) assert dn_1_1.is_up_to_date assert dn_2_1.is_up_to_date assert dn_1_2.is_up_to_date assert dn_2_2.is_up_to_date assert not dn_3.is_up_to_date dn_2_1.last_edit_date = 
current_datetime + timedelta(2) dn_2_2.last_edit_date = current_datetime + timedelta(6) assert dn_1_1.is_up_to_date assert dn_2_1.is_up_to_date assert dn_1_2.is_up_to_date assert dn_2_2.is_up_to_date assert not dn_3.is_up_to_date dn_2_2.last_edit_date = current_datetime + timedelta(4) dn_1_1.last_edit_date = current_datetime + timedelta(6) assert dn_1_1.is_up_to_date assert not dn_2_1.is_up_to_date assert dn_1_2.is_up_to_date assert dn_2_2.is_up_to_date assert not dn_3.is_up_to_date dn_1_2.last_edit_date = current_datetime + timedelta(6) assert dn_1_1.is_up_to_date assert not dn_2_1.is_up_to_date assert dn_1_2.is_up_to_date assert not dn_2_2.is_up_to_date assert not dn_3.is_up_to_date def test_do_not_recompute_data_node_valid_but_continue_sequence_execution(self): Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE) a = Config.configure_data_node(\"A\", \"pickle\", default_data=\"A\") b = Config.configure_data_node(\"B\", \"pickle\") c = Config.configure_data_node(\"C\", \"pickle\") d = Config.configure_data_node(\"D\", \"pickle\") task_a_b = Config.configure_task(\"task_a_b\", funct_a_b, input=a, output=b, skippable=True) task_b_c = Config.configure_task(\"task_b_c\", funct_b_c, input=b, output=c) task_b_d = Config.configure_task(\"task_b_d\", funct_b_d, input=b, output=d) scenario_cfg = Config.configure_scenario(\"scenario\", [task_a_b, task_b_c, task_b_d]) _OrchestratorFactory._build_dispatcher() scenario = tp.create_scenario(scenario_cfg) scenario.submit() assert scenario.A.read() == \"A\" assert scenario.B.read() == \"B\" assert scenario.C.read() == \"C\" assert scenario.D.read() == \"D\" scenario.submit() assert len(tp.get_jobs()) == 6 jobs_and_status = [(job.task.config_id, job.status) for job in tp.get_jobs()] assert (\"task_a_b\", tp.Status.COMPLETED) in jobs_and_status assert (\"task_a_b\", tp.Status.SKIPPED) in jobs_and_status assert (\"task_b_c\", tp.Status.COMPLETED) in jobs_and_status assert (\"task_b_d\", tp.Status.COMPLETED) in jobs_and_status def test_data_node_update_after_writing(self): dn = FakeDataNode(\"foo\") _DataManager._set(dn) assert not _DataManager._get(dn.id).is_ready_for_reading dn.write(\"Any data\") assert dn.is_ready_for_reading assert _DataManager._get(dn.id).is_ready_for_reading def test_expiration_date_raise_if_never_write(self): dn = FakeDataNode(\"foo\") with pytest.raises(NoData): dn.expiration_date def test_validity_null_if_never_write(self): dn = FakeDataNode(\"foo\") assert dn.validity_period is None def test_auto_set_and_reload(self, current_datetime): dn_1 = InMemoryDataNode( \"foo\", scope=Scope.GLOBAL, id=DataNodeId(\"an_id\"), owner_id=None, parent_ids=None, last_edit_date=current_datetime, edits=[dict(job_id=\"a_job_id\")], edit_in_progress=False, validity_period=None, properties={ \"name\": \"foo\", }, ) dm = _DataManager() dm._set(dn_1) dn_2 = dm._get(dn_1) # auto set & reload on scope attribute assert dn_1.scope == Scope.GLOBAL assert dn_2.scope == Scope.GLOBAL dn_1.scope = Scope.CYCLE assert dn_1.scope == Scope.CYCLE assert dn_2.scope == Scope.CYCLE dn_2.scope = Scope.SCENARIO assert dn_1.scope == Scope.SCENARIO assert dn_2.scope == Scope.SCENARIO new_datetime = current_datetime + timedelta(1) new_datetime_1 = current_datetime + timedelta(3) # auto set & reload on last_edit_date attribute assert dn_1.last_edit_date == current_datetime assert dn_2.last_edit_date == current_datetime dn_1.last_edit_date = new_datetime_1 assert dn_1.last_edit_date == new_datetime_1 assert dn_2.last_edit_date == new_datetime_1 dn_2.last_edit_date 
= new_datetime assert dn_1.last_edit_date == new_datetime assert dn_2.last_edit_date == new_datetime # auto set & reload on name attribute assert dn_1.name == \"foo\" assert dn_2.name == \"foo\" dn_1.name = \"fed\" assert dn_1.name == \"fed\" assert dn_2.name == \"fed\" dn_2.name = \"def\" assert dn_1.name == \"def\" assert dn_2.name == \"def\" # auto set & reload on parent_ids attribute (set() object does not have auto set yet) assert dn_1.parent_ids == set() assert dn_2.parent_ids == set() dn_1._parent_ids.update([\"sc2\"]) _DataManager._set(dn_1) assert dn_1.parent_ids == {\"sc2\"} assert dn_2.parent_ids == {\"sc2\"} dn_2._parent_ids.clear() dn_2._parent_ids.update([\"sc1\"]) _DataManager._set(dn_2) assert dn_1.parent_ids == {\"sc1\"} assert dn_2.parent_ids == {\"sc1\"} # auto set & reload on edit_in_progress attribute assert not dn_2.edit_in_progress assert not dn_1.edit_in_progress dn_1.edit_in_progress = True assert dn_1.edit_in_progress assert dn_2.edit_in_progress dn_2.unlock_edit() assert not dn_1.edit_in_progress assert not dn_2.edit_in_progress dn_1.lock_edit() assert dn_1.edit_in_progress assert dn_2.edit_in_progress # auto set & reload on validity_period attribute time_period_1 = timedelta(1) time_period_2 = timedelta(5) assert dn_1.validity_period is None assert dn_2.validity_period is None dn_1.validity_period = time_period_1 assert dn_1.validity_period == time_period_1 assert dn_2.validity_period == time_period_1 dn_2.validity_period = time_period_2 assert dn_1.validity_period == time_period_2 assert dn_2.validity_period == time_period_2 # auto set & reload on properties attribute assert dn_1.properties == {\"name\": \"def\"} assert dn_2.properties == {\"name\": \"def\"} dn_1._properties[\"qux\"] = 4 assert dn_1.properties[\"qux\"] == 4 assert dn_2.properties[\"qux\"] == 4 assert dn_1.properties == {\"qux\": 4, \"name\": \"def\"} assert dn_2.properties == {\"qux\": 4, \"name\": \"def\"} dn_2._properties[\"qux\"] = 5 assert dn_1.properties[\"qux\"] == 5 assert dn_2.properties[\"qux\"] == 5 dn_1.properties[\"temp_key_1\"] = \"temp_value_1\" dn_1.properties[\"temp_key_2\"] = \"temp_value_2\" assert dn_1.properties == { \"name\": \"def\", \"qux\": 5, \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } assert dn_2.properties == { \"name\": \"def\", \"qux\": 5, \"temp_key_1\": \"temp_value_1\", \"temp_key_2\": \"temp_value_2\", } dn_1.properties.pop(\"temp_key_1\") assert \"temp_key_1\" not in dn_1.properties.keys() assert \"temp_key_1\" not in dn_1.properties.keys() assert dn_1.properties == { \"name\": \"def\", \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } assert dn_2.properties == { \"name\": \"def\", \"qux\": 5, \"temp_key_2\": \"temp_value_2\", } dn_2.properties.pop(\"temp_key_2\") assert dn_1.properties == { \"qux\": 5, \"name\": \"def\", } assert dn_2.properties == { \"qux\": 5, \"name\": \"def\", } assert \"temp_key_2\" not in dn_1.properties.keys() assert \"temp_key_2\" not in dn_2.properties.keys() dn_1.properties[\"temp_key_3\"] = 0 assert dn_1.properties == { \"qux\": 5, \"temp_key_3\": 0, \"name\": \"def\", } assert dn_2.properties == { \"qux\": 5, \"temp_key_3\": 0, \"name\": \"def\", } dn_1.properties.update({\"temp_key_3\": 1}) assert dn_1.properties == { \"qux\": 5, \"temp_key_3\": 1, \"name\": \"def\", } assert dn_2.properties == { \"qux\": 5, \"temp_key_3\": 1, \"name\": \"def\", } dn_1.properties.update(dict()) assert dn_1.properties == { \"qux\": 5, \"temp_key_3\": 1, \"name\": \"def\", } assert dn_2.properties == { \"qux\": 5, 
\"temp_key_3\": 1, \"name\": \"def\", } dn_1.properties[\"temp_key_4\"] = 0 dn_1.properties[\"temp_key_5\"] = 0 dn_1.last_edit_date = new_datetime assert len(dn_1.job_ids) == 1 assert len(dn_2.job_ids) == 1 with dn_1 as dn: assert dn.config_id == \"foo\" assert dn.owner_id is None assert dn.scope == Scope.SCENARIO assert dn.last_edit_date == new_datetime assert dn.name == \"def\" assert dn.edit_in_progress assert dn.validity_period == time_period_2 assert len(dn.job_ids) == 1 assert dn._is_in_context assert dn.properties[\"qux\"] == 5 assert dn.properties[\"temp_key_3\"] == 1 assert dn.properties[\"temp_key_4\"] == 0 assert dn.properties[\"temp_key_5\"] == 0 new_datetime_2 = new_datetime + timedelta(5) dn.scope = Scope.CYCLE dn.last_edit_date = new_datetime_2 dn.name = \"abc\" dn.edit_in_progress = False dn.validity_period = None dn.properties[\"qux\"] = 9 dn.properties.pop(\"temp_key_3\") dn.properties.pop(\"temp_key_4\") dn.properties.update({\"temp_key_4\": 1}) dn.properties.update({\"temp_key_5\": 2}) dn.properties.pop(\"temp_key_5\") dn.properties.update(dict()) assert dn.config_id == \"foo\" assert dn.owner_id is None assert dn.scope == Scope.SCENARIO assert dn.last_edit_date == new_datetime assert dn.name == \"def\" assert dn.edit_in_progress assert dn.validity_period == time_period_2 assert len(dn.job_ids) == 1 assert dn.properties[\"qux\"] == 5 assert dn.properties[\"temp_key_3\"] == 1 assert dn.properties[\"temp_key_4\"] == 0 assert dn.properties[\"temp_key_5\"] == 0 assert dn_1.config_id == \"foo\" assert dn_1.owner_id is None assert dn_1.scope == Scope.CYCLE assert dn_1.last_edit_date == new_datetime_2 assert dn_1.name == \"abc\" assert not dn_1.edit_in_progress assert dn_1.validity_period is None assert not dn_1._is_in_context assert len(dn_1.job_ids) == 1 assert dn_1.properties[\"qux\"] == 9 assert \"temp_key_3\" not in dn_1.properties.keys() assert dn_1.properties[\"temp_key_4\"] == 1 assert \"temp_key_5\" not in dn_1.properties.keys() def test_get_parents(self, data_node): with mock.patch(\"src.taipy.core.get_parents\") as mck: data_node.get_parents() mck.assert_called_once_with(data_node) def test_cacheable_deprecated_false(self): dn = FakeDataNode(\"foo\") with pytest.warns(DeprecationWarning): dn.cacheable assert dn.cacheable is False def test_cacheable_deprecated_true(self): dn = FakeDataNode(\"foo\", properties={\"cacheable\": True}) with pytest.warns(DeprecationWarning): dn.cacheable assert dn.cacheable is True def test_data_node_with_env_variable_value_not_stored(self): dn_config = Config.configure_data_node(\"A\", prop=\"ENV[FOO]\") with mock.patch.dict(os.environ, {\"FOO\": \"bar\"}): dn = _DataManager._bulk_get_or_create([dn_config])[dn_config] assert dn._properties.data[\"prop\"] == \"ENV[FOO]\" assert dn.properties[\"prop\"] == \"bar\" assert dn.prop == \"bar\" def test_path_populated_with_config_default_path(self): dn_config = Config.configure_data_node(\"data_node\", \"pickle\", default_path=\"foo.p\") assert dn_config.default_path == \"foo.p\" data_node = _DataManager._bulk_get_or_create([dn_config])[dn_config] assert data_node.path == \"foo.p\" data_node.path = \"baz.p\" assert data_node.path == \"baz.p\" def test_track_edit(self): dn_config = Config.configure_data_node(\"A\") data_node = _DataManager._bulk_get_or_create([dn_config])[dn_config] data_node.write(data=\"1\", job_id=\"job_1\") data_node.write(data=\"2\", job_id=\"job_1\") data_node.write(data=\"3\", job_id=\"job_1\") assert len(data_node.edits) == 3 assert len(data_node.job_ids) == 3 assert 
data_node.edits[-1] == data_node.get_last_edit() assert data_node.last_edit_date == data_node.get_last_edit().get(\"timestamp\") date = datetime(2050, 1, 1, 12, 12) data_node.write(data=\"4\", timestamp=date, message=\"This is a comment on this edit\", env=\"staging\") assert len(data_node.edits) == 4 assert len(data_node.job_ids) == 3 assert data_node.edits[-1] == data_node.get_last_edit() last_edit = data_node.get_last_edit() assert last_edit[\"message\"] == \"This is a comment on this edit\" assert last_edit[\"env\"] == \"staging\" assert last_edit[\"timestamp\"] == date def test_label(self): a_date = datetime.now() dn = DataNode( \"foo_bar\", Scope.SCENARIO, DataNodeId(\"an_id\"), \"a_scenario_id\", {\"a_parent_id\"}, a_date, [dict(job_id=\"a_job_id\")], edit_in_progress=False, prop=\"erty\", name=\"a name\", ) with mock.patch(\"src.taipy.core.get\") as get_mck: class MockOwner: label = \"owner_label\" def get_label(self): return self.label get_mck.return_value = MockOwner() assert dn.get_label() == \"owner_label > \" + dn.name assert dn.get_simple_label() == dn.name def test_explicit_label(self): a_date = datetime.now() dn = DataNode( \"foo_bar\", Scope.SCENARIO, DataNodeId(\"an_id\"), \"a_scenario_id\", {\"a_parent_id\"}, a_date, [dict(job_id=\"a_job_id\")], edit_in_progress=False, label=\"a label\", name=\"a name\", ) assert dn.get_label() == \"a label\" assert dn.get_simple_label() == \"a label\" def test_change_data_node_name(self): cgf = Config.configure_data_node(\"foo\", scope=Scope.GLOBAL) dn = tp.create_global_data_node(cgf) dn.name = \"bar\" assert dn.name == \"bar\" # This new syntax will be the only one allowed: https://github.com/Avaiga/taipy-core/issues/806 dn.properties[\"name\"] = \"baz\" assert dn.name == \"baz\" "} {"text": "import os import pathlib from datetime import datetime from time import sleep import modin.pandas as modin_pd import numpy as np import pandas as pd import pytest from modin.pandas.test.utils import df_equals from pandas.testing import assert_frame_equal from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.csv import CSVDataNode from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.exceptions.exceptions import InvalidExposedType, NoData from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import InvalidConfigurationId @pytest.fixture(scope=\"function\", autouse=True) def cleanup(): yield path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.csv\") if os.path.isfile(path): os.remove(path) class MyCustomObject: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class TestCSVDataNode: def test_create(self): path = \"data/node/path\" dn = CSVDataNode( \"foo_bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"name\": \"super name\"} ) assert isinstance(dn, CSVDataNode) assert dn.storage_type() == \"csv\" assert dn.config_id == \"foo_bar\" assert dn.name == \"super name\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.last_edit_date is None assert dn.job_ids == [] assert not dn.is_ready_for_reading assert dn.path == path assert dn.has_header is False assert dn.exposed_type == \"pandas\" with pytest.raises(InvalidConfigurationId): dn = CSVDataNode( \"foo bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, 
\"name\": \"super name\"} ) def test_get_user_properties(self, csv_file): dn_1 = CSVDataNode(\"dn_1\", Scope.SCENARIO, properties={\"path\": \"data/node/path\"}) assert dn_1._get_user_properties() == {} dn_2 = CSVDataNode( \"dn_2\", Scope.SCENARIO, properties={ \"exposed_type\": \"numpy\", \"default_data\": \"foo\", \"default_path\": csv_file, \"has_header\": False, \"foo\": \"bar\", }, ) # exposed_type, default_data, default_path, path, has_header, sheet_name are filtered out assert dn_2._get_user_properties() == {\"foo\": \"bar\"} def test_new_csv_data_node_with_existing_file_is_ready_for_reading(self): not_ready_dn_cfg = Config.configure_data_node(\"not_ready_data_node_config_id\", \"csv\", path=\"NOT_EXISTING.csv\") not_ready_dn = _DataManager._bulk_get_or_create([not_ready_dn_cfg])[not_ready_dn_cfg] assert not not_ready_dn.is_ready_for_reading path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") ready_dn_cfg = Config.configure_data_node(\"ready_data_node_config_id\", \"csv\", path=path) ready_dn = _DataManager._bulk_get_or_create([ready_dn_cfg])[ready_dn_cfg] assert ready_dn.is_ready_for_reading @pytest.mark.parametrize( [\"properties\", \"exists\"], [ ({}, False), ({\"default_data\": [\"foo\", \"bar\"]}, True), ], ) def test_create_with_default_data(self, properties, exists): dn = CSVDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) assert os.path.exists(dn.path) is exists def test_read_with_header(self): not_existing_csv = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.csv\", \"has_header\": True}) with pytest.raises(NoData): assert not_existing_csv.read() is None not_existing_csv.read_or_raise() path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") # # Create CSVDataNode without exposed_type (Default is pandas.DataFrame) csv_data_node_as_pandas = CSVDataNode(\"bar\", Scope.SCENARIO, properties={\"path\": path}) data_pandas = csv_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 10 assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path).to_numpy()) # Create CSVDataNode with modin exposed_type csv_data_node_as_modin = CSVDataNode(\"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"modin\"}) data_modin = csv_data_node_as_modin.read() assert isinstance(data_modin, modin_pd.DataFrame) assert len(data_modin) == 10 assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path).to_numpy()) # Create CSVDataNode with numpy exposed_type csv_data_node_as_numpy = CSVDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": True, \"exposed_type\": \"numpy\"} ) data_numpy = csv_data_node_as_numpy.read() assert isinstance(data_numpy, np.ndarray) assert len(data_numpy) == 10 assert np.array_equal(data_numpy, pd.read_csv(path).to_numpy()) # Create the same CSVDataNode but with custom exposed_type csv_data_node_as_custom_object = CSVDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": MyCustomObject} ) data_custom = csv_data_node_as_custom_object.read() assert isinstance(data_custom, list) assert len(data_custom) == 10 for (index, row_pandas), row_custom in zip(data_pandas.iterrows(), data_custom): assert isinstance(row_custom, MyCustomObject) assert row_pandas[\"id\"] == row_custom.id assert str(row_pandas[\"integer\"]) == row_custom.integer assert row_pandas[\"text\"] == row_custom.text def test_read_without_header(self): not_existing_csv = 
CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.csv\", \"has_header\": False}) with pytest.raises(NoData): assert not_existing_csv.read() is None not_existing_csv.read_or_raise() path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") # Create CSVDataNode without exposed_type (Default is pandas.DataFrame) csv_data_node_as_pandas = CSVDataNode(\"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False}) data_pandas = csv_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 11 assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path, header=None).to_numpy()) # Create CSVDataNode with modin exposed_type csv_data_node_as_modin = CSVDataNode( \"baz\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": \"modin\"} ) data_modin = csv_data_node_as_modin.read() assert isinstance(data_modin, modin_pd.DataFrame) assert len(data_modin) == 11 assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path, header=None).to_numpy()) # Create CSVDataNode with numpy exposed_type csv_data_node_as_numpy = CSVDataNode( \"qux\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": \"numpy\"} ) data_numpy = csv_data_node_as_numpy.read() assert isinstance(data_numpy, np.ndarray) assert len(data_numpy) == 11 assert np.array_equal(data_numpy, pd.read_csv(path, header=None).to_numpy()) # Create the same CSVDataNode but with custom exposed_type csv_data_node_as_custom_object = CSVDataNode( \"quux\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": MyCustomObject} ) data_custom = csv_data_node_as_custom_object.read() assert isinstance(data_custom, list) assert len(data_custom) == 11 for (index, row_pandas), row_custom in zip(data_pandas.iterrows(), data_custom): assert isinstance(row_custom, MyCustomObject) assert row_pandas[0] == row_custom.id assert str(row_pandas[1]) == row_custom.integer assert row_pandas[2] == row_custom.text @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append(self, csv_file, default_data_frame, content): csv_dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file}) assert_frame_equal(csv_dn.read(), default_data_frame) csv_dn.append(content) assert_frame_equal( csv_dn.read(), pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index(drop=True), ) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_modin(self, csv_file, default_data_frame, content): csv_dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"modin\"}) df_equals(csv_dn.read(), modin_pd.DataFrame(default_data_frame)) csv_dn.append(content) df_equals( csv_dn.read(), modin_pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index( drop=True ), ) @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"]), ], ) def 
test_write(self, csv_file, default_data_frame, content, columns): csv_dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file}) assert np.array_equal(csv_dn.read().values, default_data_frame.values) if not columns: csv_dn.write(content) df = pd.DataFrame(content) else: csv_dn.write_with_column_names(content, columns) df = pd.DataFrame(content, columns=columns) assert np.array_equal(csv_dn.read().values, df.values) csv_dn.write(None) assert len(csv_dn.read()) == 0 def test_write_with_different_encoding(self, csv_file): data = pd.DataFrame([{\"\u2265a\": 1, \"b\": 2}]) utf8_dn = CSVDataNode(\"utf8_dn\", Scope.SCENARIO, properties={\"default_path\": csv_file}) utf16_dn = CSVDataNode(\"utf16_dn\", Scope.SCENARIO, properties={\"default_path\": csv_file, \"encoding\": \"utf-16\"}) # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding utf8_dn.write(data) assert np.array_equal(utf8_dn.read(), data) with pytest.raises(UnicodeError): utf16_dn.read() # If a file is written with utf-16 encoding, it can only be read with utf-16, not utf-8 encoding utf16_dn.write(data) assert np.array_equal(utf16_dn.read(), data) with pytest.raises(UnicodeError): utf8_dn.read() @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"]), ], ) def test_write_modin(self, csv_file, default_data_frame, content, columns): default_data_frame = modin_pd.DataFrame(default_data_frame) csv_dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"modin\"}) assert np.array_equal(csv_dn.read().values, default_data_frame.values) if not columns: csv_dn.write(content) df = pd.DataFrame(content) else: csv_dn.write_with_column_names(content, columns) df = pd.DataFrame(content, columns=columns) assert np.array_equal(csv_dn.read().values, df.values) csv_dn.write(None) assert len(csv_dn.read()) == 0 def test_write_modin_with_different_encoding(self, csv_file): data = pd.DataFrame([{\"\u2265a\": 1, \"b\": 2}]) utf8_dn = CSVDataNode(\"utf8_dn\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"modin\"}) utf16_dn = CSVDataNode( \"utf16_dn\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"modin\", \"encoding\": \"utf-16\"} ) # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding utf8_dn.write(data) assert np.array_equal(utf8_dn.read(), data) with pytest.raises(UnicodeError): utf16_dn.read() # If a file is written with utf-16 encoding, it can only be read with utf-16, not utf-8 encoding utf16_dn.write(data) assert np.array_equal(utf16_dn.read(), data) with pytest.raises(UnicodeError): utf8_dn.read() def test_set_path(self): dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": \"foo.csv\"}) assert dn.path == \"foo.csv\" dn.path = \"bar.csv\" assert dn.path == \"bar.csv\" def test_read_write_after_modify_path(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.csv\") dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path}) read_data = dn.read() assert read_data is not None dn.path = new_path with pytest.raises(FileNotFoundError): dn.read() dn.write(read_data) assert dn.read().equals(read_data) def test_pandas_exposed_type(self): path = 
os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"pandas\"}) assert isinstance(dn.read(), pd.DataFrame) def test_filter_pandas_exposed_type(self, csv_file): dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"pandas\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_modin_exposed_type(self, csv_file): dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"modin\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(modin_pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = modin_pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) 
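# --- illustrative sketch, not part of the original test suite ---
# The filter assertions around this point check that dn.filter((col, val, Operator.EQUAL))
# selects the same rows as plain boolean indexing dn[dn[col] == val], and that
# JoinOperator.OR combines several conditions. A hedged, standalone restatement of that
# expectation with ordinary pandas (no taipy objects; the data below is made up):
import pandas as pd

frame = pd.DataFrame([{"foo": 1, "bar": 1}, {"foo": 1, "bar": 2}, {"foo": 2, "bar": 2}])
equal_foo_1 = frame[frame["foo"] == 1]                         # rows a ("foo", 1, Operator.EQUAL) filter should keep
bar_1_or_2 = frame[(frame["bar"] == 1) | (frame["bar"] == 2)]  # rows a JoinOperator.OR of two EQUAL filters should keep
assert len(equal_foo_1) == 2 and len(bar_1_or_2) == 3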
df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = modin_pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_numpy_exposed_type(self, csv_file): dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": csv_file, \"exposed_type\": \"numpy\"}) dn.write( [ [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], ] ) # Test datanode indexing and slicing assert np.array_equal(dn[0], np.array([1, 1])) assert np.array_equal(dn[1], np.array([1, 2])) assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]])) # Test filter data assert np.array_equal(dn.filter((0, 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn.filter((0, 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn.filter((1, 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]])) assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]])) assert np.array_equal( dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR), np.array([[1, 1], [1, 2], [2, 1], [2, 2]]), ) assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]])) def test_raise_error_invalid_exposed_type(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.csv\") with pytest.raises(InvalidExposedType): CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"foo\"}) def test_get_system_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.csv\")) pd.DataFrame([]).to_csv(temp_file_path) dn = CSVDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"pandas\"}) dn.write(pd.DataFrame([1, 2, 3])) previous_edit_date = dn.last_edit_date sleep(0.1) pd.DataFrame([4, 5, 6]).to_csv(temp_file_path) new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert previous_edit_date < dn.last_edit_date assert new_edit_date == dn.last_edit_date sleep(0.1) dn.write(pd.DataFrame([7, 8, 9])) assert new_edit_date < dn.last_edit_date os.unlink(temp_file_path) "} {"text": "import os import pathlib from datetime import datetime from time import sleep import modin.pandas as modin_pd import pandas as pd import pytest from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.pickle import PickleDataNode 
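# --- illustrative sketch, not part of the original test suite ---
# Minimal pickle data node round trip that the tests below exercise; the name
# "sketch_pickle" is made up, and the snippet assumes the same test layout as
# this suite (src.taipy.core importable, write access to the storage folder).
from src.taipy.core.data.pickle import PickleDataNode
from taipy.config.common.scope import Scope

sketch_dn = PickleDataNode("sketch_pickle", Scope.SCENARIO, properties={"default_data": "bar"})
assert sketch_dn.read() == "bar"           # default_data is pickled to disk when the node is created
sketch_dn.write({"answer": 42})            # any picklable object can be written
assert sketch_dn.read() == {"answer": 42}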
from src.taipy.core.exceptions.exceptions import NoData from taipy.config.common.scope import Scope from taipy.config.config import Config from taipy.config.exceptions.exceptions import InvalidConfigurationId @pytest.fixture(scope=\"function\", autouse=True) def cleanup(): yield path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.p\") if os.path.isfile(path): os.remove(path) class TestPickleDataNodeEntity: @pytest.fixture(scope=\"function\", autouse=True) def remove_pickle_files(self): yield import glob for f in glob.glob(\"*.p\"): print(f\"deleting file {f}\") os.remove(f) def test_create(self): dn = PickleDataNode(\"foobar_bazxyxea\", Scope.SCENARIO, properties={\"default_data\": \"Data\"}) assert os.path.isfile(Config.core.storage_folder + \"pickles/\" + dn.id + \".p\") assert isinstance(dn, PickleDataNode) assert dn.storage_type() == \"pickle\" assert dn.config_id == \"foobar_bazxyxea\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.name is None assert dn.owner_id is None assert dn.last_edit_date is not None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.read() == \"Data\" assert dn.last_edit_date is not None assert dn.job_ids == [] with pytest.raises(InvalidConfigurationId): PickleDataNode(\"foobar bazxyxea\", Scope.SCENARIO, properties={\"default_data\": \"Data\"}) def test_get_user_properties(self, pickle_file_path): dn_1 = PickleDataNode(\"dn_1\", Scope.SCENARIO, properties={\"path\": pickle_file_path}) assert dn_1._get_user_properties() == {} dn_2 = PickleDataNode( \"dn_2\", Scope.SCENARIO, properties={ \"default_data\": \"foo\", \"default_path\": pickle_file_path, \"foo\": \"bar\", }, ) # default_data, default_path, path, is_generated are filtered out assert dn_2._get_user_properties() == {\"foo\": \"bar\"} def test_new_pickle_data_node_with_existing_file_is_ready_for_reading(self): not_ready_dn_cfg = Config.configure_data_node(\"not_ready_data_node_config_id\", \"pickle\", path=\"NOT_EXISTING.p\") path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.p\") ready_dn_cfg = Config.configure_data_node(\"ready_data_node_config_id\", \"pickle\", path=path) dns = _DataManager._bulk_get_or_create([not_ready_dn_cfg, ready_dn_cfg]) assert not dns[not_ready_dn_cfg].is_ready_for_reading assert dns[ready_dn_cfg].is_ready_for_reading def test_create_with_file_name(self): dn = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": \"bar\", \"path\": \"foo.FILE.p\"}) assert os.path.isfile(\"foo.FILE.p\") assert dn.read() == \"bar\" dn.write(\"qux\") assert dn.read() == \"qux\" dn.write(1998) assert dn.read() == 1998 def test_read_and_write(self): no_data_dn = PickleDataNode(\"foo\", Scope.SCENARIO) with pytest.raises(NoData): assert no_data_dn.read() is None no_data_dn.read_or_raise() pickle_str = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": \"bar\"}) assert isinstance(pickle_str.read(), str) assert pickle_str.read() == \"bar\" pickle_str.properties[\"default_data\"] = \"baz\" # this modifies the default data value but not the data itself assert pickle_str.read() == \"bar\" pickle_str.write(\"qux\") assert pickle_str.read() == \"qux\" pickle_str.write(1998) assert pickle_str.read() == 1998 assert isinstance(pickle_str.read(), int) pickle_int = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": 197}) assert isinstance(pickle_int.read(), int) assert pickle_int.read() == 197 pickle_dict = PickleDataNode( \"foo\", Scope.SCENARIO, 
properties={\"default_data\": {\"bar\": 12, \"baz\": \"qux\", \"quux\": [13]}} ) assert isinstance(pickle_dict.read(), dict) assert pickle_dict.read() == {\"bar\": 12, \"baz\": \"qux\", \"quux\": [13]} default_pandas = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}) new_pandas_df = pd.DataFrame({\"c\": [7, 8, 9], \"d\": [10, 11, 12]}) default_modin = modin_pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}) new_modin_df = modin_pd.DataFrame({\"c\": [7, 8, 9], \"d\": [10, 11, 12]}) pickle_pandas = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": default_pandas}) assert isinstance(pickle_pandas.read(), pd.DataFrame) assert default_pandas.equals(pickle_pandas.read()) pickle_pandas.write(new_pandas_df) assert new_pandas_df.equals(pickle_pandas.read()) assert isinstance(pickle_pandas.read(), pd.DataFrame) pickle_pandas.write(new_modin_df) assert new_modin_df.equals(pickle_pandas.read()) assert isinstance(pickle_pandas.read(), modin_pd.DataFrame) pickle_pandas.write(1998) assert pickle_pandas.read() == 1998 assert isinstance(pickle_pandas.read(), int) pickle_modin = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": default_modin}) assert isinstance(pickle_modin.read(), modin_pd.DataFrame) assert default_modin.equals(pickle_modin.read()) pickle_modin.write(new_modin_df) assert new_modin_df.equals(pickle_modin.read()) assert isinstance(pickle_modin.read(), modin_pd.DataFrame) pickle_modin.write(new_pandas_df) assert new_pandas_df.equals(pickle_modin.read()) assert isinstance(pickle_modin.read(), pd.DataFrame) pickle_modin.write(1998) assert pickle_modin.read() == 1998 assert isinstance(pickle_modin.read(), int) def test_path_overrides_default_path(self): dn = PickleDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_data\": \"bar\", \"default_path\": \"foo.FILE.p\", \"path\": \"bar.FILE.p\", }, ) assert dn.path == \"bar.FILE.p\" def test_set_path(self): dn = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": \"foo.p\"}) assert dn.path == \"foo.p\" dn.path = \"bar.p\" assert dn.path == \"bar.p\" def test_is_generated(self): dn = PickleDataNode(\"foo\", Scope.SCENARIO, properties={}) assert dn.is_generated dn.path = \"bar.p\" assert not dn.is_generated def test_read_write_after_modify_path(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.p\") new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.p\") dn = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path}) read_data = dn.read() assert read_data is not None dn.path = new_path with pytest.raises(FileNotFoundError): dn.read() dn.write({\"other\": \"stuff\"}) assert dn.read() == {\"other\": \"stuff\"} def test_get_system_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.pickle\")) pd.DataFrame([]).to_pickle(temp_file_path) dn = PickleDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"pandas\"}) dn.write(pd.DataFrame([1, 2, 3])) previous_edit_date = dn.last_edit_date sleep(0.1) pd.DataFrame([4, 5, 6]).to_pickle(temp_file_path) new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert previous_edit_date < dn.last_edit_date assert new_edit_date == dn.last_edit_date sleep(0.1) dn.write(pd.DataFrame([7, 8, 9])) assert new_edit_date < dn.last_edit_date os.unlink(temp_file_path) "} {"text": "import pytest from src.taipy.core.data.data_node import DataNode from 
src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.generic import GenericDataNode from src.taipy.core.exceptions.exceptions import MissingReadFunction, MissingRequiredProperty, MissingWriteFunction from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId def read_fct(): return TestGenericDataNode.data def read_fct_with_args(inp): return [i + inp for i in TestGenericDataNode.data] def write_fct(data): data.append(data[-1] + 1) def write_fct_with_args(data, inp): for _ in range(inp): data.append(data[-1] + 1) def read_fct_modify_data_node_name(data_node_id: DataNodeId, name: str): import src.taipy.core as tp data_node = tp.get(data_node_id) assert isinstance(data_node, DataNode) data_node.name = name # type:ignore return data_node def reset_data(): TestGenericDataNode.data = [i for i in range(10)] class TestGenericDataNode: data = [i for i in range(10)] def test_create(self): dn = GenericDataNode( \"foo_bar\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"write_fct\": write_fct, \"name\": \"super name\"} ) assert isinstance(dn, GenericDataNode) assert dn.storage_type() == \"generic\" assert dn.config_id == \"foo_bar\" assert dn.name == \"super name\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.last_edit_date is not None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.properties[\"read_fct\"] == read_fct assert dn.properties[\"write_fct\"] == write_fct dn_1 = GenericDataNode( \"foo\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"write_fct\": None, \"name\": \"foo\"} ) assert isinstance(dn, GenericDataNode) assert dn_1.storage_type() == \"generic\" assert dn_1.config_id == \"foo\" assert dn_1.name == \"foo\" assert dn_1.scope == Scope.SCENARIO assert dn_1.id is not None assert dn_1.owner_id is None assert dn_1.last_edit_date is not None assert dn_1.job_ids == [] assert dn_1.is_ready_for_reading assert dn_1.properties[\"read_fct\"] == read_fct assert dn_1.properties[\"write_fct\"] is None dn_2 = GenericDataNode( \"xyz\", Scope.SCENARIO, properties={\"read_fct\": None, \"write_fct\": write_fct, \"name\": \"xyz\"} ) assert isinstance(dn, GenericDataNode) assert dn_2.storage_type() == \"generic\" assert dn_2.config_id == \"xyz\" assert dn_2.name == \"xyz\" assert dn_2.scope == Scope.SCENARIO assert dn_2.id is not None assert dn_2.owner_id is None assert dn_2.last_edit_date is not None assert dn_2.job_ids == [] assert dn_2.is_ready_for_reading assert dn_2.properties[\"read_fct\"] is None assert dn_2.properties[\"write_fct\"] == write_fct dn_3 = GenericDataNode(\"xyz\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"name\": \"xyz\"}) assert isinstance(dn, GenericDataNode) assert dn_3.storage_type() == \"generic\" assert dn_3.config_id == \"xyz\" assert dn_3.name == \"xyz\" assert dn_3.scope == Scope.SCENARIO assert dn_3.id is not None assert dn_3.owner_id is None assert dn_3.last_edit_date is not None assert dn_3.job_ids == [] assert dn_3.is_ready_for_reading assert dn_3.properties[\"read_fct\"] == read_fct assert dn_3.properties[\"write_fct\"] is None dn_4 = GenericDataNode(\"xyz\", Scope.SCENARIO, properties={\"write_fct\": write_fct, \"name\": \"xyz\"}) assert isinstance(dn, GenericDataNode) assert dn_4.storage_type() == \"generic\" assert dn_4.config_id == \"xyz\" assert dn_4.name == \"xyz\" assert dn_4.scope == Scope.SCENARIO assert dn_4.id is not None assert dn_4.owner_id is None assert dn_4.last_edit_date is not None 
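# --- illustrative sketch, not part of the original test suite ---
# Hedged example of how a generic data node delegates I/O to the user-supplied
# read_fct/write_fct, mirroring the functions defined at the top of this file;
# "store", "my_read" and "my_write" are made-up names.
from src.taipy.core.data.generic import GenericDataNode
from taipy.config.common.scope import Scope

store = []

def my_read():
    return store

def my_write(data):
    store.clear()
    store.extend(data)

sketch_gdn = GenericDataNode(
    "sketch_generic", Scope.SCENARIO, properties={"read_fct": my_read, "write_fct": my_write}
)
sketch_gdn.write([1, 2, 3])            # delegates to my_write
assert sketch_gdn.read() == [1, 2, 3]  # delegates to my_read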
assert dn_4.job_ids == [] assert dn_4.is_ready_for_reading assert dn_4.properties[\"read_fct\"] is None assert dn_4.properties[\"write_fct\"] == write_fct with pytest.raises(InvalidConfigurationId): GenericDataNode(\"foo bar\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"write_fct\": write_fct}) def test_get_user_properties(self): dn_1 = GenericDataNode( \"dn_1\", Scope.SCENARIO, properties={ \"read_fct\": read_fct, \"write_fct\": write_fct, \"read_fct_args\": 1, \"write_fct_args\": 2, \"foo\": \"bar\", }, ) # read_fct, read_fct_args, write_fct, write_fct_args are filtered out assert dn_1._get_user_properties() == {\"foo\": \"bar\"} def test_create_with_missing_parameters(self): with pytest.raises(MissingRequiredProperty): GenericDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(MissingRequiredProperty): GenericDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties={}) def test_read_write_generic_datanode(self): generic_dn = GenericDataNode(\"foo\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"write_fct\": write_fct}) assert generic_dn.read() == self.data assert len(generic_dn.read()) == 10 generic_dn.write(self.data) assert generic_dn.read() == self.data assert len(generic_dn.read()) == 11 generic_dn_1 = GenericDataNode(\"bar\", Scope.SCENARIO, properties={\"read_fct\": read_fct, \"write_fct\": None}) assert generic_dn_1.read() == self.data assert len(generic_dn_1.read()) == 11 with pytest.raises(MissingWriteFunction): generic_dn_1.write(self.data) generic_dn_2 = GenericDataNode(\"xyz\", Scope.SCENARIO, properties={\"read_fct\": None, \"write_fct\": write_fct}) generic_dn_2.write(self.data) assert len(self.data) == 12 with pytest.raises(MissingReadFunction): generic_dn_2.read() generic_dn_3 = GenericDataNode(\"bar\", Scope.SCENARIO, properties={\"read_fct\": None, \"write_fct\": None}) with pytest.raises(MissingReadFunction): generic_dn_3.read() with pytest.raises(MissingWriteFunction): generic_dn_3.write(self.data) reset_data() def test_read_write_generic_datanode_with_arguments(self): generic_dn = GenericDataNode( \"foo\", Scope.SCENARIO, properties={ \"read_fct\": read_fct_with_args, \"write_fct\": write_fct_with_args, \"read_fct_args\": [1], \"write_fct_args\": [2], }, ) assert all([a + 1 == b for a, b in zip(self.data, generic_dn.read())]) assert len(generic_dn.read()) == 10 generic_dn.write(self.data) assert len(generic_dn.read()) == 12 reset_data() def test_read_write_generic_datanode_with_non_list_arguments(self): generic_dn = GenericDataNode( \"foo\", Scope.SCENARIO, properties={ \"read_fct\": read_fct_with_args, \"write_fct\": write_fct_with_args, \"read_fct_args\": 1, \"write_fct_args\": 2, }, ) assert all([a + 1 == b for a, b in zip(self.data, generic_dn.read())]) assert len(generic_dn.read()) == 10 generic_dn.write(self.data) assert len(generic_dn.read()) == 12 reset_data() def test_save_data_node_when_read(self): generic_dn = GenericDataNode( \"foo\", Scope.SCENARIO, properties={\"read_fct\": read_fct_modify_data_node_name, \"write_fct\": write_fct} ) generic_dn._properties[\"read_fct_args\"] = (generic_dn.id, \"bar\") generic_dn.read() assert generic_dn.name == \"bar\" "} {"text": " from dataclasses import dataclass from datetime import datetime from unittest.mock import patch import mongomock import pymongo import pytest from bson import ObjectId from bson.errors import InvalidDocument from src.taipy.core import MongoDefaultDocument from src.taipy.core.common._mongo_connector import _connect_mongodb from 
src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.mongo import MongoCollectionDataNode from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.exceptions.exceptions import InvalidCustomDocument, MissingRequiredProperty from taipy.config.common.scope import Scope @pytest.fixture(scope=\"function\", autouse=True) def clear_mongo_connection_cache(): _connect_mongodb.cache_clear() @dataclass class CustomObjectWithoutArgs: def __init__(self, foo=None, bar=None): self.foo = foo self.bar = bar class CustomObjectWithCustomEncoder: def __init__(self, _id=None, integer=None, text=None, time=None): self.id = _id self.integer = integer self.text = text self.time = time def encode(self): return {\"_id\": self.id, \"integer\": self.integer, \"text\": self.text, \"time\": self.time.isoformat()} class CustomObjectWithCustomEncoderDecoder(CustomObjectWithCustomEncoder): @classmethod def decode(cls, data): return cls(data[\"_id\"], data[\"integer\"], data[\"text\"], datetime.fromisoformat(data[\"time\"])) class TestMongoCollectionDataNode: __properties = [ { \"db_username\": \"\", \"db_password\": \"\", \"db_name\": \"taipy\", \"collection_name\": \"foo\", \"custom_document\": MongoDefaultDocument, \"db_extra_args\": { \"ssl\": \"true\", \"retrywrites\": \"false\", \"maxIdleTimeMS\": \"120000\", }, } ] @pytest.mark.parametrize(\"properties\", __properties) def test_create(self, properties): mongo_dn = MongoCollectionDataNode( \"foo_bar\", Scope.SCENARIO, properties=properties, ) assert isinstance(mongo_dn, MongoCollectionDataNode) assert mongo_dn.storage_type() == \"mongo_collection\" assert mongo_dn.config_id == \"foo_bar\" assert mongo_dn.scope == Scope.SCENARIO assert mongo_dn.id is not None assert mongo_dn.owner_id is None assert mongo_dn.job_ids == [] assert mongo_dn.is_ready_for_reading assert mongo_dn.custom_document == MongoDefaultDocument @pytest.mark.parametrize(\"properties\", __properties) def test_get_user_properties(self, properties): custom_properties = properties.copy() custom_properties[\"foo\"] = \"bar\" mongo_dn = MongoCollectionDataNode( \"foo_bar\", Scope.SCENARIO, properties=custom_properties, ) assert mongo_dn._get_user_properties() == {\"foo\": \"bar\"} @pytest.mark.parametrize( \"properties\", [ {}, {\"db_username\": \"foo\"}, {\"db_username\": \"foo\", \"db_password\": \"foo\"}, {\"db_username\": \"foo\", \"db_password\": \"foo\", \"db_name\": \"foo\"}, ], ) def test_create_with_missing_parameters(self, properties): with pytest.raises(MissingRequiredProperty): MongoCollectionDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(MissingRequiredProperty): MongoCollectionDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) @pytest.mark.parametrize(\"properties\", __properties) def test_raise_error_invalid_custom_document(self, properties): custom_properties = properties.copy() custom_properties[\"custom_document\"] = \"foo\" with pytest.raises(InvalidCustomDocument): MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=custom_properties, ) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def test_read(self, properties): mock_client = pymongo.MongoClient(\"localhost\") mock_client[properties[\"db_name\"]][properties[\"collection_name\"]].insert_many( [ {\"foo\": \"baz\", \"bar\": \"qux\"}, {\"foo\": \"quux\", \"bar\": \"quuz\"}, {\"foo\": \"corge\"}, {\"bar\": \"grault\"}, {\"KWARGS_KEY\": \"KWARGS_VALUE\"}, {}, ] ) 
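# --- illustrative sketch, not part of the original test suite ---
# Hedged restatement of the custom_document contract defined above for the mongo
# collection data node: encode() maps the object to a Mongo document, and the
# optional classmethod decode() rebuilds it from a document read back from the
# collection. "SketchDocument" and its fields are made-up names.
from datetime import datetime

class SketchDocument:
    def __init__(self, _id=None, when=None):
        self.id = _id
        self.when = when

    def encode(self):
        return {"_id": self.id, "when": self.when.isoformat()}

    @classmethod
    def decode(cls, data):
        return cls(data["_id"], datetime.fromisoformat(data["when"]))

round_tripped = SketchDocument.decode(SketchDocument("1", datetime(2021, 1, 1)).encode())
assert round_tripped.id == "1" and round_tripped.when == datetime(2021, 1, 1)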
mongo_dn = MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=properties, ) data = mongo_dn.read() assert isinstance(data, list) assert isinstance(data[0], MongoDefaultDocument) assert isinstance(data[1], MongoDefaultDocument) assert isinstance(data[2], MongoDefaultDocument) assert isinstance(data[3], MongoDefaultDocument) assert isinstance(data[4], MongoDefaultDocument) assert isinstance(data[5], MongoDefaultDocument) assert isinstance(data[0]._id, ObjectId) assert data[0].foo == \"baz\" assert data[0].bar == \"qux\" assert isinstance(data[1]._id, ObjectId) assert data[1].foo == \"quux\" assert data[1].bar == \"quuz\" assert isinstance(data[2]._id, ObjectId) assert data[2].foo == \"corge\" assert isinstance(data[3]._id, ObjectId) assert data[3].bar == \"grault\" assert isinstance(data[4]._id, ObjectId) assert data[4].KWARGS_KEY == \"KWARGS_VALUE\" assert isinstance(data[5]._id, ObjectId) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def test_read_empty_as(self, properties): mongo_dn = MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=properties, ) data = mongo_dn.read() assert isinstance(data, list) assert len(data) == 0 @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) @pytest.mark.parametrize( \"data\", [ ([{\"foo\": 1, \"a\": 2}, {\"foo\": 3, \"bar\": 4}]), ({\"a\": 1, \"bar\": 2}), ], ) def test_read_wrong_object_properties_name(self, properties, data): custom_properties = properties.copy() custom_properties[\"custom_document\"] = CustomObjectWithoutArgs mongo_dn = MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=custom_properties, ) mongo_dn.write(data) with pytest.raises(TypeError): data = mongo_dn.read() @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) @pytest.mark.parametrize( \"data\", [ ([{\"foo\": 11, \"bar\": 22}, {\"foo\": 33, \"bar\": 44}]), ({\"foz\": 1, \"baz\": 2}), ], ) def test_append(self, properties, data): mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=properties) mongo_dn.append(data) original_data = [{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}] mongo_dn.write(original_data) mongo_dn.append(data) assert len(mongo_dn.read()) == len(data if isinstance(data, list) else [data]) + len(original_data) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) @pytest.mark.parametrize( \"data,written_data\", [ ([{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}], [{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]), ({\"foo\": 1, \"bar\": 2}, [{\"foo\": 1, \"bar\": 2}]), ], ) def test_write(self, properties, data, written_data): mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=properties) mongo_dn.write(data) read_objects = mongo_dn.read() for read_object, written_dict in zip(read_objects, written_data): assert isinstance(read_object._id, ObjectId) assert read_object.foo == written_dict[\"foo\"] assert read_object.bar == written_dict[\"bar\"] @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) @pytest.mark.parametrize( \"data\", [ [], ], ) def test_write_empty_list(self, properties, data): mongo_dn = MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=properties, ) mongo_dn.write(data) assert len(mongo_dn.read()) == 0 @mongomock.patch(servers=((\"localhost\", 27017),)) 
@pytest.mark.parametrize(\"properties\", __properties) def test_write_non_serializable(self, properties): mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=properties) data = {\"a\": 1, \"b\": mongo_dn} with pytest.raises(InvalidDocument): mongo_dn.write(data) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def test_write_custom_encoder(self, properties): custom_properties = properties.copy() custom_properties[\"custom_document\"] = CustomObjectWithCustomEncoder mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) data = [ CustomObjectWithCustomEncoder(\"1\", 1, \"abc\", datetime.now()), CustomObjectWithCustomEncoder(\"2\", 2, \"def\", datetime.now()), ] mongo_dn.write(data) read_data = mongo_dn.read() assert isinstance(read_data[0], CustomObjectWithCustomEncoder) assert isinstance(read_data[1], CustomObjectWithCustomEncoder) assert read_data[0].id == \"1\" assert read_data[0].integer == 1 assert read_data[0].text == \"abc\" assert isinstance(read_data[0].time, str) assert read_data[1].id == \"2\" assert read_data[1].integer == 2 assert read_data[1].text == \"def\" assert isinstance(read_data[1].time, str) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def test_write_custom_encoder_decoder(self, properties): custom_properties = properties.copy() custom_properties[\"custom_document\"] = CustomObjectWithCustomEncoderDecoder mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=custom_properties) data = [ CustomObjectWithCustomEncoderDecoder(\"1\", 1, \"abc\", datetime.now()), CustomObjectWithCustomEncoderDecoder(\"2\", 2, \"def\", datetime.now()), ] mongo_dn.write(data) read_data = mongo_dn.read() assert isinstance(read_data[0], CustomObjectWithCustomEncoderDecoder) assert isinstance(read_data[1], CustomObjectWithCustomEncoderDecoder) assert read_data[0].id == \"1\" assert read_data[0].integer == 1 assert read_data[0].text == \"abc\" assert isinstance(read_data[0].time, datetime) assert read_data[1].id == \"2\" assert read_data[1].integer == 2 assert read_data[1].text == \"def\" assert isinstance(read_data[1].time, datetime) @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def test_filter(self, properties): mock_client = pymongo.MongoClient(\"localhost\") mock_client[properties[\"db_name\"]][properties[\"collection_name\"]].insert_many( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, {\"KWARGS_KEY\": \"KWARGS_VALUE\"}, ] ) mongo_dn = MongoCollectionDataNode( \"foo\", Scope.SCENARIO, properties=properties, ) assert len(mongo_dn.filter((\"foo\", 1, Operator.EQUAL))) == 3 assert len(mongo_dn.filter((\"foo\", 1, Operator.NOT_EQUAL))) == 3 assert len(mongo_dn.filter((\"bar\", 2, Operator.EQUAL))) == 3 assert len(mongo_dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)) == 4 assert mongo_dn[\"foo\"] == [1, 1, 1, 2, None, None] assert mongo_dn[\"bar\"] == [1, 2, None, 2, 2, None] assert [m.__dict__ for m in mongo_dn[:3]] == [m.__dict__ for m in mongo_dn.read()[:3]] assert mongo_dn[[\"foo\", \"bar\"]] == [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, {}, ] @mongomock.patch(servers=((\"localhost\", 27017),)) @pytest.mark.parametrize(\"properties\", __properties) def 
test_filter_does_not_read_all_entities(self, properties): mongo_dn = MongoCollectionDataNode(\"foo\", Scope.SCENARIO, properties=properties) # MongoCollectionDataNode.filter() should not call the MongoCollectionDataNode._read() method with patch.object(MongoCollectionDataNode, \"_read\") as read_mock: mongo_dn.filter((\"foo\", 1, Operator.EQUAL)) mongo_dn.filter((\"bar\", 2, Operator.NOT_EQUAL)) mongo_dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) assert read_mock[\"_read\"].call_count == 0 "} {"text": "from src.taipy.core.data.data_node import DataNode from src.taipy.core.data.in_memory import InMemoryDataNode from taipy.config.common.scope import Scope class FakeDataNode(InMemoryDataNode): read_has_been_called = 0 write_has_been_called = 0 def __init__(self, config_id, **kwargs): scope = kwargs.pop(\"scope\", Scope.SCENARIO) super().__init__(config_id=config_id, scope=scope, **kwargs) def _read(self, query=None): self.read_has_been_called += 1 def _write(self, data): self.write_has_been_called += 1 @classmethod def storage_type(cls) -> str: return \"fake_inmemory\" write = DataNode.write # Make sure that the writing behavior comes from DataNode class FakeDataframeDataNode(DataNode): COLUMN_NAME_1 = \"a\" COLUMN_NAME_2 = \"b\" def __init__(self, config_id, default_data_frame, **kwargs): super().__init__(config_id, **kwargs) self.data = default_data_frame def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_df_dn\" class FakeNumpyarrayDataNode(DataNode): def __init__(self, config_id, default_array, **kwargs): super().__init__(config_id, **kwargs) self.data = default_array def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_np_dn\" class FakeListDataNode(DataNode): class Row: def __init__(self, value): self.value = value def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = [self.Row(i) for i in range(10)] def _read(self): return self.data @classmethod def storage_type(cls) -> str: return \"fake_list_dn\" class CustomClass: def __init__(self, a, b): self.a = a self.b = b class FakeCustomDataNode(DataNode): def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = [CustomClass(i, i * 2) for i in range(10)] def _read(self): return self.data class FakeMultiSheetExcelDataFrameDataNode(DataNode): def __init__(self, config_id, default_data_frame, **kwargs): super().__init__(config_id, **kwargs) self.data = { \"Sheet1\": default_data_frame, \"Sheet2\": default_data_frame, } def _read(self): return self.data class FakeMultiSheetExcelCustomDataNode(DataNode): def __init__(self, config_id, **kwargs): super().__init__(config_id, **kwargs) self.data = { \"Sheet1\": [CustomClass(i, i * 2) for i in range(10)], \"Sheet2\": [CustomClass(i, i * 2) for i in range(10)], } def _read(self): return self.data "} {"text": "import os import pytest from src.taipy.core.data._data_fs_repository import _DataFSRepository from src.taipy.core.data._data_sql_repository import _DataSQLRepository from src.taipy.core.data.data_node import DataNode, DataNodeId from src.taipy.core.exceptions import ModelNotFound class TestDataNodeRepository: @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_save_and_load(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) obj = repository._load(data_node.id) assert isinstance(obj, DataNode) @pytest.mark.parametrize(\"repo\", 
[_DataFSRepository, _DataSQLRepository]) def test_exists(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) assert repository._exists(data_node.id) assert not repository._exists(\"not-existed-data-node\") @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_load_all(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) data_nodes = repository._load_all() assert len(data_nodes) == 10 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_load_all_with_filters(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node.owner_id = f\"task-{i}\" repository._save(data_node) objs = repository._load_all(filters=[{\"owner_id\": \"task-2\"}]) assert len(objs) == 1 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete(self, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) repository._delete(data_node.id) with pytest.raises(ModelNotFound): repository._load(data_node.id) @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_all(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) assert len(repository._load_all()) == 10 repository._delete_all() assert len(repository._load_all()) == 0 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_many(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") repository._save(data_node) objs = repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] repository._delete_many(ids) assert len(repository._load_all()) == 7 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_delete_by(self, data_node, repo, init_sql_repo): repository = repo() # Create 5 entities with version 1.0 and 5 entities with version 2.0 for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node._version = f\"{(i+1) // 5}.0\" repository._save(data_node) objs = repository._load_all() assert len(objs) == 10 repository._delete_by(\"version\", \"1.0\") assert len(repository._load_all()) == 5 @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_search(self, data_node, repo, init_sql_repo): repository = repo() for i in range(10): data_node.id = DataNodeId(f\"data_node-{i}\") data_node.owner_id = f\"task-{i}\" repository._save(data_node) assert len(repository._load_all()) == 10 objs = repository._search(\"owner_id\", \"task-2\") assert len(objs) == 1 assert isinstance(objs[0], DataNode) objs = repository._search(\"owner_id\", \"task-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], DataNode) assert repository._search(\"owner_id\", \"task-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"repo\", [_DataFSRepository, _DataSQLRepository]) def test_export(self, tmpdir, data_node, repo, init_sql_repo): repository = repo() repository._save(data_node) repository._export(data_node.id, tmpdir.strpath) dir_path = repository.dir_path if repo == _DataFSRepository else os.path.join(tmpdir.strpath, \"data_node\") assert 
os.path.exists(os.path.join(dir_path, f\"{data_node.id}.json\")) "} {"text": "import pytest from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import NoData from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId class TestInMemoryDataNodeEntity: def test_create(self): dn = InMemoryDataNode( \"foobar_bazy\", Scope.SCENARIO, DataNodeId(\"id_uio\"), \"owner_id\", properties={\"default_data\": \"In memory Data Node\", \"name\": \"my name\"}, ) assert isinstance(dn, InMemoryDataNode) assert dn.storage_type() == \"in_memory\" assert dn.config_id == \"foobar_bazy\" assert dn.scope == Scope.SCENARIO assert dn.id == \"id_uio\" assert dn.name == \"my name\" assert dn.owner_id == \"owner_id\" assert dn.last_edit_date is not None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.read() == \"In memory Data Node\" dn_2 = InMemoryDataNode(\"foo\", Scope.SCENARIO) assert dn_2.last_edit_date is None assert not dn_2.is_ready_for_reading with pytest.raises(InvalidConfigurationId): InMemoryDataNode(\"foo bar\", Scope.SCENARIO, DataNodeId(\"dn_id\")) def test_get_user_properties(self): dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": 1, \"foo\": \"bar\"}) assert dn._get_user_properties() == {\"foo\": \"bar\"} def test_read_and_write(self): no_data_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\")) with pytest.raises(NoData): assert no_data_dn.read() is None no_data_dn.read_or_raise() in_mem_dn = InMemoryDataNode(\"foo\", Scope.SCENARIO, properties={\"default_data\": \"bar\"}) assert isinstance(in_mem_dn.read(), str) assert in_mem_dn.read() == \"bar\" in_mem_dn.properties[\"default_data\"] = \"baz\" # this modifies the default data value but not the data itself assert in_mem_dn.read() == \"bar\" in_mem_dn.write(\"qux\") assert in_mem_dn.read() == \"qux\" in_mem_dn.write(1998) assert isinstance(in_mem_dn.read(), int) assert in_mem_dn.read() == 1998 "} {"text": "import os import pathlib from datetime import datetime from time import sleep from typing import Dict import modin.pandas as modin_pd import numpy as np import pandas as pd import pytest from modin.pandas.test.utils import df_equals from pandas.testing import assert_frame_equal from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.excel import ExcelDataNode from src.taipy.core.data.operator import JoinOperator, Operator from src.taipy.core.exceptions.exceptions import ( ExposedTypeLengthMismatch, InvalidExposedType, NoData, NonExistingExcelSheet, SheetNameLengthMismatch, ) from taipy.config.common.scope import Scope from taipy.config.config import Config @pytest.fixture(scope=\"function\", autouse=True) def cleanup(): yield path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.xlsx\") if os.path.exists(path): os.remove(path) class MyCustomObject: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class MyCustomObject1: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class MyCustomObject2: def __init__(self, id, integer, text): self.id = id self.integer = integer self.text = text class TestExcelDataNode: def test_new_excel_data_node_with_existing_file_is_ready_for_reading(self): not_ready_dn_cfg = 
Config.configure_data_node(\"not_ready_data_node_config_id\", \"excel\", path=\"NOT_EXISTING.csv\") path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") ready_dn_cfg = Config.configure_data_node(\"ready_data_node_config_id\", \"excel\", path=path) dns = _DataManager._bulk_get_or_create([not_ready_dn_cfg, ready_dn_cfg]) assert not dns[not_ready_dn_cfg].is_ready_for_reading assert dns[ready_dn_cfg].is_ready_for_reading def test_create(self): path = \"data/node/path\" sheet_names = [\"sheet_name_1\", \"sheet_name_2\"] dn = ExcelDataNode( \"foo_bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": sheet_names, \"name\": \"super name\"}, ) assert isinstance(dn, ExcelDataNode) assert dn.storage_type() == \"excel\" assert dn.config_id == \"foo_bar\" assert dn.name == \"super name\" assert dn.scope == Scope.SCENARIO assert dn.id is not None assert dn.owner_id is None assert dn.parent_ids == set() assert dn.last_edit_date is None assert dn.job_ids == [] assert not dn.is_ready_for_reading assert dn.path == path assert dn.has_header is False assert dn.sheet_name == sheet_names def test_get_user_properties(self, excel_file): dn_1 = ExcelDataNode(\"dn_1\", Scope.SCENARIO, properties={\"path\": \"data/node/path\"}) assert dn_1._get_user_properties() == {} dn_2 = ExcelDataNode( \"dn_2\", Scope.SCENARIO, properties={ \"exposed_type\": \"numpy\", \"default_data\": \"foo\", \"default_path\": excel_file, \"has_header\": False, \"sheet_name\": [\"sheet_name_1\", \"sheet_name_2\"], \"foo\": \"bar\", }, ) # exposed_type, default_data, default_path, path, has_header are filtered out assert dn_2._get_user_properties() == {\"foo\": \"bar\"} def test_read_with_header(self): with pytest.raises(NoData): not_existing_excel = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.xlsx\"}) assert not_existing_excel.read() is None not_existing_excel.read_or_raise() empty_excel_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/empty.xlsx\") empty_excel = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": empty_excel_path, \"exposed_type\": MyCustomObject, \"has_header\": True}, ) assert len(empty_excel.read()) == 0 path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") # Create ExcelDataNode without exposed_type (Default is pandas.DataFrame) excel_data_node_as_pandas = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": \"Sheet1\"} ) data_pandas = excel_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 5 assert np.array_equal(data_pandas.to_numpy(), pd.read_excel(path).to_numpy()) # Create ExcelDataNode with modin exposed_type excel_data_node_as_modin = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"modin\"} ) data_modin = excel_data_node_as_modin.read() assert isinstance(data_modin, modin_pd.DataFrame) assert len(data_modin) == 5 assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path).to_numpy()) # Create ExcelDataNode with numpy exposed_type excel_data_node_as_numpy = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"numpy\", \"sheet_name\": \"Sheet1\"} ) data_numpy = excel_data_node_as_numpy.read() assert isinstance(data_numpy, np.ndarray) assert len(data_numpy) == 5 assert np.array_equal(data_numpy, pd.read_excel(path).to_numpy()) # Create the same ExcelDataNode but 
with custom exposed_type non_existing_sheet_name_custom = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": \"abc\", \"exposed_type\": MyCustomObject}, ) with pytest.raises(NonExistingExcelSheet): non_existing_sheet_name_custom.read() excel_data_node_as_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": MyCustomObject, \"sheet_name\": \"Sheet1\"}, ) data_custom = excel_data_node_as_custom_object.read() assert isinstance(data_custom, list) assert len(data_custom) == 5 for (_, row_pandas), row_custom in zip(data_pandas.iterrows(), data_custom): assert isinstance(row_custom, MyCustomObject) assert row_pandas[\"id\"] == row_custom.id assert row_pandas[\"integer\"] == row_custom.integer assert row_pandas[\"text\"] == row_custom.text def test_read_without_header(self): not_existing_excel = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.xlsx\", \"has_header\": False} ) with pytest.raises(NoData): assert not_existing_excel.read() is None not_existing_excel.read_or_raise() path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") # Create ExcelDataNode without exposed_type (Default is pandas.DataFrame) excel_data_node_as_pandas = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": \"Sheet1\"} ) data_pandas = excel_data_node_as_pandas.read() assert isinstance(data_pandas, pd.DataFrame) assert len(data_pandas) == 6 assert np.array_equal(data_pandas.to_numpy(), pd.read_excel(path, header=None).to_numpy()) # Create ExcelDataNode with modin exposed_type excel_data_node_as_modin = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"modin\"}, ) data_modin = excel_data_node_as_modin.read() assert isinstance(data_modin, modin_pd.DataFrame) assert len(data_modin) == 6 assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path, header=None).to_numpy()) # Create ExcelDataNode with numpy exposed_type excel_data_node_as_numpy = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": \"numpy\", \"sheet_name\": \"Sheet1\"}, ) data_numpy = excel_data_node_as_numpy.read() assert isinstance(data_numpy, np.ndarray) assert len(data_numpy) == 6 assert np.array_equal(data_numpy, pd.read_excel(path, header=None).to_numpy()) # Create the same ExcelDataNode but with custom exposed_type non_existing_sheet_name_custom = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": \"abc\", \"exposed_type\": MyCustomObject}, ) with pytest.raises(NonExistingExcelSheet): non_existing_sheet_name_custom.read() excel_data_node_as_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"has_header\": False, \"exposed_type\": MyCustomObject, \"sheet_name\": \"Sheet1\", }, ) data_custom = excel_data_node_as_custom_object.read() assert isinstance(data_custom, list) assert len(data_custom) == 6 for (_, row_pandas), row_custom in zip(data_pandas.iterrows(), data_custom): assert isinstance(row_custom, MyCustomObject) assert row_pandas[0] == row_custom.id assert row_pandas[1] == row_custom.integer assert row_pandas[2] == row_custom.text @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], 
[\"e\", \"f\", \"g\"]), ], ) def test_write(self, excel_file, default_data_frame, content, columns): excel_dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\"}) assert np.array_equal(excel_dn.read().values, default_data_frame.values) if not columns: excel_dn.write(content) df = pd.DataFrame(content) else: excel_dn.write_with_column_names(content, columns) df = pd.DataFrame(content, columns=columns) assert np.array_equal(excel_dn.read().values, df.values) excel_dn.write(None) assert len(excel_dn.read()) == 0 @pytest.mark.parametrize( \"content,sheet_name\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], \"sheet_name\"), ([[11, 22, 33], [44, 55, 66]], [\"sheet_name\"]), ], ) def test_write_with_sheet_name(self, excel_file_with_sheet_name, default_data_frame, content, sheet_name): excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_sheet_name, \"sheet_name\": sheet_name} ) df = pd.DataFrame(content) if isinstance(sheet_name, str): assert np.array_equal(excel_dn.read().values, default_data_frame.values) else: assert np.array_equal(excel_dn.read()[\"sheet_name\"].values, default_data_frame.values) excel_dn.write(content) if isinstance(sheet_name, str): assert np.array_equal(excel_dn.read().values, df.values) else: assert np.array_equal(excel_dn.read()[\"sheet_name\"].values, df.values) sheet_names = pd.ExcelFile(excel_file_with_sheet_name).sheet_names expected_sheet_name = sheet_name[0] if isinstance(sheet_name, list) else sheet_name assert sheet_names[0] == expected_sheet_name excel_dn.write(None) if isinstance(sheet_name, str): assert len(excel_dn.read()) == 0 else: assert len(excel_dn.read()) == 1 @pytest.mark.parametrize( \"content,sheet_name\", [ ([[11, 22, 33], [44, 55, 66]], [\"sheet_name_1\", \"sheet_name_2\"]), ], ) def test_raise_write_with_sheet_name_length_mismatch( self, excel_file_with_sheet_name, default_data_frame, content, sheet_name ): excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_sheet_name, \"sheet_name\": sheet_name} ) with pytest.raises(SheetNameLengthMismatch): excel_dn.write(content) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), ], ) def test_write_without_sheet_name(self, excel_file_with_sheet_name, default_data_frame, content): excel_dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_sheet_name}) default_data_frame = {\"sheet_name\": default_data_frame} df = {\"Sheet1\": pd.DataFrame(content)} assert np.array_equal(excel_dn.read()[\"sheet_name\"].values, default_data_frame[\"sheet_name\"].values) excel_dn.write(content) assert np.array_equal(excel_dn.read()[\"Sheet1\"].values, df[\"Sheet1\"].values) sheet_names = pd.ExcelFile(excel_file_with_sheet_name).sheet_names expected_sheet_name = \"Sheet1\" assert sheet_names[0] == expected_sheet_name excel_dn.write(None) assert len(excel_dn.read()) == 1 @pytest.mark.parametrize( \"content,columns,sheet_name\", [ ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"], \"sheet_name\"), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"], [\"sheet_name\"]), ], ) def test_write_with_column_and_sheet_name( self, excel_file_with_sheet_name, default_data_frame, content, columns, sheet_name ): excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_sheet_name, \"sheet_name\": sheet_name} ) df = pd.DataFrame(content) if isinstance(sheet_name, str): 
assert np.array_equal(excel_dn.read().values, default_data_frame.values) else: assert np.array_equal(excel_dn.read()[\"sheet_name\"].values, default_data_frame.values) excel_dn.write_with_column_names(content, columns) if isinstance(sheet_name, str): assert np.array_equal(excel_dn.read().values, df.values) else: assert np.array_equal(excel_dn.read()[\"sheet_name\"].values, df.values) sheet_names = pd.ExcelFile(excel_file_with_sheet_name).sheet_names expected_sheet_name = sheet_name[0] if isinstance(sheet_name, list) else sheet_name assert sheet_names[0] == expected_sheet_name excel_dn.write(None) if isinstance(sheet_name, str): assert len(excel_dn.read()) == 0 else: assert len(excel_dn.read()) == 1 @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"]), ], ) def test_write_modin(self, excel_file, default_data_frame, content, columns): excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"modin\"} ) assert np.array_equal(excel_dn.read().values, default_data_frame.values) if not columns: excel_dn.write(content) df = modin_pd.DataFrame(content) else: excel_dn.write_with_column_names(content, columns) df = modin_pd.DataFrame(content, columns=columns) assert np.array_equal(excel_dn.read().values, df.values) excel_dn.write(None) assert len(excel_dn.read()) == 0 def test_read_multi_sheet_with_header(self): not_existing_excel = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.xlsx\", \"sheet_name\": [\"sheet_name_1\", \"sheet_name_2\"]}, ) with pytest.raises(NoData): assert not_existing_excel.read() is None not_existing_excel.read_or_raise() path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") sheet_names = [\"Sheet1\", \"Sheet2\"] # Create ExcelDataNode without exposed_type (Default is pandas.DataFrame) excel_data_node_as_pandas = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names} ) data_pandas = excel_data_node_as_pandas.read() assert isinstance(data_pandas, Dict) assert len(data_pandas) == 2 assert all( len(data_pandas[sheet_name] == 5) and isinstance(data_pandas[sheet_name], pd.DataFrame) for sheet_name in sheet_names ) assert list(data_pandas.keys()) == sheet_names for sheet_name in sheet_names: assert data_pandas[sheet_name].equals(pd.read_excel(path, sheet_name=sheet_name)) excel_data_node_as_pandas_no_sheet_name = ExcelDataNode(\"bar\", Scope.SCENARIO, properties={\"path\": path}) data_pandas_no_sheet_name = excel_data_node_as_pandas_no_sheet_name.read() assert isinstance(data_pandas_no_sheet_name, Dict) for key in data_pandas_no_sheet_name.keys(): assert isinstance(data_pandas_no_sheet_name[key], pd.DataFrame) assert data_pandas[key].equals(data_pandas_no_sheet_name[key]) # Create ExcelDataNode with modin exposed_type excel_data_node_as_modin = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": \"modin\"} ) data_modin = excel_data_node_as_modin.read() assert isinstance(data_modin, Dict) assert len(data_modin) == 2 assert all( len(data_modin[sheet_name] == 5) and isinstance(data_modin[sheet_name], modin_pd.DataFrame) for sheet_name in sheet_names ) assert list(data_modin.keys()) == sheet_names for sheet_name in sheet_names: assert 
data_modin[sheet_name].equals(modin_pd.read_excel(path, sheet_name=sheet_name)) excel_data_node_as_pandas_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"modin\"} ) data_modin_no_sheet_name = excel_data_node_as_pandas_no_sheet_name.read() assert isinstance(data_modin_no_sheet_name, Dict) for key in data_modin_no_sheet_name.keys(): assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame) assert data_modin[key].equals(data_modin_no_sheet_name[key]) # Create ExcelDataNode with numpy exposed_type excel_data_node_as_numpy = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": \"numpy\"}, ) data_numpy = excel_data_node_as_numpy.read() assert isinstance(data_numpy, Dict) assert len(data_numpy) == 2 assert all( len(data_numpy[sheet_name] == 5) and isinstance(data_numpy[sheet_name], np.ndarray) for sheet_name in sheet_names ) assert list(data_numpy.keys()) == sheet_names for sheet_name in sheet_names: assert np.array_equal(data_pandas[sheet_name], pd.read_excel(path, sheet_name=sheet_name).to_numpy()) excel_data_node_as_numpy_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": \"numpy\"}, ) data_numpy_no_sheet_name = excel_data_node_as_numpy_no_sheet_name.read() assert isinstance(data_numpy_no_sheet_name, Dict) for key in data_numpy_no_sheet_name.keys(): assert isinstance(data_numpy_no_sheet_name[key], np.ndarray) assert np.array_equal(data_numpy[key], data_numpy_no_sheet_name[key]) # Create the same ExcelDataNode but with custom exposed_type non_existing_sheet_name_custom = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"sheet_name\": [\"Sheet1\", \"xyz\"], \"exposed_type\": MyCustomObject1, }, ) with pytest.raises(NonExistingExcelSheet): non_existing_sheet_name_custom.read() excel_data_node_as_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": MyCustomObject1}, ) data_custom = excel_data_node_as_custom_object.read() assert isinstance(data_custom, Dict) assert len(data_custom) == 2 assert all(len(data_custom[sheet_name]) == 5 for sheet_name in sheet_names) assert list(data_custom.keys()) == sheet_names for sheet_name in sheet_names: sheet_data_pandas, sheet_data_custom = data_pandas[sheet_name], data_custom[sheet_name] for (_, row_pandas), row_custom in zip(sheet_data_pandas.iterrows(), sheet_data_custom): assert isinstance(row_custom, MyCustomObject1) assert row_pandas[\"id\"] == row_custom.id assert row_pandas[\"integer\"] == row_custom.integer assert row_pandas[\"text\"] == row_custom.text excel_data_node_as_custom_object_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": MyCustomObject1}, ) data_custom_no_sheet_name = excel_data_node_as_custom_object_no_sheet_name.read() assert isinstance(data_custom_no_sheet_name, Dict) assert len(data_custom_no_sheet_name) == 2 assert data_custom.keys() == data_custom_no_sheet_name.keys() for sheet_name in sheet_names: sheet_data_custom_no_sheet_name, sheet_data_custom = ( data_custom_no_sheet_name[sheet_name], data_custom[sheet_name], ) for row_custom_no_sheet_name, row_custom in zip(sheet_data_custom_no_sheet_name, sheet_data_custom): assert isinstance(row_custom_no_sheet_name, MyCustomObject1) assert row_custom_no_sheet_name.id == row_custom.id assert row_custom_no_sheet_name.integer == row_custom.integer assert 
row_custom_no_sheet_name.text == row_custom.text with pytest.raises(ExposedTypeLengthMismatch): dn = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"sheet_name\": [\"Sheet1\"], \"exposed_type\": [MyCustomObject1, MyCustomObject2], }, ) dn.read() custom_class_dict = {\"Sheet1\": MyCustomObject1, \"Sheet2\": MyCustomObject2} excel_data_node_as_multi_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": custom_class_dict}, ) assert excel_data_node_as_multi_custom_object.properties[\"exposed_type\"] == custom_class_dict excel_data_node_as_multi_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": [MyCustomObject1, MyCustomObject2]}, ) assert excel_data_node_as_multi_custom_object.properties[\"exposed_type\"] == [MyCustomObject1, MyCustomObject2] multi_data_custom = excel_data_node_as_multi_custom_object.read() assert isinstance(multi_data_custom, Dict) assert len(multi_data_custom) == 2 assert all(len(multi_data_custom[sheet_name]) == 5 for sheet_name in sheet_names) assert list(multi_data_custom.keys()) == sheet_names for sheet_name, custom_class in custom_class_dict.items(): sheet_data_pandas, sheet_data_custom = data_pandas[sheet_name], multi_data_custom[sheet_name] for (_, row_pandas), row_custom in zip(sheet_data_pandas.iterrows(), sheet_data_custom): assert isinstance(row_custom, custom_class) assert row_pandas[\"id\"] == row_custom.id assert row_pandas[\"integer\"] == row_custom.integer assert row_pandas[\"text\"] == row_custom.text excel_data_node_as_multi_custom_object_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"exposed_type\": custom_class_dict}, ) assert excel_data_node_as_multi_custom_object_no_sheet_name.properties[\"exposed_type\"] == custom_class_dict multi_data_custom_no_sheet_name = excel_data_node_as_multi_custom_object_no_sheet_name.read() assert isinstance(multi_data_custom_no_sheet_name, Dict) assert len(multi_data_custom_no_sheet_name) == 2 assert multi_data_custom.keys() == multi_data_custom_no_sheet_name.keys() for sheet_name, custom_class in custom_class_dict.items(): sheet_data_custom_no_sheet_name, sheet_data_custom = ( multi_data_custom_no_sheet_name[sheet_name], multi_data_custom[sheet_name], ) for row_custom_no_sheet_name, row_custom in zip(sheet_data_custom_no_sheet_name, sheet_data_custom): assert isinstance(row_custom_no_sheet_name, custom_class) assert row_custom_no_sheet_name.id == row_custom.id assert row_custom_no_sheet_name.integer == row_custom.integer assert row_custom_no_sheet_name.text == row_custom.text def test_read_multi_sheet_without_header(self): not_existing_excel = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": \"WRONG.xlsx\", \"has_header\": False, \"sheet_name\": [\"sheet_name_1\", \"sheet_name_2\"]}, ) with pytest.raises(NoData): assert not_existing_excel.read() is None not_existing_excel.read_or_raise() path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") sheet_names = [\"Sheet1\", \"Sheet2\"] # Create ExcelDataNode without exposed_type (Default is pandas.DataFrame) excel_data_node_as_pandas = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": sheet_names} ) data_pandas = excel_data_node_as_pandas.read() assert isinstance(data_pandas, Dict) assert len(data_pandas) == 2 assert all(len(data_pandas[sheet_name]) == 6 for 
sheet_name in sheet_names) assert list(data_pandas.keys()) == sheet_names for sheet_name in sheet_names: assert isinstance(data_pandas[sheet_name], pd.DataFrame) assert data_pandas[sheet_name].equals(pd.read_excel(path, header=None, sheet_name=sheet_name)) excel_data_node_as_pandas_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False} ) data_pandas_no_sheet_name = excel_data_node_as_pandas_no_sheet_name.read() assert isinstance(data_pandas_no_sheet_name, Dict) for key in data_pandas_no_sheet_name.keys(): assert isinstance(data_pandas_no_sheet_name[key], pd.DataFrame) assert data_pandas[key].equals(data_pandas_no_sheet_name[key]) # Create ExcelDataNode with modin exposed_type excel_data_node_as_modin = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": sheet_names, \"exposed_type\": \"modin\"}, ) data_modin = excel_data_node_as_modin.read() assert isinstance(data_modin, Dict) assert len(data_modin) == 2 assert all(len(data_modin[sheet_name]) == 6 for sheet_name in sheet_names) assert list(data_modin.keys()) == sheet_names for sheet_name in sheet_names: assert isinstance(data_modin[sheet_name], modin_pd.DataFrame) assert data_modin[sheet_name].equals(pd.read_excel(path, header=None, sheet_name=sheet_name)) excel_data_node_as_modin_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": \"modin\"} ) data_modin_no_sheet_name = excel_data_node_as_modin_no_sheet_name.read() assert isinstance(data_modin_no_sheet_name, Dict) for key in data_modin_no_sheet_name.keys(): assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame) assert data_modin[key].equals(data_modin_no_sheet_name[key]) # Create ExcelDataNode with numpy exposed_type excel_data_node_as_numpy = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"sheet_name\": sheet_names, \"exposed_type\": \"numpy\"}, ) data_numpy = excel_data_node_as_numpy.read() assert isinstance(data_numpy, Dict) assert len(data_numpy) == 2 assert all( len(data_numpy[sheet_name] == 6) and isinstance(data_numpy[sheet_name], np.ndarray) for sheet_name in sheet_names ) assert list(data_numpy.keys()) == sheet_names for sheet_name in sheet_names: assert np.array_equal( data_pandas[sheet_name], pd.read_excel(path, header=None, sheet_name=sheet_name).to_numpy() ) excel_data_node_as_numpy_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": \"numpy\"}, ) data_numpy_no_sheet_name = excel_data_node_as_numpy_no_sheet_name.read() assert isinstance(data_numpy_no_sheet_name, Dict) for key in data_numpy_no_sheet_name.keys(): assert isinstance(data_numpy_no_sheet_name[key], np.ndarray) assert np.array_equal(data_numpy[key], data_numpy_no_sheet_name[key]) # Create the same ExcelDataNode but with custom exposed_type non_existing_sheet_name_custom = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"has_header\": False, \"sheet_name\": [\"Sheet1\", \"xyz\"], \"exposed_type\": MyCustomObject1, }, ) with pytest.raises(NonExistingExcelSheet): non_existing_sheet_name_custom.read() excel_data_node_as_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"has_header\": False, \"sheet_name\": sheet_names, \"exposed_type\": MyCustomObject1, }, ) data_custom = excel_data_node_as_custom_object.read() assert 
excel_data_node_as_custom_object.exposed_type == MyCustomObject1 assert isinstance(data_custom, Dict) assert len(data_custom) == 2 assert all(len(data_custom[sheet_name]) == 6 for sheet_name in sheet_names) assert list(data_custom.keys()) == sheet_names for sheet_name in sheet_names: sheet_data_pandas, sheet_data_custom = data_pandas[sheet_name], data_custom[sheet_name] for (_, row_pandas), row_custom in zip(sheet_data_pandas.iterrows(), sheet_data_custom): assert isinstance(row_custom, MyCustomObject1) assert row_pandas[0] == row_custom.id assert row_pandas[1] == row_custom.integer assert row_pandas[2] == row_custom.text excel_data_node_as_custom_object_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": MyCustomObject1}, ) data_custom_no_sheet_name = excel_data_node_as_custom_object_no_sheet_name.read() assert isinstance(data_custom_no_sheet_name, Dict) assert len(data_custom_no_sheet_name) == 2 assert data_custom.keys() == data_custom_no_sheet_name.keys() for sheet_name in sheet_names: sheet_data_custom_no_sheet_name, sheet_data_custom = ( data_custom_no_sheet_name[sheet_name], data_custom[sheet_name], ) for row_custom_no_sheet_name, row_custom in zip(sheet_data_custom_no_sheet_name, sheet_data_custom): assert isinstance(row_custom_no_sheet_name, MyCustomObject1) assert row_custom_no_sheet_name.id == row_custom.id assert row_custom_no_sheet_name.integer == row_custom.integer assert row_custom_no_sheet_name.text == row_custom.text with pytest.raises(ExposedTypeLengthMismatch): dn = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"sheet_name\": [\"Sheet1\"], \"exposed_type\": [MyCustomObject1, MyCustomObject2], \"has_header\": False, }, ) dn.read() custom_class_dict = {\"Sheet1\": MyCustomObject1, \"Sheet2\": MyCustomObject2} excel_data_node_as_multi_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": custom_class_dict, \"has_header\": False, }, ) assert excel_data_node_as_multi_custom_object.properties[\"exposed_type\"] == custom_class_dict excel_data_node_as_multi_custom_object = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={ \"path\": path, \"sheet_name\": sheet_names, \"exposed_type\": [MyCustomObject1, MyCustomObject2], \"has_header\": False, }, ) assert excel_data_node_as_multi_custom_object.properties[\"exposed_type\"] == [MyCustomObject1, MyCustomObject2] multi_data_custom = excel_data_node_as_multi_custom_object.read() assert isinstance(multi_data_custom, Dict) assert len(multi_data_custom) == 2 assert all(len(multi_data_custom[sheet_name]) == 6 for sheet_name in sheet_names) assert list(multi_data_custom.keys()) == sheet_names for sheet_name, custom_class in custom_class_dict.items(): sheet_data_pandas, sheet_data_custom = data_pandas[sheet_name], multi_data_custom[sheet_name] for (_, row_pandas), row_custom in zip(sheet_data_pandas.iterrows(), sheet_data_custom): assert isinstance(row_custom, custom_class) assert row_pandas[0] == row_custom.id assert row_pandas[1] == row_custom.integer assert row_pandas[2] == row_custom.text excel_data_node_as_multi_custom_object_no_sheet_name = ExcelDataNode( \"bar\", Scope.SCENARIO, properties={\"path\": path, \"has_header\": False, \"exposed_type\": custom_class_dict}, ) multi_data_custom_no_sheet_name = excel_data_node_as_multi_custom_object_no_sheet_name.read() assert isinstance(multi_data_custom_no_sheet_name, Dict) assert len(multi_data_custom_no_sheet_name) 
== 2 assert multi_data_custom.keys() == multi_data_custom_no_sheet_name.keys() for sheet_name, custom_class in custom_class_dict.items(): sheet_data_custom_no_sheet_name, sheet_data_custom = ( multi_data_custom_no_sheet_name[sheet_name], multi_data_custom[sheet_name], ) for row_custom_no_sheet_name, row_custom in zip(sheet_data_custom_no_sheet_name, sheet_data_custom): assert isinstance(row_custom_no_sheet_name, custom_class) assert row_custom_no_sheet_name.id == row_custom.id assert row_custom_no_sheet_name.integer == row_custom.integer assert row_custom_no_sheet_name.text == row_custom.text @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"]), ], ) def test_write_multi_sheet(self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content, columns): sheet_names = [\"Sheet1\", \"Sheet2\"] excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_multi_sheet, \"sheet_name\": sheet_names}, ) for sheet_name in sheet_names: assert np.array_equal(excel_dn.read()[sheet_name].values, default_multi_sheet_data_frame[sheet_name].values) multi_sheet_content = {sheet_name: pd.DataFrame(content) for sheet_name in sheet_names} excel_dn.write(multi_sheet_content) for sheet_name in sheet_names: assert np.array_equal(excel_dn.read()[sheet_name].values, multi_sheet_content[sheet_name].values) def test_write_multi_sheet_numpy(self, excel_file_with_multi_sheet): sheet_names = [\"Sheet1\", \"Sheet2\"] excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_multi_sheet, \"sheet_name\": sheet_names, \"exposed_type\": \"numpy\"}, ) sheets_data = [[11, 22, 33], [44, 55, 66]] data = { sheet_name: pd.DataFrame(sheet_data).to_numpy() for sheet_name, sheet_data in zip(sheet_names, sheets_data) } excel_dn.write(data) read_data = excel_dn.read() assert all(np.array_equal(data[sheet_name], read_data[sheet_name]) for sheet_name in sheet_names) @pytest.mark.parametrize( \"content,columns\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}], None), ([[11, 22, 33], [44, 55, 66]], None), ([[11, 22, 33], [44, 55, 66]], [\"e\", \"f\", \"g\"]), ], ) def test_write_multi_sheet_with_modin( self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content, columns ): sheet_names = [\"Sheet1\", \"Sheet2\"] excel_dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_multi_sheet, \"sheet_name\": sheet_names, \"exposed_type\": \"modin\"}, ) for sheet_name in sheet_names: assert np.array_equal(excel_dn.read()[sheet_name].values, default_multi_sheet_data_frame[sheet_name].values) multi_sheet_content = {sheet_name: modin_pd.DataFrame(content) for sheet_name in sheet_names} excel_dn.write(multi_sheet_content) for sheet_name in sheet_names: assert np.array_equal(excel_dn.read()[sheet_name].values, multi_sheet_content[sheet_name].values) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_pandas_with_sheetname(self, excel_file, default_data_frame, content): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\"}) assert_frame_equal(dn.read(), default_data_frame) dn.append(content) 
assert_frame_equal( dn.read(), pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index(drop=True), ) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_pandas_without_sheetname(self, excel_file, default_data_frame, content): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file}) assert_frame_equal(dn.read()[\"Sheet1\"], default_data_frame) dn.append(content) assert_frame_equal( dn.read()[\"Sheet1\"], pd.concat([default_data_frame, pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index(drop=True), ) @pytest.mark.parametrize( \"content\", [ ( { \"Sheet1\": pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}]), \"Sheet2\": pd.DataFrame([{\"a\": 44, \"b\": 55, \"c\": 66}]), } ), ( { \"Sheet1\": pd.DataFrame({\"a\": [11, 44], \"b\": [22, 55], \"c\": [33, 66]}), \"Sheet2\": pd.DataFrame([{\"a\": 77, \"b\": 88, \"c\": 99}]), } ), ({\"Sheet1\": np.array([[11, 22, 33], [44, 55, 66]]), \"Sheet2\": np.array([[77, 88, 99]])}), ], ) def test_append_pandas_multisheet(self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_multi_sheet, \"sheet_name\": [\"Sheet1\", \"Sheet2\"]} ) assert_frame_equal(dn.read()[\"Sheet1\"], default_multi_sheet_data_frame[\"Sheet1\"]) assert_frame_equal(dn.read()[\"Sheet2\"], default_multi_sheet_data_frame[\"Sheet2\"]) dn.append(content) assert_frame_equal( dn.read()[\"Sheet1\"], pd.concat( [default_multi_sheet_data_frame[\"Sheet1\"], pd.DataFrame(content[\"Sheet1\"], columns=[\"a\", \"b\", \"c\"])] ).reset_index(drop=True), ) assert_frame_equal( dn.read()[\"Sheet2\"], pd.concat( [default_multi_sheet_data_frame[\"Sheet2\"], pd.DataFrame(content[\"Sheet2\"], columns=[\"a\", \"b\", \"c\"])] ).reset_index(drop=True), ) @pytest.mark.parametrize( \"content\", [ ({\"Sheet1\": pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}])}), (pd.DataFrame({\"a\": [11, 44], \"b\": [22, 55], \"c\": [33, 66]})), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_only_first_sheet_of_a_multisheet_file( self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content ): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file_with_multi_sheet, \"sheet_name\": [\"Sheet1\", \"Sheet2\"]} ) assert_frame_equal(dn.read()[\"Sheet1\"], default_multi_sheet_data_frame[\"Sheet1\"]) assert_frame_equal(dn.read()[\"Sheet2\"], default_multi_sheet_data_frame[\"Sheet2\"]) dn.append(content) appended_content = content[\"Sheet1\"] if isinstance(content, dict) else content assert_frame_equal( dn.read()[\"Sheet1\"], pd.concat( [default_multi_sheet_data_frame[\"Sheet1\"], pd.DataFrame(appended_content, columns=[\"a\", \"b\", \"c\"])] ).reset_index(drop=True), ) assert_frame_equal(dn.read()[\"Sheet2\"], default_multi_sheet_data_frame[\"Sheet2\"]) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (modin_pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_modin_with_sheetname(self, excel_file, default_data_frame, content): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"modin\"} ) 
df_equals(dn.read(), modin_pd.DataFrame(default_data_frame)) dn.append(content) df_equals( dn.read(), modin_pd.concat( [modin_pd.DataFrame(default_data_frame), modin_pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])] ).reset_index(drop=True), ) @pytest.mark.parametrize( \"content\", [ ([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}]), (modin_pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}, {\"a\": 44, \"b\": 55, \"c\": 66}])), ([[11, 22, 33], [44, 55, 66]]), ], ) def test_append_modin_without_sheetname(self, excel_file, default_data_frame, content): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"exposed_type\": \"modin\"}) df_equals(dn.read()[\"Sheet1\"], default_data_frame) dn.append(content) df_equals( dn.read()[\"Sheet1\"], modin_pd.concat([default_data_frame, modin_pd.DataFrame(content, columns=[\"a\", \"b\", \"c\"])]).reset_index( drop=True ), ) @pytest.mark.parametrize( \"content\", [ ( { \"Sheet1\": modin_pd.DataFrame([{\"a\": 11, \"b\": 22, \"c\": 33}]), \"Sheet2\": modin_pd.DataFrame([{\"a\": 44, \"b\": 55, \"c\": 66}]), } ), ( { \"Sheet1\": modin_pd.DataFrame({\"a\": [11, 44], \"b\": [22, 55], \"c\": [33, 66]}), \"Sheet2\": modin_pd.DataFrame([{\"a\": 77, \"b\": 88, \"c\": 99}]), } ), ({\"Sheet1\": np.array([[11, 22, 33], [44, 55, 66]]), \"Sheet2\": np.array([[77, 88, 99]])}), ], ) def test_append_modin_multisheet(self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"path\": excel_file_with_multi_sheet, \"sheet_name\": [\"Sheet1\", \"Sheet2\"], \"exposed_type\": \"modin\", }, ) df_equals(dn.read()[\"Sheet1\"], default_multi_sheet_data_frame[\"Sheet1\"]) df_equals(dn.read()[\"Sheet2\"], default_multi_sheet_data_frame[\"Sheet2\"]) dn.append(content) df_equals( dn.read()[\"Sheet1\"], modin_pd.concat( [ default_multi_sheet_data_frame[\"Sheet1\"], modin_pd.DataFrame(content[\"Sheet1\"], columns=[\"a\", \"b\", \"c\"]), ] ).reset_index(drop=True), ) df_equals( dn.read()[\"Sheet2\"], modin_pd.concat( [ default_multi_sheet_data_frame[\"Sheet2\"], modin_pd.DataFrame(content[\"Sheet2\"], columns=[\"a\", \"b\", \"c\"]), ] ).reset_index(drop=True), ) def test_filter_pandas_exposed_type_with_sheetname(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"pandas\"} ) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), 
expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data) assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_pandas_exposed_type_without_sheetname(self, excel_file): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"exposed_type\": \"pandas\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"Sheet1\"]) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"Sheet1\"]) == 4 assert dn[\"Sheet1\"][\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"Sheet1\"][\"bar\"].equals(pd.Series([1, 2, None, 2, 2])) assert dn[\"Sheet1\"][:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) def test_filter_pandas_exposed_type_multisheet(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": [\"sheet_1\", \"sheet_2\"], \"exposed_type\": \"pandas\"}, ) dn.write( { \"sheet_1\": pd.DataFrame( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ), \"sheet_2\": pd.DataFrame( [ {\"foo\": 1, \"bar\": 3}, {\"foo\": 1, \"bar\": 4}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 4}, {\"bar\": 4}, ] ), } ) assert len(dn.filter((\"foo\", 1, Operator.EQUAL))) == 2 assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"sheet_2\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))) == 2 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"sheet_1\"]) == 2 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"sheet_2\"]) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"sheet_2\"]) == 0 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)) == 2 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_1\"]) == 4 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_2\"]) == 0 assert dn[\"sheet_1\"][\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"sheet_2\"][\"foo\"].equals(pd.Series([1, 1, 1, 2, None])) assert dn[\"sheet_1\"][\"bar\"].equals(pd.Series([1, 2, None, 2, 2])) assert dn[\"sheet_2\"][\"bar\"].equals(pd.Series([3, 4, None, 4, 4])) assert 
dn[\"sheet_1\"][:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) assert dn[\"sheet_2\"][:2].equals(pd.DataFrame([{\"foo\": 1.0, \"bar\": 3.0}, {\"foo\": 1.0, \"bar\": 4.0}])) def test_filter_modin_exposed_type_with_sheetname(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"modin\"} ) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) # Test datanode indexing and slicing assert dn[\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"bar\"].equals(modin_pd.Series([1, 2, None, 2, 2])) assert dn[:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) # Test filter data filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] == 1] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 1.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"foo\", 1, Operator.NOT_EQUAL)) filtered_by_indexing = dn[dn[\"foo\"] != 1] expected_data = modin_pd.DataFrame([{\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter((\"bar\", 2, Operator.EQUAL)) filtered_by_indexing = dn[dn[\"bar\"] == 2] expected_data = modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}]) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) filtered_by_filter_method = dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR) filtered_by_indexing = dn[(dn[\"bar\"] == 1) | (dn[\"bar\"] == 2)] expected_data = modin_pd.DataFrame( [ {\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}, {\"foo\": 2.0, \"bar\": 2.0}, {\"bar\": 2.0}, ] ) df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data) df_equals(filtered_by_indexing.reset_index(drop=True), expected_data) def test_filter_modin_exposed_type_without_sheetname(self, excel_file): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"exposed_type\": \"modin\"}) dn.write( [ {\"foo\": 1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ) assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"Sheet1\"]) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"Sheet1\"]) == 4 assert dn[\"Sheet1\"][\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"Sheet1\"][\"bar\"].equals(modin_pd.Series([1, 2, None, 2, 2])) assert dn[\"Sheet1\"][:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) def test_filter_modin_exposed_type_multisheet(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": [\"sheet_1\", \"sheet_2\"], \"exposed_type\": \"modin\"}, ) dn.write( { \"sheet_1\": pd.DataFrame( [ {\"foo\": 
1, \"bar\": 1}, {\"foo\": 1, \"bar\": 2}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 2}, {\"bar\": 2}, ] ), \"sheet_2\": pd.DataFrame( [ {\"foo\": 1, \"bar\": 3}, {\"foo\": 1, \"bar\": 4}, {\"foo\": 1}, {\"foo\": 2, \"bar\": 4}, {\"bar\": 4}, ] ), } ) assert len(dn.filter((\"foo\", 1, Operator.EQUAL))) == 2 assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.EQUAL))[\"sheet_2\"]) == 3 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))) == 2 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"sheet_1\"]) == 2 assert len(dn.filter((\"foo\", 1, Operator.NOT_EQUAL))[\"sheet_2\"]) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))) == 2 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((\"bar\", 2, Operator.EQUAL))[\"sheet_2\"]) == 0 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)) == 2 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_1\"]) == 4 assert len(dn.filter([(\"bar\", 1, Operator.EQUAL), (\"bar\", 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_2\"]) == 0 assert dn[\"sheet_1\"][\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"sheet_2\"][\"foo\"].equals(modin_pd.Series([1, 1, 1, 2, None])) assert dn[\"sheet_1\"][\"bar\"].equals(modin_pd.Series([1, 2, None, 2, 2])) assert dn[\"sheet_2\"][\"bar\"].equals(modin_pd.Series([3, 4, None, 4, 4])) assert dn[\"sheet_1\"][:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 1.0}, {\"foo\": 1.0, \"bar\": 2.0}])) assert dn[\"sheet_2\"][:2].equals(modin_pd.DataFrame([{\"foo\": 1.0, \"bar\": 3.0}, {\"foo\": 1.0, \"bar\": 4.0}])) def test_filter_numpy_exposed_type_with_sheetname(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": \"Sheet1\", \"exposed_type\": \"numpy\"} ) dn.write( [ [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], ] ) # Test datanode indexing and slicing assert np.array_equal(dn[0], np.array([1, 1])) assert np.array_equal(dn[1], np.array([1, 2])) assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]])) # Test filter data assert np.array_equal(dn.filter((0, 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn.filter((0, 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]])) assert np.array_equal(dn.filter((1, 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]])) assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]])) assert np.array_equal( dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR), np.array([[1, 1], [1, 2], [2, 1], [2, 2]]), ) assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]])) def test_filter_numpy_exposed_type_without_sheetname(self, excel_file): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"exposed_type\": \"numpy\"}) dn.write( [ [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], ] ) assert len(dn.filter((0, 1, Operator.EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter((0, 1, Operator.NOT_EQUAL))[\"Sheet1\"]) == 3 assert len(dn.filter((1, 2, Operator.EQUAL))[\"Sheet1\"]) == 2 assert len(dn.filter([(0, 1, 
Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR)[\"Sheet1\"]) == 4 assert np.array_equal(dn[\"Sheet1\"][0], np.array([1, 1])) assert np.array_equal(dn[\"Sheet1\"][1], np.array([1, 2])) assert np.array_equal(dn[\"Sheet1\"][:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[\"Sheet1\"][:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[\"Sheet1\"][1:4, :1], np.array([[1], [1], [2]])) def test_filter_numpy_exposed_type_multisheet(self, excel_file): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"path\": excel_file, \"sheet_name\": [\"sheet_1\", \"sheet_2\"], \"exposed_type\": \"numpy\"}, ) dn.write( { \"sheet_1\": pd.DataFrame( [ [1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], ] ), \"sheet_2\": pd.DataFrame( [ [1, 4], [1, 5], [1, 6], [2, 4], [2, 5], [2, 6], ] ), } ) assert len(dn.filter((0, 1, Operator.EQUAL))) == 2 assert len(dn.filter((0, 1, Operator.EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((0, 1, Operator.EQUAL))[\"sheet_2\"]) == 3 assert len(dn.filter((0, 1, Operator.NOT_EQUAL))) == 2 assert len(dn.filter((0, 1, Operator.NOT_EQUAL))[\"sheet_1\"]) == 3 assert len(dn.filter((0, 1, Operator.NOT_EQUAL))[\"sheet_2\"]) == 3 assert len(dn.filter((1, 2, Operator.EQUAL))) == 2 assert len(dn.filter((1, 2, Operator.EQUAL))[\"sheet_1\"]) == 2 assert len(dn.filter((1, 2, Operator.EQUAL))[\"sheet_2\"]) == 0 assert len(dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR)) == 2 assert len(dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_1\"]) == 4 assert len(dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR)[\"sheet_2\"]) == 0 assert np.array_equal(dn[\"sheet_1\"][0], np.array([1, 1])) assert np.array_equal(dn[\"sheet_2\"][0], np.array([1, 4])) assert np.array_equal(dn[\"sheet_1\"][1], np.array([1, 2])) assert np.array_equal(dn[\"sheet_2\"][1], np.array([1, 5])) assert np.array_equal(dn[\"sheet_1\"][:3], np.array([[1, 1], [1, 2], [1, 3]])) assert np.array_equal(dn[\"sheet_2\"][:3], np.array([[1, 4], [1, 5], [1, 6]])) assert np.array_equal(dn[\"sheet_1\"][:, 0], np.array([1, 1, 1, 2, 2, 2])) assert np.array_equal(dn[\"sheet_2\"][:, 1], np.array([4, 5, 6, 4, 5, 6])) assert np.array_equal(dn[\"sheet_1\"][1:4, :1], np.array([[1], [1], [2]])) assert np.array_equal(dn[\"sheet_2\"][1:4, 1:2], np.array([[5], [6], [4]])) def test_set_path(self): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": \"foo.xlsx\"}) assert dn.path == \"foo.xlsx\" dn.path = \"bar.xlsx\" assert dn.path == \"bar.xlsx\" @pytest.mark.parametrize( [\"properties\", \"exists\"], [ ({}, False), ({\"default_data\": {\"a\": [\"foo\", \"bar\"]}}, True), ], ) def test_create_with_default_data(self, properties, exists): dn = ExcelDataNode(\"foo\", Scope.SCENARIO, DataNodeId(\"dn_id\"), properties=properties) assert os.path.exists(dn.path) is exists def test_read_write_after_modify_path(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/temp.xlsx\") dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path}) read_data = dn.read() assert read_data is not None dn.path = new_path with pytest.raises(FileNotFoundError): dn.read() dn.write(read_data) for sheet, df in dn.read().items(): assert np.array_equal(df.values, read_data[sheet].values) def test_exposed_type_custom_class_after_modify_path(self): path = 
os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") # [\"Sheet1\", \"Sheet2\"] new_path = os.path.join( pathlib.Path(__file__).parent.resolve(), \"data_sample/example_2.xlsx\" ) # [\"Sheet1\", \"Sheet2\", \"Sheet3\"] dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path, \"exposed_type\": MyCustomObject1}) assert dn.exposed_type == MyCustomObject1 dn.read() dn.path = new_path dn.read() dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": path, \"exposed_type\": MyCustomObject1, \"sheet_name\": [\"Sheet4\"]}, ) assert dn.exposed_type == MyCustomObject1 with pytest.raises(NonExistingExcelSheet): dn.read() def test_exposed_type_dict(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") # [\"Sheet1\", \"Sheet2\"] dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": path, \"exposed_type\": { \"Sheet1\": MyCustomObject1, \"Sheet2\": MyCustomObject2, \"Sheet3\": MyCustomObject1, }, }, ) data = dn.read() assert isinstance(data, Dict) assert isinstance(data[\"Sheet1\"][0], MyCustomObject1) assert isinstance(data[\"Sheet2\"][0], MyCustomObject2) def test_exposed_type_list(self): path_1 = os.path.join( pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\" ) # [\"Sheet1\", \"Sheet2\"] path_2 = os.path.join( pathlib.Path(__file__).parent.resolve(), \"data_sample/example_2.xlsx\" ) # [\"Sheet1\", \"Sheet2\", \"Sheet3\"] dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": path_1, \"exposed_type\": [MyCustomObject1, MyCustomObject2]}, ) data = dn.read() assert isinstance(data, Dict) assert isinstance(data[\"Sheet1\"][0], MyCustomObject1) assert isinstance(data[\"Sheet2\"][0], MyCustomObject2) dn.path = path_2 with pytest.raises(ExposedTypeLengthMismatch): dn.read() def test_not_trying_to_read_sheet_names_when_exposed_type_is_set(self): dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": \"notexistyet.xlsx\", \"exposed_type\": MyCustomObject1} ) assert dn.path == \"notexistyet.xlsx\" assert dn.exposed_type == MyCustomObject1 dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": \"notexistyet.xlsx\", \"exposed_type\": [MyCustomObject1, MyCustomObject2]}, ) assert dn.path == \"notexistyet.xlsx\" assert dn.exposed_type == [MyCustomObject1, MyCustomObject2] dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": \"notexistyet.xlsx\", \"exposed_type\": {\"Sheet1\": MyCustomObject1, \"Sheet2\": MyCustomObject2}, }, ) assert dn.path == \"notexistyet.xlsx\" assert dn.exposed_type == {\"Sheet1\": MyCustomObject1, \"Sheet2\": MyCustomObject2} def test_exposed_type_default(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"default_path\": path, \"sheet_name\": \"Sheet1\"}) assert dn.exposed_type == \"pandas\" data = dn.read() assert isinstance(data, pd.DataFrame) def test_pandas_exposed_type(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": path, \"exposed_type\": \"pandas\", \"sheet_name\": \"Sheet1\"} ) assert dn.exposed_type == \"pandas\" data = dn.read() assert isinstance(data, pd.DataFrame) def test_complex_exposed_type_dict(self): # [\"Sheet1\", \"Sheet2\", \"Sheet3\", \"Sheet4\", \"Sheet5\"] path = 
os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example_4.xlsx\") dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": path, \"exposed_type\": { \"Sheet1\": MyCustomObject1, \"Sheet2\": \"numpy\", \"Sheet3\": \"pandas\", }, \"sheet_name\": [\"Sheet1\", \"Sheet2\", \"Sheet3\", \"Sheet4\"], }, ) data = dn.read() assert isinstance(data, dict) assert isinstance(data[\"Sheet1\"], list) assert isinstance(data[\"Sheet1\"][0], MyCustomObject1) assert isinstance(data[\"Sheet2\"], np.ndarray) assert isinstance(data[\"Sheet3\"], pd.DataFrame) assert isinstance(data[\"Sheet4\"], pd.DataFrame) assert data.get(\"Sheet5\") is None def test_complex_exposed_type_list(self): # [\"Sheet1\", \"Sheet2\", \"Sheet3\", \"Sheet4\",\"Sheet5\"] path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example_4.xlsx\") dn = ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": path, \"exposed_type\": [MyCustomObject1, \"numpy\", \"pandas\"], \"sheet_name\": [\"Sheet1\", \"Sheet2\", \"Sheet3\"], }, ) data = dn.read() assert isinstance(data, dict) assert isinstance(data[\"Sheet1\"], list) assert isinstance(data[\"Sheet1\"][0], MyCustomObject1) assert isinstance(data[\"Sheet2\"], np.ndarray) assert isinstance(data[\"Sheet3\"], pd.DataFrame) def test_invalid_exposed_type(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/example.xlsx\") with pytest.raises(InvalidExposedType): ExcelDataNode( \"foo\", Scope.SCENARIO, properties={\"default_path\": path, \"exposed_type\": \"invalid\", \"sheet_name\": \"Sheet1\"}, ) with pytest.raises(InvalidExposedType): ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": path, \"exposed_type\": [\"numpy\", \"invalid\", \"pandas\"], \"sheet_name\": \"Sheet1\", }, ) with pytest.raises(InvalidExposedType): ExcelDataNode( \"foo\", Scope.SCENARIO, properties={ \"default_path\": path, \"exposed_type\": {\"Sheet1\": \"pandas\", \"Sheet2\": \"invalid\"}, \"sheet_name\": \"Sheet1\", }, ) def test_get_system_modified_date_instead_of_last_edit_date(self, tmpdir_factory): temp_file_path = str(tmpdir_factory.mktemp(\"data\").join(\"temp.xlsx\")) pd.DataFrame([]).to_excel(temp_file_path) dn = ExcelDataNode(\"foo\", Scope.SCENARIO, properties={\"path\": temp_file_path, \"exposed_type\": \"pandas\"}) dn.write(pd.DataFrame([1, 2, 3])) previous_edit_date = dn.last_edit_date sleep(0.1) pd.DataFrame([4, 5, 6]).to_excel(temp_file_path) new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path)) assert previous_edit_date < dn.last_edit_date assert new_edit_date == dn.last_edit_date sleep(0.1) dn.write(pd.DataFrame([7, 8, 9])) assert new_edit_date < dn.last_edit_date os.unlink(temp_file_path) "} {"text": "import os import pathlib import pytest from src.taipy.core._version._version_manager import _VersionManager from src.taipy.core.config.data_node_config import DataNodeConfig from src.taipy.core.data._data_manager import _DataManager from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.data.csv import CSVDataNode from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import InvalidDataNodeType, ModelNotFound from taipy.config.common.scope import Scope from taipy.config.config import Config def file_exists(file_path: str) -> bool: return os.path.exists(file_path) def init_managers(): _DataManagerFactory._build_manager()._delete_all() 
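# Editorial note (hypothetical sketch, not part of the original test module): the TestDataManager
# cases below call the init_managers() helper defined just above so the data-node repository is
# wiped before each test and assertions on _DataManager._get_all() counts start from a clean slate.
# A minimal usage pattern, assuming the same imports and the init_sql_repo fixture used throughout
# this module, might look like:
#
#     def test_starts_empty(self, init_sql_repo):
#         init_managers()                                   # clear previously persisted data nodes
#         assert len(_DataManager._get_all()) == 0          # repository is now empty
#         cfg = Config.configure_data_node(id="foo", storage_type="in_memory")
#         _DataManager._create_and_set(cfg, None, None)     # create one data node from the config
#         assert len(_DataManager._get_all()) == 1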
class TestDataManager: def test_create_data_node_and_modify_properties_does_not_modify_config(self, init_sql_repo): init_managers() dn_config = Config.configure_data_node(id=\"name\", foo=\"bar\") dn = _DataManager._create_and_set(dn_config, None, None) assert dn_config.properties.get(\"foo\") == \"bar\" assert dn_config.properties.get(\"baz\") is None dn.properties[\"baz\"] = \"qux\" _DataManager._set(dn) assert dn_config.properties.get(\"foo\") == \"bar\" assert dn_config.properties.get(\"baz\") is None assert dn.properties.get(\"foo\") == \"bar\" assert dn.properties.get(\"baz\") == \"qux\" def test_create_raises_exception_with_wrong_type(self, init_sql_repo): init_managers() wrong_type_dn_config = DataNodeConfig(id=\"foo\", storage_type=\"bar\", scope=DataNodeConfig._DEFAULT_SCOPE) with pytest.raises(InvalidDataNodeType): _DataManager._create_and_set(wrong_type_dn_config, None, None) def test_create_from_same_config_generates_new_data_node_and_new_id(self, init_sql_repo): init_managers() dn_config = Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") dn = _DataManager._create_and_set(dn_config, None, None) dn_2 = _DataManager._create_and_set(dn_config, None, None) assert dn_2.id != dn.id def test_create_uses_overridden_attributes_in_config_file(self, init_sql_repo): init_managers() Config.override(os.path.join(pathlib.Path(__file__).parent.resolve(), \"data_sample/config.toml\")) csv_dn_cfg = Config.configure_data_node(id=\"foo\", storage_type=\"csv\", path=\"bar\", has_header=True) csv_dn = _DataManager._create_and_set(csv_dn_cfg, None, None) assert csv_dn.config_id == \"foo\" assert isinstance(csv_dn, CSVDataNode) assert csv_dn._path == \"path_from_config_file\" assert csv_dn.has_header csv_dn_cfg = Config.configure_data_node(id=\"baz\", storage_type=\"csv\", path=\"bar\", has_header=True) csv_dn = _DataManager._create_and_set(csv_dn_cfg, None, None) assert csv_dn.config_id == \"baz\" assert isinstance(csv_dn, CSVDataNode) assert csv_dn._path == \"bar\" assert csv_dn.has_header def test_get_if_not_exists(self, init_sql_repo): init_managers() with pytest.raises(ModelNotFound): _DataManager._repository._load(\"test_data_node_2\") def test_get_all(self, init_sql_repo): init_managers() _DataManager._delete_all() assert len(_DataManager._get_all()) == 0 dn_config_1 = Config.configure_data_node(id=\"foo\", storage_type=\"in_memory\") _DataManager._create_and_set(dn_config_1, None, None) assert len(_DataManager._get_all()) == 1 dn_config_2 = Config.configure_data_node(id=\"baz\", storage_type=\"in_memory\") _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_all()) == 3 assert len([dn for dn in _DataManager._get_all() if dn.config_id == \"foo\"]) == 1 assert len([dn for dn in _DataManager._get_all() if dn.config_id == \"baz\"]) == 2 def test_get_all_on_multiple_versions_environment(self, init_sql_repo): init_managers() # Create 5 data nodes with 2 versions each # Only version 1.0 has the data node with config_id = \"config_id_1\" # Only version 2.0 has the data node with config_id = \"config_id_6\" for version in range(1, 3): for i in range(5): _DataManager._set( InMemoryDataNode( f\"config_id_{i+version}\", Scope.SCENARIO, id=DataNodeId(f\"id{i}_v{version}\"), version=f\"{version}.0\", ) ) _VersionManager._set_experiment_version(\"1.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert 
len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_development_version(\"1.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_1\"}])) == 1 assert len(_DataManager._get_all_by(filters=[{\"version\": \"1.0\", \"config_id\": \"config_id_6\"}])) == 0 _VersionManager._set_experiment_version(\"2.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 _VersionManager._set_development_version(\"2.0\") assert len(_DataManager._get_all()) == 5 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_1\"}])) == 0 assert len(_DataManager._get_all_by(filters=[{\"version\": \"2.0\", \"config_id\": \"config_id_6\"}])) == 1 def test_set(self, init_sql_repo): init_managers() dn = InMemoryDataNode( \"config_id\", Scope.SCENARIO, id=DataNodeId(\"id\"), owner_id=None, parent_ids={\"task_id_1\"}, last_edit_date=None, edits=[], edit_in_progress=False, properties={\"foo\": \"bar\"}, ) assert len(_DataManager._get_all()) == 0 assert not _DataManager._exists(dn.id) _DataManager._set(dn) assert len(_DataManager._get_all()) == 1 assert _DataManager._exists(dn.id) # changing data node attribute dn.config_id = \"foo\" assert dn.config_id == \"foo\" _DataManager._set(dn) assert len(_DataManager._get_all()) == 1 assert dn.config_id == \"foo\" assert _DataManager._get(dn.id).config_id == \"foo\" def test_delete(self, init_sql_repo): init_managers() _DataManager._delete_all() dn_1 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_1\") dn_2 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_2\") dn_3 = InMemoryDataNode(\"config_id\", Scope.SCENARIO, id=\"id_3\") assert len(_DataManager._get_all()) == 0 _DataManager._set(dn_1) _DataManager._set(dn_2) _DataManager._set(dn_3) assert len(_DataManager._get_all()) == 3 assert all(_DataManager._exists(dn.id) for dn in [dn_1, dn_2, dn_3]) _DataManager._delete(dn_1.id) assert len(_DataManager._get_all()) == 2 assert _DataManager._get(dn_2.id).id == dn_2.id assert _DataManager._get(dn_3.id).id == dn_3.id assert _DataManager._get(dn_1.id) is None assert all(_DataManager._exists(dn.id) for dn in [dn_2, dn_3]) assert not _DataManager._exists(dn_1.id) _DataManager._delete_all() assert len(_DataManager._get_all()) == 0 assert not any(_DataManager._exists(dn.id) for dn in [dn_2, dn_3]) def test_get_or_create(self, init_sql_repo): def _get_or_create_dn(config, *args): return _DataManager._bulk_get_or_create([config], *args)[config] init_managers() global_dn_config = Config.configure_data_node( id=\"test_data_node\", storage_type=\"in_memory\", scope=Scope.GLOBAL, data=\"In memory Data Node\" ) cycle_dn_config = Config.configure_data_node( id=\"test_data_node1\", storage_type=\"in_memory\", scope=Scope.CYCLE, data=\"In memory scenario\" ) scenario_dn_config = Config.configure_data_node( id=\"test_data_node2\", storage_type=\"in_memory\", scope=Scope.SCENARIO, data=\"In memory scenario\" ) _DataManager._delete_all() assert len(_DataManager._get_all()) == 0 global_dn = _get_or_create_dn(global_dn_config, None, None) assert len(_DataManager._get_all()) == 1 global_dn_bis = _get_or_create_dn(global_dn_config, None) assert len(_DataManager._get_all()) == 1 assert global_dn.id == 
global_dn_bis.id scenario_dn = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 scenario_dn_bis = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 assert scenario_dn.id == scenario_dn_bis.id scenario_dn_ter = _get_or_create_dn(scenario_dn_config, None, \"scenario_id\") assert len(_DataManager._get_all()) == 2 assert scenario_dn.id == scenario_dn_bis.id assert scenario_dn_bis.id == scenario_dn_ter.id scenario_dn_quater = _get_or_create_dn(scenario_dn_config, None, \"scenario_id_2\") assert len(_DataManager._get_all()) == 3 assert scenario_dn.id == scenario_dn_bis.id assert scenario_dn_bis.id == scenario_dn_ter.id assert scenario_dn_ter.id != scenario_dn_quater.id assert len(_DataManager._get_all()) == 3 cycle_dn = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 cycle_dn_1 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_1.id cycle_dn_2 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id\") assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_2.id cycle_dn_3 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", None) assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_3.id cycle_dn_4 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id\") assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_4.id cycle_dn_5 = _get_or_create_dn(cycle_dn_config, \"cycle_id\", \"scenario_id_2\") assert len(_DataManager._get_all()) == 4 assert cycle_dn.id == cycle_dn_5.id assert cycle_dn_1.id == cycle_dn_2.id assert cycle_dn_2.id == cycle_dn_3.id assert cycle_dn_3.id == cycle_dn_4.id assert cycle_dn_4.id == cycle_dn_5.id def test_get_data_nodes_by_config_id(self, init_sql_repo): init_managers() dn_config_1 = Config.configure_data_node(\"dn_1\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", scope=Scope.SCENARIO) dn_config_3 = Config.configure_data_node(\"dn_3\", scope=Scope.SCENARIO) dn_1_1 = _DataManager._create_and_set(dn_config_1, None, None) dn_1_2 = _DataManager._create_and_set(dn_config_1, None, None) dn_1_3 = _DataManager._create_and_set(dn_config_1, None, None) assert len(_DataManager._get_all()) == 3 dn_2_1 = _DataManager._create_and_set(dn_config_2, None, None) dn_2_2 = _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_all()) == 5 dn_3_1 = _DataManager._create_and_set(dn_config_3, None, None) assert len(_DataManager._get_all()) == 6 dn_1_datanodes = _DataManager._get_by_config_id(dn_config_1.id) assert len(dn_1_datanodes) == 3 assert sorted([dn_1_1.id, dn_1_2.id, dn_1_3.id]) == sorted([sequence.id for sequence in dn_1_datanodes]) dn_2_datanodes = _DataManager._get_by_config_id(dn_config_2.id) assert len(dn_2_datanodes) == 2 assert sorted([dn_2_1.id, dn_2_2.id]) == sorted([sequence.id for sequence in dn_2_datanodes]) dn_3_datanodes = _DataManager._get_by_config_id(dn_config_3.id) assert len(dn_3_datanodes) == 1 assert sorted([dn_3_1.id]) == sorted([sequence.id for sequence in dn_3_datanodes]) def test_get_data_nodes_by_config_id_in_multiple_versions_environment(self, init_sql_repo): init_managers() dn_config_1 = Config.configure_data_node(\"dn_1\", scope=Scope.SCENARIO) dn_config_2 = Config.configure_data_node(\"dn_2\", scope=Scope.SCENARIO) _VersionManager._set_experiment_version(\"1.0\") _DataManager._create_and_set(dn_config_1, None, 
None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_by_config_id(dn_config_1.id)) == 3 assert len(_DataManager._get_by_config_id(dn_config_2.id)) == 2 _VersionManager._set_experiment_version(\"2.0\") _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_1, None, None) _DataManager._create_and_set(dn_config_2, None, None) _DataManager._create_and_set(dn_config_2, None, None) assert len(_DataManager._get_by_config_id(dn_config_1.id)) == 3 assert len(_DataManager._get_by_config_id(dn_config_2.id)) == 2 "} {"text": "import os import pytest from src.taipy.core.data._data_manager_factory import _DataManagerFactory from src.taipy.core.exceptions import ModelNotFound from src.taipy.core.job._job_manager_factory import _JobManagerFactory from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task from taipy.config.config import Config from tests.core.conftest import init_sql_repo def configure_fs_repo(): Config.configure_core(repository_type=\"default\") def configure_sql_repo(): init_sql_repo class TestSubmissionRepository: @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_save_and_load(self, data_node, job, configure_repo): configure_repo() _DataManagerFactory._build_manager()._repository._save(data_node) task = Task(\"task_config_id\", {}, print, [data_node], [data_node]) _TaskManagerFactory._build_manager()._repository._save(task) job._task = task _JobManagerFactory._build_manager()._repository._save(job) submission = Submission(task.id, task._ID_PREFIX) submission_repository = _SubmissionManagerFactory._build_manager()._repository submission_repository._save(submission) submission.jobs = [job] obj = submission_repository._load(submission.id) assert isinstance(obj, Submission) @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_exists(self, configure_repo): configure_repo() submission = Submission(\"entity_id\", \"ENTITY_TYPE\") submission_repository = _SubmissionManagerFactory._build_manager()._repository submission_repository._save(submission) assert submission_repository._exists(submission.id) assert not submission_repository._exists(\"not-existed-submission\") @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_load_all(self, configure_repo): configure_repo() repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\", \"ENTITY_TYPE\") for i in range(10): submission.id = f\"submission-{i}\" repository._save(submission) submissions = repository._load_all() assert len(submissions) == 10 @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_delete(self, configure_repo): configure_repo() repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\", \"ENTITY_TYPE\") repository._save(submission) repository._delete(submission.id) with pytest.raises(ModelNotFound): repository._load(submission.id) @pytest.mark.parametrize(\"configure_repo\", 
[configure_fs_repo, configure_sql_repo]) def test_delete_all(self, configure_repo): configure_repo() submission_repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\", \"ENTITY_TYPE\") for i in range(10): submission.id = f\"submission-{i}\" submission_repository._save(submission) assert len(submission_repository._load_all()) == 10 submission_repository._delete_all() assert len(submission_repository._load_all()) == 0 @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_delete_many(self, configure_repo): configure_repo() submission = Submission(\"entity_id\", \"ENTITY_TYPE\") submission_repository = _SubmissionManagerFactory._build_manager()._repository for i in range(10): submission.id = f\"submission-{i}\" submission_repository._save(submission) objs = submission_repository._load_all() assert len(objs) == 10 ids = [x.id for x in objs[:3]] submission_repository._delete_many(ids) assert len(submission_repository._load_all()) == 7 @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_delete_by(self, configure_repo): configure_repo() # Create 5 entities with version 1.0 and 5 entities with version 2.0 submission_repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\", \"ENTITY_TYPE\") for i in range(10): submission.id = f\"submission-{i}\" submission._version = f\"{(i+1) // 5}.0\" submission_repository._save(submission) objs = submission_repository._load_all() assert len(objs) == 10 submission_repository._delete_by(\"version\", \"1.0\") assert len(submission_repository._load_all()) == 5 @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_search(self, configure_repo): configure_repo() submission_repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\", \"ENTITY_TYPE\", version=\"random_version_number\") for i in range(10): submission.id = f\"submission-{i}\" submission_repository._save(submission) assert len(submission_repository._load_all()) == 10 objs = submission_repository._search(\"id\", \"submission-2\") assert len(objs) == 1 assert isinstance(objs[0], Submission) objs = submission_repository._search(\"id\", \"submission-2\", filters=[{\"version\": \"random_version_number\"}]) assert len(objs) == 1 assert isinstance(objs[0], Submission) assert submission_repository._search(\"id\", \"submission-2\", filters=[{\"version\": \"non_existed_version\"}]) == [] @pytest.mark.parametrize(\"configure_repo\", [configure_fs_repo, configure_sql_repo]) def test_export(self, tmpdir, configure_repo): configure_repo() repository = _SubmissionManagerFactory._build_manager()._repository submission = Submission(\"entity_id\", \"ENTITY_TYPE\") repository._save(submission) repository._export(submission.id, tmpdir.strpath) dir_path = ( repository.dir_path if Config.core.repository_type == \"default\" else os.path.join(tmpdir.strpath, \"submission\") ) assert os.path.exists(os.path.join(dir_path, f\"{submission.id}.json\")) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from datetime import datetime from time import sleep from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.submission.submission_status import SubmissionStatus from src.taipy.core.task.task import Task def test_create_submission(scenario): submission_1 = _SubmissionManagerFactory._build_manager()._create(scenario.id, scenario._ID_PREFIX) assert submission_1.id is not None assert submission_1.entity_id == scenario.id assert submission_1.jobs == [] assert isinstance(submission_1.creation_date, datetime) assert submission_1._submission_status == SubmissionStatus.SUBMITTED def test_get_submission(): submission_manager = _SubmissionManagerFactory._build_manager() assert submission_manager._get(\"random_submission_id\") is None submission_1 = submission_manager._create(\"entity_id\", \"ENTITY_TYPE\") submission_2 = submission_manager._get(submission_1.id) assert submission_1.id == submission_2.id assert submission_1.entity_id == submission_2.entity_id == \"entity_id\" assert submission_1.jobs == submission_2.jobs assert submission_1.creation_date == submission_2.creation_date assert submission_1.submission_status == submission_2.submission_status def test_get_all_submission(): submission_manager = _SubmissionManagerFactory._build_manager() version_manager = _VersionManagerFactory._build_manager() submission_manager._set(Submission(\"entity_id\", \"submission_id\", version=version_manager._get_latest_version())) for version_name in [\"abc\", \"xyz\"]: for i in range(10): submission_manager._set( Submission(\"entity_id\", f\"submission_{version_name}_{i}\", version=f\"{version_name}\") ) assert len(submission_manager._get_all()) == 1 version_manager._set_experiment_version(\"xyz\") version_manager._set_experiment_version(\"abc\") assert len(submission_manager._get_all()) == 10 assert len(submission_manager._get_all(\"abc\")) == 10 assert len(submission_manager._get_all(\"xyz\")) == 10 def test_get_latest_submission(): task_1 = Task(\"task_config_1\", {}, print, id=\"task_id_1\") task_2 = Task(\"task_config_2\", {}, print, id=\"task_id_2\") submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(task_1.id, task_1._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_2 = submission_manager._create(task_2.id, task_2._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_3 = submission_manager._create(task_1.id, task_1._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, 
precision on Windows is not enough important submission_4 = submission_manager._create(task_2.id, task_2._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_4 def test_delete_submission(): submission_manager = _SubmissionManagerFactory._build_manager() submission = Submission(\"entity_id\", \"submission_id\") submission_manager._set(submission) for i in range(10): submission_manager._set(Submission(\"entity_id\", f\"submission_{i}\")) assert len(submission_manager._get_all()) == 11 assert isinstance(submission_manager._get(submission.id), Submission) submission_manager._delete(submission.id) assert len(submission_manager._get_all()) == 10 assert submission_manager._get(submission.id) is None submission_manager._delete_all() assert len(submission_manager._get_all()) == 0 "} {"text": "from datetime import datetime from functools import partial from typing import Union from unittest import mock from unittest.mock import patch import pytest from src.taipy.core import TaskId from src.taipy.core.job._job_manager_factory import _JobManagerFactory from src.taipy.core.job.job import Job from src.taipy.core.job.status import Status from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.submission.submission_status import SubmissionStatus from src.taipy.core.task._task_manager_factory import _TaskManagerFactory from src.taipy.core.task.task import Task def test_create_submission(scenario, job, current_datetime): submission_1 = Submission(scenario.id, scenario._ID_PREFIX) assert submission_1.id is not None assert submission_1.entity_id == scenario.id assert submission_1.jobs == [] assert isinstance(submission_1.creation_date, datetime) assert submission_1._submission_status == SubmissionStatus.SUBMITTED assert submission_1._version is not None submission_2 = Submission( scenario.id, scenario._ID_PREFIX, \"submission_id\", [job], current_datetime, SubmissionStatus.COMPLETED, \"version_id\", ) assert submission_2.id == \"submission_id\" assert submission_2.entity_id == scenario.id assert submission_2._jobs == [job] assert submission_2.creation_date == current_datetime assert submission_2._submission_status == SubmissionStatus.COMPLETED assert submission_2._version == \"version_id\" class MockJob: def __init__(self, id: str, status): self.status = status self.id = id def is_failed(self): return self.status == Status.FAILED def is_canceled(self): return self.status == Status.CANCELED def is_blocked(self): return self.status == Status.BLOCKED def is_pending(self): return self.status == Status.PENDING def is_running(self): return self.status == Status.RUNNING def is_completed(self): return self.status == Status.COMPLETED def is_skipped(self): return self.status == Status.SKIPPED def is_abandoned(self): return self.status == Status.ABANDONED def is_submitted(self): return self.status == Status.SUBMITTED def __test_update_submission_status(job_ids, expected_submission_status): jobs = { \"job0_submitted\": MockJob(\"job0_submitted\", Status.SUBMITTED), \"job1_failed\": MockJob(\"job1_failed\", Status.FAILED), \"job2_canceled\": MockJob(\"job2_canceled\", Status.CANCELED), \"job3_blocked\": MockJob(\"job3_blocked\", Status.BLOCKED), \"job4_pending\": MockJob(\"job4_pending\", Status.PENDING), \"job5_running\": MockJob(\"job5_running\", Status.RUNNING), \"job6_completed\": MockJob(\"job6_completed\", 
Status.COMPLETED), \"job7_skipped\": MockJob(\"job7_skipped\", Status.SKIPPED), \"job8_abandoned\": MockJob(\"job8_abandoned\", Status.ABANDONED), } submission = Submission(\"submission_id\", \"ENTITY_TYPE\") submission.jobs = [jobs[job_id] for job_id in job_ids] for job_id in job_ids: job = jobs[job_id] submission._update_submission_status(job) assert submission.submission_status == expected_submission_status @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job1_failed\"], SubmissionStatus.FAILED), ([\"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job3_blocked\"], SubmissionStatus.BLOCKED), ([\"job4_pending\"], SubmissionStatus.PENDING), ([\"job5_running\"], SubmissionStatus.RUNNING), ([\"job6_completed\"], SubmissionStatus.COMPLETED), ([\"job7_skipped\"], SubmissionStatus.COMPLETED), ([\"job8_abandoned\"], SubmissionStatus.UNDEFINED), ], ) def test_update_single_submission_status(job_ids, expected_submission_status): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job1_failed\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job2_canceled\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job3_blocked\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job4_pending\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job5_running\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job6_completed\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job7_skipped\"], SubmissionStatus.FAILED), ([\"job1_failed\", \"job8_abandoned\"], SubmissionStatus.FAILED), ([\"job2_canceled\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job3_blocked\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job4_pending\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job5_running\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job6_completed\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job7_skipped\", \"job1_failed\"], SubmissionStatus.FAILED), ([\"job8_abandoned\", \"job1_failed\"], SubmissionStatus.FAILED), ], ) def test_update_submission_status_with_one_failed_job_in_jobs(job_ids, expected_submission_status): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job2_canceled\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job3_blocked\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job4_pending\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job5_running\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job6_completed\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job7_skipped\"], SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job8_abandoned\"], SubmissionStatus.CANCELED), ([\"job3_blocked\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job4_pending\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job5_running\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job6_completed\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job7_skipped\", \"job2_canceled\"], SubmissionStatus.CANCELED), ([\"job8_abandoned\", \"job2_canceled\"], SubmissionStatus.CANCELED), ], ) def test_update_submission_status_with_one_canceled_job_in_jobs(job_ids, expected_submission_status): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job4_pending\", \"job3_blocked\"], SubmissionStatus.PENDING), ([\"job4_pending\", \"job4_pending\"], 
SubmissionStatus.PENDING), ([\"job4_pending\", \"job6_completed\"], SubmissionStatus.PENDING), ([\"job4_pending\", \"job7_skipped\"], SubmissionStatus.PENDING), ([\"job3_blocked\", \"job4_pending\"], SubmissionStatus.PENDING), ([\"job6_completed\", \"job4_pending\"], SubmissionStatus.PENDING), ([\"job7_skipped\", \"job4_pending\"], SubmissionStatus.PENDING), ], ) def test_update_submission_status_with_no_failed_or_cancel_one_pending_in_jobs(job_ids, expected_submission_status): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job5_running\", \"job3_blocked\"], SubmissionStatus.RUNNING), ([\"job5_running\", \"job4_pending\"], SubmissionStatus.RUNNING), ([\"job5_running\", \"job5_running\"], SubmissionStatus.RUNNING), ([\"job5_running\", \"job6_completed\"], SubmissionStatus.RUNNING), ([\"job5_running\", \"job7_skipped\"], SubmissionStatus.RUNNING), ([\"job3_blocked\", \"job5_running\"], SubmissionStatus.RUNNING), ([\"job4_pending\", \"job5_running\"], SubmissionStatus.RUNNING), ([\"job6_completed\", \"job5_running\"], SubmissionStatus.RUNNING), ([\"job7_skipped\", \"job5_running\"], SubmissionStatus.RUNNING), ], ) def test_update_submission_status_with_no_failed_cancel_nor_pending_one_running_in_jobs( job_ids, expected_submission_status ): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job3_blocked\", \"job3_blocked\"], SubmissionStatus.BLOCKED), ([\"job3_blocked\", \"job6_completed\"], SubmissionStatus.BLOCKED), ([\"job3_blocked\", \"job7_skipped\"], SubmissionStatus.BLOCKED), ([\"job6_completed\", \"job3_blocked\"], SubmissionStatus.BLOCKED), ([\"job7_skipped\", \"job3_blocked\"], SubmissionStatus.BLOCKED), ], ) def test_update_submission_status_with_no_failed_cancel_pending_nor_running_one_blocked_in_jobs( job_ids, expected_submission_status ): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job6_completed\", \"job6_completed\"], SubmissionStatus.COMPLETED), ([\"job6_completed\", \"job7_skipped\"], SubmissionStatus.COMPLETED), ([\"job7_skipped\", \"job6_completed\"], SubmissionStatus.COMPLETED), ([\"job7_skipped\", \"job7_skipped\"], SubmissionStatus.COMPLETED), ], ) def test_update_submission_status_with_only_completed_or_skipped_in_jobs(job_ids, expected_submission_status): __test_update_submission_status(job_ids, expected_submission_status) @pytest.mark.parametrize( \"job_ids, expected_submission_status\", [ ([\"job3_blocked\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job4_pending\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job5_running\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job6_completed\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job7_skipped\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job8_abandoned\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job3_blocked\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job4_pending\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job5_running\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job6_completed\"], SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job7_skipped\"], SubmissionStatus.UNDEFINED), ], ) def test_update_submission_status_with_wrong_case_abandoned_without_cancel_or_failed_in_jobs( job_ids, expected_submission_status ): 
__test_update_submission_status(job_ids, expected_submission_status) def test_auto_set_and_reload(): task = Task(config_id=\"name_1\", properties={}, function=print, id=TaskId(\"task_1\")) submission_1 = Submission(task.id, task._ID_PREFIX) job_1 = Job(\"job_1\", task, submission_1.id, submission_1.entity_id) job_2 = Job(\"job_2\", task, submission_1.id, submission_1.entity_id) _TaskManagerFactory._build_manager()._set(task) _SubmissionManagerFactory._build_manager()._set(submission_1) _JobManagerFactory._build_manager()._set(job_1) _JobManagerFactory._build_manager()._set(job_2) submission_2 = _SubmissionManagerFactory._build_manager()._get(submission_1) assert submission_1.id == submission_2.id assert submission_1.entity_id == submission_2.entity_id assert submission_1.creation_date == submission_2.creation_date assert submission_1.submission_status == submission_2.submission_status # auto set & reload on jobs attribute assert submission_1.jobs == [] assert submission_2.jobs == [] submission_1.jobs = [job_1] assert submission_1.jobs == [job_1] assert submission_2.jobs == [job_1] submission_2.jobs = [job_2] assert submission_1.jobs == [job_2] assert submission_2.jobs == [job_2] submission_1.jobs = [job_1, job_2] assert submission_1.jobs == [job_1, job_2] assert submission_2.jobs == [job_1, job_2] submission_2.jobs = [job_2, job_1] assert submission_1.jobs == [job_2, job_1] assert submission_2.jobs == [job_2, job_1] # auto set & reload on submission_status attribute assert submission_1.submission_status == SubmissionStatus.SUBMITTED assert submission_2.submission_status == SubmissionStatus.SUBMITTED submission_1.submission_status = SubmissionStatus.BLOCKED assert submission_1.submission_status == SubmissionStatus.BLOCKED assert submission_2.submission_status == SubmissionStatus.BLOCKED submission_2.submission_status = SubmissionStatus.COMPLETED assert submission_1.submission_status == SubmissionStatus.COMPLETED assert submission_2.submission_status == SubmissionStatus.COMPLETED with submission_1 as submission: assert submission.jobs == [job_2, job_1] assert submission.submission_status == SubmissionStatus.COMPLETED submission.jobs = [job_1] submission.submission_status = SubmissionStatus.PENDING assert submission.jobs == [job_2, job_1] assert submission.submission_status == SubmissionStatus.COMPLETED assert submission_1.jobs == [job_1] assert submission_1.submission_status == SubmissionStatus.PENDING assert submission_2.jobs == [job_1] assert submission_2.submission_status == SubmissionStatus.PENDING @pytest.mark.parametrize( \"job_statuses, expected_submission_statuses\", [ ( [Status.SUBMITTED, Status.PENDING, Status.RUNNING, Status.COMPLETED], [SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.COMPLETED], ), ( [Status.SUBMITTED, Status.PENDING, Status.RUNNING, Status.SKIPPED], [SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.COMPLETED], ), ( [Status.SUBMITTED, Status.PENDING, Status.RUNNING, Status.FAILED], [SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.FAILED], ), ( [Status.SUBMITTED, Status.PENDING, Status.CANCELED], [SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.CANCELED], ), ( [Status.SUBMITTED, Status.PENDING, Status.RUNNING, Status.CANCELED], [SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.CANCELED], ), ([Status.SUBMITTED, Status.BLOCKED], [SubmissionStatus.PENDING, 
SubmissionStatus.BLOCKED]), ([Status.SUBMITTED, Status.SKIPPED], [SubmissionStatus.PENDING, SubmissionStatus.COMPLETED]), ], ) def test_update_submission_status_with_single_job_completed(job_statuses, expected_submission_statuses): job = MockJob(\"job_id\", Status.SUBMITTED) submission = Submission(\"submission_id\", \"ENTITY_TYPE\") assert submission.submission_status == SubmissionStatus.SUBMITTED for job_status, submission_status in zip(job_statuses, expected_submission_statuses): job.status = job_status submission._update_submission_status(job) assert submission.submission_status == submission_status def __test_update_submission_status_with_two_jobs(job_ids, job_statuses, expected_submission_statuses): jobs = {job_id: MockJob(job_id, Status.SUBMITTED) for job_id in job_ids} submission = Submission(\"submission_id\", \"ENTITY_TYPE\") assert submission.submission_status == SubmissionStatus.SUBMITTED for (job_id, job_status), submission_status in zip(job_statuses, expected_submission_statuses): job = jobs[job_id] job.status = job_status submission._update_submission_status(job) assert submission.submission_status == submission_status @pytest.mark.parametrize( \"job_ids, job_statuses, expected_submission_statuses\", [ ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.PENDING), (\"job_2\", Status.PENDING), (\"job_1\", Status.RUNNING), (\"job_2\", Status.RUNNING), (\"job_1\", Status.COMPLETED), (\"job_2\", Status.COMPLETED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.COMPLETED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.PENDING), (\"job_1\", Status.RUNNING), (\"job_2\", Status.PENDING), (\"job_2\", Status.RUNNING), (\"job_1\", Status.COMPLETED), (\"job_2\", Status.COMPLETED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.COMPLETED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.BLOCKED), (\"job_2\", Status.PENDING), (\"job_2\", Status.RUNNING), (\"job_2\", Status.COMPLETED), (\"job_1\", Status.PENDING), (\"job_1\", Status.RUNNING), (\"job_1\", Status.COMPLETED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.BLOCKED, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.COMPLETED, ], ), ], ) def test_update_submission_status_with_two_jobs_completed(job_ids, job_statuses, expected_submission_statuses): __test_update_submission_status_with_two_jobs(job_ids, job_statuses, expected_submission_statuses) @pytest.mark.parametrize( \"job_ids, job_statuses, expected_submission_statuses\", [ ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.PENDING), (\"job_2\", Status.PENDING), (\"job_1\", Status.RUNNING), (\"job_2\", Status.SKIPPED), (\"job_1\", Status.COMPLETED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.COMPLETED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), 
(\"job_2\", Status.SUBMITTED), (\"job_1\", Status.PENDING), (\"job_1\", Status.RUNNING), (\"job_2\", Status.PENDING), (\"job_2\", Status.SKIPPED), (\"job_1\", Status.COMPLETED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.COMPLETED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.BLOCKED), (\"job_2\", Status.PENDING), (\"job_2\", Status.RUNNING), (\"job_2\", Status.COMPLETED), (\"job_1\", Status.PENDING), (\"job_1\", Status.SKIPPED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.BLOCKED, SubmissionStatus.PENDING, SubmissionStatus.COMPLETED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.PENDING), (\"job_2\", Status.PENDING), (\"job_1\", Status.SKIPPED), (\"job_2\", Status.SKIPPED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.COMPLETED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.PENDING), (\"job_1\", Status.SKIPPED), (\"job_2\", Status.PENDING), (\"job_2\", Status.SKIPPED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.COMPLETED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.BLOCKED), (\"job_2\", Status.PENDING), (\"job_2\", Status.SKIPPED), (\"job_1\", Status.PENDING), (\"job_1\", Status.SKIPPED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.BLOCKED, SubmissionStatus.PENDING, SubmissionStatus.COMPLETED, ], ), ], ) def test_update_submission_status_with_two_jobs_skipped(job_ids, job_statuses, expected_submission_statuses): __test_update_submission_status_with_two_jobs(job_ids, job_statuses, expected_submission_statuses) @pytest.mark.parametrize( \"job_ids, job_statuses, expected_submission_statuses\", [ ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.PENDING), (\"job_2\", Status.PENDING), (\"job_1\", Status.RUNNING), (\"job_2\", Status.RUNNING), (\"job_1\", Status.FAILED), (\"job_2\", Status.COMPLETED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.FAILED, SubmissionStatus.FAILED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.PENDING), (\"job_1\", Status.RUNNING), (\"job_2\", Status.PENDING), (\"job_2\", Status.RUNNING), (\"job_1\", Status.COMPLETED), (\"job_2\", Status.FAILED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.FAILED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.BLOCKED), (\"job_2\", Status.PENDING), (\"job_2\", Status.RUNNING), (\"job_2\", Status.FAILED), (\"job_1\", Status.ABANDONED), ], [ 
SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.FAILED, SubmissionStatus.FAILED, ], ), ], ) def test_update_submission_status_with_two_jobs_failed(job_ids, job_statuses, expected_submission_statuses): __test_update_submission_status_with_two_jobs(job_ids, job_statuses, expected_submission_statuses) @pytest.mark.parametrize( \"job_ids, job_statuses, expected_submission_statuses\", [ ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.PENDING), (\"job_2\", Status.PENDING), (\"job_1\", Status.RUNNING), (\"job_2\", Status.RUNNING), (\"job_1\", Status.CANCELED), (\"job_2\", Status.COMPLETED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.CANCELED, SubmissionStatus.CANCELED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.PENDING), (\"job_1\", Status.RUNNING), (\"job_2\", Status.PENDING), (\"job_2\", Status.RUNNING), (\"job_1\", Status.COMPLETED), (\"job_2\", Status.CANCELED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.RUNNING, SubmissionStatus.CANCELED, ], ), ( [\"job_1\", \"job_2\"], [ (\"job_1\", Status.SUBMITTED), (\"job_2\", Status.SUBMITTED), (\"job_1\", Status.BLOCKED), (\"job_2\", Status.PENDING), (\"job_2\", Status.RUNNING), (\"job_2\", Status.CANCELED), (\"job_1\", Status.ABANDONED), ], [ SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.PENDING, SubmissionStatus.RUNNING, SubmissionStatus.CANCELED, SubmissionStatus.CANCELED, ], ), ], ) def test_update_submission_status_with_two_jobs_canceled(job_ids, job_statuses, expected_submission_statuses): __test_update_submission_status_with_two_jobs(job_ids, job_statuses, expected_submission_statuses) "} {"text": "from datetime import datetime from time import sleep from src.taipy.core import Task from src.taipy.core._repository.db._sql_connection import _SQLConnection from src.taipy.core._version._version_manager_factory import _VersionManagerFactory from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory from src.taipy.core.submission.submission import Submission from src.taipy.core.submission.submission_status import SubmissionStatus def init_managers(): _VersionManagerFactory._build_manager()._delete_all() _SubmissionManagerFactory._build_manager()._delete_all() def test_create_submission(scenario, init_sql_repo): init_managers() submission_1 = _SubmissionManagerFactory._build_manager()._create(scenario.id, scenario._ID_PREFIX) assert submission_1.id is not None assert submission_1.entity_id == scenario.id assert submission_1.jobs == [] assert isinstance(submission_1.creation_date, datetime) assert submission_1._submission_status == SubmissionStatus.SUBMITTED def test_get_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(\"entity_id\", \"ENTITY_TYPE\") submission_2 = submission_manager._get(submission_1.id) assert submission_1.id == submission_2.id assert submission_1.entity_id == submission_2.entity_id == \"entity_id\" assert submission_1.jobs == submission_2.jobs assert 
submission_1.creation_date == submission_2.creation_date assert submission_1.submission_status == submission_2.submission_status def test_get_all_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() version_manager = _VersionManagerFactory._build_manager() submission_manager._set(Submission(\"entity_id\", \"submission_id\", version=version_manager._get_latest_version())) for version_name in [\"abc\", \"xyz\"]: for i in range(10): submission_manager._set( Submission(\"entity_id\", f\"submission_{version_name}_{i}\", version=f\"{version_name}\") ) assert len(submission_manager._get_all()) == 1 version_manager._set_experiment_version(\"xyz\") version_manager._set_experiment_version(\"abc\") assert len(submission_manager._get_all()) == 10 assert len(submission_manager._get_all(\"abc\")) == 10 assert len(submission_manager._get_all(\"xyz\")) == 10 def test_get_latest_submission(init_sql_repo): init_managers() task_1 = Task(\"task_config_1\", {}, print, id=\"task_id_1\") task_2 = Task(\"task_config_2\", {}, print, id=\"task_id_2\") submission_manager = _SubmissionManagerFactory._build_manager() submission_1 = submission_manager._create(task_1.id, task_1._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) is None sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_2 = submission_manager._create(task_2.id, task_2._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_1 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_3 = submission_manager._create(task_1.id, task_1._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_2 sleep(0.01) # Comparison is based on time, precision on Windows is not enough important submission_4 = submission_manager._create(task_2.id, task_2._ID_PREFIX) assert submission_manager._get_latest(task_1) == submission_3 assert submission_manager._get_latest(task_2) == submission_4 def test_delete_submission(init_sql_repo): init_managers() submission_manager = _SubmissionManagerFactory._build_manager() submission = Submission(\"entity_id\", \"submission_id\") submission_manager._set(submission) for i in range(10): submission_manager._set(Submission(\"entity_id\", f\"submission_{i}\")) assert len(submission_manager._get_all()) == 11 assert isinstance(submission_manager._get(submission.id), Submission) submission_manager._delete(submission.id) assert len(submission_manager._get_all()) == 10 assert submission_manager._get(submission.id) is None submission_manager._delete_all() assert len(submission_manager._get_all()) == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import os import pathlib from unittest import TestCase, mock from src.taipy.logger._taipy_logger import _TaipyLogger class TestTaipyLogger(TestCase): def test_taipy_logger(self): _TaipyLogger._get_logger().info(\"baz\") _TaipyLogger._get_logger().debug(\"qux\") def test_taipy_logger_configured_by_file(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"logger.conf\") with mock.patch.dict(os.environ, {\"TAIPY_LOGGER_CONFIG_PATH\": path}): _TaipyLogger._get_logger().info(\"baz\") _TaipyLogger._get_logger().debug(\"qux\") "} {"text": "import os import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked from tests.config.utils.named_temporary_file import NamedTemporaryFile config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] custom_property_not_overwritten = true custom_property_overwritten = 10 \"\"\" ) config_from_environment = NamedTemporaryFile( \"\"\" [TAIPY] custom_property_overwritten = 11 \"\"\" ) def test_load_from_environment_overwrite_load_from_filename(): os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename Config.load(config_from_filename.filename) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 11 os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH) def test_block_load_from_environment_overwrite_load_from_filename(): Config.load(config_from_filename.filename) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 10 Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename Config.load(config_from_filename.filename) os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 10 # The Config.load is failed to override "} {"text": "import pytest from src.taipy.config._config import _Config from src.taipy.config._config_comparator._config_comparator import _ConfigComparator from src.taipy.config._serializer._toml_serializer import _TomlSerializer from src.taipy.config.checker.issue_collector import IssueCollector from src.taipy.config.config import Config from src.taipy.config.section import Section from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest @pytest.fixture(scope=\"function\", autouse=True) def reset(): reset_configuration_singleton() register_test_sections() def reset_configuration_singleton(): Config.unblock_update() Config._default_config = _Config()._default_config() Config._python_config = _Config() Config._file_config = _Config() Config._env_file_config = _Config() Config._applied_config = _Config() Config._collector = IssueCollector() Config._serializer = _TomlSerializer() Config._comparator = _ConfigComparator() def register_test_sections(): Config._register_default(UniqueSectionForTest(\"default_attribute\")) Config.configure_unique_section_for_tests = UniqueSectionForTest._configure Config.unique_section_name = Config.unique_sections[UniqueSectionForTest.name] Config._register_default(SectionForTest(Section._DEFAULT_KEY, \"default_attribute\", prop=\"default_prop\", prop_int=0)) Config.configure_section_for_tests = 
SectionForTest._configure Config.section_name = Config.sections[SectionForTest.name] "} {"text": "from unittest import mock from src.taipy.config import Config from src.taipy.config._config import _Config from src.taipy.config._config_comparator._comparator_result import _ComparatorResult from src.taipy.config.global_app.global_app_config import GlobalAppConfig from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest class TestConfigComparator: unique_section_1 = UniqueSectionForTest(attribute=\"unique_attribute_1\", prop=\"unique_prop_1\") unique_section_1b = UniqueSectionForTest(attribute=\"unique_attribute_1\", prop=\"unique_prop_1b\") section_1 = SectionForTest(\"section_1\", attribute=\"attribute_1\", prop=\"prop_1\") section_2 = SectionForTest(\"section_2\", attribute=2, prop=\"prop_2\") section_2b = SectionForTest(\"section_2\", attribute=\"attribute_2\", prop=\"prop_2b\") section_3 = SectionForTest(\"section_3\", attribute=[1, 2, 3, 4], prop=[\"prop_1\"]) section_3b = SectionForTest(\"section_3\", attribute=[1, 2], prop=[\"prop_1\", \"prop_2\", \"prop_3\"]) section_3c = SectionForTest(\"section_3\", attribute=[2, 1], prop=[\"prop_3\", \"prop_1\", \"prop_2\"]) def test_comparator_compare_method_call(self): _config_1 = _Config._default_config() _config_2 = _Config._default_config() with mock.patch( \"src.taipy.config._config_comparator._config_comparator._ConfigComparator._find_conflict_config\" ) as mck: Config._comparator._find_conflict_config(_config_1, _config_2) mck.assert_called_once_with(_config_1, _config_2) def test_comparator_without_diff(self): _config_1 = _Config._default_config() _config_2 = _Config._default_config() config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) assert isinstance(config_diff, _ComparatorResult) assert config_diff == {} def test_comparator_with_updated_global_config(self): _config_1 = _Config._default_config() _config_1._global_config = GlobalAppConfig(foo=\"bar\") _config_2 = _Config._default_config() _config_2._global_config = GlobalAppConfig(foo=\"baz\", bar=\"foo\") config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) assert config_diff.get(\"unconflicted_sections\") is None assert config_diff.get(\"conflicted_sections\") is not None conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"modified_items\"]) == 1 assert conflicted_config_diff[\"modified_items\"][0] == ( (\"Global Configuration\", \"foo\", None), (\"bar\", \"baz\"), ) assert len(conflicted_config_diff[\"added_items\"]) == 1 assert conflicted_config_diff[\"added_items\"][0] == ( (\"Global Configuration\", \"bar\", None), \"foo\", ) def test_comparator_with_new_section(self): _config_1 = _Config._default_config() # The first \"section_name\" is added to the Config _config_2 = _Config._default_config() _config_2._sections[SectionForTest.name] = {\"section_1\": self.section_1} config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"added_items\"]) == 1 assert conflicted_config_diff[\"added_items\"][0] == ( (\"section_name\", None, None), {\"section_1\": {\"attribute\": \"attribute_1\", \"prop\": \"prop_1\"}}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"removed_items\") is None # A new \"section_name\" is added to the Config _config_3 = 
_Config._default_config() _config_3._sections[SectionForTest.name] = {\"section_1\": self.section_1, \"section_2\": self.section_2} config_diff = Config._comparator._find_conflict_config(_config_2, _config_3) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"added_items\"]) == 1 assert conflicted_config_diff[\"added_items\"][0] == ( (\"section_name\", \"section_2\", None), {\"attribute\": \"2:int\", \"prop\": \"prop_2\"}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"removed_items\") is None def test_comparator_with_removed_section(self): _config_1 = _Config._default_config() # All \"section_name\" sections are removed from the Config _config_2 = _Config._default_config() _config_2._sections[SectionForTest.name] = {\"section_1\": self.section_1} config_diff = Config._comparator._find_conflict_config(_config_2, _config_1) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"removed_items\"]) == 1 assert conflicted_config_diff[\"removed_items\"][0] == ( (\"section_name\", None, None), {\"section_1\": {\"attribute\": \"attribute_1\", \"prop\": \"prop_1\"}}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"added_items\") is None # Section \"section_2\" is removed from the Config _config_3 = _Config._default_config() _config_3._sections[SectionForTest.name] = {\"section_1\": self.section_1, \"section_2\": self.section_2} config_diff = Config._comparator._find_conflict_config(_config_3, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"removed_items\"]) == 1 assert conflicted_config_diff[\"removed_items\"][0] == ( (\"section_name\", \"section_2\", None), {\"attribute\": \"2:int\", \"prop\": \"prop_2\"}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"added_items\") is None def test_comparator_with_modified_section(self): _config_1 = _Config._default_config() _config_1._sections[SectionForTest.name] = {\"section_2\": self.section_2} # The \"section_2\" section is modified in the Config _config_2 = _Config._default_config() _config_2._sections[SectionForTest.name] = {\"section_2\": self.section_2b} config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"modified_items\"]) == 2 assert conflicted_config_diff[\"modified_items\"][0] == ( (\"section_name\", \"section_2\", \"attribute\"), (\"2:int\", \"attribute_2\"), ) assert conflicted_config_diff[\"modified_items\"][1] == ( (\"section_name\", \"section_2\", \"prop\"), (\"prop_2\", \"prop_2b\"), ) assert conflicted_config_diff.get(\"removed_items\") is None assert conflicted_config_diff.get(\"added_items\") is None def test_comparator_with_modified_list_attribute(self): _config_1 = _Config._default_config() _config_1._sections[SectionForTest.name] = {\"section_3\": self.section_3} # The list attributes of section \"section_3\" are modified in the Config _config_2 = _Config._default_config() _config_2._sections[SectionForTest.name] = {\"section_3\": self.section_3b} config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"modified_items\"]) == 2 assert conflicted_config_diff[\"modified_items\"][0] == ( (\"section_name\", 
\"section_3\", \"prop\"), ([\"prop_1\"], [\"prop_1\", \"prop_2\", \"prop_3\"]), ) assert conflicted_config_diff[\"modified_items\"][1] == ( (\"section_name\", \"section_3\", \"attribute\"), ([\"1:int\", \"2:int\", \"3:int\", \"4:int\"], [\"1:int\", \"2:int\"]), ) assert conflicted_config_diff.get(\"removed_items\") is None assert conflicted_config_diff.get(\"added_items\") is None def test_comparator_with_different_order_list_attributes(self): _config_1 = _Config._default_config() _config_1._unique_sections _config_1._sections[SectionForTest.name] = {\"section_3\": self.section_3b} # Create _config_2 with different order of list attributes _config_2 = _Config._default_config() _config_2._sections[SectionForTest.name] = {\"section_3\": self.section_3c} config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) # There should be no difference since the order of list attributes is ignored assert config_diff == {} def test_comparator_with_new_unique_section(self): _config_1 = _Config._default_config() _config_2 = _Config._default_config() _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1 config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"added_items\"]) == 1 assert conflicted_config_diff[\"added_items\"][0] == ( (\"unique_section_name\", None, None), {\"attribute\": \"unique_attribute_1\", \"prop\": \"unique_prop_1\"}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"removed_items\") is None def test_comparator_with_removed_unique_section(self): _config_1 = _Config._default_config() _config_2 = _Config._default_config() _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1 config_diff = Config._comparator._find_conflict_config(_config_2, _config_1) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"removed_items\"]) == 1 assert conflicted_config_diff[\"removed_items\"][0] == ( (\"unique_section_name\", None, None), {\"attribute\": \"unique_attribute_1\", \"prop\": \"unique_prop_1\"}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"added_items\") is None def test_comparator_with_modified_unique_section(self): _config_1 = _Config._default_config() _config_1._unique_sections[UniqueSectionForTest.name] = self.unique_section_1 # All \"section_name\" sections are removed from the Config _config_2 = _Config._default_config() _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1b config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"modified_items\"]) == 1 assert conflicted_config_diff[\"modified_items\"][0] == ( (\"unique_section_name\", \"prop\", None), (\"unique_prop_1\", \"unique_prop_1b\"), ) assert conflicted_config_diff.get(\"removed_items\") is None assert conflicted_config_diff.get(\"added_items\") is None def test_unconflicted_section_name_store_statically(self): Config._comparator._add_unconflicted_section(\"section_name_1\") assert Config._comparator._unconflicted_sections == {\"section_name_1\"} Config._comparator._add_unconflicted_section(\"section_name_2\") assert Config._comparator._unconflicted_sections == {\"section_name_1\", \"section_name_2\"} 
Config._comparator._add_unconflicted_section(\"section_name_1\") assert Config._comparator._unconflicted_sections == {\"section_name_1\", \"section_name_2\"} def test_unconflicted_diff_is_stored_separated_from_conflicted_ones(self): _config_1 = _Config._default_config() _config_1._unique_sections[UniqueSectionForTest.name] = self.unique_section_1 _config_1._sections[SectionForTest.name] = {\"section_2\": self.section_2} _config_2 = _Config._default_config() _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1b _config_2._sections[SectionForTest.name] = {\"section_2\": self.section_2b} # Compare the 2 configurations config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) assert config_diff.get(\"unconflicted_sections\") is None assert config_diff.get(\"conflicted_sections\") is not None assert len(config_diff[\"conflicted_sections\"][\"modified_items\"]) == 3 # Ignore any diff of \"section_name\" and compare Config._comparator._add_unconflicted_section(\"section_name\") config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) assert config_diff.get(\"unconflicted_sections\") is not None assert len(config_diff[\"unconflicted_sections\"][\"modified_items\"]) == 2 assert config_diff.get(\"conflicted_sections\") is not None assert len(config_diff[\"conflicted_sections\"][\"modified_items\"]) == 1 # Ignore any diff of \"unique_section_name\" and compare Config._comparator._add_unconflicted_section([\"unique_section_name\"]) config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) assert config_diff.get(\"unconflicted_sections\") is not None assert len(config_diff[\"unconflicted_sections\"][\"modified_items\"]) == 3 assert config_diff.get(\"conflicted_sections\") is None def test_comparator_log_message(self, caplog): _config_1 = _Config._default_config() _config_1._unique_sections[UniqueSectionForTest.name] = self.unique_section_1 _config_1._sections[SectionForTest.name] = {\"section_2\": self.section_2} _config_2 = _Config._default_config() _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1b _config_2._sections[SectionForTest.name] = {\"section_2\": self.section_2b} # Ignore any diff of \"section_name\" and compare Config._comparator._add_unconflicted_section(\"section_name\") Config._comparator._find_conflict_config(_config_1, _config_2) error_messages = caplog.text.strip().split(\"\\n\") assert len(error_messages) == 5 assert all( t in error_messages[0] for t in [ \"INFO\", \"There are non-conflicting changes between the current configuration and the current configuration:\", ] ) assert 'section_name \"section_2\" has attribute \"attribute\" modified: 2:int -> attribute_2' in error_messages[1] assert 'section_name \"section_2\" has attribute \"prop\" modified: prop_2 -> prop_2b' in error_messages[2] assert all( t in error_messages[3] for t in [ \"ERROR\", \"The current configuration conflicts with the current configuration:\", ] ) assert 'unique_section_name \"prop\" was modified: unique_prop_1 -> unique_prop_1b' in error_messages[4] caplog.clear() Config._comparator._find_conflict_config(_config_1, _config_2, old_version_number=\"1.0\") error_messages = caplog.text.strip().split(\"\\n\") assert len(error_messages) == 5 assert all( t in error_messages[0] for t in [ \"INFO\", \"There are non-conflicting changes between the configuration for version 1.0 and the current \" \"configuration:\", ] ) assert all( t in error_messages[3] for t in [ \"ERROR\", \"The configuration for version 1.0 conflicts 
with the current configuration:\", ] ) caplog.clear() Config._comparator._compare( _config_1, _config_2, version_number_1=\"1.0\", version_number_2=\"2.0\", ) error_messages = caplog.text.strip().split(\"\\n\") assert len(error_messages) == 3 assert all( t in error_messages[0] for t in [\"INFO\", \"Differences between version 1.0 Configuration and version 2.0 Configuration:\"] ) caplog.clear() "} {"text": "import os from unittest import mock import pytest from src.taipy.config.exceptions.exceptions import InvalidConfigurationId from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest class WrongUniqueSection(UniqueSectionForTest): name = \"1wrong_id\" class WrongSection(SectionForTest): name = \"correct_name\" def test_section_uses_valid_id(): with pytest.raises(InvalidConfigurationId): WrongUniqueSection(attribute=\"foo\") with pytest.raises(InvalidConfigurationId): WrongSection(\"wrong id\", attribute=\"foo\") with pytest.raises(InvalidConfigurationId): WrongSection(\"1wrong_id\", attribute=\"foo\") with pytest.raises(InvalidConfigurationId): WrongSection(\"wrong_@id\", attribute=\"foo\") def test_templated_properties_are_replaced(): with mock.patch.dict(os.environ, {\"foo\": \"bar\", \"baz\": \"1\"}): u_sect = UniqueSectionForTest(attribute=\"attribute\", tpl_property=\"ENV[foo]\") assert u_sect.tpl_property == \"bar\" sect = SectionForTest(id=\"my_id\", attribute=\"attribute\", tpl_property=\"ENV[baz]:int\") assert sect.tpl_property == 1 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from src.taipy.config.config import Config from src.taipy.config.global_app.global_app_config import GlobalAppConfig from src.taipy.config.section import Section from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest def _test_default_global_app_config(global_config: GlobalAppConfig): assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_default_configuration(): default_config = Config._default_config assert default_config._unique_sections is not None assert len(default_config._unique_sections) == 1 assert default_config._unique_sections[UniqueSectionForTest.name] is not None assert default_config._unique_sections[UniqueSectionForTest.name].attribute == \"default_attribute\" assert default_config._sections is not None assert len(default_config._sections) == 1 _test_default_global_app_config(default_config._global_config) _test_default_global_app_config(Config.global_config) _test_default_global_app_config(GlobalAppConfig().default_config()) def test_register_default_configuration(): Config._register_default(SectionForTest(Section._DEFAULT_KEY, \"default_attribute\", prop1=\"prop1\")) # Replace the first default section Config._register_default(SectionForTest(Section._DEFAULT_KEY, \"default_attribute\", prop2=\"prop2\")) default_section = Config.sections[SectionForTest.name][Section._DEFAULT_KEY] assert len(default_section.properties) == 1 assert default_section.prop2 == \"prop2\" assert default_section.prop1 is None "} {"text": "import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import LoadingError from tests.config.utils.named_temporary_file import NamedTemporaryFile def test_node_can_not_appear_twice(): config = NamedTemporaryFile( \"\"\" [unique_section_name] attribute = \"my_attribute\" [unique_section_name] attribute = \"other_attribute\" \"\"\" ) with pytest.raises(LoadingError, match=\"Can not load configuration\"): Config.load(config.filename) def test_skip_configuration_outside_nodes(): config = NamedTemporaryFile( \"\"\" foo = \"bar\" \"\"\" ) Config.load(config.filename) assert Config.global_config.foo is None "} {"text": "import os from unittest import mock import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import InconsistentEnvVariableError, MissingEnvVariableError from tests.config.utils.named_temporary_file import NamedTemporaryFile def test_override_default_configuration_with_code_configuration(): assert not Config.global_config.root_folder == \"foo\" assert len(Config.unique_sections) == 1 assert Config.unique_sections[\"unique_section_name\"] is not None assert Config.unique_sections[\"unique_section_name\"].attribute == \"default_attribute\" assert Config.unique_sections[\"unique_section_name\"].prop is None assert len(Config.sections) == 1 assert len(Config.sections[\"section_name\"]) == 1 assert Config.sections[\"section_name\"] is not None assert Config.sections[\"section_name\"][\"default\"].attribute == \"default_attribute\" Config.configure_global_app(root_folder=\"foo\") assert Config.global_config.root_folder == \"foo\" Config.configure_unique_section_for_tests(\"foo\", prop=\"bar\") assert len(Config.unique_sections) == 1 assert Config.unique_sections[\"unique_section_name\"] is not None assert Config.unique_sections[\"unique_section_name\"].attribute == \"foo\" assert 
Config.unique_sections[\"unique_section_name\"].prop == \"bar\" Config.configure_section_for_tests(\"my_id\", \"baz\", prop=\"qux\") assert len(Config.unique_sections) == 1 assert Config.sections[\"section_name\"] is not None assert Config.sections[\"section_name\"][\"my_id\"].attribute == \"baz\" assert Config.sections[\"section_name\"][\"my_id\"].prop == \"qux\" def test_override_default_config_with_code_config_including_env_variable_values(): Config.configure_global_app() assert Config.global_config.foo is None Config.configure_global_app(foo=\"bar\") assert Config.global_config.foo == \"bar\" with mock.patch.dict(os.environ, {\"FOO\": \"foo\"}): Config.configure_global_app(foo=\"ENV[FOO]\") assert Config.global_config.foo == \"foo\" def test_override_default_configuration_with_file_configuration(): tf = NamedTemporaryFile( \"\"\" [TAIPY] foo = \"bar\" \"\"\" ) assert Config.global_config.foo is None Config.load(tf.filename) assert Config.global_config.foo == \"bar\" def test_override_default_config_with_file_config_including_env_variable_values(): tf = NamedTemporaryFile( \"\"\" [TAIPY] foo_attribute = \"ENV[FOO]:int\" bar_attribute = \"ENV[BAR]:bool\" \"\"\" ) assert Config.global_config.foo_attribute is None assert Config.global_config.bar_attribute is None with mock.patch.dict(os.environ, {\"FOO\": \"foo\", \"BAR\": \"true\"}): with pytest.raises(InconsistentEnvVariableError): Config.load(tf.filename) Config.global_config.foo_attribute with mock.patch.dict(os.environ, {\"FOO\": \"5\"}): with pytest.raises(MissingEnvVariableError): Config.load(tf.filename) Config.global_config.bar_attribute with mock.patch.dict(os.environ, {\"FOO\": \"6\", \"BAR\": \"TRUe\"}): Config.load(tf.filename) assert Config.global_config.foo_attribute == 6 assert Config.global_config.bar_attribute def test_code_configuration_does_not_override_file_configuration(): config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] foo = 2 \"\"\" ) Config.override(config_from_filename.filename) Config.configure_global_app(foo=21) assert Config.global_config.foo == 2 # From file config def test_code_configuration_does_not_override_file_configuration_including_env_variable_values(): config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] foo = 2 \"\"\" ) Config.override(config_from_filename.filename) with mock.patch.dict(os.environ, {\"FOO\": \"21\"}): Config.configure_global_app(foo=\"ENV[FOO]\") assert Config.global_config.foo == 2 # From file config def test_file_configuration_overrides_code_configuration(): config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] foo = 2 \"\"\" ) Config.configure_global_app(foo=21) Config.load(config_from_filename.filename) assert Config.global_config.foo == 2 # From file config def test_file_configuration_overrides_code_configuration_including_env_variable_values(): config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] foo = \"ENV[FOO]:int\" \"\"\" ) Config.configure_global_app(foo=21) with mock.patch.dict(os.environ, {\"FOO\": \"2\"}): Config.load(config_from_filename.filename) assert Config.global_config.foo == 2 # From file config def test_override_default_configuration_with_multiple_configurations(): file_config = NamedTemporaryFile( \"\"\" [TAIPY] foo = 10 bar = \"baz\" \"\"\" ) # Default config is applied assert Config.global_config.foo is None assert Config.global_config.bar is None # Code config is applied Config.configure_global_app(foo=\"bar\") assert Config.global_config.foo == \"bar\" assert Config.global_config.bar is None # File config is applied 
Config.load(file_config.filename) assert Config.global_config.foo == 10 assert Config.global_config.bar == \"baz\" def test_override_default_configuration_with_multiple_configurations_including_environment_variable_values(): file_config = NamedTemporaryFile( \"\"\" [TAIPY] att = \"ENV[BAZ]\" \"\"\" ) with mock.patch.dict(os.environ, {\"FOO\": \"bar\", \"BAZ\": \"qux\"}): # Default config is applied assert Config.global_config.att is None # Code config is applied Config.configure_global_app(att=\"ENV[FOO]\") assert Config.global_config.att == \"bar\" # File config is applied Config.load(file_config.filename) assert Config.global_config.att == \"qux\" "} {"text": "import pytest from src.taipy.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest def test_unique_section_registration_and_usage(): assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == \"default_attribute\" assert Config.unique_sections[UniqueSectionForTest.name].prop is None mySection = Config.configure_unique_section_for_tests(attribute=\"my_attribute\", prop=\"my_prop\") assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert mySection is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == \"my_attribute\" assert mySection.attribute == \"my_attribute\" assert Config.unique_sections[UniqueSectionForTest.name].prop == \"my_prop\" assert mySection.prop == \"my_prop\" myNewSection = Config.configure_unique_section_for_tests(attribute=\"my_new_attribute\", prop=\"my_new_prop\") assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert myNewSection is not None assert mySection is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == \"my_new_attribute\" assert myNewSection.attribute == \"my_new_attribute\" assert mySection.attribute == \"my_new_attribute\" assert Config.unique_sections[UniqueSectionForTest.name].prop == \"my_new_prop\" assert myNewSection.prop == \"my_new_prop\" assert mySection.prop == \"my_new_prop\" def test_sections_exposed_as_attribute(): assert Config.unique_section_name.attribute == \"default_attribute\" Config.configure_unique_section_for_tests(\"my_attribute\") assert Config.unique_section_name.attribute == \"my_attribute\" assert Config.section_name[\"default\"].attribute == \"default_attribute\" Config.configure_section_for_tests(id=\"my_id\", attribute=\"my_attribute\") assert Config.section_name[\"my_id\"].attribute == \"my_attribute\" def test_section_registration_and_usage(): assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 1 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].foo is None myFirstSection = Config.configure_section_for_tests(id=\"first\", attribute=\"my_attribute\", prop=\"my_prop\", foo=\"bar\") assert Config.sections is not None assert 
len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 2 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].foo is None assert Config.sections[SectionForTest.name][\"first\"] is not None assert Config.sections[SectionForTest.name][\"first\"].attribute == \"my_attribute\" assert Config.sections[SectionForTest.name][\"first\"].prop == \"my_prop\" assert Config.sections[SectionForTest.name][\"first\"].foo == \"bar\" assert myFirstSection.attribute == \"my_attribute\" assert myFirstSection.prop == \"my_prop\" assert myFirstSection.foo == \"bar\" myNewSection = Config.configure_section_for_tests(id=\"second\", attribute=\"my_new_attribute\", prop=\"my_new_prop\") assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 3 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].foo is None assert Config.sections[SectionForTest.name][\"first\"] is not None assert Config.sections[SectionForTest.name][\"first\"].attribute == \"my_attribute\" assert Config.sections[SectionForTest.name][\"first\"].prop == \"my_prop\" assert Config.sections[SectionForTest.name][\"first\"].foo == \"bar\" assert Config.sections[SectionForTest.name][\"second\"] is not None assert Config.sections[SectionForTest.name][\"second\"].attribute == \"my_new_attribute\" assert Config.sections[SectionForTest.name][\"second\"].prop == \"my_new_prop\" assert Config.sections[SectionForTest.name][\"second\"].foo is None assert myFirstSection.attribute == \"my_attribute\" assert myFirstSection.prop == \"my_prop\" assert myFirstSection.foo == \"bar\" assert myNewSection.attribute == \"my_new_attribute\" assert myNewSection.prop == \"my_new_prop\" assert myNewSection.foo is None my2ndSection = Config.configure_section_for_tests(id=\"second\", attribute=\"my_2nd_attribute\", prop=\"my_2nd_prop\") assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 3 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].foo is None assert Config.sections[SectionForTest.name][\"first\"] is not None assert Config.sections[SectionForTest.name][\"first\"].attribute == \"my_attribute\" assert Config.sections[SectionForTest.name][\"first\"].prop == \"my_prop\" assert Config.sections[SectionForTest.name][\"first\"].foo == \"bar\" assert Config.sections[SectionForTest.name][\"second\"] is not None assert Config.sections[SectionForTest.name][\"second\"].attribute == \"my_2nd_attribute\" assert Config.sections[SectionForTest.name][\"second\"].prop == \"my_2nd_prop\" assert Config.sections[SectionForTest.name][\"second\"].foo 
is None assert myFirstSection.attribute == \"my_attribute\" assert myFirstSection.prop == \"my_prop\" assert myFirstSection.foo == \"bar\" assert myNewSection.attribute == \"my_2nd_attribute\" assert myNewSection.prop == \"my_2nd_prop\" assert myNewSection.foo is None assert my2ndSection.attribute == \"my_2nd_attribute\" assert my2ndSection.prop == \"my_2nd_prop\" assert my2ndSection.foo is None def test_block_registration(): myUniqueSection = Config.configure_unique_section_for_tests(attribute=\"my_unique_attribute\", prop=\"my_unique_prop\") mySection = Config.configure_section_for_tests(id=\"section_id\", attribute=\"my_attribute\", prop=\"my_prop\", foo=\"bar\") Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_unique_section_for_tests(attribute=\"my_new_unique_attribute\", prop=\"my_new_unique_prop\") with pytest.raises(ConfigurationUpdateBlocked): Config.configure_section_for_tests(id=\"new\", attribute=\"my_attribute\", prop=\"my_prop\", foo=\"bar\") with pytest.raises(ConfigurationUpdateBlocked): myUniqueSection.attribute = \"foo\" with pytest.raises(ConfigurationUpdateBlocked): myUniqueSection.properties = {\"foo\": \"bar\"} # myUniqueSection stay the same assert myUniqueSection.attribute == \"my_unique_attribute\" assert myUniqueSection.properties == {\"prop\": \"my_unique_prop\"} with pytest.raises(ConfigurationUpdateBlocked): mySection.attribute = \"foo\" with pytest.raises(ConfigurationUpdateBlocked): mySection.properties = {\"foo\": \"foo\"} # mySection stay the same assert mySection.attribute == \"my_attribute\" assert mySection.properties == {\"prop\": \"my_prop\", \"foo\": \"bar\", \"prop_int\": 0} "} {"text": "import pytest from src.taipy.config.config import Config from src.taipy.config.section import Section from tests.config.utils.named_temporary_file import NamedTemporaryFile from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.section_of_sections_list_for_tests import SectionOfSectionsListForTest @pytest.fixture def _init_list_section_for_test(): Config._register_default(SectionOfSectionsListForTest(Section._DEFAULT_KEY, [], prop=\"default_prop\", prop_int=0)) Config.configure_list_section_for_tests = SectionOfSectionsListForTest._configure Config.list_section_name = Config.sections[SectionOfSectionsListForTest.name] def test_applied_config_compilation_does_not_change_other_configs(): assert len(Config._default_config._unique_sections) == 1 assert Config._default_config._unique_sections[\"unique_section_name\"] is not None assert Config._default_config._unique_sections[\"unique_section_name\"].attribute == \"default_attribute\" assert Config._default_config._unique_sections[\"unique_section_name\"].prop is None assert len(Config._python_config._unique_sections) == 0 assert len(Config._file_config._unique_sections) == 0 assert len(Config._env_file_config._unique_sections) == 0 assert len(Config._applied_config._unique_sections) == 1 assert Config._applied_config._unique_sections[\"unique_section_name\"] is not None assert Config._applied_config._unique_sections[\"unique_section_name\"].attribute == \"default_attribute\" assert Config._applied_config._unique_sections[\"unique_section_name\"].prop is None assert len(Config.unique_sections) == 1 assert Config.unique_sections[\"unique_section_name\"] is not None assert Config.unique_sections[\"unique_section_name\"].attribute == \"default_attribute\" assert Config.unique_sections[\"unique_section_name\"].prop is None assert ( 
Config._applied_config._unique_sections[\"unique_section_name\"] is not Config._default_config._unique_sections[\"unique_section_name\"] ) Config.configure_unique_section_for_tests(\"qwe\", prop=\"rty\") assert len(Config._default_config._unique_sections) == 1 assert Config._default_config._unique_sections[\"unique_section_name\"] is not None assert Config._default_config._unique_sections[\"unique_section_name\"].attribute == \"default_attribute\" assert Config._default_config._unique_sections[\"unique_section_name\"].prop is None assert len(Config._python_config._unique_sections) == 1 assert Config._python_config._unique_sections[\"unique_section_name\"] is not None assert Config._python_config._unique_sections[\"unique_section_name\"].attribute == \"qwe\" assert Config._python_config._unique_sections[\"unique_section_name\"].prop == \"rty\" assert ( Config._python_config._unique_sections[\"unique_section_name\"] != Config._default_config._unique_sections[\"unique_section_name\"] ) assert len(Config._file_config._unique_sections) == 0 assert len(Config._env_file_config._unique_sections) == 0 assert len(Config._applied_config._unique_sections) == 1 assert Config._applied_config._unique_sections[\"unique_section_name\"] is not None assert Config._applied_config._unique_sections[\"unique_section_name\"].attribute == \"qwe\" assert Config._applied_config._unique_sections[\"unique_section_name\"].prop == \"rty\" assert ( Config._python_config._unique_sections[\"unique_section_name\"] != Config._applied_config._unique_sections[\"unique_section_name\"] ) assert ( Config._default_config._unique_sections[\"unique_section_name\"] != Config._applied_config._unique_sections[\"unique_section_name\"] ) assert len(Config.unique_sections) == 1 assert Config.unique_sections[\"unique_section_name\"] is not None assert Config.unique_sections[\"unique_section_name\"].attribute == \"qwe\" assert Config.unique_sections[\"unique_section_name\"].prop == \"rty\" def test_nested_section_instance_in_python(_init_list_section_for_test): s1_cfg = Config.configure_section_for_tests(\"s1\", attribute=\"foo\") s2_cfg = Config.configure_section_for_tests(\"s2\", attribute=\"bar\") ss_cfg = Config.configure_list_section_for_tests(\"ss\", attribute=\"foo\", sections_list=[s1_cfg, s2_cfg]) s1_config_applied_instance = Config.section_name[\"s1\"] s1_config_python_instance = Config._python_config._sections[SectionForTest.name][\"s1\"] s2_config_applied_instance = Config.section_name[\"s2\"] s2_config_python_instance = Config._python_config._sections[SectionForTest.name][\"s2\"] assert ss_cfg.sections_list[0] is s1_config_applied_instance assert ss_cfg.sections_list[0] is not s1_config_python_instance assert ss_cfg.sections_list[1] is s2_config_applied_instance assert ss_cfg.sections_list[1] is not s2_config_python_instance def _configure_in_toml(): return NamedTemporaryFile( content=\"\"\" [TAIPY] [section_name.s1] attribute = \"foo\" [section_name.s2] attribute = \"bar\" [list_section_name.ss] sections_list = [ \"foo\", \"s1:SECTION\", \"s2:SECTION\"] \"\"\" ) def test_nested_section_instance_load_toml(_init_list_section_for_test): toml_config = _configure_in_toml() Config.load(toml_config) s1_config_applied_instance = Config.section_name[\"s1\"] s1_config_python_instance = Config._python_config._sections[SectionForTest.name][\"s1\"] s2_config_applied_instance = Config.section_name[\"s2\"] s2_config_python_instance = Config._python_config._sections[SectionForTest.name][\"s2\"] ss_cfg = Config.list_section_name[\"ss\"] assert 
ss_cfg.sections_list[0] == \"foo\" assert ss_cfg.sections_list[1] is s1_config_applied_instance assert ss_cfg.sections_list[1] is not s1_config_python_instance assert ss_cfg.sections_list[2] is s2_config_applied_instance assert ss_cfg.sections_list[2] is not s2_config_python_instance def test_nested_section_instance_override_toml(_init_list_section_for_test): toml_config = _configure_in_toml() Config.override(toml_config) s1_config_applied_instance = Config.section_name[\"s1\"] s1_config_python_instance = Config._file_config._sections[SectionForTest.name][\"s1\"] s2_config_applied_instance = Config.section_name[\"s2\"] s2_config_python_instance = Config._file_config._sections[SectionForTest.name][\"s2\"] ss_cfg = Config.list_section_name[\"ss\"] assert ss_cfg.sections_list[0] == \"foo\" assert ss_cfg.sections_list[1] is s1_config_applied_instance assert ss_cfg.sections_list[1] is not s1_config_python_instance assert ss_cfg.sections_list[2] is s2_config_applied_instance assert ss_cfg.sections_list[2] is not s2_config_python_instance "} {"text": "import datetime import json import os from unittest import mock from src.taipy.config import Config from src.taipy.config._serializer._json_serializer import _JsonSerializer from src.taipy.config.common.frequency import Frequency from src.taipy.config.common.scope import Scope from tests.config.utils.named_temporary_file import NamedTemporaryFile from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest def add(a, b): return a + b class CustomClass: a = None b = None class CustomEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, datetime.datetime): result = {\"__type__\": \"Datetime\", \"__value__\": o.isoformat()} else: result = json.JSONEncoder.default(self, o) return result class CustomDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def object_hook(self, source): if source.get(\"__type__\") == \"Datetime\": return datetime.datetime.fromisoformat(source.get(\"__value__\")) else: return source def test_write_toml_configuration_file(): expected_toml_config = \"\"\" [TAIPY] [unique_section_name] attribute = \"my_attribute\" prop = \"my_prop\" prop_int = \"1:int\" prop_bool = \"False:bool\" prop_list = [ \"p1\", \"1991-01-01T00:00:00:datetime\", \"1d0h0m0s:timedelta\",] prop_scope = \"SCENARIO:SCOPE\" prop_freq = \"QUARTERLY:FREQUENCY\" baz = \"ENV[QUX]\" quux = \"ENV[QUUZ]:bool\" corge = [ \"grault\", \"ENV[GARPLY]\", \"ENV[WALDO]:int\", \"3.0:float\",] [section_name.default] attribute = \"default_attribute\" prop = \"default_prop\" prop_int = \"0:int\" [section_name.my_id] attribute = \"my_attribute\" prop = \"default_prop\" prop_int = \"1:int\" prop_bool = \"False:bool\" prop_list = [ \"unique_section_name:SECTION\",] prop_scope = \"SCENARIO\" baz = \"ENV[QUX]\" \"\"\".strip() tf = NamedTemporaryFile() with mock.patch.dict( os.environ, {\"FOO\": \"in_memory\", \"QUX\": \"qux\", \"QUUZ\": \"true\", \"GARPLY\": \"garply\", \"WALDO\": \"17\"} ): unique_section = Config.configure_unique_section_for_tests( attribute=\"my_attribute\", prop=\"my_prop\", prop_int=1, prop_bool=False, prop_list=[\"p1\", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1)], prop_scope=Scope.SCENARIO, prop_freq=Frequency.QUARTERLY, baz=\"ENV[QUX]\", quux=\"ENV[QUUZ]:bool\", corge=(\"grault\", \"ENV[GARPLY]\", \"ENV[WALDO]:int\", 3.0), ) Config.configure_section_for_tests( \"my_id\", 
\"my_attribute\", prop_int=1, prop_bool=False, prop_list=[unique_section], prop_scope=\"SCENARIO\", baz=\"ENV[QUX]\", ) Config.backup(tf.filename) actual_config = tf.read().strip() assert actual_config == expected_toml_config def test_read_toml_configuration_file(): toml_config = \"\"\" [TAIPY] foo = \"bar\" [unique_section_name] attribute = \"my_attribute\" prop = \"my_prop\" prop_int = \"1:int\" prop_bool = \"False:bool\" prop_list = [ \"p1\", \"1991-01-01T00:00:00:datetime\", \"1d0h0m0s:timedelta\",] prop_scope = \"SCENARIO:SCOPE\" prop_freq = \"QUARTERLY:FREQUENCY\" baz = \"ENV[QUX]\" quux = \"ENV[QUUZ]:bool\" corge = [ \"grault\", \"ENV[GARPLY]\", \"ENV[WALDO]:int\", \"3.0:float\",] [TAIPY.custom_properties] bar = \"baz\" [section_name.default] attribute = \"default_attribute\" prop = \"default_prop\" prop_int = \"0:int\" [section_name.my_id] attribute = \"my_attribute\" prop = \"default_prop\" prop_int = \"1:int\" prop_bool = \"False:bool\" prop_list = [ \"unique_section_name\", \"section_name.my_id\",] prop_scope = \"SCENARIO:SCOPE\" baz = \"ENV[QUX]\" \"\"\".strip() tf = NamedTemporaryFile(toml_config) with mock.patch.dict( os.environ, {\"FOO\": \"in_memory\", \"QUX\": \"qux\", \"QUUZ\": \"true\", \"GARPLY\": \"garply\", \"WALDO\": \"17\"} ): Config.override(tf.filename) assert Config.global_config.foo == \"bar\" assert Config.global_config.custom_properties.get(\"bar\") == \"baz\" assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == \"my_attribute\" assert Config.unique_sections[UniqueSectionForTest.name].prop == \"my_prop\" assert Config.unique_sections[UniqueSectionForTest.name].prop_int == 1 assert Config.unique_sections[UniqueSectionForTest.name].prop_bool is False assert Config.unique_sections[UniqueSectionForTest.name].prop_list == [ \"p1\", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1), ] assert Config.unique_sections[UniqueSectionForTest.name].prop_scope == Scope.SCENARIO assert Config.unique_sections[UniqueSectionForTest.name].prop_freq == Frequency.QUARTERLY assert Config.unique_sections[UniqueSectionForTest.name].baz == \"qux\" assert Config.unique_sections[UniqueSectionForTest.name].quux is True assert Config.unique_sections[UniqueSectionForTest.name].corge == [ \"grault\", \"garply\", 17, 3.0, ] assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 2 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].prop_int == 0 assert Config.sections[SectionForTest.name][\"my_id\"] is not None assert Config.sections[SectionForTest.name][\"my_id\"].attribute == \"my_attribute\" assert Config.sections[SectionForTest.name][\"my_id\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"my_id\"].prop_int == 1 assert Config.sections[SectionForTest.name][\"my_id\"].prop_bool is False assert Config.sections[SectionForTest.name][\"my_id\"].prop_list == [\"unique_section_name\", \"section_name.my_id\"] assert Config.sections[SectionForTest.name][\"my_id\"].prop_scope == Scope.SCENARIO assert Config.sections[SectionForTest.name][\"my_id\"].baz == \"qux\" tf2 = 
NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == toml_config def test_read_write_toml_configuration_file_with_function_and_class(): expected_toml_config = \"\"\" [TAIPY] [unique_section_name] attribute = \"my_attribute\" prop = \"my_prop\" prop_list = [ \"tests.config.test_section_serialization.CustomEncoder:class\", \\ \"tests.config.test_section_serialization.CustomDecoder:class\",] [section_name.default] attribute = \"default_attribute\" prop = \"default_prop\" prop_int = \"0:int\" [section_name.my_id] attribute = \"my_attribute\" prop = \"default_prop\" prop_int = \"0:int\" prop_fct_list = [ \"tests.config.test_section_serialization.add:function\",] prop_class_list = [ \"tests.config.test_section_serialization.CustomClass:class\",] [section_name.my_id_2] attribute = \"my_attribute_2\" prop = \"default_prop\" prop_int = \"0:int\" prop_fct_list = [ \"builtins.print:function\", \"builtins.pow:function\",] \"\"\".strip() tf = NamedTemporaryFile() Config.configure_unique_section_for_tests( attribute=\"my_attribute\", prop=\"my_prop\", prop_list=[CustomEncoder, CustomDecoder], ) Config.configure_section_for_tests( \"my_id\", \"my_attribute\", prop_fct_list=[add], prop_class_list=[CustomClass], ) Config.configure_section_for_tests( \"my_id_2\", \"my_attribute_2\", prop_fct_list=[print, pow], ) Config.backup(tf.filename) actual_exported_toml = tf.read().strip() assert actual_exported_toml == expected_toml_config Config.override(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_exported_toml_2 = tf2.read().strip() assert actual_exported_toml_2 == expected_toml_config def test_write_json_configuration_file(): expected_json_config = \"\"\" { \"TAIPY\": {}, \"unique_section_name\": { \"attribute\": \"my_attribute\", \"prop\": \"my_prop\", \"prop_int\": \"1:int\", \"prop_bool\": \"False:bool\", \"prop_list\": [ \"p1\", \"1991-01-01T00:00:00:datetime\", \"1d0h0m0s:timedelta\" ], \"prop_scope\": \"SCENARIO:SCOPE\", \"prop_freq\": \"QUARTERLY:FREQUENCY\" }, \"section_name\": { \"default\": { \"attribute\": \"default_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"0:int\" }, \"my_id\": { \"attribute\": \"my_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"1:int\", \"prop_bool\": \"False:bool\", \"prop_list\": [ \"unique_section_name:SECTION\" ], \"prop_scope\": \"SCENARIO\", \"baz\": \"ENV[QUX]\" } } } \"\"\".strip() tf = NamedTemporaryFile() Config._serializer = _JsonSerializer() unique_section = Config.configure_unique_section_for_tests( attribute=\"my_attribute\", prop=\"my_prop\", prop_int=1, prop_bool=False, prop_list=[\"p1\", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1)], prop_scope=Scope.SCENARIO, prop_freq=Frequency.QUARTERLY, ) Config.configure_section_for_tests( \"my_id\", \"my_attribute\", prop_int=1, prop_bool=False, prop_list=[unique_section], prop_scope=\"SCENARIO\", baz=\"ENV[QUX]\", ) Config.backup(tf.filename) actual_config = tf.read() assert actual_config == expected_json_config def test_read_json_configuration_file(): json_config = \"\"\" { \"TAIPY\": { \"root_folder\": \"./taipy/\", \"storage_folder\": \".data/\", \"repository_type\": \"filesystem\" }, \"unique_section_name\": { \"attribute\": \"my_attribute\", \"prop\": \"my_prop\", \"prop_int\": \"1:int\", \"prop_bool\": \"False:bool\", \"prop_list\": [ \"p1\", \"1991-01-01T00:00:00:datetime\", \"1d0h0m0s:timedelta\" ], \"prop_scope\": \"SCENARIO:SCOPE\", \"prop_freq\": \"QUARTERLY:FREQUENCY\" }, \"section_name\": 
{ \"default\": { \"attribute\": \"default_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"0:int\" }, \"my_id\": { \"attribute\": \"my_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"1:int\", \"prop_bool\": \"False:bool\", \"prop_list\": [ \"unique_section_name\" ], \"prop_scope\": \"SCENARIO\" } } } \"\"\".strip() Config._serializer = _JsonSerializer() tf = NamedTemporaryFile(json_config) Config.override(tf.filename) assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == \"my_attribute\" assert Config.unique_sections[UniqueSectionForTest.name].prop == \"my_prop\" assert Config.unique_sections[UniqueSectionForTest.name].prop_int == 1 assert Config.unique_sections[UniqueSectionForTest.name].prop_bool is False assert Config.unique_sections[UniqueSectionForTest.name].prop_list == [ \"p1\", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1), ] assert Config.unique_sections[UniqueSectionForTest.name].prop_scope == Scope.SCENARIO assert Config.unique_sections[UniqueSectionForTest.name].prop_freq == Frequency.QUARTERLY assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 2 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].prop_int == 0 assert Config.sections[SectionForTest.name][\"my_id\"] is not None assert Config.sections[SectionForTest.name][\"my_id\"].attribute == \"my_attribute\" assert Config.sections[SectionForTest.name][\"my_id\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"my_id\"].prop_int == 1 assert Config.sections[SectionForTest.name][\"my_id\"].prop_bool is False assert Config.sections[SectionForTest.name][\"my_id\"].prop_list == [\"unique_section_name\"] tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == json_config def test_read_write_json_configuration_file_with_function_and_class(): expected_json_config = \"\"\" { \"TAIPY\": {}, \"unique_section_name\": { \"attribute\": \"my_attribute\", \"prop\": \"my_prop\", \"prop_list\": [ \"tests.config.test_section_serialization.CustomEncoder:class\", \"tests.config.test_section_serialization.CustomDecoder:class\" ] }, \"section_name\": { \"default\": { \"attribute\": \"default_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"0:int\" }, \"my_id\": { \"attribute\": \"my_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"0:int\", \"prop_fct_list\": [ \"tests.config.test_section_serialization.add:function\" ], \"prop_class_list\": [ \"tests.config.test_section_serialization.CustomClass:class\" ] }, \"my_id_2\": { \"attribute\": \"my_attribute_2\", \"prop\": \"default_prop\", \"prop_int\": \"0:int\", \"prop_fct_list\": [ \"builtins.print:function\", \"builtins.pow:function\" ] } } } \"\"\".strip() Config._serializer = _JsonSerializer() tf = NamedTemporaryFile() Config.configure_unique_section_for_tests( attribute=\"my_attribute\", prop=\"my_prop\", prop_list=[CustomEncoder, CustomDecoder], ) Config.configure_section_for_tests( \"my_id\", \"my_attribute\", prop_fct_list=[add], prop_class_list=[CustomClass], ) 
Config.configure_section_for_tests( \"my_id_2\", \"my_attribute_2\", prop_fct_list=[print, pow], ) Config.backup(tf.filename) actual_exported_json = tf.read().strip() assert actual_exported_json == expected_json_config Config.override(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_exported_json_2 = tf2.read().strip() assert actual_exported_json_2 == expected_json_config "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from src.taipy.config._config import _Config from src.taipy.config.checker._checker import _Checker class TestDefaultConfigChecker: def test_check_default_config(self): config = _Config._default_config() collector = _Checker._check(config) assert len(collector._errors) == 0 assert len(collector._infos) == 0 assert len(collector._warnings) == 0 "} {"text": "from src.taipy.config.checker.issue import Issue from src.taipy.config.checker.issue_collector import IssueCollector class TestIssueCollector: def test_add_error(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_error(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 1 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") collector._add_error(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 2 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") def test_add_warning(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_warning(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 1 assert len(collector.infos) == 0 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", \"value\", \"message\", \"checker\") collector._add_warning(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 2 assert len(collector.infos) == 0 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, \"field\", \"value\", \"message\", \"checker\") def test_add_info(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_info(\"field\", 
\"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 1 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") collector._add_info(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 2 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") def test_all(self): collector = IssueCollector() collector._add_info(\"foo\", \"bar\", \"baz\", \"qux\") assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") collector._add_warning(\"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[1] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") collector._add_warning(\"foo3\", \"bar3\", \"baz3\", \"qux3\") assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, \"foo3\", \"bar3\", \"baz3\", \"qux3\") assert collector.all[2] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") collector._add_info(\"field\", \"value\", \"message\", \"checker\") collector._add_error(\"field\", \"value\", \"message\", \"checker\") assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, \"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[2] == Issue(IssueCollector._WARNING_LEVEL, \"foo3\", \"bar3\", \"baz3\", \"qux3\") assert collector.all[3] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") assert collector.all[4] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import os from unittest import mock from unittest.mock import MagicMock from src.taipy.config import Config from src.taipy.config.checker._checker import _Checker from src.taipy.config.checker.issue_collector import IssueCollector from tests.config.utils.checker_for_tests import CheckerForTest def test_register_checker(): checker = CheckerForTest checker._check = MagicMock() _Checker.add_checker(checker) Config.check() checker._check.assert_called_once() "} {"text": "import logging from unittest import mock from src.taipy.config._config import _Config from src.taipy.config.checker._checkers._config_checker import _ConfigChecker from src.taipy.config.checker.issue import Issue from src.taipy.config.checker.issue_collector import IssueCollector class MyCustomChecker(_ConfigChecker): def _check(self) -> IssueCollector: # type: ignore pass def test__error(): with mock.patch.object(logging.Logger, \"error\"): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._error(\"field\", 17, \"my message\") assert len(collector.all) == 1 assert len(collector.errors) == 1 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") MyCustomChecker(_Config(), collector)._error(\"foo\", \"bar\", \"baz\") assert len(collector.all) == 2 assert len(collector.errors) == 2 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") assert collector.errors[1] == Issue(IssueCollector._ERROR_LEVEL, \"foo\", \"bar\", \"baz\", \"MyCustomChecker\") def test__warning(): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._warning(\"field\", 17, \"my message\") assert len(collector.all) == 1 assert len(collector.warnings) == 1 assert len(collector.errors) == 0 assert len(collector.infos) == 0 assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") MyCustomChecker(_Config(), collector)._warning(\"foo\", \"bar\", \"baz\") assert len(collector.all) == 2 assert len(collector.warnings) == 2 assert len(collector.errors) == 0 assert len(collector.infos) == 0 assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") assert collector.warnings[1] == Issue(IssueCollector._WARNING_LEVEL, \"foo\", \"bar\", \"baz\", \"MyCustomChecker\") def test__info(): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._info(\"field\", 17, \"my message\") assert len(collector.all) == 1 assert len(collector.infos) == 1 assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") MyCustomChecker(_Config(), collector)._info(\"foo\", \"bar\", \"baz\") assert len(collector.all) == 2 assert len(collector.infos) == 2 assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") assert collector.infos[1] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"MyCustomChecker\") "} {"text": "from src.taipy.config import IssueCollector from src.taipy.config.checker._checkers._config_checker 
import _ConfigChecker class CheckerForTest(_ConfigChecker): def _check(self) -> IssueCollector: return self._collector "} {"text": "from copy import copy from typing import Any, Dict, List, Optional from src.taipy.config import Config, Section from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker from .section_for_tests import SectionForTest class SectionOfSectionsListForTest(Section): name = \"list_section_name\" _MY_ATTRIBUTE_KEY = \"attribute\" _SECTIONS_LIST_KEY = \"sections_list\" def __init__(self, id: str, attribute: Any = None, sections_list: List = None, **properties): self._attribute = attribute self._sections_list = sections_list if sections_list else [] super().__init__(id, **properties) def __copy__(self): return SectionOfSectionsListForTest( self.id, self._attribute, copy(self._sections_list), **copy(self._properties) ) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val @property def sections_list(self): return list(self._sections_list) @sections_list.setter # type: ignore @_ConfigBlocker._check() def sections_list(self, val): self._sections_list = val def _clean(self): self._attribute = None self._sections_list = [] self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute if self._sections_list: as_dict[self._SECTIONS_LIST_KEY] = self._sections_list as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, id) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) section_configs = config._sections.get(SectionForTest.name, None) or [] # type: ignore sections_list = [] if inputs_as_str := as_dict.pop(cls._SECTIONS_LIST_KEY, None): for section_id in inputs_as_str: if section_id in section_configs: sections_list.append(section_configs[section_id]) else: sections_list.append(section_id) return SectionOfSectionsListForTest(id=id, attribute=attribute, sections_list=sections_list, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._sections_list = as_dict.pop(self._SECTIONS_LIST_KEY, self._sections_list) if self._sections_list is None and default_section: self._sections_list = default_section._sections_list self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(id: str, attribute: str, sections_list: List = None, **properties): section = SectionOfSectionsListForTest(id, attribute, sections_list, **properties) Config._register(section) return Config.sections[SectionOfSectionsListForTest.name][id] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import os import tempfile class NamedTemporaryFile: def __init__(self, content=None): with tempfile.NamedTemporaryFile(\"w\", delete=False) as fd: if content: fd.write(content) self.filename = fd.name def read(self): with open(self.filename, \"r\") as fp: return fp.read() def __del__(self): os.unlink(self.filename) "} {"text": "from copy import copy from typing import Any, Dict, Optional from src.taipy.config import Config, Section from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker class SectionForTest(Section): name = \"section_name\" _MY_ATTRIBUTE_KEY = \"attribute\" def __init__(self, id: str, attribute: Any = None, **properties): self._attribute = attribute super().__init__(id, **properties) def __copy__(self): return SectionForTest(self.id, self._attribute, **copy(self._properties)) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val def _clean(self): self._attribute = None self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, id) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) return SectionForTest(id=id, attribute=attribute, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(id: str, attribute: str, **properties): section = SectionForTest(id, attribute, **properties) Config._register(section) return Config.sections[SectionForTest.name][id] "} {"text": "from copy import copy from typing import Any, Dict, Optional from src.taipy.config import Config from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker from src.taipy.config.unique_section import UniqueSection class UniqueSectionForTest(UniqueSection): name = \"unique_section_name\" _MY_ATTRIBUTE_KEY = \"attribute\" def __init__(self, attribute: str = None, **properties): self._attribute = attribute super().__init__(**properties) def __copy__(self): return UniqueSectionForTest(self._attribute, **copy(self._properties)) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val def _clean(self): self._attribute = None self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id=None, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, None) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) return UniqueSectionForTest(attribute=attribute, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = 
as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(attribute: str, **properties): section = UniqueSectionForTest(attribute, **properties) Config._register(section) return Config.unique_sections[UniqueSectionForTest.name] "} {"text": "import pytest from src.taipy.config.common._validate_id import _validate_id from src.taipy.config.exceptions.exceptions import InvalidConfigurationId class TestId: def test_validate_id(self): s = _validate_id(\"foo\") assert s == \"foo\" with pytest.raises(InvalidConfigurationId): _validate_id(\"1foo\") with pytest.raises(InvalidConfigurationId): _validate_id(\"foo bar\") with pytest.raises(InvalidConfigurationId): _validate_id(\"foo/foo$\") with pytest.raises(InvalidConfigurationId): _validate_id(\"\") with pytest.raises(InvalidConfigurationId): _validate_id(\" \") with pytest.raises(InvalidConfigurationId): _validate_id(\"class\") with pytest.raises(InvalidConfigurationId): _validate_id(\"def\") with pytest.raises(InvalidConfigurationId): _validate_id(\"with\") with pytest.raises(InvalidConfigurationId): _validate_id(\"CYCLE\") with pytest.raises(InvalidConfigurationId): _validate_id(\"SCENARIO\") with pytest.raises(InvalidConfigurationId): _validate_id(\"SEQUENCE\") with pytest.raises(InvalidConfigurationId): _validate_id(\"TASK\") with pytest.raises(InvalidConfigurationId): _validate_id(\"DATANODE\") "} {"text": "import pytest from src.taipy.config.common.scope import Scope def test_scope(): # Test __ge__ method assert Scope.GLOBAL >= Scope.GLOBAL assert Scope.GLOBAL >= Scope.CYCLE assert Scope.CYCLE >= Scope.CYCLE assert Scope.GLOBAL >= Scope.SCENARIO assert Scope.CYCLE >= Scope.SCENARIO assert Scope.SCENARIO >= Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO >= \"testing string\" # Test __gt__ method assert Scope.GLOBAL > Scope.CYCLE assert Scope.GLOBAL > Scope.SCENARIO assert Scope.CYCLE > Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO > \"testing string\" # Test __le__ method assert Scope.GLOBAL <= Scope.GLOBAL assert Scope.CYCLE <= Scope.GLOBAL assert Scope.CYCLE <= Scope.CYCLE assert Scope.SCENARIO <= Scope.GLOBAL assert Scope.SCENARIO <= Scope.CYCLE assert Scope.SCENARIO <= Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO <= \"testing string\" # Test __lt__ method assert Scope.SCENARIO < Scope.GLOBAL assert Scope.SCENARIO < Scope.GLOBAL assert Scope.SCENARIO < Scope.CYCLE with pytest.raises(TypeError): assert Scope.SCENARIO < \"testing string\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import argparse import re import sys import pytest from src.taipy._cli._base_cli import _CLI if sys.version_info >= (3, 10): argparse_options_str = \"options:\" else: argparse_options_str = \"optional arguments:\" def preprocess_stdout(stdout): stdout = stdout.replace(\"\\n\", \" \").replace(\"\\t\", \" \") return re.sub(\" +\", \" \", stdout) def remove_subparser(name: str): \"\"\"Remove a subparser from argparse.\"\"\" _CLI._sub_taipyparsers.pop(name, None) if _CLI._subparser_action: _CLI._subparser_action._name_parser_map.pop(name, None) for action in _CLI._subparser_action._choices_actions: if action.dest == name: _CLI._subparser_action._choices_actions.remove(action) @pytest.fixture(autouse=True, scope=\"function\") def clean_argparser(): _CLI._parser = argparse.ArgumentParser(conflict_handler=\"resolve\") _CLI._arg_groups = {} subcommands = list(_CLI._sub_taipyparsers.keys()) for subcommand in subcommands: remove_subparser(subcommand) yield def test_subparser(capfd): subcommand_1 = _CLI._add_subparser(\"subcommand_1\", help=\"subcommand_1 help\") subcommand_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") subcommand_1.add_argument(\"--bar\", \"-b\", help=\"bar help\") subcommand_2 = _CLI._add_subparser(\"subcommand_2\", help=\"subcommand_2 help\") subcommand_2.add_argument(\"--doo\", \"-d\", help=\"doo help\") subcommand_2.add_argument(\"--baz\", \"-z\", help=\"baz help\") expected_subcommand_1_help_message = f\"\"\"subcommand_1 [-h] [--foo FOO] [--bar BAR] {argparse_options_str} -h, --help show this help message and exit --foo FOO, -f FOO foo help --bar BAR, -b BAR bar help \"\"\" subcommand_1.print_help() stdout, _ = capfd.readouterr() assert preprocess_stdout(expected_subcommand_1_help_message) in preprocess_stdout(stdout) expected_subcommand_2_help_message = f\"\"\"subcommand_2 [-h] [--doo DOO] [--baz BAZ] {argparse_options_str} -h, --help show this help message and exit --doo DOO, -d DOO doo help --baz BAZ, -z BAZ baz help \"\"\" subcommand_2.print_help() stdout, _ = capfd.readouterr() assert preprocess_stdout(expected_subcommand_2_help_message) in preprocess_stdout(stdout) def test_duplicate_subcommand(): subcommand_1 = _CLI._add_subparser(\"subcommand_1\", help=\"subcommand_1 help\") subcommand_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") subcommand_2 = _CLI._add_subparser(\"subcommand_1\", help=\"subcommand_2 help\") subcommand_2.add_argument(\"--bar\", \"-b\", help=\"bar help\") # The title of subcommand_2 is duplicated with subcommand_1, and therefore # there will be no new subcommand created assert len(_CLI._sub_taipyparsers) == 1 def test_groupparser(capfd): group_1 = _CLI._add_groupparser(\"group_1\", \"group_1 desc\") group_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") group_1.add_argument(\"--bar\", \"-b\", help=\"bar help\") group_2 = _CLI._add_groupparser(\"group_2\", \"group_2 desc\") group_2.add_argument(\"--doo\", \"-d\", help=\"doo help\") group_2.add_argument(\"--baz\", \"-z\", help=\"baz help\") expected_help_message = \"\"\" group_1: group_1 desc --foo FOO, -f FOO foo help --bar BAR, -b BAR bar help group_2: group_2 desc --doo DOO, -d DOO doo help --baz BAZ, -z BAZ baz help \"\"\".strip() _CLI._parser.print_help() stdout, _ = capfd.readouterr() assert expected_help_message in stdout def test_duplicate_group(): group_1 = _CLI._add_groupparser(\"group_1\", \"group_1 desc\") group_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") group_2 = _CLI._add_groupparser(\"group_1\", \"group_2 desc\") group_2.add_argument(\"--bar\", 
\"-b\", help=\"bar help\") # The title of group_2 is duplicated with group_1, and therefore # there will be no new group created assert len(_CLI._arg_groups) == 1 "} {"text": "import datetime import os from unittest import mock import pytest from src.taipy.config.common._template_handler import _TemplateHandler from src.taipy.config.common.frequency import Frequency from src.taipy.config.common.scope import Scope from src.taipy.config.exceptions.exceptions import InconsistentEnvVariableError def test_replace_if_template(): assert_does_not_change(\"123\") assert_does_not_change(\"foo\") assert_does_not_change(\"_foo\") assert_does_not_change(\"_foo_\") assert_does_not_change(\"foo_\") assert_does_not_change(\"foo\") assert_does_not_change(\"foo_1\") assert_does_not_change(\"1foo_1\") assert_does_not_change(\"env(foo)\") assert_does_not_change(\"env\") assert_does_not_change(\"env[foo]\") assert_does_not_change(\"Env[foo]\") assert_does_not_change(\"ENV[1foo]\") assert_does_not_change(\"123:bool\") assert_does_not_change(\"foo:bool\") assert_does_not_change(\"_foo:bool\") assert_does_not_change(\"_foo_:bool\") assert_does_not_change(\"foo_:bool\") assert_does_not_change(\"foo:bool\") assert_does_not_change(\"foo_1:bool\") assert_does_not_change(\"1foo_1:bool\") assert_does_not_change(\"env(foo):bool\") assert_does_not_change(\"env:bool\") assert_does_not_change(\"env[foo]:bool\") assert_does_not_change(\"Env[foo]:bool\") assert_does_not_change(\"ENV[1foo]:bool\") assert_does_not_change(\"ENV[foo]:\") assert_does_not_change(\"ENV[_foo]:\") assert_does_not_change(\"ENV[foo_]:\") assert_does_not_change(\"ENV[foo0]:\") assert_does_not_change(\"ENV[foo_0]:\") assert_does_not_change(\"ENV[_foo_0]:\") assert_does_not_change(\"ENV[foo]:foo\") assert_does_not_change(\"ENV[_foo]:foo\") assert_does_not_change(\"ENV[foo_]:foo\") assert_does_not_change(\"ENV[foo0]:foo\") assert_does_not_change(\"ENV[foo_0]:foo\") assert_does_not_change(\"ENV[_foo_0]:foo\") assert_does_replace(\"ENV[foo]\", \"foo\", \"VALUE\", str) assert_does_replace(\"ENV[_foo]\", \"_foo\", \"VALUE\", str) assert_does_replace(\"ENV[foo_]\", \"foo_\", \"VALUE\", str) assert_does_replace(\"ENV[foo0]\", \"foo0\", \"VALUE\", str) assert_does_replace(\"ENV[foo_0]\", \"foo_0\", \"VALUE\", str) assert_does_replace(\"ENV[_foo_0]\", \"_foo_0\", \"VALUE\", str) assert_does_replace(\"ENV[foo]:str\", \"foo\", \"VALUE\", str) assert_does_replace(\"ENV[_foo]:str\", \"_foo\", \"VALUE\", str) assert_does_replace(\"ENV[foo_]:str\", \"foo_\", \"VALUE\", str) assert_does_replace(\"ENV[foo0]:str\", \"foo0\", \"VALUE\", str) assert_does_replace(\"ENV[foo_0]:str\", \"foo_0\", \"VALUE\", str) assert_does_replace(\"ENV[_foo_0]:str\", \"_foo_0\", \"VALUE\", str) assert_does_replace(\"ENV[foo]:int\", \"foo\", \"1\", int) assert_does_replace(\"ENV[_foo]:int\", \"_foo\", \"1\", int) assert_does_replace(\"ENV[foo_]:int\", \"foo_\", \"1\", int) assert_does_replace(\"ENV[foo0]:int\", \"foo0\", \"1\", int) assert_does_replace(\"ENV[foo_0]:int\", \"foo_0\", \"1\", int) assert_does_replace(\"ENV[_foo_0]:int\", \"_foo_0\", \"1\", int) assert_does_replace(\"ENV[foo]:float\", \"foo\", \"1.\", float) assert_does_replace(\"ENV[_foo]:float\", \"_foo\", \"1.\", float) assert_does_replace(\"ENV[foo_]:float\", \"foo_\", \"1.\", float) assert_does_replace(\"ENV[foo0]:float\", \"foo0\", \"1.\", float) assert_does_replace(\"ENV[foo_0]:float\", \"foo_0\", \"1.\", float) assert_does_replace(\"ENV[_foo_0]:float\", \"_foo_0\", \"1.\", float) assert_does_replace(\"ENV[foo]:bool\", 
\"foo\", \"True\", bool) assert_does_replace(\"ENV[_foo]:bool\", \"_foo\", \"True\", bool) assert_does_replace(\"ENV[foo_]:bool\", \"foo_\", \"True\", bool) assert_does_replace(\"ENV[foo0]:bool\", \"foo0\", \"True\", bool) assert_does_replace(\"ENV[foo_0]:bool\", \"foo_0\", \"True\", bool) assert_does_replace(\"ENV[_foo_0]:bool\", \"_foo_0\", \"True\", bool) def assert_does_replace(template, env_variable_name, replaced_by, as_type): with mock.patch.dict(os.environ, {env_variable_name: replaced_by}): tpl = _TemplateHandler() assert tpl._replace_templates(template) == as_type(replaced_by) def assert_does_not_change(template): tpl = _TemplateHandler() assert tpl._replace_templates(template) == template def test_replace_tuple_list_dict(): with mock.patch.dict(os.environ, {\"FOO\": \"true\", \"BAR\": \"3\", \"BAZ\": \"qux\"}): tpl = _TemplateHandler() now = datetime.datetime.now() actual = tpl._replace_templates((\"ENV[FOO]:bool\", now, \"ENV[BAR]:int\", \"ENV[BAZ]\", \"quz\")) assert actual == (True, now, 3, \"qux\", \"quz\") actual = tpl._replace_templates((\"ENV[FOO]:bool\", now, \"ENV[BAR]:int\", \"ENV[BAZ]\", \"quz\")) assert actual == (True, now, 3, \"qux\", \"quz\") def test_to_bool(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool(\"okhds\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool(\"no\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool(\"tru\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool(\"tru_e\") assert _TemplateHandler._to_bool(\"true\") assert _TemplateHandler._to_bool(\"True\") assert _TemplateHandler._to_bool(\"TRUE\") assert _TemplateHandler._to_bool(\"TruE\") assert _TemplateHandler._to_bool(\"TrUE\") assert not _TemplateHandler._to_bool(\"false\") assert not _TemplateHandler._to_bool(\"False\") assert not _TemplateHandler._to_bool(\"FALSE\") assert not _TemplateHandler._to_bool(\"FalSE\") assert not _TemplateHandler._to_bool(\"FalSe\") def test_to_int(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_int(\"okhds\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_int(\"_45\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_int(\"12.5\") assert 12 == _TemplateHandler._to_int(\"12\") assert 0 == _TemplateHandler._to_int(\"0\") assert -2 == _TemplateHandler._to_int(\"-2\") assert 156165 == _TemplateHandler._to_int(\"156165\") def test_to_float(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_float(\"okhds\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_float(\"_45\") assert 12.5 == _TemplateHandler._to_float(\"12.5\") assert 2.0 == _TemplateHandler._to_float(\"2\") assert 0.0 == _TemplateHandler._to_float(\"0\") assert -2.1 == _TemplateHandler._to_float(\"-2.1\") assert 156165.3 == _TemplateHandler._to_float(\"156165.3\") def test_to_scope(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_scope(\"okhds\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_scope(\"plop\") assert Scope.GLOBAL == _TemplateHandler._to_scope(\"global\") assert Scope.GLOBAL == _TemplateHandler._to_scope(\"GLOBAL\") assert Scope.SCENARIO == _TemplateHandler._to_scope(\"SCENARIO\") assert Scope.CYCLE == _TemplateHandler._to_scope(\"cycle\") def test_to_frequency(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_frequency(\"okhds\") with pytest.raises(InconsistentEnvVariableError): 
_TemplateHandler._to_frequency(\"plop\") assert Frequency.DAILY == _TemplateHandler._to_frequency(\"DAILY\") assert Frequency.DAILY == _TemplateHandler._to_frequency(\"Daily\") assert Frequency.WEEKLY == _TemplateHandler._to_frequency(\"weekly\") assert Frequency.WEEKLY == _TemplateHandler._to_frequency(\"WEEKLY\") assert Frequency.MONTHLY == _TemplateHandler._to_frequency(\"Monthly\") assert Frequency.MONTHLY == _TemplateHandler._to_frequency(\"MONThLY\") assert Frequency.QUARTERLY == _TemplateHandler._to_frequency(\"QuaRtERlY\") assert Frequency.YEARLY == _TemplateHandler._to_frequency(\"Yearly\") "} {"text": "import pytest from src.taipy.config.common._classproperty import _Classproperty class TestClassProperty: def test_class_property(self): class TestClass: @_Classproperty def test_property(cls): return \"test_property\" assert TestClass.test_property == \"test_property\" assert TestClass().test_property == \"test_property\" with pytest.raises(TypeError): TestClass.test_property() "} {"text": "import os from unittest import mock import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked def test_global_config_with_env_variable_value(): with mock.patch.dict(os.environ, {\"FOO\": \"bar\", \"BAZ\": \"qux\"}): Config.configure_global_app(foo=\"ENV[FOO]\", bar=\"ENV[BAZ]\") assert Config.global_config.foo == \"bar\" assert Config.global_config.bar == \"qux\" def test_default_global_app_config(): global_config = Config.global_config assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_block_update_global_app_config(): Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_global_app(foo=\"bar\") with pytest.raises(ConfigurationUpdateBlocked): Config.global_config.properties = {\"foo\": \"bar\"} # Test if the global_config stay as default assert Config.global_config.foo is None assert len(Config.global_config.properties) == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import argparse import re from unittest.mock import patch import pytest from src.taipy._entrypoint import _entrypoint from taipy._cli._base_cli import _CLI def preprocess_stdout(stdout): stdout = stdout.replace(\"\\n\", \" \").replace(\"\\t\", \" \") return re.sub(\" +\", \" \", stdout) def remove_subparser(name: str): \"\"\"Remove a subparser from the _CLI class.\"\"\" _CLI._sub_taipyparsers.pop(name, None) if _CLI._subparser_action: _CLI._subparser_action._name_parser_map.pop(name, None) for action in _CLI._subparser_action._choices_actions: if action.dest == name: _CLI._subparser_action._choices_actions.remove(action) @pytest.fixture(autouse=True, scope=\"function\") def clean_argparser(): _CLI._parser = argparse.ArgumentParser(conflict_handler=\"resolve\") _CLI._subparser_action = None _CLI._arg_groups = {} subcommands = list(_CLI._sub_taipyparsers.keys()) for subcommand in subcommands: remove_subparser(subcommand) yield expected_help = \"\"\"{run,manage-versions,create,migrate,help} ... positional arguments: {run,manage-versions,create,migrate,help} run Run a Taipy application. manage-versions Taipy version control system. create Create a new Taipy application. migrate Migrate entities created from old taipy versions to be compatible with the current taipy version. The entity migration should be performed only after updating taipy code to the current version. help Show the Taipy help message. \"\"\" def test_taipy_command_alone_print_help(capsys): with patch(\"sys.argv\", [\"prog\"]): _entrypoint() out, _ = capsys.readouterr() assert preprocess_stdout(expected_help) in preprocess_stdout(out) def test_taipy_help_command(capsys): with patch(\"sys.argv\", [\"prog\", \"help\"]): with pytest.raises(SystemExit): _entrypoint() out, _ = capsys.readouterr() assert preprocess_stdout(expected_help) in preprocess_stdout(out) def test_help_non_existed_command(caplog): with patch(\"sys.argv\", [\"prog\", \"help\", \"non_existed_command\"]): with pytest.raises(SystemExit): _entrypoint() assert \"non_existed_command is not a valid command.\" in caplog.text def test_taipy_create_help(capsys): expected_help = \"create [-h] [--template\" with patch(\"sys.argv\", [\"prog\", \"help\", \"create\"]): with pytest.raises(SystemExit): _entrypoint() out, _ = capsys.readouterr() assert preprocess_stdout(expected_help) in preprocess_stdout(out) "} {"text": "import os import sys from importlib.util import find_spec from pathlib import Path import pandas as pd # type: ignore import pytest from flask import Flask, g def pytest_configure(config): if (find_spec(\"src\") and find_spec(\"src.taipy\")) and (not find_spec(\"taipy\") or not find_spec(\"taipy.gui\")): import src.taipy.gui import src.taipy.gui._renderers.builder import src.taipy.gui._warnings import src.taipy.gui.builder import src.taipy.gui.data.decimator.lttb import src.taipy.gui.data.decimator.minmax import src.taipy.gui.data.decimator.rdp import src.taipy.gui.data.decimator.scatter_decimator import src.taipy.gui.data.utils import src.taipy.gui.extension import src.taipy.gui.utils._map_dict import src.taipy.gui.utils._variable_directory import src.taipy.gui.utils.expr_var_name sys.modules[\"taipy.gui._warnings\"] = sys.modules[\"src.taipy.gui._warnings\"] sys.modules[\"taipy.gui._renderers.builder\"] = sys.modules[\"src.taipy.gui._renderers.builder\"] sys.modules[\"taipy.gui.utils._variable_directory\"] = sys.modules[\"src.taipy.gui.utils._variable_directory\"] sys.modules[\"taipy.gui.utils.expr_var_name\"] = 
sys.modules[\"src.taipy.gui.utils.expr_var_name\"] sys.modules[\"taipy.gui.utils._map_dict\"] = sys.modules[\"src.taipy.gui.utils._map_dict\"] sys.modules[\"taipy.gui.extension\"] = sys.modules[\"src.taipy.gui.extension\"] sys.modules[\"taipy.gui.data.utils\"] = sys.modules[\"src.taipy.gui.data.utils\"] sys.modules[\"taipy.gui.data.decimator.lttb\"] = sys.modules[\"src.taipy.gui.data.decimator.lttb\"] sys.modules[\"taipy.gui.data.decimator.rdp\"] = sys.modules[\"src.taipy.gui.data.decimator.rdp\"] sys.modules[\"taipy.gui.data.decimator.minmax\"] = sys.modules[\"src.taipy.gui.data.decimator.minmax\"] sys.modules[\"taipy.gui.data.decimator.scatter_decimator\"] = sys.modules[ \"src.taipy.gui.data.decimator.scatter_decimator\" ] sys.modules[\"taipy.gui\"] = sys.modules[\"src.taipy.gui\"] sys.modules[\"taipy.gui.builder\"] = sys.modules[\"src.taipy.gui.builder\"] csv = pd.read_csv( f\"{Path(Path(__file__).parent.resolve())}{os.path.sep}current-covid-patients-hospital.csv\", parse_dates=[\"Day\"] ) small_dataframe_data = {\"name\": [\"A\", \"B\", \"C\"], \"value\": [1, 2, 3]} @pytest.fixture(scope=\"function\") def csvdata(): yield csv @pytest.fixture(scope=\"function\") def small_dataframe(): yield small_dataframe_data @pytest.fixture(scope=\"function\") def gui(helpers): from taipy.gui import Gui gui = Gui() yield gui # Delete Gui instance and state of some classes after each test gui.stop() helpers.test_cleanup() @pytest.fixture def helpers(): from .helpers import Helpers return Helpers @pytest.fixture def test_client(): flask_app = Flask(\"Test App\") # Create a test client using the Flask application configured for testing with flask_app.test_client() as testing_client: # Establish an application context with flask_app.app_context(): g.client_id = \"test client id\" yield testing_client # this is where the testing happens! "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import inspect import json import logging import socket import time import typing as t import warnings from types import FrameType from taipy.gui import Gui, Html, Markdown from taipy.gui._renderers.builder import _Builder from taipy.gui._warnings import TaipyGuiWarning from taipy.gui.utils._variable_directory import _reset_name_map from taipy.gui.utils.expr_var_name import _reset_expr_var_name class Helpers: @staticmethod def test_cleanup(): _Builder._reset_key() _reset_name_map() _reset_expr_var_name() @staticmethod def test_control_md(gui: Gui, md_string: str, expected_values: t.Union[str, t.List]): gui.add_page(\"test\", Markdown(md_string, frame=None)) Helpers._test_control(gui, expected_values) @staticmethod def test_control_html(gui: Gui, html_string: str, expected_values: t.Union[str, t.List]): gui.add_page(\"test\", Html(html_string, frame=None)) Helpers._test_control(gui, expected_values) @staticmethod def test_control_builder(gui: Gui, builder_page, expected_values: t.Union[str, t.List]): gui.add_page(\"test\", builder_page) Helpers._test_control(gui, expected_values) @staticmethod def _test_control(gui: Gui, expected_values: t.Union[str, t.List]): gui.run(run_server=False, single_client=True, stylekit=False) client = gui._server.test_client() response = client.get(\"/taipy-jsx/test\") assert response.status_code == 200, f\"response.status_code {response.status_code} != 200\" response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert isinstance(response_data, t.Dict), \"response_data is not Dict\" assert \"jsx\" in response_data, \"jsx not in response_data\" jsx = response_data[\"jsx\"] logging.getLogger().debug(jsx) if isinstance(expected_values, str): assert jsx == expected_values, f\"{jsx} != {expected_values}\" elif isinstance(expected_values, list): for expected_value in expected_values: assert expected_value in jsx, f\"{expected_value} not in {jsx}\" @staticmethod def assert_outward_ws_message(received_message, type, varname, value): assert isinstance(received_message, dict) assert \"name\" in received_message and received_message[\"name\"] == \"message\" assert \"args\" in received_message args = received_message[\"args\"] assert \"type\" in args and args[\"type\"] == type assert \"payload\" in args payload = args[\"payload\"][0] assert \"name\" in payload and varname in payload[\"name\"] assert \"payload\" in payload and \"value\" in payload[\"payload\"] and payload[\"payload\"][\"value\"] == value logging.getLogger().debug(payload[\"payload\"][\"value\"]) @staticmethod def assert_outward_simple_ws_message(received_message, type, varname, value): assert isinstance(received_message, dict) assert \"name\" in received_message and received_message[\"name\"] == \"message\" assert \"args\" in received_message args = received_message[\"args\"] assert \"type\" in args and args[\"type\"] == type assert \"name\" in args and args[\"name\"] == varname assert \"payload\" in args payload = args[\"payload\"] assert \"value\" in payload and payload[\"value\"] == value logging.getLogger().debug(payload[\"value\"]) @staticmethod def assert_outward_ws_simple_message(received_message, aType, values): assert isinstance(received_message, dict) assert \"name\" in received_message and received_message[\"name\"] == \"message\" assert \"args\" in received_message args = received_message[\"args\"] assert \"type\" in args and args[\"type\"] == aType for k, v in values.items(): assert k in args and args[k] == v logging.getLogger().debug(f\"{k}: {args[k]}\") 
@staticmethod def assert_outward_ws_multiple_message(received_message, type, array_len: int): assert isinstance(received_message, dict) assert \"name\" in received_message and received_message[\"name\"] == \"message\" assert \"args\" in received_message args = received_message[\"args\"] assert \"type\" in args and args[\"type\"] == type assert \"payload\" in args payload = args[\"payload\"] assert isinstance(payload, list) assert len(payload) == array_len logging.getLogger().debug(payload) @staticmethod def create_scope_and_get_sid(gui: Gui) -> str: sid = \"test\" gui._bindings()._get_or_create_scope(sid) return sid @staticmethod def port_check(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(1) if s.connect_ex((\"127.0.0.1\", 5000)) == 0: s.close() return True else: s.close() return False @staticmethod def run_e2e(gui, **kwargs): kwargs[\"run_in_thread\"] = True kwargs[\"single_client\"] = True kwargs[\"run_browser\"] = False kwargs[\"stylekit\"] = kwargs.get(\"stylekit\", False) with warnings.catch_warnings(record=True): gui.run(**kwargs) while not Helpers.port_check(): time.sleep(0.1) @staticmethod def run_e2e_multi_client(gui: Gui): with warnings.catch_warnings(record=True): gui.run(run_server=False, run_browser=False, single_client=False, stylekit=False) gui._server.run( host=gui._get_config(\"host\", \"127.0.0.1\"), port=gui._get_config(\"port\", 5000), debug=False, use_reloader=False, flask_log=False, run_in_thread=True, allow_unsafe_werkzeug=False, notebook_proxy=False, ) while not Helpers.port_check(): time.sleep(0.1) @staticmethod def get_taipy_warnings(warns: t.List[warnings.WarningMessage]) -> t.List[warnings.WarningMessage]: return [w for w in warns if w.category is TaipyGuiWarning] "} {"text": "import inspect from taipy.gui import Gui, Html def test_simple_html(gui: Gui, helpers): # html_string = \"
test\" html_string = \"test\" gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Html(html_string)) gui.run(run_server=False) client = gui._server.test_client() jsx = client.get(\"/taipy-jsx/test\").json[\"jsx\"] assert jsx == \"test
\" "} {"text": "import pytest from taipy.gui import Gui def test_invalid_control_name(gui: Gui, helpers): md_string = \"<|invalid|invalid|>\" expected_list = [\"INVALID SYNTAX - Control is 'invalid'\"] helpers.test_control_md(gui, md_string, expected_list) def test_value_to_negated_property(gui: Gui, helpers): md_string = \"<|button|not active=true|>\" expected_list = [\"\" expected_list = [\"\"] helpers.test_control_md(gui, md_string, expected_list) def test_opening_unknown_block(gui: Gui, helpers): md_string = \"<|unknown\" expected_list = [\"\" expected_list = [\"
\", \"No matching opened tag\", \"
\"] helpers.test_control_md(gui, md_string, expected_list) def test_md_link(gui: Gui, helpers): md_string = \"[content](link)\" expected_list = [\"\"] helpers.test_control_md(gui, md_string, expected_list) "} {"text": "import pytest from taipy.gui.utils._bindings import _Bindings def test_exception_binding_twice(gui, test_client): bind = _Bindings(gui) bind._new_scopes() bind._bind(\"x\", 10) with pytest.raises(ValueError): bind._bind(\"x\", 10) def test_exception_binding_invalid_name(gui): bind = _Bindings(gui) bind._new_scopes() with pytest.raises(ValueError): bind._bind(\"invalid identifier\", 10) "} {"text": "from email import message import pytest from taipy.gui._page import _Page def test_exception_page(gui): page = _Page() page._route = \"page1\" with pytest.raises(RuntimeError, match=\"Can't render page page1: no renderer found\"): page.render(gui) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. import inspect import typing as t from pathlib import Path import pytest from taipy.gui import Gui from taipy.gui.extension import Element, ElementLibrary, ElementProperty, PropertyType def render_xhtml_4_my_library(properties: t.Dict[str, t.Any]) -> str: return f\"

<h1>{properties.get('value', '')}</h1>
\" def render_xhtml_4_my_library_fail(properties: t.Dict[str, t.Any]) -> str: return f\"

{properties.get('value', '')}}\", ), }, ), } def get_name(self) -> str: return \"test_lib\" def get_elements(self) -> t.Dict[str, Element]: return MyLibrary.elts def get_resource(self, name: str) -> Path: return Path(name) class MyBadLibrary(ElementLibrary): def get_name(self) -> str: return \"bad name\" def get_elements(self) -> t.Dict[str, Element]: return {} class MyGoodLibrary(ElementLibrary): def get_name(self) -> str: return \"test_lib\" def get_elements(self) -> t.Dict[str, Element]: return {} Gui.add_library(MyLibrary()) def test_lib_input_md(gui: Gui, test_client, helpers): val = \"\" # noqa: F841 gui._set_frame(inspect.currentframe()) md_string = \"<|{val}|test_lib.testinput|multiline|>\" expected_list = [ \"\" expected = [f\"

<h1>{val}</h1>
\"] helpers.test_control_md(gui, md_string, expected) def test_lib_xhtml_fail_md(gui: Gui, test_client, helpers): val = \"title\" # noqa: F841 gui._set_frame(inspect.currentframe()) md_string = \"<|{val}|test_lib.title_fail|>\" expected = [\"title_fail.render_xhtml() did not return a valid XHTML string. unclosed token: line 1, column 9\"] helpers.test_control_md(gui, md_string, expected) def test_lib_input_html_1(gui: Gui, test_client, helpers): val = \"\" # noqa: F841 gui._set_frame(inspect.currentframe()) html_string = '' expected_list = [ \"\", ] helpers.test_control_html(gui, html_string, expected_list) def test_lib_input_html_2(gui: Gui, test_client, helpers): val = \"\" # noqa: F841 gui._set_frame(inspect.currentframe()) html_string = '{val}' expected_list = [ \"\", ] helpers.test_control_html(gui, html_string, expected_list) def test_lib_inner_md(gui: Gui, test_client, helpers): val = \"title\" # noqa: F841 gui._set_frame(inspect.currentframe()) md_string = \"<|{val}|test_lib.inner|>\" expected = [ \"\" expected = [\"first page

\" in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") gui.stop() while helpers.port_check(): time.sleep(0.1) gui.run(run_in_thread=True, run_browser=False) while not helpers.port_check(): time.sleep(0.1) assert \">first page\" in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") "} {"text": "import pytest from taipy.gui import Gui def test_add_shared_variables(gui: Gui): Gui.add_shared_variable(\"var1\", \"var2\") assert isinstance(gui._Gui__shared_variables, list) assert len(gui._Gui__shared_variables) == 2 Gui.add_shared_variables(\"var1\", \"var2\") assert len(gui._Gui__shared_variables) == 2 "} {"text": "import json from taipy.gui.gui import Gui def test_multiple_instance(): gui1 = Gui(\"<|gui1|>\") gui2 = Gui(\"<|gui2|>\") gui1.run(run_server=False) gui2.run(run_server=False) client1 = gui1._server.test_client() client2 = gui2._server.test_client() assert_multiple_instance(client1, 'value=\"gui1\"') assert_multiple_instance(client2, 'value=\"gui2\"') def assert_multiple_instance(client, expected_value): response = client.get(\"/taipy-jsx/TaiPy_root_page\") response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert response.status_code == 200 assert isinstance(response_data, dict) assert \"jsx\" in response_data assert expected_value in response_data[\"jsx\"] "} {"text": "from taipy.gui.utils._variable_directory import _MODULE_NAME_MAP, _variable_decode, _variable_encode def test_variable_encode_decode(): assert _variable_encode(\"x\", \"module\") == \"x_TPMDL_0\" assert _MODULE_NAME_MAP[0] == \"module\" assert _variable_decode(\"x_TPMDL_0\") == (\"x\", \"module\") assert _variable_encode(\"x\", None) == \"x\" assert _variable_decode(\"x\") == (\"x\", None) assert _variable_encode(\"TpExPr_x\", \"module1\") == \"TpExPr_x_TPMDL_1\" assert _MODULE_NAME_MAP[1] == \"module1\" assert _variable_decode(\"TpExPr_x_TPMDL_1\") == (\"x\", \"module1\") "} {"text": "import inspect import warnings from taipy.gui import Gui, Markdown, State, navigate def test_navigate(gui: Gui, helpers): def navigate_to(state: State): navigate(state, \"test\") with warnings.catch_warnings(record=True): gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"#This is a page\")) gui.run(run_server=False) client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) client.get(f\"/taipy-jsx/test/?client_id={sid}\") ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"A\", \"name\": \"my_button\", \"payload\": \"navigate_to\"}) # assert for received message (message that would be sent to the front-end client) assert ws_client.get_received() def test_navigate_to_no_route(gui: Gui, helpers): def navigate_to(state: State): navigate(state, \"toto\") with warnings.catch_warnings(record=True): gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"#This is a page\")) gui.run(run_server=False) client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) client.get(f\"/taipy-jsx/test/?client_id={sid}\") ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"A\", \"name\": \"my_button\", \"payload\": \"navigate_to\"}) # assert for 
received message (message that would be sent to the front-end client) assert not ws_client.get_received() def test_on_navigate_to_inexistant(gui: Gui, helpers): def on_navigate(state: State, page: str): return \"test2\" if page == \"test\" else page with warnings.catch_warnings(record=True) as records: gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"#This is a page\")) gui.run(run_server=False) client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) client.get(f\"/taipy-jsx/test?client_id={sid}\") warns = helpers.get_taipy_warnings(records) assert len(warns) == 1 text = warns[0].message.args[0] if isinstance(warns[0].message, Warning) else warns[0].message assert text == 'Cannot navigate to \"test2\": unknown page.' def test_on_navigate_to_existant(gui: Gui, helpers): def on_navigate(state: State, page: str): return \"test2\" if page == \"test1\" else page with warnings.catch_warnings(record=True): gui._set_frame(inspect.currentframe()) gui.add_page(\"test1\", Markdown(\"#This is a page test1\")) gui.add_page(\"test2\", Markdown(\"#This is a page test2\")) gui.run(run_server=False) client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) content = client.get(f\"/taipy-jsx/test1?client_id={sid}\") assert content.status_code == 302 "} {"text": "import inspect import pandas as pd # type: ignore from taipy.gui import Gui def test_expression_text_control_str(gui: Gui, test_client, helpers): gui._bind_var_val(\"x\", \"Hello World!\") md_string = \"<|{x}|>\" expected_list = [\"\" expected_list = [\"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \" 0}|>\" expected_list = [ \"\" expected_list = [\"\" expected_list = [ \"\" expected_list = [ \" 2 routes + 1 default route assert len(gui._config.pages) == 3 # 2 files -> 2 pages + 1 default page "} {"text": "import inspect import json import warnings from taipy.gui import Gui def test_render_route(gui: Gui): gui._set_frame(inspect.currentframe()) gui.add_page(\"page1\", \"# first page\") gui.add_page(\"page2\", \"# second page\") gui.run(run_server=False) with warnings.catch_warnings(record=True): client = gui._server.test_client() response = client.get(\"/taipy-init\") response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert response.status_code == 200 assert isinstance(response_data, dict) assert isinstance(response_data[\"locations\"], dict) assert \"/page1\" in response_data[\"locations\"] assert \"/page2\" in response_data[\"locations\"] assert \"/\" in response_data[\"locations\"] assert response_data[\"locations\"] == {\"/\": \"/TaiPy_root_page\", \"/page1\": \"/page1\", \"/page2\": \"/page2\"} "} {"text": "import json import pandas as pd import pytest from taipy.gui import Gui from taipy.gui.utils import _TaipyContent def test__get_real_var_name(gui: Gui): res = gui._get_real_var_name(\"\") assert isinstance(res, tuple) assert res[0] == \"\" assert res[1] == \"\" gui.run(run_server=False) with gui.get_flask_app().app_context(): with pytest.raises(NameError): res = gui._get_real_var_name(f\"{_TaipyContent.get_hash()}_var\") def test__get_user_instance(gui: Gui): gui.run(run_server=False) with gui.get_flask_app().app_context(): with pytest.warns(UserWarning): gui._get_user_instance(\"\", type(None)) def test__call_broadcast_callback(gui: Gui): 
gui.run(run_server=False) with gui.get_flask_app().app_context(): res = gui._call_broadcast_callback(lambda s, t: t, [\"Hello World\"], \"mine\") assert res == \"Hello World\" with gui.get_flask_app().app_context(): with pytest.warns(UserWarning): res = gui._call_broadcast_callback(print, [\"Hello World\"], \"mine\") assert res is None def test__refresh_expr(gui: Gui): gui.run(run_server=False) with gui.get_flask_app().app_context(): res = gui._refresh_expr(\"var\", None) assert res is None def test__tbl_cols(gui: Gui): data = pd.DataFrame({\"col1\": [0, 1, 2], \"col2\": [True, True, False]}) gui.run(run_server=False) with gui.get_flask_app().app_context(): res = gui._tbl_cols(True, None, json.dumps({}), json.dumps({\"data\": \"data\"}), data=data) d = json.loads(res) assert isinstance(d, dict) assert d[\"col1\"][\"type\"] == \"int\" res = gui._tbl_cols(False, None, \"\", \"\") assert repr(res) == \"Taipy: Do not update\" def test__chart_conf(gui: Gui): data = pd.DataFrame({\"col1\": [0, 1, 2], \"col2\": [True, True, False]}) gui.run(run_server=False) with gui.get_flask_app().app_context(): res = gui._chart_conf(True, None, json.dumps({}), json.dumps({\"data\": \"data\"}), data=data) d = json.loads(res) assert isinstance(d, dict) assert d[\"columns\"][\"col1\"][\"type\"] == \"int\" res = gui._chart_conf(False, None, \"\", \"\") assert repr(res) == \"Taipy: Do not update\" with pytest.warns(UserWarning): res = gui._chart_conf(True, None, \"\", \"\") assert repr(res) == \"Taipy: Do not update\" def test__get_valid_adapter_result(gui: Gui): gui.run(run_server=False) with gui.get_flask_app().app_context(): res = gui._get_valid_adapter_result((\"id\", \"label\")) assert isinstance(res, tuple) assert res[0] == \"id\" "} {"text": "import json import warnings from types import SimpleNamespace from taipy.gui import Gui, Markdown def test_partial(gui: Gui): with warnings.catch_warnings(record=True): gui.add_partial(Markdown(\"#This is a partial\")) gui.run(run_server=False) client = gui._server.test_client() response = client.get(f\"/taipy-jsx/{gui._config.partial_routes[0]}\") response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert response.status_code == 200 assert \"jsx\" in response_data and \"This is a partial\" in response_data[\"jsx\"] def test_partial_update(gui: Gui): with warnings.catch_warnings(record=True): partial = gui.add_partial(Markdown(\"#This is a partial\")) gui.run(run_server=False, single_client=True) client = gui._server.test_client() response = client.get(f\"/taipy-jsx/{gui._config.partial_routes[0]}\") response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert response.status_code == 200 assert \"jsx\" in response_data and \"This is a partial\" in response_data[\"jsx\"] # update partial fake_state = SimpleNamespace() fake_state._gui = gui partial.update_content(fake_state, \"#partial updated\") # type: ignore response = client.get(f\"/taipy-jsx/{gui._config.partial_routes[0]}\") response_data = json.loads(response.get_data().decode(\"utf-8\", \"ignore\")) assert response.status_code == 200 assert \"jsx\" in response_data and \"partial updated\" in response_data[\"jsx\"] "} {"text": "from taipy.gui import Gui, Markdown def test_variable_binding(helpers): \"\"\" Tests the binding of a few variables and a function \"\"\" def another_function(gui): pass x = 10 y = 20 z = \"button label\" gui = Gui() gui.add_page(\"test\", Markdown(\"<|{x}|> | <|{y}|> | <|{z}|button|on_action=another_function|>\")) gui.run(run_server=False, 
single_client=True) client = gui._server.test_client() jsx = client.get(\"/taipy-jsx/test\").json[\"jsx\"] for expected in [\"\")) gui.run(run_server=False) client = gui._server.test_client() jsx = client.get(\"/taipy-jsx/test\").json[\"jsx\"] for expected in [\"\") gui.run(run_server=False) client = gui._server.test_client() jsx = client.get(\"/taipy-jsx/TaiPy_root_page\").json[\"jsx\"] for expected in [\" \"\"\" ) "} {"text": "import inspect import pytest from taipy.gui import Gui from taipy.gui.extension import Element, ElementLibrary class MyLibrary(ElementLibrary): def get_name(self) -> str: return \"taipy_extension_example\" def get_elements(self): return dict() def test_extension_no_config(gui: Gui, helpers): gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() with pytest.warns(UserWarning): ret = flask_client.get(\"/taipy-extension/toto/titi\") assert ret.status_code == 404 def test_extension_config_wrong_path(gui: Gui, helpers): Gui.add_library(MyLibrary()) gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() with pytest.warns(UserWarning): ret = flask_client.get(\"/taipy-extension/taipy_extension_example/titi\") assert ret.status_code == 404 "} {"text": "import inspect import pytest from flask import g from taipy.gui import Gui def test_get_status(gui: Gui): gui.run(run_server=False) flask_client = gui._server.test_client() ret = flask_client.get(\"/taipy.status.json\") assert ret.status_code == 200, f\"status_code => {ret.status_code} != 200\" assert ret.mimetype == \"application/json\", f\"mimetype => {ret.mimetype} != application/json\" assert ret.json, \"json is not defined\" assert \"gui\" in ret.json, \"json has no key gui\" gui = ret.json.get(\"gui\") assert isinstance(gui, dict), \"json.gui is not a dict\" assert \"user_status\" in gui, \"json.gui has no key user_status\" assert gui.get(\"user_status\") == \"\", \"json.gui.user_status is not empty\" def test_get_extended_status(gui: Gui): gui.run(run_server=False, extended_status=True) flask_client = gui._server.test_client() ret = flask_client.get(\"/taipy.status.json\") assert ret.status_code == 200, f\"status_code => {ret.status_code} != 200\" assert ret.mimetype == \"application/json\", f\"mimetype => {ret.mimetype} != application/json\" assert ret.json, \"json is not defined\" gui = ret.json.get(\"gui\") assert \"backend_version\" in gui, \"json.gui has no key backend_version\" assert \"flask_version\" in gui, \"json.gui has no key flask_version\" assert \"frontend_version\" in gui, \"json.gui has no key frontend_version\" assert \"host\" in gui, \"json.gui has no key host\" assert \"python_version\" in gui, \"json.gui has no key python_version\" assert \"user_status\" in gui, \"json.gui has no key user_status\" assert gui.get(\"user_status\") == \"\", \"json.gui.user_status is not empty\" def test_get_status_with_user_status(gui: Gui): user_status = \"user_status\" def on_status(state): return user_status gui._set_frame(inspect.currentframe()) gui.run(run_server=False) flask_client = gui._server.test_client() ret = flask_client.get(\"/taipy.status.json\") assert ret.status_code == 200, f\"status_code => {ret.status_code} != 200\" assert ret.json, \"json is not defined\" gui = ret.json.get(\"gui\") assert \"user_status\" in gui, \"json.gui has no key user_status\" assert gui.get(\"user_status\") == user_status, f'json.gui.user_status => {gui.get(\"user_status\")} != {user_status}' "} {"text": "import inspect import io import pathlib import tempfile 
import pytest from taipy.gui import Gui from taipy.gui.data.data_scope import _DataScopes from taipy.gui.utils import _get_non_existent_file_path def test_file_upload_no_varname(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) with pytest.warns(UserWarning): ret = flask_client.post(f\"/taipy-uploads?client_id={sid}\") assert ret.status_code == 400 def test_file_upload_no_blob(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) with pytest.warns(UserWarning): ret = flask_client.post(f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": \"varname\"}) assert ret.status_code == 400 def test_file_upload_no_filename(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() file = (io.BytesIO(b\"abcdef\"), \"\") # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) with pytest.warns(UserWarning): ret = flask_client.post(f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": \"varname\", \"blob\": file}) assert ret.status_code == 400 def test_file_upload_simple(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) file_name = \"test.jpg\" file = (io.BytesIO(b\"abcdef\"), file_name) upload_path = pathlib.Path(gui._get_config(\"upload_folder\", tempfile.gettempdir())) file_name = _get_non_existent_file_path(upload_path, file_name).name ret = flask_client.post( f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": \"varname\", \"blob\": file}, content_type=\"multipart/form-data\", ) assert ret.status_code == 200 created_file = upload_path / file_name assert created_file.exists() def test_file_upload_multi_part(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) file_name = \"test2.jpg\" file0 = (io.BytesIO(b\"abcdef\"), file_name) file1 = (io.BytesIO(b\"abcdef\"), file_name) upload_path = pathlib.Path(gui._get_config(\"upload_folder\", tempfile.gettempdir())) file_name = _get_non_existent_file_path(upload_path, file_name).name ret = flask_client.post( f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": \"varname\", \"blob\": file0, \"total\": \"2\", \"part\": \"0\"}, content_type=\"multipart/form-data\", ) assert ret.status_code == 200 file0_path = upload_path / f\"{file_name}.part.0\" assert file0_path.exists() ret = flask_client.post( f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": \"varname\", \"blob\": file1, \"total\": \"2\", \"part\": \"1\"}, content_type=\"multipart/form-data\", ) assert ret.status_code == 200 file1_path = upload_path / f\"{file_name}.part.1\" assert file1_path.exists() file_path = upload_path / file_name assert file_path.exists() def test_file_upload_multiple(gui: Gui, helpers): var_name = \"varname\" gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() with gui.get_flask_app().app_context(): gui._bind_var_val(var_name, None) # Get the 
jsx once so that the page will be evaluated -> variable will be registered sid = _DataScopes._GLOBAL_ID file = (io.BytesIO(b\"abcdef\"), \"test.jpg\") ret = flask_client.post( f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": var_name, \"blob\": file}, content_type=\"multipart/form-data\" ) assert ret.status_code == 200 created_file = pathlib.Path(gui._get_config(\"upload_folder\", tempfile.gettempdir())) / \"test.jpg\" assert created_file.exists() file2 = (io.BytesIO(b\"abcdef\"), \"test2.jpg\") ret = flask_client.post( f\"/taipy-uploads?client_id={sid}\", data={\"var_name\": var_name, \"blob\": file2, \"multiple\": \"True\"}, content_type=\"multipart/form-data\", ) assert ret.status_code == 200 created_file = pathlib.Path(gui._get_config(\"upload_folder\", tempfile.gettempdir())) / \"test2.jpg\" assert created_file.exists() value = getattr(gui._bindings()._get_all_scopes()[sid], var_name) assert len(value) == 2 "} {"text": "import pathlib import pytest from taipy.gui import Gui def test_image_path_not_found(gui: Gui, helpers): gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) ret = flask_client.get(f\"/taipy-images/images/img.png?client_id={sid}\") assert ret.status_code == 404 def test_image_path_found(gui: Gui, helpers): url = gui._get_content( \"img\", str((pathlib.Path(__file__).parent.parent.parent / \"resources\" / \"fred.png\").resolve()), True ) gui.run(run_server=False) flask_client = gui._server.test_client() # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) ret = flask_client.get(f\"{url}?client_id={sid}\") assert ret.status_code == 200 def test_image_data_too_big(gui: Gui, helpers): with open((pathlib.Path(__file__).parent.parent.parent / \"resources\" / \"taipan.jpg\"), \"rb\") as big_file: url = gui._get_content(\"img\", big_file.read(), True) assert not url.startswith(\"data:\") "} {"text": "import inspect import pytest from taipy.gui import Gui def test_user_content_without_callback(gui: Gui, helpers): gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() with pytest.warns(UserWarning): ret = flask_client.get(gui._get_user_content_url(\"path\")) assert ret.status_code == 404 def test_user_content_with_wrong_callback(gui: Gui, helpers): def on_user_content_cb(state, path, args): return None on_user_content = on_user_content_cb gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() with pytest.warns(UserWarning): ret = flask_client.get(gui._get_user_content_url(\"path\", {\"a\": \"b\"})) assert ret.status_code == 404 def test_user_content_with_callback(gui: Gui, helpers): def on_user_content_cb(state, path, args): return \"\" on_user_content = on_user_content_cb gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) flask_client = gui._server.test_client() ret = flask_client.get(gui._get_user_content_url(\"path\")) assert ret.status_code == 200 "} {"text": "import inspect from taipy.gui import Gui, Markdown from taipy.gui.data.data_scope import _DataScopes def test_sending_messages_in_group(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) 
gui.run(run_server=False, single_client=True)\n    flask_client = gui._server.test_client()\n    # WS client and emit\n    ws_client = gui._server._ws.test_client(gui._server.get_flask())\n    cid = _DataScopes._GLOBAL_ID\n    # Get the jsx once so that the page will be evaluated -> variable will be registered\n    flask_client.get(f\"/taipy-jsx/test?client_id={cid}\")\n    assert gui._bindings()._get_all_scopes()[cid].name == \"World!\" # type: ignore\n    assert gui._bindings()._get_all_scopes()[cid].btn_id == \"button1\" # type: ignore\n    with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}):\n        with gui as aGui:\n            aGui._Gui__state.name = \"Monde!\"\n            aGui._Gui__state.btn_id = \"button2\"\n    assert gui._bindings()._get_all_scopes()[cid].name == \"Monde!\"\n    assert gui._bindings()._get_all_scopes()[cid].btn_id == \"button2\" # type: ignore\n    received_messages = ws_client.get_received()\n    helpers.assert_outward_ws_multiple_message(received_messages[0], \"MS\", 2) "}
{"text": "import inspect\nimport logging\nimport pathlib\nimport pytest\nfrom taipy.gui import Gui, download\n\ndef test_download_file(gui: Gui, helpers):\n    def do_something(state, id):\n        download(state, (pathlib.Path(__file__).parent.parent.parent / \"resources\" / \"taipan.jpg\"))\n\n    # Bind a page so that the function will be called\n    # gui.add_page(\n    #     \"test\", Markdown(\"<|Do something!|button|on_action=do_something|id=my_button|>\")\n    # )\n    # set gui frame\n    gui._set_frame(inspect.currentframe())\n    gui.run(run_server=False)\n    # WS client and emit\n    ws_client = gui._server._ws.test_client(gui._server.get_flask())\n    # Get the jsx once so that the page will be evaluated -> variable will be registered\n    sid = helpers.create_scope_and_get_sid(gui)\n    ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"A\", \"name\": \"my_button\", \"payload\": \"do_something\"})\n    # assert for received message (message that would be sent to the front-end client)\n    received_messages = ws_client.get_received()\n    assert len(received_messages) == 1\n    assert isinstance(received_messages[0], dict)\n    assert \"name\" in received_messages[0] and received_messages[0][\"name\"] == \"message\"\n    assert \"args\" in received_messages[0]\n    args = received_messages[0][\"args\"]\n    assert \"type\" in args and args[\"type\"] == \"DF\"\n    assert \"content\" in args and args[\"content\"] == \"/taipy-content/taipyStatic0/taipan.jpg\"\n    logging.getLogger().debug(args[\"content\"]) "}
{"text": "import inspect\nfrom taipy.gui import Gui, Markdown\n\ndef ws_u_assert_template(gui: Gui, helpers, value_before_update, value_after_update, payload):\n    # Bind test variable\n    var = value_before_update # noqa: F841\n    # set gui frame\n    gui._set_frame(inspect.currentframe())\n    # Bind a page so that the variable will be evaluated as expression\n    gui.add_page(\"test\", Markdown(\"<|{var}|>\"))\n    gui.run(run_server=False)\n    flask_client = gui._server.test_client()\n    # WS client and emit\n    ws_client = gui._server._ws.test_client(gui._server.get_flask())\n    # Get the jsx once so that the page will be evaluated -> variable will be registered\n    sid = helpers.create_scope_and_get_sid(gui)\n    flask_client.get(f\"/taipy-jsx/test?client_id={sid}\")\n    assert gui._bindings()._get_all_scopes()[sid].var == value_before_update\n    ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"U\", \"name\": \"tpec_TpExPr_var_TPMDL_0\", \"payload\": payload})\n    assert gui._bindings()._get_all_scopes()[sid].var == value_after_update\n    # assert for received message (message that would be sent to the front-end client)\n    received_message = ws_client.get_received()\n    assert len(received_message) 
helpers.assert_outward_ws_message(received_message[0], \"MU\", \"tpec_TpExPr_var_TPMDL_0\", value_after_update) def test_ws_u_string(gui: Gui, helpers): value_before_update = \"a random string\" value_after_update = \"a random string is added\" payload = {\"value\": value_after_update} # set gui frame gui._set_frame(inspect.currentframe()) ws_u_assert_template(gui, helpers, value_before_update, value_after_update, payload) def test_ws_u_number(gui: Gui, helpers): value_before_update = 10 value_after_update = \"11\" payload = {\"value\": value_after_update} # set gui frame gui._set_frame(inspect.currentframe()) ws_u_assert_template(gui, helpers, value_before_update, value_after_update, payload) "} {"text": "import inspect from taipy.gui import Gui, Markdown def test_du_table_data_fetched(gui: Gui, helpers, csvdata): # Bind test variables csvdata = csvdata # set gui frame gui._set_frame(inspect.currentframe()) Gui._set_timezone(\"UTC\") # Bind a page so that the variable will be evaluated as expression gui.add_page( \"test\", Markdown( \"<|{csvdata}|table|page_size=10|page_size_options=10;30;100|columns=Day;Entity;Code;Daily hospital occupancy|date_format=eee dd MMM yyyy|>\" ), ) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) sid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") ws_client.emit( \"message\", { \"client_id\": sid, \"type\": \"DU\", \"name\": \"_TpD_tpec_TpExPr_csvdata_TPMDL_0\", \"payload\": { \"columns\": [\"Day\", \"Entity\", \"Code\", \"Daily hospital occupancy\"], \"pagekey\": \"0-100--asc\", \"start\": 0, \"end\": 9, \"orderby\": \"\", \"sort\": \"asc\", }, }, ) # assert for received message (message that would be sent to the front-end client) received_messages = ws_client.get_received() assert received_messages helpers.assert_outward_ws_message( received_messages[0], \"MU\", \"_TpD_tpec_TpExPr_csvdata_TPMDL_0\", { \"data\": [ { \"Code\": \"AUT\", \"Day_str\": \"2020-04-01T00:00:00.000000Z\", \"Daily hospital occupancy\": 856, \"Entity\": \"Austria\", \"_tp_index\": 0, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-02T00:00:00.000000Z\", \"Daily hospital occupancy\": 823, \"Entity\": \"Austria\", \"_tp_index\": 1, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-03T00:00:00.000000Z\", \"Daily hospital occupancy\": 829, \"Entity\": \"Austria\", \"_tp_index\": 2, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-04T00:00:00.000000Z\", \"Daily hospital occupancy\": 826, \"Entity\": \"Austria\", \"_tp_index\": 3, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-05T00:00:00.000000Z\", \"Daily hospital occupancy\": 712, \"Entity\": \"Austria\", \"_tp_index\": 4, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-06T00:00:00.000000Z\", \"Daily hospital occupancy\": 824, \"Entity\": \"Austria\", \"_tp_index\": 5, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-07T00:00:00.000000Z\", \"Daily hospital occupancy\": 857, \"Entity\": \"Austria\", \"_tp_index\": 6, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-08T00:00:00.000000Z\", \"Daily hospital occupancy\": 829, \"Entity\": \"Austria\", \"_tp_index\": 7, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-09T00:00:00.000000Z\", \"Daily hospital occupancy\": 820, \"Entity\": \"Austria\", \"_tp_index\": 8, }, { \"Code\": \"AUT\", \"Day_str\": \"2020-04-10T00:00:00.000000Z\", \"Daily hospital occupancy\": 771, 
\"Entity\": \"Austria\", \"_tp_index\": 9, }, ], \"rowcount\": 14477, \"start\": 0, \"format\": \"JSON\", }, ) "} {"text": "import inspect import pytest from taipy.gui import Gui, Markdown def test_default_on_change(gui: Gui, helpers): st = {\"d\": False} def on_change(state, var, value): st[\"d\"] = True x = 10 # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|{x}|input|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") # fake var update ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"U\", \"name\": \"x\", \"payload\": {\"value\": \"20\"}}) assert ws_client.get_received() assert st[\"d\"] is True def test_specific_on_change(gui: Gui, helpers): st = {\"d\": False, \"s\": False} def on_change(state, var, value): st[\"d\"] = True def on_input_change(state, var, value): st[\"s\"] = True x = 10 # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|{x}|input|on_change=on_input_change|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") # fake var update ws_client.emit( \"message\", {\"client_id\": sid, \"type\": \"U\", \"name\": \"x\", \"payload\": {\"value\": \"20\", \"on_change\": \"on_input_change\"}}, ) assert ws_client.get_received() assert st[\"s\"] is True assert st[\"d\"] is False "} {"text": "import inspect import pytest from taipy.gui import Gui, Markdown def test_ru_selector(gui: Gui, helpers, csvdata): # Bind test variables selected_val = [\"value1\", \"value2\"] # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) # Bind a page so that the variable will be evaluated as expression gui.add_page( \"test\", Markdown(\"<|{selected_val}|selector|multiple|>\"), ) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) sid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"RU\", \"name\": \"\", \"payload\": {\"names\": [\"selected_val\"]}}) # assert for received message (message that would be sent to the front-end client) received_messages = ws_client.get_received() assert len(received_messages) helpers.assert_outward_ws_message(received_messages[0], \"MU\", \"selected_val\", [\"value1\", \"value2\"]) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import inspect import pytest from taipy.gui import Gui, Markdown def test_broadcast(gui: Gui, helpers): # Bind test variables selected_val = [\"value1\", \"value2\"] # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) # Bind a page so that the variable will be evaluated as expression gui.add_page( \"test\", Markdown(\"<|{selected_val}|selector|multiple|>\"), ) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) sid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") gui._broadcast(\"broadcast_name\", \"broadcast_value\") received_messages = ws_client.get_received() assert len(received_messages) helpers.assert_outward_simple_ws_message(received_messages[0], \"U\", \"_bc_broadcast_name\", \"broadcast_value\") "} {"text": "import inspect import time from taipy.gui import Gui, Markdown def test_a_button_pressed(gui: Gui, helpers): def do_something(state, id): state.x = state.x + 10 state.text = \"a random text\" x = 10 # noqa: F841 text = \"hi\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) # Bind a page so that the variable will be evaluated as expression gui.add_page( \"test\", Markdown(\"<|Do something!|button|on_action=do_something|id=my_button|> | <|{x}|> | <|{text}|>\") ) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) # Get the jsx once so that the page will be evaluated -> variable will be registered sid = helpers.create_scope_and_get_sid(gui) flask_client.get(f\"/taipy-jsx/test?client_id={sid}\") assert gui._bindings()._get_all_scopes()[sid].x == 10 # type: ignore assert gui._bindings()._get_all_scopes()[sid].text == \"hi\" # type: ignore ws_client.emit(\"message\", {\"client_id\": sid, \"type\": \"A\", \"name\": \"my_button\", \"payload\": \"do_something\"}) assert gui._bindings()._get_all_scopes()[sid].text == \"a random text\" assert gui._bindings()._get_all_scopes()[sid].x == 20 # type: ignore # assert for received message (message that would be sent to the front-end client) received_messages = ws_client.get_received() helpers.assert_outward_ws_message(received_messages[0], \"MU\", \"x\", 20) helpers.assert_outward_ws_message(received_messages[1], \"MU\", \"text\", \"a random text\") "} {"text": "import inspect import warnings from flask import g from taipy.gui import Gui from taipy.gui.utils.types import _TaipyNumber def test_unbind_variable_in_expression(gui: Gui, helpers): gui.run(run_server=False, single_client=True) with warnings.catch_warnings(record=True) as records: with gui.get_flask_app().app_context(): gui._evaluate_expr(\"{x}\") warns = helpers.get_taipy_warnings(records) assert len(warns) == 3 assert \"Variable 'x' is not available in\" in str(warns[0].message) assert \"Variable 'x' is not defined\" in str(warns[1].message) assert \"Cannot evaluate expression 'x'\" in str(warns[2].message) assert \"name 'x' is not defined\" in str(warns[2].message) def test_evaluate_same_expression_multiple_times(gui: Gui): x = 10 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) with gui.get_flask_app().app_context(): s1 = gui._evaluate_expr(\"x + 10 = {x + 
10}\") s2 = gui._evaluate_expr(\"x + 10 = {x + 10}\") assert s1 == s2 def test_evaluate_expressions_same_variable(gui: Gui): x = 10 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) with gui.get_flask_app().app_context(): s1 = gui._evaluate_expr(\"x + 10 = {x + 10}\") s2 = gui._evaluate_expr(\"x = {x}\") assert \"tp_TpExPr_x\" in s1 and \"tp_TpExPr_x\" in s2 def test_evaluate_holder(gui: Gui): x = 10 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) with warnings.catch_warnings(record=True): with gui.get_flask_app().app_context(): gui._evaluate_expr(\"{x + 10}\") hash = gui._evaluate_bind_holder(_TaipyNumber, \"TpExPr_x + 10_TPMDL_0\") assert \"_TpN_tp_TpExPr_x_10_TPMDL_0_0\" in hash lst = gui._evaluate_holders(\"TpExPr_x + 10_TPMDL_0\") assert len(lst) == 1 assert \"_TpN_tp_TpExPr_x_10_TPMDL_0_0\" in lst[0] # test re-evaluate holders gui._bindings().x = 20 gui._re_evaluate_expr(lst[0]) def test_evaluate_not_expression_type(gui: Gui): gui.run(run_server=False) with gui.get_flask_app().app_context(): assert \"x + 10\" == gui._evaluate_expr(\"x + 10\") def test_evaluate_expression_2_clients(gui: Gui): x = 10 # noqa: F841 y = 20 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.run(run_server=False) with gui.get_flask_app().app_context(): gui._bindings()._get_or_create_scope(\"A\") gui._bindings()._get_or_create_scope(\"B\") g.client_id = \"A\" gui._evaluate_expr(\"x + y = {x + y}\") g.client_id = \"B\" gui._evaluate_expr(\"x\") gui._re_evaluate_expr(\"x\") "} {"text": "import inspect import pytest from taipy.gui.gui import Gui from taipy.gui.utils import _MapDict def test_map_dict(): d = {\"a\": 1, \"b\": 2, \"c\": 3} md = _MapDict(d) md_copy = _MapDict(d).copy() assert len(md) == 3 assert md.__getitem__(\"a\") == d[\"a\"] md.__setitem__(\"a\", 4) assert md.__getitem__(\"a\") == 4 assert d[\"a\"] == 4 v1 = d[\"b\"] v2 = md.pop(\"b\") assert v1 == v2 assert \"b\" not in d.keys() assert \"c\" in md assert len(md) == 2 v1 = d[\"c\"] v2 = md.popitem() assert v2 == (\"c\", v1) assert len(md) == 1 md.clear() assert len(md) == 0 assert len(d) == 0 assert len(md_copy) == 3 v1 = \"\" for k in md_copy: v1 += k assert v1 == \"abc\" v1 = \"\" for k in md_copy.keys(): v1 += k assert v1 == \"abc\" v1 = \"\" for k in md_copy.__reversed__(): v1 += k assert v1 == \"cba\" v1 = 0 for k in md_copy.values(): v1 += k assert v1 == 6 # 1+2+3 v1 = md_copy.setdefault(\"a\", 5) assert v1 == 1 v1 = md_copy.setdefault(\"d\", 5) assert v1 == 5 try: md = _MapDict(\"not_a_dict\") assert False except Exception: assert True pass def test_map_dict_update(): update_values = {} def update(k, v): update_values[0] = k update_values[1] = v pass d = {\"a\": 1, \"b\": \"2\"} md = _MapDict(d, update) md.__setitem__(\"a\", 3) assert update_values[0] == \"a\" assert update_values[1] == 3 pass def test_map_dict_update_full_dictionary_1(): values = {\"a\": 1, \"b\": 2} update_values = {\"a\": 3, \"b\": 5} md = _MapDict(values) assert md[\"a\"] == 1 assert md[\"b\"] == 2 md.update(update_values) assert md[\"a\"] == 3 assert md[\"b\"] == 5 def test_map_dict_update_full_dictionary_2(): temp_values = {} def update(k, v): temp_values[k] = v values = {\"a\": 1, \"b\": 2} update_values = {\"a\": 3, \"b\": 5} md = _MapDict(values, update) assert md[\"a\"] == 1 assert md[\"b\"] == 2 md.update(update_values) assert temp_values[\"a\"] == 3 assert temp_values[\"b\"] == 5 def test_map_dict_set(gui: Gui, test_client): d = {\"a\": 1} # noqa: F841 # set 
gui frame gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) with gui.get_flask_app().app_context(): assert isinstance(gui._Gui__state.d, _MapDict) gui._Gui__state.d = {\"b\": 2} assert isinstance(gui._Gui__state.d, _MapDict) assert len(gui._Gui__state.d) == 1 assert gui._Gui__state.d.get(\"a\", None) is None assert gui._Gui__state.d.get(\"b\", None) == 2 def test_map_dict_items(): def update(k, v): pass values = {\"a\": 1, \"b\": {\"c\": \"list c\"}} md = _MapDict(values) mdu = _MapDict(values, update) assert md[\"a\"] == 1 assert isinstance(md[\"b\"], _MapDict) assert isinstance(mdu[\"b\"], _MapDict) assert md[\"b\"][\"c\"] == \"list c\" assert mdu[\"b\"][\"c\"] == \"list c\" del md[\"a\"] with pytest.raises(KeyError): md[\"e\"] setattr(md, \"a\", 1) assert md[\"a\"] == 1 "} {"text": "import pathlib import tempfile from taipy.gui import Gui from taipy.gui.utils import _get_non_existent_file_path def test_empty_file_name(gui: Gui, helpers): assert _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), \"\").name def test_non_existent_file(gui: Gui, helpers): assert not _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), \"\").exists() def test_existent_file(gui: Gui, helpers): file_path = _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), \"\") with open(file_path, \"w\") as file_handler: file_handler.write(\"hello\") assert file_path.exists() file_stem = file_path.stem.split(\".\", 1)[0] file_suffix = file_path.suffixes[-1] index = int(file_path.suffixes[0][1:]) if len(file_path.suffixes) > 1 else -1 file_path = _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), \"\") assert file_path.name == f\"{file_stem}.{index + 1}{file_suffix}\" with open(file_path, \"w\") as file_handler: file_handler.write(\"hello 2\") assert file_path.exists() file_path = _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), \"\") assert file_path.name == f\"{file_stem}.{index + 2}{file_suffix}\" "} {"text": " import warnings import pytest from taipy.gui.utils.date import _string_to_date from taipy.gui.utils.types import _TaipyBase, _TaipyBool, _TaipyDate, _TaipyNumber def test_taipy_base(): tb = _TaipyBase(\"value\", \"hash\") assert tb.get() == \"value\" assert tb.get_name() == \"hash\" tb.set(\"a value\") assert tb.get() == \"a value\" assert tb.get_hash() == NotImplementedError def test_taipy_bool(): assert _TaipyBool(0, \"v\").get() is False assert _TaipyBool(1, \"v\").get() is True assert _TaipyBool(False, \"v\").get() is False assert _TaipyBool(True, \"v\").get() is True assert _TaipyBool(\"\", \"v\").get() is False assert _TaipyBool(\"hey\", \"v\").get() is True assert _TaipyBool([], \"v\").get() is False assert _TaipyBool([\"an item\"], \"v\").get() is True def test_taipy_number(): with pytest.raises(TypeError): _TaipyNumber(\"a string\", \"x\").get() with warnings.catch_warnings(record=True): _TaipyNumber(\"a string\", \"x\").cast_value(\"a string\") _TaipyNumber(0, \"x\").cast_value(0) def test_taipy_date(): assert _TaipyDate(_string_to_date(\"2022-03-03 00:00:00 UTC\"), \"x\").get() == \"2022-03-03T00:00:00+00:00\" assert _TaipyDate(\"2022-03-03 00:00:00 UTC\", \"x\").get() == \"2022-03-03 00:00:00 UTC\" assert _TaipyDate(None, \"x\").get() is None _TaipyDate(\"\", \"x\").cast_value(\"2022-03-03 00:00:00 UTC\") _TaipyDate(\"\", \"x\").cast_value(_string_to_date(\"2022-03-03 00:00:00 UTC\")) "} {"text": "import inspect from time import sleep import pytest from taipy.gui import Gui, State, 
invoke_long_callback def test_long_callback(gui: Gui): status = None # noqa: F841 def heavy_function(delay=1): sleep(delay) def heavy_function_with_exception(delay=1): sleep(delay) raise Exception(\"Heavy function Exception\") def heavy_function_status(state: State, status: int): state.status = status def on_exception(state: State, function_name: str, e: Exception): state.status = -1 gui._set_frame(inspect.currentframe()) gui.run(run_server=False, single_client=True) state = gui._Gui__state with gui.get_flask_app().app_context(): assert state.status is None invoke_long_callback(state, heavy_function) invoke_long_callback(state, heavy_function_with_exception) invoke_long_callback(state, heavy_function, (), heavy_function_status) invoke_long_callback(state, heavy_function, (2), heavy_function_status, (), 1000) invoke_long_callback(state, heavy_function_with_exception, (), heavy_function_status) "} {"text": "import inspect from flask import g from taipy.gui import Gui, Markdown, get_state_id def test_get_state_id(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().app_context(): g.client_id = cid assert cid == get_state_id(gui._Gui__state) "} {"text": "import inspect from flask import g from taipy.gui import Gui, Markdown, State, download def test_download(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 def on_download_action(state: State): pass # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}): g.client_id = cid download(gui._Gui__state, \"some text\", \"filename.txt\", \"on_download_action\") received_messages = ws_client.get_received() helpers.assert_outward_ws_simple_message( received_messages[0], \"DF\", {\"name\": \"filename.txt\", \"onAction\": \"on_download_action\"} ) "} {"text": "import inspect from flask import g from taipy.gui import Gui, Markdown, navigate def test_navigate(gui: Gui, helpers): name = \"World!\" # noqa: F841 btn_id = \"button1\" # noqa: F841 # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\")) gui.run(run_server=False) flask_client = gui._server.test_client() # WS client and emit ws_client = gui._server._ws.test_client(gui._server.get_flask()) cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}): g.client_id = cid navigate(gui._Gui__state, \"test\") received_messages 
= ws_client.get_received()\n    helpers.assert_outward_ws_simple_message(received_messages[0], \"NA\", {\"to\": \"test\"}) "}
{"text": "import inspect\nfrom flask import g\nfrom taipy.gui import Gui, Markdown, State, invoke_callback\n\ndef test_invoke_callback(gui: Gui, helpers):\n    name = \"World!\" # noqa: F841\n    btn_id = \"button1\" # noqa: F841\n    val = 1 # noqa: F841\n\n    def user_callback(state: State):\n        state.val = 10\n\n    # set gui frame\n    gui._set_frame(inspect.currentframe())\n    gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\\n<|{val}|>\"))\n    gui.run(run_server=False, single_client=True)\n    flask_client = gui._server.test_client()\n    # client id\n    cid = helpers.create_scope_and_get_sid(gui)\n    # Get the jsx once so that the page will be evaluated -> variable will be registered\n    flask_client.get(f\"/taipy-jsx/test?client_id={cid}\")\n    with gui.get_flask_app().app_context():\n        g.client_id = cid\n        invoke_callback(gui, cid, user_callback, [])\n        assert gui._Gui__state.val == 10 "}
{"text": "import inspect\nfrom flask import g\nfrom taipy.gui import Gui, Markdown, hold_control\n\ndef test_hold_control(gui: Gui, helpers):\n    name = \"World!\" # noqa: F841\n    btn_id = \"button1\" # noqa: F841\n    # set gui frame\n    gui._set_frame(inspect.currentframe())\n    gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\"))\n    gui.run(run_server=False)\n    flask_client = gui._server.test_client()\n    # WS client and emit\n    ws_client = gui._server._ws.test_client(gui._server.get_flask())\n    cid = helpers.create_scope_and_get_sid(gui)\n    # Get the jsx once so that the page will be evaluated -> variable will be registered\n    flask_client.get(f\"/taipy-jsx/test?client_id={cid}\")\n    with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}):\n        g.client_id = cid\n        hold_control(gui._Gui__state)\n    received_messages = ws_client.get_received()\n    helpers.assert_outward_ws_simple_message(\n        received_messages[0], \"BL\", {\"action\": \"_taipy_on_cancel_block_ui\", \"message\": \"Work in Progress...\"}\n    ) "}
{"text": "import inspect\nfrom flask import g\nfrom taipy.gui import Gui, Markdown, resume_control\n\ndef test_resume_control(gui: Gui, helpers):\n    name = \"World!\" # noqa: F841\n    btn_id = \"button1\" # noqa: F841\n    # set gui frame\n    gui._set_frame(inspect.currentframe())\n    gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\"))\n    gui.run(run_server=False)\n    flask_client = gui._server.test_client()\n    # WS client and emit\n    ws_client = gui._server._ws.test_client(gui._server.get_flask())\n    cid = helpers.create_scope_and_get_sid(gui)\n    # Get the jsx once so that the page will be evaluated -> variable will be registered\n    flask_client.get(f\"/taipy-jsx/test?client_id={cid}\")\n    with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}):\n        g.client_id = cid\n        resume_control(gui._Gui__state)\n    received_messages = ws_client.get_received()\n    helpers.assert_outward_ws_simple_message(received_messages[0], \"BL\", {\"message\": None}) "}
{"text": "import inspect\nfrom flask import g\nfrom taipy.gui import Gui, Markdown, notify\n\ndef test_notify(gui: Gui, helpers):\n    name = \"World!\" # noqa: F841\n    btn_id = \"button1\" # noqa: F841\n    # set gui frame\n    gui._set_frame(inspect.currentframe())\n    gui.add_page(\"test\", Markdown(\"<|Hello {name}|button|id={btn_id}|>\"))\n    gui.run(run_server=False)\n    flask_client = gui._server.test_client()\n    # WS client and emit\n    ws_client = gui._server._ws.test_client(gui._server.get_flask())\n    cid = helpers.create_scope_and_get_sid(gui)\n    # Get the jsx once so that the page will be evaluated -> 
variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}): g.client_id = cid notify(gui._Gui__state, \"Info\", \"Message\") received_messages = ws_client.get_received() helpers.assert_outward_ws_simple_message(received_messages[0], \"AL\", {\"atype\": \"Info\", \"message\": \"Message\"}) "} {"text": "import contextlib import time from urllib.request import urlopen import pytest from testbook import testbook @pytest.mark.filterwarnings(\"ignore::RuntimeWarning\") @testbook(\"tests/gui/notebook/simple_gui.ipynb\") def test_notebook_simple_gui(tb, helpers): tb.execute_cell(\"import\") tb.execute_cell(\"page_declaration\") tb.execute_cell(\"gui_init\") tb.execute_cell(\"gui_run\") while not helpers.port_check(): time.sleep(0.1) assert \">Hello\" in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") assert 'defaultValue=\\\\\"10\\\\\"' in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") # Test state manipulation within notebook tb.execute_cell(\"get_variable\") assert \"10\" in tb.cell_output_text(\"get_variable\") assert 'defaultValue=\\\\\"10\\\\\"' in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") tb.execute_cell(\"set_variable\") assert 'defaultValue=\\\\\"20\\\\\"' in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") tb.execute_cell(\"re_get_variable\") assert \"20\" in tb.cell_output_text(\"re_get_variable\") # Test page reload tb.execute_cell(\"gui_stop\") with pytest.raises(Exception) as exc_info: urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\") assert \"501: Gateway error\" in str(exc_info.value) tb.execute_cell(\"gui_re_run\") while True: with contextlib.suppress(Exception): urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\") break assert \">Hello\" in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") tb.execute_cell(\"gui_reload\") while True: with contextlib.suppress(Exception): urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\") break assert \">Hello\" in urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\").read().decode(\"utf-8\") tb.execute_cell(\"gui_re_stop\") with pytest.raises(Exception) as exc_info: urlopen(\"http://127.0.0.1:5000/taipy-jsx/page1\") assert \"501: Gateway error\" in str(exc_info.value) "} {"text": "from taipy.gui import Gui, Markdown "} {"text": "import inspect from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_redirect(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Redirect Successfully|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"Redirect Successfully\" "} {"text": "import pytest @pytest.fixture(scope=\"session\") def browser_context_args(browser_context_args, e2e_port, e2e_base_url): return { **browser_context_args, \"base_url\": f\"http://127.0.0.1:{e2e_port}{e2e_base_url}\", \"timezone_id\": \"Europe/Paris\", } @pytest.fixture(scope=\"function\") def gui(helpers, e2e_base_url): from taipy.gui import Gui gui = Gui() gui.load_config({\"base_url\": e2e_base_url, \"host\": \"0.0.0.0\" if e2e_base_url != \"/\" else \"127.0.0.1\"}) yield gui # Delete 
Gui instance and state of some classes after each test gui.stop() helpers.test_cleanup() "} {"text": "import inspect import re from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from playwright.sync_api import expect from taipy.gui import Gui @pytest.mark.teste2e def test_navbar_navigate(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) gui.add_page(name=\"Data\", page=\"<|navbar|id=nav1|> <|Data|id=text-data|>\") gui.add_page(name=\"Test\", page=\"<|navbar|id=nav1|> <|Test|id=text-test|>\") helpers.run_e2e(gui) page.goto(\"./Data\") page.expect_websocket() page.wait_for_selector(\"#text-data\") page.click(\"#nav1 button:nth-child(2)\") page.wait_for_selector(\"#text-test\") expect(page).to_have_url(re.compile(\".*Test\")) page.click(\"#nav1 button:nth-child(1)\") page.wait_for_selector(\"#text-data\") expect(page).to_have_url(re.compile(\".*Data\")) "} {"text": "import inspect from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui from taipy.gui.utils.date import _string_to_date @pytest.mark.teste2e def test_timzone_specified_1(page: \"Page\", gui: Gui, helpers): _timezone_test_template(page, gui, helpers, \"Etc/GMT\", [\"2022-03-03 00:00:00 UTC\"]) @pytest.mark.teste2e def test_timzone_specified_2(page: \"Page\", gui: Gui, helpers): _timezone_test_template( page, gui, helpers, \"Europe/Paris\", [\"2022-03-03 01:00:00 GMT+1\", \"2022-03-03 01:00:00 UTC+1\"] ) @pytest.mark.teste2e def test_timzone_specified_3(page: \"Page\", gui: Gui, helpers): _timezone_test_template( page, gui, helpers, \"Asia/Ho_Chi_Minh\", [\"2022-03-03 07:00:00 GMT+7\", \"2022-03-03 07:00:00 UTC+7\"] ) @pytest.mark.teste2e def test_timzone_specified_4(page: \"Page\", gui: Gui, helpers): _timezone_test_template( page, gui, helpers, \"America/Sao_Paulo\", [\"2022-03-02 21:00:00 GMT-3\", \"2022-03-02 21:00:00 UTC\u22123\"] ) @pytest.mark.teste2e def test_timezone_client_side(page: \"Page\", gui: Gui, helpers): _timezone_test_template(page, gui, helpers, \"client\", [\"2022-03-03 01:00:00 GMT+1\", \"2022-03-03 01:00:00 UTC+1\"]) def _timezone_test_template(page: \"Page\", gui: Gui, helpers, time_zone, texts): page_md = \"\"\" <|{t}|id=text1|> \"\"\" t = _string_to_date(\"2022-03-03T00:00:00.000Z\") # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, time_zone=time_zone) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() in texts def test_date_only(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{t}|id=text1|> \"\"\" t = _string_to_date(\"Wed Jul 28 1993\") # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() in [\"1993-07-28\"] "} {"text": "import inspect from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_accessor_json(page: \"Page\", gui: Gui, csvdata, helpers): table_data = csvdata # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page( name=\"test\", page=\"<|{table_data}|table|columns=Day;Entity;Code;Daily hospital occupancy|date_format=eee dd MMM 
yyyy|id=table1|>\", ) helpers.run_e2e(gui, use_arrow=False) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#table1 tr:nth-child(32)\") # wait for data to be loaded (30 rows of skeleton while loading) assert_table_content(page) @pytest.mark.teste2e def test_accessor_arrow(page: \"Page\", gui: Gui, csvdata, helpers): if util.find_spec(\"pyarrow\"): table_data = csvdata # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page( name=\"test\", page=\"<|{table_data}|table|columns=Day;Entity;Code;Daily hospital occupancy|date_format=eee dd MMM yyyy|id=table1|>\", ) helpers.run_e2e(gui, use_arrow=True) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector( \"#table1 tr:nth-child(32)\" ) # wait for data to be loaded (30 rows of skeleton while loading) assert_table_content(page) def assert_table_content(page: \"Page\"): # assert page.query_selector(\"#table1 tbody tr:nth-child(1) td:nth-child(1)\").inner_text() == \"Wed 01 Apr 2020\" assert page.query_selector(\"#table1 tbody tr:nth-child(1) td:nth-child(2)\").inner_text() == \"Austria\" assert page.query_selector(\"#table1 tbody tr:nth-child(1) td:nth-child(4)\").inner_text() == \"856\" "} {"text": "import inspect from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_theme_light(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") background_color = page.evaluate( 'window.getComputedStyle(document.querySelector(\"main\"), null).getPropertyValue(\"background-color\")' ) assert background_color == \"rgb(255, 255, 255)\" @pytest.mark.teste2e def test_theme_dark(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=True) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") background_color = page.evaluate( 'window.getComputedStyle(document.querySelector(\"main\"), null).getPropertyValue(\"background-color\")' ) assert background_color == \"rgb(18, 18, 18)\" "} {"text": "import inspect from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_margin_1(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False, margin=\"10rem\") page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") margin = page.evaluate('window.getComputedStyle(document.querySelector(\"#root\"), null).getPropertyValue(\"margin\")') assert margin == \"160px\" @pytest.mark.teste2e def test_margin_2(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") margin = page.evaluate('window.getComputedStyle(document.querySelector(\"#root\"), null).getPropertyValue(\"margin\")') assert margin == \"16px\" @pytest.mark.teste2e def test_margin_3(page: 
\"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False, margin=\"10rem\", stylekit=True) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") margin = page.evaluate('window.getComputedStyle(document.querySelector(\"#root\"), null).getPropertyValue(\"margin\")') assert margin == \"160px\" @pytest.mark.teste2e def test_margin_4(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False, stylekit={\"root_margin\": \"20rem\"}) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") margin = page.evaluate('window.getComputedStyle(document.querySelector(\"#root\"), null).getPropertyValue(\"margin\")') assert margin == \"320px\" @pytest.mark.teste2e def test_margin_5(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|Just a page|id=text1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui, dark_mode=False, stylekit={\"root_margin\": \"20rem\"}, margin=\"10rem\") page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#text1\") margin = page.evaluate('window.getComputedStyle(document.querySelector(\"#root\"), null).getPropertyValue(\"margin\")') assert margin == \"320px\" "} {"text": "import inspect import os import time from importlib import util from pathlib import Path from urllib.request import urlopen import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui, Html from taipy.gui.server import _Server @pytest.mark.teste2e def test_html_render_with_style(page: \"Page\", gui: Gui, helpers): html_content = \"\"\" Hey There \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(\"page1\", Html(html_content)) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#text1\") retry = 0 while ( retry < 10 and page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') != \"rgb(0, 128, 0)\" ): retry += 1 time.sleep(0.2) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') == \"rgb(0, 128, 0)\" ) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text2\"), null).getPropertyValue(\"color\")') == \"rgb(0, 0, 255)\" ) @pytest.mark.teste2e def test_html_render_bind_assets(page: \"Page\", gui: Gui, helpers, e2e_base_url, e2e_port): gui._set_frame(inspect.currentframe()) gui.add_pages(pages=f\"{Path(Path(__file__).parent.resolve())}{os.path.sep}test-assets\") helpers.run_e2e(gui) assert \".taipy-text\" in urlopen( f\"http://127.0.0.1:{e2e_port}{e2e_base_url}test-assets/style/style.css\" ).read().decode(\"utf-8\") page.goto(\"./test-assets/page1\") page.expect_websocket() page.wait_for_selector(\"#text1\") retry = 0 while ( retry < 10 and page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') != \"rgb(0, 128, 0)\" ): retry += 1 time.sleep(0.1) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') == \"rgb(0, 128, 0)\" ) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text2\"), null).getPropertyValue(\"color\")') == \"rgb(0, 0, 255)\" 
) @pytest.mark.teste2e def test_html_render_path_mapping(page: \"Page\", gui: Gui, helpers, e2e_base_url, e2e_port): gui._server = _Server( gui, path_mapping={\"style\": f\"{Path(Path(__file__).parent.resolve())}{os.path.sep}test-assets{os.path.sep}style\"}, flask=gui._flask, async_mode=\"gevent\", ) gui.add_page(\"page1\", Html(f\"{Path(Path(__file__).parent.resolve())}{os.path.sep}page1.html\")) helpers.run_e2e(gui) assert \".taipy-text\" in urlopen(f\"http://127.0.0.1:{e2e_port}{e2e_base_url}/style/style.css\").read().decode(\"utf-8\") page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#text1\") retry = 0 while ( retry < 10 and page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') != \"rgb(0, 128, 0)\" ): retry += 1 time.sleep(0.1) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') == \"rgb(0, 128, 0)\" ) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text2\"), null).getPropertyValue(\"color\")') == \"rgb(0, 0, 255)\" ) "} {"text": "import inspect import logging from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_markdown_render_with_style(page: \"Page\", gui: Gui, helpers): markdown_content = \"\"\" <|Hey|id=text1|> <|There|id=text2|class_name=custom-text|> \"\"\" style = \"\"\" .taipy-text { color: green; } .custom-text { color: blue; } \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(\"page1\", markdown_content, style=style) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#text1\") page.wait_for_selector(\"#Taipy_style\", state=\"attached\") function_evaluated = True try: page.wait_for_function( 'window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\") !== \"rgb(255, 255, 255)\"' ) except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text1\"), null).getPropertyValue(\"color\")') == \"rgb(0, 128, 0)\" ) assert ( page.evaluate('window.getComputedStyle(document.querySelector(\"#text2\"), null).getPropertyValue(\"color\")') == \"rgb(0, 0, 255)\" ) "} {"text": "import inspect import logging from importlib import util import pytest from taipy.gui import Gui if util.find_spec(\"playwright\"): from playwright._impl._page import Page from .assets2_class_scopes.page1 import Page1 from .assets2_class_scopes.page2 import Page2 def helpers_assert_value(page, s1, s2, v1): s1_val = page.input_value(\"#s1 input\") assert str(s1_val).startswith(s1) s2_val = page.input_value(\"#s2 input\") assert str(s2_val).startswith(s2) val1 = page.query_selector(\"#v1\").inner_text() assert str(val1).startswith(v1) @pytest.mark.timeout(300) @pytest.mark.teste2e @pytest.mark.filterwarnings(\"ignore::Warning\") def test_class_scopes_binding(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) operand_1 = 0 # noqa: F841 gui.add_page(\"page1\", Page1()) gui.add_page(\"page2\", Page2()) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"0\", \"0\", \"0\") page.fill(\"#s1 input\", \"15\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText 
!== '0'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"0\", \"15\") page.fill(\"#s2 input\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '15'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"20\", \"35\") page.goto(\"./page2\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"15\", \"0\", \"0\") page.fill(\"#s2 input\", \"5\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '0'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"5\", \"75\") page.fill(\"#s1 input\", \"17\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '75'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"17\", \"5\", \"85\") page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"17\", \"20\", \"37\") page.click(\"#btn_reset\") try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '37'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"17\", \"0\", \"17\") "} {"text": "import inspect import logging from importlib import util import pytest from taipy.gui import Gui if util.find_spec(\"playwright\"): from playwright._impl._page import Page from .assets.page1 import page as page1 from .assets.page2 import page as page2 from .assets.page3 import page as page3 @pytest.mark.timeout(300) @pytest.mark.teste2e def test_page_scopes(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) def on_change(state, var, val, module): if var == \"x\" and \"page3\" in module: state.y = val * 10 gui.add_page(\"page1\", page1) gui.add_page(\"page2\", page2) gui.add_page(\"page3\", page3) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"10\" assert page.query_selector(\"#x2\").inner_text() == \"20\" assert page.query_selector(\"#y1\").inner_text() == \"20\" assert page.query_selector(\"#y2\").inner_text() == \"40\" page.goto(\"./page2\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"20\" assert page.query_selector(\"#x2\").inner_text() == \"40\" assert page.query_selector(\"#y1\").inner_text() == \"10\" assert page.query_selector(\"#y2\").inner_text() == \"20\" page.goto(\"./page3\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"50\" assert page.query_selector(\"#x2\").inner_text() == \"100\" page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#x1\") page.fill(\"#xinput\", \"15\") function_evaluated 
= True try: page.wait_for_function(\"document.querySelector('#y2').innerText !== '40'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return assert page.query_selector(\"#x1\").inner_text() == \"15\" assert page.query_selector(\"#x2\").inner_text() == \"30\" assert page.query_selector(\"#y1\").inner_text() == \"45\" assert page.query_selector(\"#y2\").inner_text() == \"90\" page.goto(\"./page2\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"45\" assert page.query_selector(\"#x2\").inner_text() == \"90\" assert page.query_selector(\"#y1\").inner_text() == \"15\" assert page.query_selector(\"#y2\").inner_text() == \"30\" page.fill(\"#xinput\", \"37\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#y2').innerText !== '30'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return assert page.query_selector(\"#x1\").inner_text() == \"37\" assert page.query_selector(\"#x2\").inner_text() == \"74\" assert page.query_selector(\"#y1\").inner_text() == \"185\" assert page.query_selector(\"#y2\").inner_text() == \"370\" page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"185\" assert page.query_selector(\"#x2\").inner_text() == \"370\" assert page.query_selector(\"#y1\").inner_text() == \"37\" assert page.query_selector(\"#y2\").inner_text() == \"74\" page.goto(\"./page3\") page.expect_websocket() page.wait_for_selector(\"#x1\") assert page.query_selector(\"#x1\").inner_text() == \"50\" assert page.query_selector(\"#x2\").inner_text() == \"100\" "} {"text": "import inspect import logging from importlib import util import pytest from taipy.gui import Gui, Markdown if util.find_spec(\"playwright\"): from playwright._impl._page import Page from .assets3.page1 import page as page1 def helpers_assert_text(page, s): val1 = page.query_selector(\"#t1\").inner_text() assert str(val1).startswith(s) # for issue #583 @pytest.mark.teste2e @pytest.mark.filterwarnings(\"ignore::Warning\") def test_page_scopes_main_var_access(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) n = \"Hello\" # noqa: F841 root_md = Markdown( \"\"\" <|{n}|input|id=i1|> \"\"\" ) gui.add_pages({\"/\": root_md, \"page1\": page1}) helpers.run_e2e(gui) page.goto(\"./\") page.expect_websocket() page.wait_for_selector(\"#t1\") page.wait_for_selector(\"#i1\") helpers_assert_text(page, \"Hello\") page.fill(\"#i1\", \"Hello World\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#t1').innerText !== 'Hello'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_text(page, \"Hello World\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations under the License. "}
{"text": "import inspect\nimport logging\nfrom importlib import util\nimport pytest\nfrom taipy.gui import Gui\n\nif util.find_spec(\"playwright\"):\n    from playwright._impl._page import Page\n\nfrom .assets4.page1 import page as page1\nfrom .assets4.page1 import reset_d\n\n@pytest.mark.timeout(300)\n@pytest.mark.teste2e\n@pytest.mark.filterwarnings(\"ignore::Warning\")\ndef test_page_scopes_state_runtime(page: \"Page\", gui: Gui, helpers):\n    gui._set_frame(inspect.currentframe())\n\n    def test(state):\n        reset_d(state)\n\n    def test2(state):\n        state[\"page1\"].d = 30\n\n    page_md = \"\"\"\n<|button|on_action=test|id=btn1|>\n<|button|on_action=test2|id=btn2|>\n\"\"\"\n    gui.add_page(\"page1\", page1)\n    gui.add_page(name=Gui._get_root_page_name(), page=page_md)\n    helpers.run_e2e(gui)\n    page.goto(\"./page1\")\n    page.expect_websocket()\n    page.wait_for_selector(\"#n1\")\n    text1 = page.query_selector(\"#t1\")\n    assert text1.inner_text() == \"20\"\n    page.fill(\"#n1\", \"21\")\n    function_evaluated = True\n    try:\n        page.wait_for_function(\"document.querySelector('#t1').innerText !== '20'\")\n        function_evaluated = True\n    except Exception as e:\n        function_evaluated = False\n        logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\")\n    if not function_evaluated:\n        return\n    text1 = page.query_selector(\"#t1\")\n    assert text1.inner_text() == \"21\"\n    page.click(\"#btn1\")\n    try:\n        page.wait_for_function(\"document.querySelector('#t1').innerText !== '21'\")\n        function_evaluated = True\n    except Exception as e:\n        function_evaluated = False\n        logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\")\n    if not function_evaluated:\n        return\n    text1 = page.query_selector(\"#t1\")\n    assert text1.inner_text() == \"20\"\n    page.click(\"#btn2\")\n    try:\n        page.wait_for_function(\"document.querySelector('#t1').innerText !== '20'\")\n        function_evaluated = True\n    except Exception as e:\n        function_evaluated = False\n        logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\")\n    if not function_evaluated:\n        return\n    text1 = page.query_selector(\"#t1\")\n    assert text1.inner_text() == \"30\" "}
{"text": "import inspect\nimport logging\nfrom importlib import util\nimport pytest\nfrom taipy.gui import Gui\n\nif util.find_spec(\"playwright\"):\n    from playwright._impl._page import Page\n\nfrom .assets2.page1 import page as page1\nfrom 
.assets2.page2 import page as page2 def helpers_assert_value(page, s1, s2, v1): s1_val = page.input_value(\"#s1 input\") assert str(s1_val).startswith(s1) s2_val = page.input_value(\"#s2 input\") assert str(s2_val).startswith(s2) val1 = page.query_selector(\"#v1\").inner_text() assert str(val1).startswith(v1) @pytest.mark.timeout(300) @pytest.mark.teste2e @pytest.mark.filterwarnings(\"ignore::Warning\") def test_page_scopes_binding(page: \"Page\", gui: Gui, helpers): gui._set_frame(inspect.currentframe()) operand_1 = 0 # noqa: F841 gui.add_page(\"page1\", page1) gui.add_page(\"page2\", page2) helpers.run_e2e(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"0\", \"0\", \"0\") page.fill(\"#s1 input\", \"15\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '0'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"0\", \"15\") page.fill(\"#s2 input\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '15'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"20\", \"35\") page.goto(\"./page2\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"15\", \"0\", \"0\") page.fill(\"#s2 input\", \"5\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '0'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"15\", \"5\", \"75\") page.fill(\"#s1 input\", \"17\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#v1').innerText !== '75'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return helpers_assert_value(page, \"17\", \"5\", \"85\") page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#s1\") helpers_assert_value(page, \"17\", \"20\", \"37\") "} {"text": "from taipy.gui import Markdown, Page class Page1(Page): def __init__(self): self.operand_2 = 0 super().__init__() def create_page(self): return Markdown(\"page1.md\") def reset(state): state.operand_2 = 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from taipy.gui import Markdown, Page class Page2(Page): def __init__(self): self.operand_2 = 0 super().__init__() def create_page(self): return Markdown(\"page2.md\") "} {"text": "from taipy.gui import Markdown, Page class Page1(Page): def create_page(self): return Markdown( \"\"\" <|{n}|id=t1|> \"\"\" ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from taipy.gui import Markdown x = 10 y = 20 def on_change(state, var, val): if var == \"x\": state.y = val * 3 page = Markdown( \"\"\" x = <|{x}|id=x1|> x * 2 = <|{x*2}|id=x2|> x number: <|{x}|number|id=xinput|> y = <|{y}|id=y1|> y * 2 = <|{y*2}|id=y2|> \"\"\" ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from taipy.gui import Markdown from .page1 import x as y from .page1 import y as x def on_change(state, var, val): if var == \"x\": state.y = val * 5 page = Markdown( \"\"\" y = <|{x}|id=x1|> y * 2 = <|{x*2}|id=x2|> y number: <|{x}|number|id=xinput|> x = <|{y}|id=y1|> x * 2 = <|{y*2}|id=y2|> \"\"\" ) "} {"text": "from taipy.gui import Markdown x = 50 page = Markdown( \"\"\" <|{x}|id=x1|> x * 2 = <|{x*2}|id=x2|> <|{x}|number|id=xinput|> \"\"\" ) "} {"text": "from taipy.gui import Markdown page = Markdown( \"\"\" # Page1 - Add Operand 1: <|{operand_1}|slider|id=s1|> Operand 2: <|{operand_2}|slider|id=s2|> Operand 1 + Operand 2 = <|{operand_1 + operand_2}|id=v1|> \"\"\" ) operand_2 = 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from taipy.gui import Markdown page = Markdown( \"\"\" # Page2 - Multiply Operand 1: <|{operand_1}|slider|id=s1|> Operand 2: <|{operand_2}|slider|id=s2|> Operand 1 * Operand 2 = <|{operand_1 * operand_2}|id=v1|> \"\"\" ) operand_2 = 0 "} {"text": "from taipy.gui import Markdown d = 20 def reset_d(state): state.d = d # a page = Markdown( \"\"\" <|{d}|text|id=t1|> <|{d}|number|id=n1|> \"\"\" ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from taipy.gui import Markdown page = Markdown( \"\"\" <|{n}|id=t1|> \"\"\" ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import inspect import logging from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_slider_action(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{x}|id=text1|> <|{x}|slider|id=slider1|> \"\"\" x = 10 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"10\" page.wait_for_selector(\"#slider1\") page.fill(\"#slider1 input\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#text1').innerText !== '10'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: text1_2 = page.query_selector(\"#text1\") assert text1_2.inner_text() == \"20\" @pytest.mark.teste2e def test_slider_action_on_change(page: \"Page\", gui: Gui, helpers): d = {\"v1\": 10, \"v2\": 10} # noqa: F841 def on_change(state, var, val): if var == \"d.v2\": d = {\"v1\": 2 * val} state.d.update(d) page_md = \"\"\" Value: <|{d.v1}|id=text1|> Slider: <|{d.v2}|slider|id=slider1|> \"\"\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"10\" page.wait_for_selector(\"#slider1\") page.fill(\"#slider1 input\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#text1').innerText !== '10'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: text1_2 = page.query_selector(\"#text1\") assert text1_2.inner_text() == \"40\" "} {"text": "import inspect import logging from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_button_action(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{x}|id=text1|> <|Action|button|on_action=do_something_fn|id=button1|> \"\"\" x = 10 # noqa: F841 def do_something_fn(state): state.x = state.x * 2 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) 
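# Typical e2e flow for these tests: run the app through the test helper, drive the page with Playwright,
# trigger the control, then poll with wait_for_function before asserting; the try/except around the wait
# logs a debug message and guards the final assertion so a timeout does not fail the test outright.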
helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"10\" page.click(\"#button1\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#text1').innerText !== '10'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: text1_2 = page.query_selector(\"#text1\") assert text1_2.inner_text() == \"20\" "} {"text": "import inspect import time from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui, State @pytest.mark.teste2e def test_selector_action(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{x}|selector|lov=Item 1;Item 2;Item 3|id=selector1|> \"\"\" x = \"Item 1\" # noqa: F841 def on_init(state: State): assert state.x == \"Item 1\" def on_change(state: State, var, val): if var == \"x\": assert val == \"Item 3\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"ul#selector1\") page.click('#selector1 > div[data-id=\"Item 3\"]') page.wait_for_function( \"document.querySelector('#selector1 > div[data-id=\\\"Item 3\\\"]').classList.contains('Mui-selected')\" ) "} {"text": "import inspect import logging from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui def edit_and_assert_page(page: \"Page\"): assert_input(page, \"0\") page.fill(\"#input2 input\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#val1').innerText !== '0'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return assert_input(page, \"20\") page.fill(\"#input1\", \"30\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#val1').innerText !== '20'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if not function_evaluated: return assert_input(page, \"30\") def assert_input(page: \"Page\", val: str): val1 = page.query_selector(\"#val1\").inner_text() assert str(val1).startswith(val) val2 = page.query_selector(\"#val2\").inner_text() assert str(val2).startswith(f\"Val: {val}\") inp1 = page.input_value(\"input#input1\") assert str(inp1).startswith(val) inp2 = page.input_value(\"#input2 input\") assert str(inp2).startswith(val) @pytest.mark.filterwarnings(\"ignore::Warning\") @pytest.mark.teste2e def test_slider_input_reload(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" #Test Multi Number <|{val}|id=val1|> <|Val: {val}|id=val2|> <|{val}|number|id=input1|> <|{val}|slider|id=input2|> \"\"\" val = 0 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"page1\", page=page_md) helpers.run_e2e_multi_client(gui) page.goto(\"./page1\") page.expect_websocket() page.wait_for_selector(\"#val1\") edit_and_assert_page(page) page.reload() page.expect_websocket() page.wait_for_selector(\"#val1\") assert_input(page, \"30\") page.evaluate(\"window.localStorage.removeItem('TaipyClientId')\") page.reload() page.expect_websocket() page.wait_for_selector(\"#val1\") assert_input(page, \"0\") "} {"text": "import inspect import logging from 
importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_dict(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{a_dict[a_key]}|input|id=inp1|> <|{a_dict.key}|input|id=inp2|> <|test|button|on_action=on_action_1|id=btn1|> <|test|button|on_action=on_action_2|id=btn2|> \"\"\" a_key = \"key\" a_dict = {a_key: \"Taipy\"} # noqa: F841 def on_action_1(state): state.a_dict.key = \"Hello\" def on_action_2(state): state.a_dict[state.a_key] = \"World\" gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#inp1\") assert_text(page, \"Taipy\", \"Taipy\") page.fill(\"input#inp1\", \"Taipy is the best\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#inp2').value !== 'Taipy'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: assert_text(page, \"Taipy is the best\", \"Taipy is the best\") page.fill(\"#inp2\", \"Taipy-Gui\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#inp1').value !== 'Taipy is the best'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: assert_text(page, \"Taipy-Gui\", \"Taipy-Gui\") page.click(\"#btn1\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#inp1').value !== 'Taipy-Gui'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: assert_text(page, \"Hello\", \"Hello\") page.click(\"#btn2\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#inp1').value !== 'Hello'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: assert_text(page, \"World\", \"World\") def assert_text(page, inp1, inp2): assert page.input_value(\"input#inp1\") == inp1 assert page.input_value(\"input#inp2\") == inp2 "} {"text": "import inspect import logging from importlib import util import pytest if util.find_spec(\"playwright\"): from playwright._impl._page import Page from taipy.gui import Gui @pytest.mark.teste2e def test_text_edit(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{x}|text|id=text1|> <|{x}|input|id=input1|> \"\"\" x = \"Hey\" # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"Hey\" page.wait_for_selector(\"#input1\") page.fill(\"#input1\", \"There\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#text1').innerText !== 'Hey'\") except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: text1_2 = page.query_selector(\"#text1\") assert text1_2.inner_text() == \"There\" @pytest.mark.teste2e def test_number_edit(page: \"Page\", gui: Gui, helpers): page_md = \"\"\" <|{x}|text|id=text1|> <|{x}|number|id=number1|> \"\"\" x = 10 # noqa: F841 gui._set_frame(inspect.currentframe()) gui.add_page(name=\"test\", page=page_md) helpers.run_e2e(gui) 
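# Both controls are bound to the same variable x, so editing the number input should be reflected in the
# text control; the steps below fill the number field and wait for the text to change from "10" before asserting.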
page.goto(\"./test\") page.expect_websocket() page.wait_for_selector(\"#text1\") text1 = page.query_selector(\"#text1\") assert text1.inner_text() == \"10\" page.wait_for_selector(\"#number1\") page.fill(\"#number1\", \"20\") function_evaluated = True try: page.wait_for_function(\"document.querySelector('#text1').innerText !== '10'\") function_evaluated = True except Exception as e: function_evaluated = False logging.getLogger().debug(f\"Function evaluation timeout.\\n{e}\") if function_evaluated: text1_2 = page.query_selector(\"#text1\") assert text1_2.inner_text() == \"20\" "} {"text": "import inspect from datetime import datetime from importlib import util import pandas # type: ignore from flask import g from taipy.gui import Gui from taipy.gui.data.data_format import _DataFormat from taipy.gui.data.decimator import ScatterDecimator from taipy.gui.data.pandas_data_accessor import _PandasDataAccessor def test_simple_data(gui: Gui, helpers, small_dataframe): accessor = _PandasDataAccessor() pd = pandas.DataFrame(data=small_dataframe) ret_data = accessor.get_data(gui, \"x\", pd, {\"start\": 0, \"end\": -1}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"rowcount\"] == 3 data = value[\"data\"] assert len(data) == 3 def test_simple_data_with_arrow(gui: Gui, helpers, small_dataframe): if util.find_spec(\"pyarrow\"): accessor = _PandasDataAccessor() pd = pandas.DataFrame(data=small_dataframe) ret_data = accessor.get_data(gui, \"x\", pd, {\"start\": 0, \"end\": -1}, _DataFormat.APACHE_ARROW) assert ret_data value = ret_data[\"value\"] assert value assert value[\"rowcount\"] == 3 data = value[\"data\"] assert isinstance(data, bytes) def test_get_all_simple_data(gui: Gui, helpers, small_dataframe): accessor = _PandasDataAccessor() pd = pandas.DataFrame(data=small_dataframe) ret_data = accessor.get_data(gui, \"x\", pd, {\"alldata\": True}, _DataFormat.JSON) assert ret_data assert ret_data[\"alldata\"] is True value = ret_data[\"value\"] assert value data = value[\"data\"] assert data == small_dataframe def test_slice(gui: Gui, helpers, small_dataframe): accessor = _PandasDataAccessor() pd = pandas.DataFrame(data=small_dataframe) value = accessor.get_data(gui, \"x\", pd, {\"start\": 0, \"end\": 1}, _DataFormat.JSON)[\"value\"] assert value[\"rowcount\"] == 3 data = value[\"data\"] assert len(data) == 2 value = accessor.get_data(gui, \"x\", pd, {\"start\": \"0\", \"end\": \"1\"}, _DataFormat.JSON)[\"value\"] data = value[\"data\"] assert len(data) == 2 def test_sort(gui: Gui, helpers, small_dataframe): accessor = _PandasDataAccessor() pd = pandas.DataFrame(data=small_dataframe) query = {\"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"orderby\": \"name\", \"sort\": \"desc\"} data = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON)[\"value\"][\"data\"] assert data[0][\"name\"] == \"C\" def test_aggregate(gui: Gui, helpers, small_dataframe): accessor = _PandasDataAccessor() pd = pandas.DataFrame(data=small_dataframe) pd = pandas.concat( [pd, pandas.DataFrame(data={\"name\": [\"A\"], \"value\": [4]})], axis=0, join=\"outer\", ignore_index=True ) query = {\"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"aggregates\": [\"name\"], \"applies\": {\"value\": \"sum\"}} value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON)[\"value\"] assert value[\"rowcount\"] == 3 data = value[\"data\"] assert next(v.get(\"value\") for v in data if v.get(\"name\") == \"A\") == 5 def test_filters(gui: Gui, helpers, 
small_dataframe): accessor = _PandasDataAccessor() pd = pandas.DataFrame(data=small_dataframe) pd = pandas.concat( [pd, pandas.DataFrame(data={\"name\": [\"A\"], \"value\": [4]})], axis=0, join=\"outer\", ignore_index=True ) query = { \"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"filters\": [{\"col\": \"name\", \"action\": \"!=\", \"value\": \"\"}], } value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON) assert len(value[\"value\"][\"data\"]) == 4 query = { \"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"filters\": [{\"col\": \"name\", \"action\": \"==\", \"value\": \"\"}], } value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON) assert len(value[\"value\"][\"data\"]) == 0 query = { \"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"filters\": [{\"col\": \"name\", \"action\": \"==\", \"value\": \"A\"}], } value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON) assert len(value[\"value\"][\"data\"]) == 2 query = { \"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"filters\": [{\"col\": \"name\", \"action\": \"==\", \"value\": \"A\"}, {\"col\": \"value\", \"action\": \"==\", \"value\": 2}], } value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON) assert len(value[\"value\"][\"data\"]) == 0 query = { \"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"filters\": [{\"col\": \"name\", \"action\": \"!=\", \"value\": \"A\"}, {\"col\": \"value\", \"action\": \"==\", \"value\": 2}], } value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON) assert len(value[\"value\"][\"data\"]) == 1 assert value[\"value\"][\"data\"][0][\"_tp_index\"] == 1 def test_filter_by_date(gui: Gui, helpers, small_dataframe): accessor = _PandasDataAccessor() pd = pandas.DataFrame(data=small_dataframe) pd[\"a date\"] = [ datetime.fromisocalendar(2022, 28, 1), datetime.fromisocalendar(2022, 28, 2), datetime.fromisocalendar(2022, 28, 3), ] query = { \"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"filters\": [{\"col\": \"a date\", \"action\": \">\", \"value\": datetime.fromisocalendar(2022, 28, 3).isoformat() + \"Z\"}], } value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON) assert len(value[\"value\"][\"data\"]) == 0 query = { \"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"filters\": [{\"col\": \"a date\", \"action\": \">\", \"value\": datetime.fromisocalendar(2022, 28, 2).isoformat() + \"Z\"}], } value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON) assert len(value[\"value\"][\"data\"]) == 1 query = { \"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"filters\": [{\"col\": \"a date\", \"action\": \"<\", \"value\": datetime.fromisocalendar(2022, 28, 3).isoformat() + \"Z\"}], } value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON) assert len(value[\"value\"][\"data\"]) == 2 query = { \"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"filters\": [ {\"col\": \"a date\", \"action\": \"<\", \"value\": datetime.fromisocalendar(2022, 28, 2).isoformat() + \"Z\"}, {\"col\": \"a date\", \"action\": \">\", \"value\": datetime.fromisocalendar(2022, 28, 2).isoformat() + \"Z\"}, ], } value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON) assert len(value[\"value\"][\"data\"]) == 0 query = { \"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"filters\": [ {\"col\": \"a date\", \"action\": \"<\", \"value\": datetime.fromisocalendar(2022, 28, 3).isoformat() + \"Z\"}, {\"col\": \"a 
date\", \"action\": \">\", \"value\": datetime.fromisocalendar(2022, 28, 1).isoformat() + \"Z\"}, ], } value = accessor.get_data(gui, \"x\", pd, query, _DataFormat.JSON) assert len(value[\"value\"][\"data\"]) == 1 def test_decimator(gui: Gui, helpers, small_dataframe): a_decimator = ScatterDecimator() accessor = _PandasDataAccessor() pd = pandas.DataFrame(data=small_dataframe) # set gui frame gui._set_frame(inspect.currentframe()) gui.add_page(\"test\", \"<|Hello {a_decimator}|button|id={btn_id}|>\") gui.run(run_server=False) flask_client = gui._server.test_client() cid = helpers.create_scope_and_get_sid(gui) # Get the jsx once so that the page will be evaluated -> variable will be registered flask_client.get(f\"/taipy-jsx/test?client_id={cid}\") with gui.get_flask_app().test_request_context(f\"/taipy-jsx/test/?client_id={cid}\", data={\"client_id\": cid}): g.client_id = cid ret_data = accessor.get_data( gui, \"x\", pd, { \"start\": 0, \"end\": -1, \"alldata\": True, \"decimatorPayload\": { \"decimators\": [{\"decimator\": \"a_decimator\", \"chartMode\": \"markers\"}], \"width\": 100, }, }, _DataFormat.JSON, ) assert ret_data value = ret_data[\"value\"] assert value data = value[\"data\"] assert len(data) == 2 "} {"text": "from importlib import util from taipy.gui import Gui from taipy.gui.data.array_dict_data_accessor import _ArrayDictDataAccessor from taipy.gui.data.data_format import _DataFormat from taipy.gui.utils import _MapDict an_array = [1, 2, 3] def test_simple_data(gui: Gui, helpers): accessor = _ArrayDictDataAccessor() ret_data = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": -1}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"rowcount\"] == 3 data = value[\"data\"] assert len(data) == 3 def test_simple_data_with_arrow(gui: Gui, helpers): if util.find_spec(\"pyarrow\"): accessor = _ArrayDictDataAccessor() ret_data = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": -1}, _DataFormat.APACHE_ARROW) assert ret_data value = ret_data[\"value\"] assert value assert value[\"rowcount\"] == 3 data = value[\"data\"] assert isinstance(data, bytes) def test_slice(gui: Gui, helpers): accessor = _ArrayDictDataAccessor() value = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": 1}, _DataFormat.JSON)[\"value\"] assert value[\"rowcount\"] == 3 data = value[\"data\"] assert len(data) == 2 value = accessor.get_data(gui, \"x\", an_array, {\"start\": \"0\", \"end\": \"1\"}, _DataFormat.JSON)[\"value\"] data = value[\"data\"] assert len(data) == 2 def test_sort(gui: Gui, helpers): accessor = _ArrayDictDataAccessor() a_dict = {\"name\": [\"A\", \"B\", \"C\"], \"value\": [3, 2, 1]} query = {\"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"orderby\": \"name\", \"sort\": \"desc\"} data = accessor.get_data(gui, \"x\", a_dict, query, _DataFormat.JSON)[\"value\"][\"data\"] assert data[0][\"name\"] == \"C\" def test_aggregate(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() a_dict = {\"name\": [\"A\", \"B\", \"C\", \"A\"], \"value\": [3, 2, 1, 2]} query = {\"columns\": [\"name\", \"value\"], \"start\": 0, \"end\": -1, \"aggregates\": [\"name\"], \"applies\": {\"value\": \"sum\"}} value = accessor.get_data(gui, \"x\", a_dict, query, _DataFormat.JSON)[\"value\"] assert value[\"rowcount\"] == 3 data = value[\"data\"] agregValue = next(v.get(\"value\") for v in data if v.get(\"name\") == \"A\") assert agregValue == 5 def test_array_of_array(gui: Gui, helpers, small_dataframe): accessor 
= _ArrayDictDataAccessor() an_array = [[1, 2, 3], [2, 4, 6]] ret_data = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": -1}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"rowcount\"] == 2 data = value[\"data\"] assert len(data) == 2 assert len(data[0]) == 4 # including _tp_index def test_empty_array(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() an_array: list[str] = [] ret_data = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": -1}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"rowcount\"] == 0 data = value[\"data\"] assert len(data) == 0 def test_array_of_diff_array(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() an_array = [[1, 2, 3], [2, 4]] ret_data = accessor.get_data(gui, \"x\", an_array, {\"start\": 0, \"end\": -1, \"alldata\": True}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"multi\"] is True data = value[\"data\"] assert len(data) == 2 assert len(data[0][\"0/0\"]) == 3 assert len(data[1][\"1/0\"]) == 2 def test_array_of_dicts(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() an_array_of_dicts = [ { \"temperatures\": [ [17.2, 27.4, 28.6, 21.5], [5.6, 15.1, 20.2, 8.1], [26.6, 22.8, 21.8, 24.0], [22.3, 15.5, 13.4, 19.6], [3.9, 18.9, 25.7, 9.8], ], \"cities\": [\"Hanoi\", \"Paris\", \"Rio de Janeiro\", \"Sydney\", \"Washington\"], }, {\"seasons\": [\"Winter\", \"Summer\", \"Spring\", \"Autumn\"]}, ] ret_data = accessor.get_data( gui, \"x\", an_array_of_dicts, {\"start\": 0, \"end\": -1, \"alldata\": True}, _DataFormat.JSON ) assert ret_data value = ret_data[\"value\"] assert value assert value[\"multi\"] is True data = value[\"data\"] assert len(data) == 2 assert len(data[0][\"temperatures\"]) == 5 assert len(data[1][\"seasons\"]) == 4 def test_array_of_Mapdicts(gui: Gui, helpers, small_dataframe): accessor = _ArrayDictDataAccessor() dict1 = _MapDict( { \"temperatures\": [ [17.2, 27.4, 28.6, 21.5], [5.6, 15.1, 20.2, 8.1], [26.6, 22.8, 21.8, 24.0], [22.3, 15.5, 13.4, 19.6], [3.9, 18.9, 25.7, 9.8], ], \"cities\": [\"Hanoi\", \"Paris\", \"Rio de Janeiro\", \"Sydney\", \"Washington\"], } ) dict2 = _MapDict({\"seasons\": [\"Winter\", \"Summer\", \"Spring\", \"Autumn\"]}) ret_data = accessor.get_data(gui, \"x\", [dict1, dict2], {\"start\": 0, \"end\": -1, \"alldata\": True}, _DataFormat.JSON) assert ret_data value = ret_data[\"value\"] assert value assert value[\"multi\"] is True data = value[\"data\"] assert len(data) == 2 assert len(data[0][\"temperatures\"]) == 5 assert len(data[1][\"seasons\"]) == 4 "} {"text": "import inspect import taipy.gui.builder as tgb from taipy.gui import Gui def test_slider_builder(gui: Gui, test_client, helpers): gui._bind_var_val(\"x\", 10) with tgb.Page(frame=None) as page: tgb.slider(value=\"{x}\") expected_list = [ \"This is a header', '
This is a paragraph.', 'a text', \"
\", \"This is bold text inside the paragrah.\", ] helpers.test_control_builder(gui, page, expected_list) "} {"text": "import inspect import taipy.gui.builder as tgb from taipy.gui import Gui def test_status_builder(gui: Gui, helpers): status = [{\"status\": \"info\", \"message\": \"Info Message\"}] # noqa: F841 with tgb.Page(frame=None) as page: tgb.status(value=\"{status}\") expected_list = [ \"\" expected_list = [ \"\" expected_list = [\"\" expected_list = [ \"\" expected_list = [\"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"{x}\" expected_list = [ \"\" expected_list = [\"\" expected_list = [\"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"{x}\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \" \"\"\" expected_list = [ \" # This is an expandable section <|expandable.end|> \"\"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"{dates}\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [\"\" expected_list = [\"\" expected_list = [ \" \"\"\" expected_list = [ \"\", ] helpers.test_control_md(gui, md_string, expected_list) def test_pane_persistent_md(gui: Gui, test_client, helpers): gui._bind_var_val(\"show_pane\", False) md_string = \"\"\" <|{show_pane}|pane|persistent| # This is a Pane |> \"\"\" expected_list = [ \"\", ] helpers.test_control_md(gui, md_string, expected_list) def test_pane_html(gui: Gui, test_client, helpers): gui._bind_var_val(\"show_pane\", False) html_string = '
This is a Pane
' expected_list = [ \"\", ] helpers.test_control_html(gui, html_string, expected_list) "} {"text": "from taipy.gui import Gui def test_text_md_1(gui: Gui, test_client, helpers): gui._bind_var_val(\"x\", 10) md_string = \"<|{x}|>\" expected_list = [\"{x}\" expected_list = [\"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [\"\" expected_list = [\"\" expected_list = [ \"\" expected_list = [ \"\" ) expected_list = [ \"' ) expected_list = [ \"' ) expected_list = [ \"' ) expected_list = [ \"\" expected_list = [ \" \"\"\" expected_list = [\" # This is a layout section <|layout.end|> \"\"\" expected_list = [\"\" expected_list = [\"\" expected_list = [ \"{x}\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"{date}\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" expected_list = [ \"\" gui._set_frame(inspect.currentframe()) expected_list = [ \"\" gui._set_frame(inspect.currentframe()) expected_list = [ \"\" gui._set_frame(inspect.currentframe()) expected_list = [ \" \"\"\" expected_list = [\" # This is a part <|part.end|> \"\"\" expected_list = [\" None: self.assign = kwargs.get(\"assign\") class TestGuiCoreContext_is_submittable: def test_submit_entity(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch( \"src.taipy.gui_core._context.is_submittable\", side_effect=mock_is_true ): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.submit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert str(assign.call_args.args[1]).startswith(\"Error submitting entity.\") with patch(\"src.taipy.gui_core._context.is_submittable\", side_effect=mock_is_submittable_false): assign.reset_mock() gui_core_context.submit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert str(assign.call_args.args[1]).endswith(\"is not submittable.\") "} {"text": "from unittest.mock import Mock, patch import pytest from src.taipy.gui_core._context import _GuiCoreContext, _SubmissionStatus from taipy.core import Status class MockJob: def __init__(self, id: str, status): self.status = status self.id = id def is_failed(self): return self.status == Status.FAILED def is_canceled(self): return self.status == Status.CANCELED def is_blocked(self): return self.status == Status.BLOCKED def is_pending(self): return self.status == Status.PENDING def is_running(self): return self.status == Status.RUNNING def is_completed(self): return self.status == Status.COMPLETED def is_skipped(self): return self.status == Status.SKIPPED def is_abandoned(self): return self.status == Status.ABANDONED def is_submitted(self): return self.status == Status.SUBMITTED def mock_core_get(entity_id): jobs = { \"job0_submitted\": MockJob(\"job0_submitted\", Status.SUBMITTED), \"job1_failed\": MockJob(\"job1_failed\", Status.FAILED), \"job2_canceled\": MockJob(\"job2_canceled\", Status.CANCELED), \"job3_blocked\": MockJob(\"job3_blocked\", Status.BLOCKED), \"job4_pending\": MockJob(\"job4_pending\", Status.PENDING), \"job5_running\": MockJob(\"job5_running\", Status.RUNNING), \"job6_completed\": MockJob(\"job6_completed\", Status.COMPLETED), \"job7_skipped\": MockJob(\"job7_skipped\", Status.SKIPPED), 
\"job8_abandoned\": MockJob(\"job8_abandoned\", Status.ABANDONED), } return jobs[entity_id] class TestGuiCoreContext_SubmissionStatus: @pytest.mark.parametrize( \"job_ids, expected_status\", [ ([\"job1_failed\"], _SubmissionStatus.FAILED), ([\"job2_canceled\"], _SubmissionStatus.CANCELED), ([\"job3_blocked\"], _SubmissionStatus.BLOCKED), ([\"job4_pending\"], _SubmissionStatus.WAITING), ([\"job5_running\"], _SubmissionStatus.RUNNING), ([\"job6_completed\"], _SubmissionStatus.COMPLETED), ([\"job7_skipped\"], _SubmissionStatus.COMPLETED), ([\"job8_abandoned\"], _SubmissionStatus.UNDEFINED), ], ) def test_single_job(self, job_ids, expected_status): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) status = gui_core_context._get_submittable_status(job_ids) assert status == expected_status @pytest.mark.parametrize( \"job_ids, expected_status\", [ ([\"job1_failed\", \"job1_failed\"], _SubmissionStatus.FAILED), ([\"job1_failed\", \"job2_canceled\"], _SubmissionStatus.FAILED), ([\"job1_failed\", \"job3_blocked\"], _SubmissionStatus.FAILED), ([\"job1_failed\", \"job4_pending\"], _SubmissionStatus.FAILED), ([\"job1_failed\", \"job5_running\"], _SubmissionStatus.FAILED), ([\"job1_failed\", \"job6_completed\"], _SubmissionStatus.FAILED), ([\"job1_failed\", \"job7_skipped\"], _SubmissionStatus.FAILED), ([\"job1_failed\", \"job8_abandoned\"], _SubmissionStatus.FAILED), ([\"job2_canceled\", \"job1_failed\"], _SubmissionStatus.FAILED), ([\"job3_blocked\", \"job1_failed\"], _SubmissionStatus.FAILED), ([\"job4_pending\", \"job1_failed\"], _SubmissionStatus.FAILED), ([\"job5_running\", \"job1_failed\"], _SubmissionStatus.FAILED), ([\"job6_completed\", \"job1_failed\"], _SubmissionStatus.FAILED), ([\"job7_skipped\", \"job1_failed\"], _SubmissionStatus.FAILED), ([\"job8_abandoned\", \"job1_failed\"], _SubmissionStatus.FAILED), ], ) def test_one_failed_job(self, job_ids, expected_status): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) status = gui_core_context._get_submittable_status(job_ids) assert status == expected_status @pytest.mark.parametrize( \"job_ids, expected_status\", [ ([\"job2_canceled\", \"job2_canceled\"], _SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job3_blocked\"], _SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job4_pending\"], _SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job5_running\"], _SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job6_completed\"], _SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job7_skipped\"], _SubmissionStatus.CANCELED), ([\"job2_canceled\", \"job8_abandoned\"], _SubmissionStatus.CANCELED), ([\"job3_blocked\", \"job2_canceled\"], _SubmissionStatus.CANCELED), ([\"job4_pending\", \"job2_canceled\"], _SubmissionStatus.CANCELED), ([\"job5_running\", \"job2_canceled\"], _SubmissionStatus.CANCELED), ([\"job6_completed\", \"job2_canceled\"], _SubmissionStatus.CANCELED), ([\"job7_skipped\", \"job2_canceled\"], _SubmissionStatus.CANCELED), ([\"job8_abandoned\", \"job2_canceled\"], _SubmissionStatus.CANCELED), ], ) def test_no_failed_one_cancel(self, job_ids, expected_status): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) status = gui_core_context._get_submittable_status(job_ids) assert status == expected_status @pytest.mark.parametrize( \"job_ids, expected_status\", [ ([\"job4_pending\", \"job3_blocked\"], 
_SubmissionStatus.WAITING), ([\"job4_pending\", \"job4_pending\"], _SubmissionStatus.WAITING), ([\"job4_pending\", \"job6_completed\"], _SubmissionStatus.WAITING), ([\"job4_pending\", \"job7_skipped\"], _SubmissionStatus.WAITING), ([\"job3_blocked\", \"job4_pending\"], _SubmissionStatus.WAITING), ([\"job6_completed\", \"job4_pending\"], _SubmissionStatus.WAITING), ([\"job7_skipped\", \"job4_pending\"], _SubmissionStatus.WAITING), ], ) def test_no_failed_or_cancel_one_pending(self, job_ids, expected_status): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) status = gui_core_context._get_submittable_status(job_ids) assert status == expected_status @pytest.mark.parametrize( \"job_ids, expected_status\", [ ([\"job5_running\", \"job3_blocked\"], _SubmissionStatus.RUNNING), ([\"job5_running\", \"job4_pending\"], _SubmissionStatus.RUNNING), ([\"job5_running\", \"job5_running\"], _SubmissionStatus.RUNNING), ([\"job5_running\", \"job6_completed\"], _SubmissionStatus.RUNNING), ([\"job5_running\", \"job7_skipped\"], _SubmissionStatus.RUNNING), ([\"job3_blocked\", \"job5_running\"], _SubmissionStatus.RUNNING), ([\"job4_pending\", \"job5_running\"], _SubmissionStatus.RUNNING), ([\"job6_completed\", \"job5_running\"], _SubmissionStatus.RUNNING), ([\"job7_skipped\", \"job5_running\"], _SubmissionStatus.RUNNING), ], ) def test_no_failed_cancel_nor_pending_one_running(self, job_ids, expected_status): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) status = gui_core_context._get_submittable_status(job_ids) assert status == expected_status @pytest.mark.parametrize( \"job_ids, expected_status\", [ ([\"job3_blocked\", \"job3_blocked\"], _SubmissionStatus.BLOCKED), ([\"job3_blocked\", \"job6_completed\"], _SubmissionStatus.BLOCKED), ([\"job3_blocked\", \"job7_skipped\"], _SubmissionStatus.BLOCKED), ([\"job6_completed\", \"job3_blocked\"], _SubmissionStatus.BLOCKED), ([\"job7_skipped\", \"job3_blocked\"], _SubmissionStatus.BLOCKED), ], ) def test_no_failed_cancel_pending_nor_running_one_blocked(self, job_ids, expected_status): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) status = gui_core_context._get_submittable_status(job_ids) assert status == expected_status @pytest.mark.parametrize( \"job_ids, expected_status\", [ ([\"job6_completed\", \"job6_completed\"], _SubmissionStatus.COMPLETED), ([\"job6_completed\", \"job7_skipped\"], _SubmissionStatus.COMPLETED), ([\"job7_skipped\", \"job6_completed\"], _SubmissionStatus.COMPLETED), ([\"job7_skipped\", \"job7_skipped\"], _SubmissionStatus.COMPLETED), ], ) def test_only_completed_or_skipped(self, job_ids, expected_status): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) status = gui_core_context._get_submittable_status(job_ids) assert status == expected_status @pytest.mark.parametrize( \"job_ids, expected_status\", [ ([\"job3_blocked\", \"job8_abandoned\"], _SubmissionStatus.UNDEFINED), ([\"job4_pending\", \"job8_abandoned\"], _SubmissionStatus.UNDEFINED), ([\"job5_running\", \"job8_abandoned\"], _SubmissionStatus.UNDEFINED), ([\"job6_completed\", \"job8_abandoned\"], _SubmissionStatus.UNDEFINED), ([\"job7_skipped\", \"job8_abandoned\"], _SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job8_abandoned\"], _SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", 
\"job3_blocked\"], _SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job4_pending\"], _SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job5_running\"], _SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job6_completed\"], _SubmissionStatus.UNDEFINED), ([\"job8_abandoned\", \"job7_skipped\"], _SubmissionStatus.UNDEFINED), ], ) def test_WRONG_CASE_abandoned_without_cancel_or_failed(self, job_ids, expected_status): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) status = gui_core_context._get_submittable_status(job_ids) assert status == expected_status def test_no_job(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) status = gui_core_context._get_submittable_status([]) assert status == _SubmissionStatus.UNDEFINED "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from unittest.mock import Mock, patch import pytest from src.taipy.gui_core._context import _GuiCoreContext from taipy.config.common.scope import Scope from taipy.core import Job, Scenario, Task from taipy.core.data.pickle import PickleDataNode from taipy.gui import Gui a_scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) a_task = Task(\"task_config_id\", {}, print) a_job = Job(\"JOB_job_id\", a_task, \"submit_id\", a_scenario.id) a_job.isfinished = lambda s: True a_datanode = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) def mock_is_readable_false(entity_id): return False def mock_is_true(entity_id): return True def mock_core_get(entity_id): if entity_id == a_scenario.id: return a_scenario if entity_id == a_job.id: return a_job if entity_id == a_datanode.id: return a_datanode return a_task class MockState: def __init__(self, **kwargs) -> None: self.assign = kwargs.get(\"assign\") class TestGuiCoreContext_is_readable: def test_scenario_adapter(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) outcome = gui_core_context.scenario_adapter(a_scenario) assert isinstance(outcome, tuple) assert outcome[0] == a_scenario.id with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): outcome = gui_core_context.scenario_adapter(a_scenario) assert outcome is None def test_get_scenario_by_id(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) outcome = gui_core_context.get_scenario_by_id(a_scenario.id) assert outcome is not None with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): outcome = gui_core_context.get_scenario_by_id(a_scenario.id) assert outcome is None def test_crud_scenario(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.crud_scenario( MockState(assign=assign), \"\", { \"args\": [ 
True, False, {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_not_called() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): assign.reset_mock() gui_core_context.crud_scenario( MockState(assign=assign), \"\", { \"args\": [ True, False, {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sc_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") def test_edit_entity(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.edit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert assign.call_args.args[1] == \"\" with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): assign.reset_mock() gui_core_context.edit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") def test_scenario_status_callback(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get) as mockget: mockget.reset_mock() gui_core_context = _GuiCoreContext(Mock()) gui_core_context.scenario_status_callback(a_job.id) mockget.assert_called() found = False for call in mockget.call_args_list: if call.args[0] == a_job.id: found = True break assert found is True mockget.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): gui_core_context.scenario_status_callback(a_job.id) mockget.assert_not_called() def test_data_node_adapter(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) outcome = gui_core_context.data_node_adapter(a_datanode) assert isinstance(outcome, tuple) assert outcome[0] == a_datanode.id with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): outcome = gui_core_context.data_node_adapter(a_datanode) assert outcome is None def test_job_adapter(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) outcome = gui_core_context.job_adapter(a_job) assert isinstance(outcome, tuple) assert outcome[0] == a_job.id with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): outcome = gui_core_context.job_adapter(a_job) assert outcome is None def test_act_on_jobs(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch( \"src.taipy.gui_core._context.is_deletable\", side_effect=mock_is_true ): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.act_on_jobs( MockState(assign=assign), \"\", { \"args\": [ {\"id\": [a_job.id], \"action\": \"delete\"}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_js_error\" assert str(assign.call_args.args[1]).find(\"is not readable.\") == -1 assign.reset_mock() gui_core_context.act_on_jobs( MockState(assign=assign), \"\", { \"args\": [ {\"id\": [a_job.id], \"action\": \"cancel\"}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_js_error\" 
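# find(...) == -1 asserts the error message does NOT mention readability while is_readable is left unpatched;
# the block below patches is_readable to return False and expects the message to end with "is not readable." instead.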
assert str(assign.call_args.args[1]).find(\"is not readable.\") == -1 assign.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): gui_core_context.act_on_jobs( MockState(assign=assign), \"\", { \"args\": [ {\"id\": [a_job.id], \"action\": \"delete\"}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_js_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") assign.reset_mock() gui_core_context.act_on_jobs( MockState(assign=assign), \"\", { \"args\": [ {\"id\": [a_job.id], \"action\": \"cancel\"}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_js_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") def test_edit_data_node(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.edit_data_node( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert assign.call_args.args[1] == \"\" with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): assign.reset_mock() gui_core_context.edit_data_node( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") def test_lock_datanode_for_edit(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): mockGui = Mock(Gui) mockGui._get_client_id = lambda: \"a_client_id\" gui_core_context = _GuiCoreContext(mockGui) assign = Mock() gui_core_context.lock_datanode_for_edit( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert assign.call_args.args[1] == \"\" with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): assign.reset_mock() gui_core_context.lock_datanode_for_edit( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") def test_get_scenarios_for_owner(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get) as mockget: gui_core_context = _GuiCoreContext(Mock()) gui_core_context.get_scenarios_for_owner(a_scenario.id) mockget.assert_called_once() mockget.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): gui_core_context.scenario_status_callback(a_scenario.id) mockget.assert_not_called() def test_update_data(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): mockGui = Mock(Gui) mockGui._get_client_id = lambda: \"a_client_id\" gui_core_context = _GuiCoreContext(mockGui) assign = Mock() gui_core_context.update_data( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called() assert assign.call_args_list[0].args[0] == \"gui_core_dv_error\" assert assign.call_args_list[0].args[1] == \"\" assign.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): gui_core_context.update_data( MockState(assign=assign), \"\", { 
\"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") def test_tabular_data_edit(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): mockGui = Mock(Gui) mockGui._get_client_id = lambda: \"a_client_id\" gui_core_context = _GuiCoreContext(mockGui) assign = Mock() gui_core_context.tabular_data_edit( MockState(assign=assign), \"\", { \"user_data\": {\"dn_id\": a_datanode.id}, }, ) assign.assert_called_once() assert assign.call_args_list[0].args[0] == \"gui_core_dv_error\" assert ( assign.call_args_list[0].args[1] == \"Error updating Datanode tabular value: type does not support at[] indexer.\" ) assign.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): gui_core_context.tabular_data_edit( MockState(assign=assign), \"\", { \"user_data\": {\"dn_id\": a_datanode.id}, }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") def test_get_data_node_tabular_data(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get) as mockget: gui_core_context = _GuiCoreContext(Mock()) gui_core_context.get_data_node_tabular_data(a_datanode, a_datanode.id) mockget.assert_called_once() mockget.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): gui_core_context.get_data_node_tabular_data(a_datanode, a_datanode.id) mockget.assert_not_called() def test_get_data_node_tabular_columns(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get) as mockget: gui_core_context = _GuiCoreContext(Mock()) gui_core_context.get_data_node_tabular_columns(a_datanode, a_datanode.id) mockget.assert_called_once() mockget.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): gui_core_context.get_data_node_tabular_columns(a_datanode, a_datanode.id) mockget.assert_not_called() def test_get_data_node_chart_config(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get) as mockget: gui_core_context = _GuiCoreContext(Mock()) gui_core_context.get_data_node_chart_config(a_datanode, a_datanode.id) mockget.assert_called_once() mockget.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_readable_false): gui_core_context.get_data_node_chart_config(a_datanode, a_datanode.id) mockget.assert_not_called() "} {"text": "from unittest.mock import Mock, patch import pytest from src.taipy.gui_core._context import _GuiCoreContext from taipy.config.common.scope import Scope from taipy.core import Job, Scenario, Task from taipy.core.data.pickle import PickleDataNode a_scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) a_task = Task(\"task_config_id\", {}, print) a_job = Job(\"JOB_job_id\", a_task, \"submit_id\", a_scenario.id) a_job.isfinished = lambda s: True a_datanode = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) def mock_core_get(entity_id): if entity_id == a_scenario.id: return a_scenario if entity_id == a_job.id: return a_job if entity_id == a_datanode.id: return a_datanode return a_task def mock_is_deletable_false(entity_id): return False def mock_is_true(entity_id): return True class MockState: def __init__(self, **kwargs) -> None: self.assign = 
kwargs.get(\"assign\") class TestGuiCoreContext_is_deletable: def test_crud_scenario(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch( \"src.taipy.gui_core._context.is_deletable\", side_effect=mock_is_true ): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.crud_scenario( MockState(assign=assign), \"\", { \"args\": [ True, True, {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sc_error\" assert str(assign.call_args.args[1]).startswith(\"Error deleting Scenario.\") with patch(\"src.taipy.gui_core._context.is_deletable\", side_effect=mock_is_deletable_false): assign.reset_mock() gui_core_context.crud_scenario( MockState(assign=assign), \"\", { \"args\": [ True, True, {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sc_error\" assert str(assign.call_args.args[1]).endswith(\"is not deletable.\") def test_act_on_jobs(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch( \"src.taipy.gui_core._context.is_deletable\", side_effect=mock_is_true ): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.act_on_jobs( MockState(assign=assign), \"\", { \"args\": [ {\"id\": [a_job.id], \"action\": \"delete\"}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_js_error\" assert str(assign.call_args.args[1]).find(\"is not deletable.\") == -1 assign.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", side_effect=mock_is_deletable_false): gui_core_context.act_on_jobs( MockState(assign=assign), \"\", { \"args\": [ {\"id\": [a_job.id], \"action\": \"delete\"}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_js_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") "} {"text": "from unittest.mock import Mock, patch import pytest from src.taipy.gui_core._context import _GuiCoreContext from taipy.config.common.scope import Scope from taipy.core import Job, Scenario, Task from taipy.core.data.pickle import PickleDataNode a_scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) a_task = Task(\"task_config_id\", {}, print) a_job = Job(\"JOB_job_id\", a_task, \"submit_id\", a_scenario.id) a_job.isfinished = lambda s: True a_datanode = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) def mock_core_get(entity_id): if entity_id == a_scenario.id: return a_scenario if entity_id == a_job.id: return a_job if entity_id == a_datanode.id: return a_datanode return a_task def mock_is_promotable_false(entity_id): return False def mock_is_true(entity_id): return True class MockState: def __init__(self, **kwargs) -> None: self.assign = kwargs.get(\"assign\") class TestGuiCoreContext_is_promotable: def test_edit_entity(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch( \"src.taipy.gui_core._context.is_promotable\", side_effect=mock_is_true ): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.edit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id, \"primary\": True}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert str(assign.call_args.args[1]).endswith(\"to primary because it doesn't belong to a cycle.\") assign.reset_mock() with 
patch(\"src.taipy.gui_core._context.is_promotable\", side_effect=mock_is_promotable_false): gui_core_context.edit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id, \"primary\": True}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert str(assign.call_args.args[1]).endswith(\"is not promotable.\") "} {"text": "from unittest.mock import Mock, patch import pytest from src.taipy.gui_core._context import _GuiCoreContext from taipy.config.common.scope import Scope from taipy.core import Job, Scenario, Task from taipy.core.data.pickle import PickleDataNode from taipy.gui import Gui a_scenario = Scenario(\"scenario_config_id\", [], {}, sequences={\"sequence\": {}}) a_task = Task(\"task_config_id\", {}, print) a_job = Job(\"JOB_job_id\", a_task, \"submit_id\", a_scenario.id) a_job.isfinished = lambda s: True a_datanode = PickleDataNode(\"data_node_config_id\", Scope.SCENARIO) def mock_core_get(entity_id): if entity_id == a_scenario.id: return a_scenario if entity_id == a_job.id: return a_job if entity_id == a_datanode.id: return a_datanode return a_task def mock_is_editable_false(entity_id): return False def mock_is_true(entity_id): return True class MockState: def __init__(self, **kwargs) -> None: self.assign = kwargs.get(\"assign\") class TestGuiCoreContext_is_editable: def test_crud_scenario(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.crud_scenario( MockState(assign=assign), \"\", { \"args\": [ True, False, {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_not_called() with patch(\"src.taipy.gui_core._context.is_editable\", side_effect=mock_is_editable_false): assign.reset_mock() gui_core_context.crud_scenario( MockState(assign=assign), \"\", { \"args\": [ True, False, {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sc_error\" assert str(assign.call_args.args[1]).endswith(\"is not editable.\") def test_edit_entity(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.edit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert assign.call_args.args[1] == \"\" with patch(\"src.taipy.gui_core._context.is_editable\", side_effect=mock_is_editable_false): assign.reset_mock() gui_core_context.edit_entity( MockState(assign=assign), \"\", { \"args\": [ {\"name\": \"name\", \"id\": a_scenario.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_sv_error\" assert str(assign.call_args.args[1]).endswith(\"is not editable.\") def test_act_on_jobs(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get), patch( \"src.taipy.gui_core._context.is_deletable\", side_effect=mock_is_true ): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.act_on_jobs( MockState(assign=assign), \"\", { \"args\": [ {\"id\": [a_job.id], \"action\": \"cancel\"}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_js_error\" assert str(assign.call_args.args[1]).find(\"is not editable.\") == -1 assign.reset_mock() with patch(\"src.taipy.gui_core._context.is_readable\", 
side_effect=mock_is_editable_false): gui_core_context.act_on_jobs( MockState(assign=assign), \"\", { \"args\": [ {\"id\": [a_job.id], \"action\": \"cancel\"}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_js_error\" assert str(assign.call_args.args[1]).endswith(\"is not readable.\") def test_edit_data_node(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): gui_core_context = _GuiCoreContext(Mock()) assign = Mock() gui_core_context.edit_data_node( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert assign.call_args.args[1] == \"\" with patch(\"src.taipy.gui_core._context.is_editable\", side_effect=mock_is_editable_false): assign.reset_mock() gui_core_context.edit_data_node( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert str(assign.call_args.args[1]).endswith(\"is not editable.\") def test_lock_datanode_for_edit(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): mockGui = Mock(Gui) mockGui._get_client_id = lambda: \"a_client_id\" gui_core_context = _GuiCoreContext(mockGui) assign = Mock() gui_core_context.lock_datanode_for_edit( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert assign.call_args.args[1] == \"\" with patch(\"src.taipy.gui_core._context.is_editable\", side_effect=mock_is_editable_false): assign.reset_mock() gui_core_context.lock_datanode_for_edit( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert str(assign.call_args.args[1]).endswith(\"is not editable.\") def test_update_data(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): mockGui = Mock(Gui) mockGui._get_client_id = lambda: \"a_client_id\" gui_core_context = _GuiCoreContext(mockGui) assign = Mock() gui_core_context.update_data( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called() assert assign.call_args_list[0].args[0] == \"gui_core_dv_error\" assert assign.call_args_list[0].args[1] == \"\" assign.reset_mock() with patch(\"src.taipy.gui_core._context.is_editable\", side_effect=mock_is_editable_false): gui_core_context.update_data( MockState(assign=assign), \"\", { \"args\": [ {\"id\": a_datanode.id}, ] }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert str(assign.call_args.args[1]).endswith(\"is not editable.\") def test_tabular_data_edit(self): with patch(\"src.taipy.gui_core._context.core_get\", side_effect=mock_core_get): mockGui = Mock(Gui) mockGui._get_client_id = lambda: \"a_client_id\" gui_core_context = _GuiCoreContext(mockGui) assign = Mock() gui_core_context.tabular_data_edit( MockState(assign=assign), \"\", { \"user_data\": {\"dn_id\": a_datanode.id}, }, ) assign.assert_called_once() assert assign.call_args_list[0].args[0] == \"gui_core_dv_error\" assert ( assign.call_args_list[0].args[1] == \"Error updating Datanode tabular value: type does not support at[] indexer.\" ) assign.reset_mock() with patch(\"src.taipy.gui_core._context.is_editable\", side_effect=mock_is_editable_false): 
gui_core_context.tabular_data_edit( MockState(assign=assign), \"\", { \"user_data\": {\"dn_id\": a_datanode.id}, }, ) assign.assert_called_once() assert assign.call_args.args[0] == \"gui_core_dv_error\" assert str(assign.call_args.args[1]).endswith(\"is not editable.\") "} {"text": "from unittest import mock import pytest from flask import url_for from src.taipy.rest.api.exceptions.exceptions import ScenarioIdMissingException, SequenceNameMissingException from taipy.core.exceptions.exceptions import NonExistingScenario from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory def test_get_sequence(client, default_sequence): # test 404 user_url = url_for(\"api.sequence_by_id\", sequence_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.sequence._sequence_manager._SequenceManager._get\") as manager_mock: manager_mock.return_value = default_sequence # test get_sequence rep = client.get(url_for(\"api.sequence_by_id\", sequence_id=\"foo\")) assert rep.status_code == 200 def test_delete_sequence(client): # test 404 user_url = url_for(\"api.sequence_by_id\", sequence_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.sequence._sequence_manager._SequenceManager._delete\"), mock.patch( \"taipy.core.sequence._sequence_manager._SequenceManager._get\" ): # test get_sequence rep = client.delete(url_for(\"api.sequence_by_id\", sequence_id=\"foo\")) assert rep.status_code == 200 def test_create_sequence(client, default_scenario): sequences_url = url_for(\"api.sequences\") rep = client.post(sequences_url, json={}) assert rep.status_code == 400 assert rep.json == {\"message\": \"Scenario id is missing.\"} sequences_url = url_for(\"api.sequences\") rep = client.post(sequences_url, json={\"scenario_id\": \"SCENARIO_scenario_id\"}) assert rep.status_code == 400 assert rep.json == {\"message\": \"Sequence name is missing.\"} sequences_url = url_for(\"api.sequences\") rep = client.post(sequences_url, json={\"scenario_id\": \"SCENARIO_scenario_id\", \"sequence_name\": \"sequence\"}) assert rep.status_code == 404 _ScenarioManagerFactory._build_manager()._set(default_scenario) with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._get\") as config_mock: config_mock.return_value = default_scenario sequences_url = url_for(\"api.sequences\") rep = client.post( sequences_url, json={\"scenario_id\": default_scenario.id, \"sequence_name\": \"sequence\", \"tasks\": []} ) assert rep.status_code == 201 def test_get_all_sequences(client, default_scenario_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config\") as config_mock: config_mock.return_value = default_scenario_config_list[ds] scenario_url = url_for(\"api.scenarios\", config_id=config_mock.name) client.post(scenario_url) sequences_url = url_for(\"api.sequences\") rep = client.get(sequences_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 @pytest.mark.xfail() def test_execute_sequence(client, default_sequence): # test 404 user_url = url_for(\"api.sequence_submit\", sequence_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.sequence._sequence_manager._SequenceManager._get\") as manager_mock: manager_mock.return_value = default_sequence # test get_sequence rep = client.post(url_for(\"api.sequence_submit\", sequence_id=\"foo\")) assert rep.status_code == 200 "} {"text": "from unittest 
import mock from flask import url_for def test_get_job(client, default_job): # test 404 user_url = url_for(\"api.job_by_id\", job_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.job._job_manager._JobManager._get\") as manager_mock: manager_mock.return_value = default_job # test get_job rep = client.get(url_for(\"api.job_by_id\", job_id=\"foo\")) assert rep.status_code == 200 def test_delete_job(client): # test 404 user_url = url_for(\"api.job_by_id\", job_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.job._job_manager._JobManager._delete\"), mock.patch( \"taipy.core.job._job_manager._JobManager._get\" ): # test get_job rep = client.delete(url_for(\"api.job_by_id\", job_id=\"foo\")) assert rep.status_code == 200 def test_create_job(client, default_task_config): # without config param jobs_url = url_for(\"api.jobs\") rep = client.post(jobs_url) assert rep.status_code == 400 with mock.patch(\"src.taipy.rest.api.resources.job.JobList.fetch_config\") as config_mock: config_mock.return_value = default_task_config jobs_url = url_for(\"api.jobs\", task_id=\"foo\") rep = client.post(jobs_url) assert rep.status_code == 201 def test_get_all_jobs(client, create_job_list): jobs_url = url_for(\"api.jobs\") rep = client.get(jobs_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 def test_cancel_job(client, default_job): # test 404 from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory _OrchestratorFactory._build_orchestrator() _OrchestratorFactory._build_dispatcher() user_url = url_for(\"api.job_cancel\", job_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.job._job_manager._JobManager._get\") as manager_mock: manager_mock.return_value = default_job # test get_job rep = client.post(url_for(\"api.job_cancel\", job_id=\"foo\")) assert rep.status_code == 200 "} {"text": "import os import shutil import uuid from datetime import datetime, timedelta import pandas as pd import pytest from dotenv import load_dotenv from src.taipy.rest.app import create_app from taipy.config import Config from taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.core import Cycle, DataNodeId, Job, JobId, Scenario, Sequence, Task from taipy.core.cycle._cycle_manager import _CycleManager from taipy.core.data.in_memory import InMemoryDataNode from taipy.core.job._job_manager import _JobManager from taipy.core.task._task_manager import _TaskManager from .setup.shared.algorithms import evaluate, forecast @pytest.fixture def setup_end_to_end(): model_cfg = Config.configure_data_node(\"model\", path=\"setup/my_model.p\", storage_type=\"pickle\") day_cfg = Config.configure_data_node(id=\"day\") forecasts_cfg = Config.configure_data_node(id=\"forecasts\") forecast_task_cfg = Config.configure_task( id=\"forecast_task\", input=[model_cfg, day_cfg], function=forecast, output=forecasts_cfg, ) historical_temperature_cfg = Config.configure_data_node( \"historical_temperature\", storage_type=\"csv\", path=\"setup/historical_temperature.csv\", has_header=True, ) evaluation_cfg = Config.configure_data_node(\"evaluation\") evaluate_task_cfg = Config.configure_task( \"evaluate_task\", input=[historical_temperature_cfg, forecasts_cfg, day_cfg], function=evaluate, output=evaluation_cfg, ) scenario_config = Config.configure_scenario( \"scenario\", [forecast_task_cfg, evaluate_task_cfg], 
frequency=Frequency.DAILY ) scenario_config.add_sequences({\"sequence\": [forecast_task_cfg, evaluate_task_cfg]}) @pytest.fixture() def app(): load_dotenv(\".testenv\") app = create_app(testing=True) app.config.update( { \"TESTING\": True, } ) with app.app_context(), app.test_request_context(): yield app @pytest.fixture() def client(app): return app.test_client() @pytest.fixture def datanode_data(): return { \"name\": \"foo\", \"storage_type\": \"in_memory\", \"scope\": \"scenario\", \"default_data\": [\"1991-01-01T00:00:00\"], } @pytest.fixture def task_data(): return { \"config_id\": \"foo\", \"input_ids\": [\"DATASOURCE_foo_3b888e17-1974-4a56-a42c-c7c96bc9cd54\"], \"function_name\": \"print\", \"function_module\": \"builtins\", \"output_ids\": [\"DATASOURCE_foo_4d9923b8-eb9f-4f3c-8055-3a1ce8bee309\"], } @pytest.fixture def sequence_data(): return { \"name\": \"foo\", \"task_ids\": [\"TASK_foo_3b888e17-1974-4a56-a42c-c7c96bc9cd54\"], } @pytest.fixture def scenario_data(): return { \"name\": \"foo\", \"sequence_ids\": [\"SEQUENCE_foo_3b888e17-1974-4a56-a42c-c7c96bc9cd54\"], \"properties\": {}, } @pytest.fixture def default_datanode(): return InMemoryDataNode( \"input_ds\", Scope.SCENARIO, DataNodeId(\"f\"), \"my name\", \"owner_id\", properties={\"default_data\": [1, 2, 3, 4, 5, 6]}, ) @pytest.fixture def default_df_datanode(): return InMemoryDataNode( \"input_ds\", Scope.SCENARIO, DataNodeId(\"id_uio2\"), \"my name\", \"owner_id\", properties={\"default_data\": pd.DataFrame([{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}, {\"a\": 5, \"b\": 6}])}, ) @pytest.fixture def default_datanode_config(): return Config.configure_data_node(f\"taipy_{uuid.uuid4().hex}\", \"in_memory\", Scope.SCENARIO) @pytest.fixture def default_datanode_config_list(): configs = [] for i in range(10): configs.append(Config.configure_data_node(id=f\"ds_{i}\", storage_type=\"in_memory\", scope=Scope.SCENARIO)) return configs def __default_task(): input_ds = InMemoryDataNode( \"input_ds\", Scope.SCENARIO, DataNodeId(\"id_uio\"), \"my name\", \"owner_id\", properties={\"default_data\": \"In memory Data Source\"}, ) output_ds = InMemoryDataNode( \"output_ds\", Scope.SCENARIO, DataNodeId(\"id_uio\"), \"my name\", \"owner_id\", properties={\"default_data\": \"In memory Data Source\"}, ) return Task( config_id=\"foo\", properties={}, function=print, input=[input_ds], output=[output_ds], id=None, ) @pytest.fixture def default_task(): return __default_task() @pytest.fixture def default_task_config(): return Config.configure_task(\"task1\", print, [], []) @pytest.fixture def default_task_config_list(): configs = [] for i in range(10): configs.append(Config.configure_task(f\"task_{i}\", print, [], [])) return configs def __default_sequence(): return Sequence(properties={\"name\": \"foo\"}, tasks=[__default_task()], sequence_id=\"SEQUENCE_foo_SCENARIO_acb\") def __task_config(): return Config.configure_task(\"task1\", print, [], []) @pytest.fixture def default_sequence(): return __default_sequence() @pytest.fixture def default_scenario_config(): task_config = __task_config() scenario_config = Config.configure_scenario( f\"taipy_{uuid.uuid4().hex}\", [task_config], ) scenario_config.add_sequences({\"sequence\": [task_config]}) return scenario_config @pytest.fixture def default_scenario_config_list(): configs = [] for _ in range(10): task_config = Config.configure_task(f\"taipy_{uuid.uuid4().hex}\", print) scenario_config = Config.configure_scenario( f\"taipy_{uuid.uuid4().hex}\", [task_config], ) 
scenario_config.add_sequences({\"sequence\": [task_config]}) configs.append(scenario_config) return configs @pytest.fixture def default_scenario(): return Scenario(config_id=\"foo\", properties={}, tasks=[__default_task()], scenario_id=\"SCENARIO_scenario_id\") def __create_cycle(name=\"foo\"): now = datetime.now() return Cycle( name=name, frequency=Frequency.DAILY, properties={}, creation_date=now, start_date=now, end_date=now + timedelta(days=5), ) @pytest.fixture def create_cycle_list(): cycles = [] manager = _CycleManager for i in range(10): c = __create_cycle(f\"cycle_{i}\") manager._set(c) cycles.append(c) return cycles @pytest.fixture def cycle_data(): return { \"name\": \"foo\", \"frequency\": \"daily\", \"properties\": {}, \"creation_date\": \"2022-02-03T22:17:27.317114\", \"start_date\": \"2022-02-03T22:17:27.317114\", \"end_date\": \"2022-02-08T22:17:27.317114\", } @pytest.fixture def default_cycle(): return __create_cycle() def __create_job(): task_manager = _TaskManager task = __default_task() task_manager._set(task) submit_id = f\"SUBMISSION_{str(uuid.uuid4())}\" return Job(id=JobId(f\"JOB_{uuid.uuid4()}\"), task=task, submit_id=submit_id, submit_entity_id=task.id) @pytest.fixture def default_job(): return __create_job() @pytest.fixture def create_job_list(): jobs = [] manager = _JobManager for i in range(10): c = __create_job() manager._set(c) jobs.append(c) return jobs @pytest.fixture(scope=\"function\", autouse=True) def cleanup_files(): Config.unblock_update() Config.configure_core(repository_type=\"filesystem\") if os.path.exists(\".data\"): shutil.rmtree(\".data\", ignore_errors=True) if os.path.exists(\".my_data\"): shutil.rmtree(\".my_data\", ignore_errors=True) "} {"text": "from unittest import mock from flask import url_for def test_get_task(client, default_task): # test 404 user_url = url_for(\"api.task_by_id\", task_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.task._task_manager._TaskManager._get\") as manager_mock: manager_mock.return_value = default_task # test get_task rep = client.get(url_for(\"api.task_by_id\", task_id=\"foo\")) assert rep.status_code == 200 def test_delete_task(client): # test 404 user_url = url_for(\"api.task_by_id\", task_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.task._task_manager._TaskManager._delete\"), mock.patch( \"taipy.core.task._task_manager._TaskManager._get\" ): # test get_task rep = client.delete(url_for(\"api.task_by_id\", task_id=\"foo\")) assert rep.status_code == 200 def test_create_task(client, default_task_config): # without config param tasks_url = url_for(\"api.tasks\") rep = client.post(tasks_url) assert rep.status_code == 400 # config does not exist tasks_url = url_for(\"api.tasks\", config_id=\"foo\") rep = client.post(tasks_url) assert rep.status_code == 404 with mock.patch(\"src.taipy.rest.api.resources.task.TaskList.fetch_config\") as config_mock: config_mock.return_value = default_task_config tasks_url = url_for(\"api.tasks\", config_id=\"bar\") rep = client.post(tasks_url) assert rep.status_code == 201 def test_get_all_tasks(client, task_data, default_task_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.task.TaskList.fetch_config\") as config_mock: config_mock.return_value = default_task_config_list[ds] tasks_url = url_for(\"api.tasks\", config_id=config_mock.name) client.post(tasks_url) rep = client.get(tasks_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 def test_execute_task(client, 
default_task): # test 404 user_url = url_for(\"api.task_submit\", task_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.task._task_manager._TaskManager._get\") as manager_mock: manager_mock.return_value = default_task # test get_task rep = client.post(url_for(\"api.task_submit\", task_id=\"foo\")) assert rep.status_code == 200 "} {"text": "from functools import wraps from unittest.mock import MagicMock, patch from src.taipy.rest.api.middlewares._middleware import _middleware def mock_enterprise_middleware(f): @wraps(f) def wrapper(*args, **kwargs): return f(*args, **kwargs) return wrapper @patch(\"src.taipy.rest.api.middlewares._middleware._using_enterprise\") @patch(\"src.taipy.rest.api.middlewares._middleware._enterprise_middleware\") def test_enterprise_middleware_applied_when_enterprise_is_installed( enterprise_middleware: MagicMock, using_enterprise: MagicMock ): enterprise_middleware.return_value = mock_enterprise_middleware using_enterprise.return_value = True @_middleware def f(): return \"f\" rv = f() assert rv == \"f\" using_enterprise.assert_called_once() enterprise_middleware.assert_called_once() @patch(\"src.taipy.rest.api.middlewares._middleware._using_enterprise\") @patch(\"src.taipy.rest.api.middlewares._middleware._enterprise_middleware\") def test_enterprise_middleware_not_applied_when_enterprise_is_not_installed( enterprise_middleware: MagicMock, using_enterprise: MagicMock ): enterprise_middleware.return_value = mock_enterprise_middleware using_enterprise.return_value = False @_middleware def f(): return \"f\" rv = f() assert rv == \"f\" using_enterprise.assert_called_once() enterprise_middleware.assert_not_called() "} {"text": "from unittest import mock import pytest from flask import url_for def test_get_datanode(client, default_datanode): # test 404 user_url = url_for(\"api.datanode_by_id\", datanode_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.data._data_manager._DataManager._get\") as manager_mock: manager_mock.return_value = default_datanode # test get_datanode rep = client.get(url_for(\"api.datanode_by_id\", datanode_id=\"foo\")) assert rep.status_code == 200 def test_delete_datanode(client): # test 404 user_url = url_for(\"api.datanode_by_id\", datanode_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.data._data_manager._DataManager._delete\"), mock.patch( \"taipy.core.data._data_manager._DataManager._get\" ): # test get_datanode rep = client.delete(url_for(\"api.datanode_by_id\", datanode_id=\"foo\")) assert rep.status_code == 200 def test_create_datanode(client, default_datanode_config): # without config param datanodes_url = url_for(\"api.datanodes\") rep = client.post(datanodes_url) assert rep.status_code == 400 # config does not exist datanodes_url = url_for(\"api.datanodes\", config_id=\"foo\") rep = client.post(datanodes_url) assert rep.status_code == 404 with mock.patch(\"src.taipy.rest.api.resources.datanode.DataNodeList.fetch_config\") as config_mock: config_mock.return_value = default_datanode_config datanodes_url = url_for(\"api.datanodes\", config_id=\"bar\") rep = client.post(datanodes_url) assert rep.status_code == 201 def test_get_all_datanodes(client, default_datanode_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.datanode.DataNodeList.fetch_config\") as config_mock: config_mock.return_value = default_datanode_config_list[ds] datanodes_url = 
url_for(\"api.datanodes\", config_id=config_mock.name) client.post(datanodes_url) rep = client.get(datanodes_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 def test_read_datanode(client, default_df_datanode): with mock.patch(\"taipy.core.data._data_manager._DataManager._get\") as config_mock: config_mock.return_value = default_df_datanode # without operators datanodes_url = url_for(\"api.datanode_reader\", datanode_id=\"foo\") rep = client.get(datanodes_url, json={}) assert rep.status_code == 200 # Without operators and body rep = client.get(datanodes_url) assert rep.status_code == 200 # TODO: Revisit filter test # operators = {\"operators\": [{\"key\": \"a\", \"value\": 5, \"operator\": \"LESS_THAN\"}]} # rep = client.get(datanodes_url, json=operators) # assert rep.status_code == 200 def test_write_datanode(client, default_datanode): with mock.patch(\"taipy.core.data._data_manager._DataManager._get\") as config_mock: config_mock.return_value = default_datanode # Get DataNode datanodes_read_url = url_for(\"api.datanode_reader\", datanode_id=default_datanode.id) rep = client.get(datanodes_read_url, json={}) assert rep.status_code == 200 assert rep.json == {\"data\": [1, 2, 3, 4, 5, 6]} datanodes_write_url = url_for(\"api.datanode_writer\", datanode_id=default_datanode.id) rep = client.put(datanodes_write_url, json=[1, 2, 3]) assert rep.status_code == 200 rep = client.get(datanodes_read_url, json={}) assert rep.status_code == 200 assert rep.json == {\"data\": [1, 2, 3]} "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import json from typing import Dict from flask import url_for def create_and_submit_scenario(config_id: str, client) -> Dict: response = client.post(url_for(\"api.scenarios\", config_id=config_id)) assert response.status_code == 201 scenario = response.json.get(\"scenario\") assert (set(scenario) - set(json.load(open(\"tests/rest/json/expected/scenario.json\")))) == set() response = client.post(url_for(\"api.scenario_submit\", scenario_id=scenario.get(\"id\"))) assert response.status_code == 200 return scenario def get(url, name, client) -> Dict: response = client.get(url) returned_data = response.json.get(name) assert (set(returned_data) - set(json.load(open(f\"tests/rest/json/expected/{name}.json\")))) == set() return returned_data def get_assert_status(url, client, status_code) -> None: response = client.get(url) assert response.status_code == status_code def get_all(url, expected_quantity, client): response = client.get(url) assert len(response.json) == expected_quantity def delete(url, client): response = client.delete(url) assert response.status_code == 200 def test_end_to_end(client, setup_end_to_end): # Create Scenario: Should also create all of its dependencies(sequences, tasks, datanodes, etc) scenario = create_and_submit_scenario(\"scenario\", client) # Get other models and verify if they return the necessary fields cycle = get(url_for(\"api.cycle_by_id\", cycle_id=scenario.get(\"cycle\")), \"cycle\", client) sequence = get( url_for(\"api.sequence_by_id\", sequence_id=f\"SEQUENCE_sequence_{scenario['id']}\"), \"sequence\", client, ) task = get(url_for(\"api.task_by_id\", task_id=sequence.get(\"tasks\")[0]), \"task\", client) datanode = get( url_for(\"api.datanode_by_id\", datanode_id=task.get(\"input_ids\")[0]), \"datanode\", client, ) # Get All get_all(url_for(\"api.scenarios\"), 1, client) get_all(url_for(\"api.cycles\"), 1, client) get_all(url_for(\"api.sequences\"), 1, client) get_all(url_for(\"api.tasks\"), 2, client) get_all(url_for(\"api.datanodes\"), 5, client) get_all(url_for(\"api.jobs\"), 2, client) # Delete entities delete(url_for(\"api.cycle_by_id\", cycle_id=cycle.get(\"id\")), client) delete(url_for(\"api.sequence_by_id\", sequence_id=sequence.get(\"id\")), client) delete(url_for(\"api.task_by_id\", task_id=task.get(\"id\")), client) delete(url_for(\"api.datanode_by_id\", datanode_id=datanode.get(\"id\")), client) # Check status code # Non-existing entities should return 404 get_assert_status(url_for(\"api.cycle_by_id\", cycle_id=9999999), client, 404) get_assert_status(url_for(\"api.scenario_by_id\", scenario_id=9999999), client, 404) get_assert_status(url_for(\"api.sequence_by_id\", sequence_id=9999999), client, 404) get_assert_status(url_for(\"api.task_by_id\", task_id=9999999), client, 404) get_assert_status(url_for(\"api.datanode_by_id\", datanode_id=9999999), client, 404) # Check URL with and without trailing slashes url_with_slash = url_for(\"api.scenarios\") url_without_slash = url_for(\"api.scenarios\")[:-1] get_all(url_with_slash, 1, client) get_all(url_without_slash, 1, client) "} {"text": "from unittest import mock from flask import url_for def test_get_cycle(client, default_cycle): # test 404 cycle_url = url_for(\"api.cycle_by_id\", cycle_id=\"foo\") rep = client.get(cycle_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.cycle._cycle_manager._CycleManager._get\") as manager_mock: manager_mock.return_value = default_cycle # test get_cycle rep = client.get(url_for(\"api.cycle_by_id\", cycle_id=\"foo\")) assert rep.status_code 
== 200 def test_delete_cycle(client): # test 404 cycle_url = url_for(\"api.cycle_by_id\", cycle_id=\"foo\") rep = client.get(cycle_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.cycle._cycle_manager._CycleManager._delete\"), mock.patch( \"taipy.core.cycle._cycle_manager._CycleManager._get\" ): # test get_cycle rep = client.delete(url_for(\"api.cycle_by_id\", cycle_id=\"foo\")) assert rep.status_code == 200 def test_create_cycle(client, cycle_data): # without config param cycles_url = url_for(\"api.cycles\") data = {\"bad\": \"data\"} rep = client.post(cycles_url, json=data) assert rep.status_code == 400 rep = client.post(cycles_url, json=cycle_data) assert rep.status_code == 201 def test_get_all_cycles(client, create_cycle_list): cycles_url = url_for(\"api.cycles\") rep = client.get(cycles_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 "} {"text": "from unittest import mock import pytest from flask import url_for def test_get_scenario(client, default_scenario): # test 404 user_url = url_for(\"api.scenario_by_id\", scenario_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._get\") as manager_mock: manager_mock.return_value = default_scenario # test get_scenario rep = client.get(url_for(\"api.scenario_by_id\", scenario_id=\"foo\")) assert rep.status_code == 200 def test_delete_scenario(client): # test 404 user_url = url_for(\"api.scenario_by_id\", scenario_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._delete\"), mock.patch( \"taipy.core.scenario._scenario_manager._ScenarioManager._get\" ): # test get_scenario rep = client.delete(url_for(\"api.scenario_by_id\", scenario_id=\"foo\")) assert rep.status_code == 200 def test_create_scenario(client, default_scenario_config): # without config param scenarios_url = url_for(\"api.scenarios\") rep = client.post(scenarios_url) assert rep.status_code == 400 # config does not exist scenarios_url = url_for(\"api.scenarios\", config_id=\"foo\") rep = client.post(scenarios_url) assert rep.status_code == 404 with mock.patch(\"src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config\") as config_mock: config_mock.return_value = default_scenario_config scenarios_url = url_for(\"api.scenarios\", config_id=\"bar\") rep = client.post(scenarios_url) assert rep.status_code == 201 def test_get_all_scenarios(client, default_sequence, default_scenario_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config\") as config_mock: config_mock.return_value = default_scenario_config_list[ds] scenarios_url = url_for(\"api.scenarios\", config_id=config_mock.name) client.post(scenarios_url) rep = client.get(scenarios_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 @pytest.mark.xfail() def test_execute_scenario(client, default_scenario): # test 404 user_url = url_for(\"api.scenario_submit\", scenario_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._get\") as manager_mock: manager_mock.return_value = default_scenario # test get_scenario rep = client.post(url_for(\"api.scenario_submit\", scenario_id=\"foo\")) assert rep.status_code == 200 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this 
file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pickle import random from datetime import datetime, timedelta from typing import Any, Dict import pandas as pd n_predictions = 14 def forecast(model, date: datetime): dates = [date + timedelta(days=i) for i in range(n_predictions)] forecasts = [f + random.uniform(0, 2) for f in model.forecast(len(dates))] days = [str(dt.date()) for dt in dates] res = {\"Date\": days, \"Forecast\": forecasts} return pd.DataFrame.from_dict(res) def evaluate(cleaned: pd.DataFrame, forecasts: pd.DataFrame, date: datetime) -> Dict[str, Any]: cleaned = cleaned[cleaned[\"Date\"].isin(forecasts[\"Date\"].tolist())] forecasts_as_series = pd.Series(forecasts[\"Forecast\"].tolist(), name=\"Forecast\") res = pd.concat([cleaned.reset_index(), forecasts_as_series], axis=1) res[\"Delta\"] = abs(res[\"Forecast\"] - res[\"Value\"]) return { \"Date\": date, \"Dataframe\": res, \"Mean_absolute_error\": res[\"Delta\"].mean(), \"Relative_error\": (res[\"Delta\"].mean() * 100) / res[\"Value\"].mean(), } if __name__ == \"__main__\": model = pickle.load(open(\"../my_model.p\", \"rb\")) day = datetime(2020, 1, 25) forecasts = forecast(model, day) historical_temperature = pd.read_csv(\"../historical_temperature.csv\") evaluation = evaluate(historical_temperature, forecasts, day) print(evaluation[\"Dataframe\"]) print() print(f'Mean absolute error : {evaluation[\"Mean_absolute_error\"]}') print(f'Relative error in %: {evaluation[\"Relative_error\"]}') "} {"text": "from taipy.core import Config, Frequency from .algorithms import evaluate, forecast model_cfg = Config.configure_data_node(\"model\", path=\"my_model.p\", storage_type=\"pickle\") day_cfg = Config.configure_data_node(id=\"day\") forecasts_cfg = Config.configure_data_node(id=\"forecasts\") forecast_task_cfg = Config.configure_task( id=\"forecast_task\", input=[model_cfg, day_cfg], function=forecast, output=forecasts_cfg, ) historical_temperature_cfg = Config.configure_data_node( \"historical_temperature\", storage_type=\"csv\", path=\"historical_temperature.csv\", has_header=True, ) evaluation_cfg = Config.configure_data_node(\"evaluation\") evaluate_task_cfg = Config.configure_task( \"evaluate_task\", input=[historical_temperature_cfg, forecasts_cfg, day_cfg], function=evaluate, output=evaluation_cfg, ) scenario_cfg = Config.configure_scenario(\"scenario\", [forecast_task_cfg, evaluate_task_cfg], frequency=Frequency.DAILY) scenario_cfg.add_sequences({\"sequence\": [forecast_task_cfg, evaluate_task_cfg]}) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import random import string from example_library import ExampleLibrary from taipy.gui import Gui # Initial value label = \"Here is some text\" page = \"\"\" # Custom elements example ## Fraction: No denominator: <|123|example.fraction|> Denominator is 0: <|321|example.fraction|denominator=0|> Regular: <|355|example.fraction|denominator=113|> ## Custom label: Colored text: <|{label}|example.label|> <|Add a character|button|id=addChar|> <|Remove a character|button|id=removeChar|> \"\"\" def on_action(state, id): if id == \"addChar\": # Add a random character to the end of 'label' state.label += random.choice(string.ascii_letters) elif id == \"removeChar\": # Remove the first character of 'label' if len(state.label) > 0: state.label = state.label[1:] Gui(page, libraries=[ExampleLibrary()]).run(debug=True) "} {"text": "from .example_library import ExampleLibrary "} {"text": "from taipy.gui.extension import Element, ElementLibrary, ElementProperty, PropertyType class ExampleLibrary(ElementLibrary): def __init__(self) -> None: # Initialize the set of visual elements for this extension library self.elements = { # A static element that displays its properties in a fraction \"fraction\": Element( \"numerator\", { \"numerator\": ElementProperty(PropertyType.number), \"denominator\": ElementProperty(PropertyType.number), }, render_xhtml=ExampleLibrary._fraction_render, ), # A dynamic element that decorates its value \"label\": Element( \"value\", {\"value\": ElementProperty(PropertyType.dynamic_string)}, # The name of the React component (ColoredLabel) that implements this custom # element, exported as ExampleLabel in front-end/src/index.ts react_component=\"ExampleLabel\", ), } # The implementation of the rendering for the \"fraction\" static element @staticmethod def _fraction_render(props: dict) -> str: # Get the property values numerator = props.get(\"numerator\") denominator = props.get(\"denominator\") # No denominator or numerator is 0: display the numerator if denominator is None or int(numerator) == 0: return f\"{numerator}\" # Denominator is zero: display infinity if int(denominator) == 0: return '' # 'Normal' case return f\"{numerator}/{denominator}\" def get_name(self) -> str: return \"example\" def get_elements(self) -> dict: return self.elements def get_scripts(self) -> list[str]: # Only one JavaScript bundle for this library. return [\"front-end/dist/exampleLibrary.js\"] "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # ----------------------------------------------------------------------------------------- # To execute this script, make sure that the taipy-gui package is installed in your # Python environment and run: # python
\"\"\")"} {"text": "from geopy.geocoders import Nominatim import folium user_agent = \"geoapiExercises/1.0 AIzaSyBIeklfsRu1yz97lY2gJzWHJcmrd7lx2zU\" # Initialize the geocoder with the user agent geolocator = Nominatim(user_agent=user_agent, timeout=10) # List of locations to geocode locations = [\"Denver, CO, United States\", \"New York, NY, United States\", \"Los Angeles, CA, United States\"] # Create an empty map map_location = folium.Map(location=[0, 0], zoom_start=5) # Iterate through the list of locations for location in locations: # Perform geocoding location_info = geolocator.geocode(location) if location_info: # Extract latitude and longitude latitude = location_info.latitude longitude = location_info.longitude # Add a marker for the geocoded location folium.Marker([latitude, longitude], popup=location).add_to(map_location) else: print(f\"Geocoding was not successful for the location: {location}\") # Save or display the map (as an HTML file) map_location.save(\"geocoded_locations_map.html\") print(\"Map created and saved as 'geocoded_locations_map.html'\") "} {"text": "from taipy.gui import Gui, notify import pandas as pd import yfinance as yf from taipy.config import Config import taipy as tp import datetime as dt from taipy import Core from show_hospitals_map import html_page from flask import Flask, request, session, jsonify, redirect, render_template from flask_restful import Api, Resource import requests Config.load(\"config_model_train.toml\") scenario_cfg = Config.scenarios['stock'] tickers = yf.Tickers(\"msft aapl goog\") root_md = \"<|navbar|>\" property_chart = { \"type\": \"lines\", \"x\": \"Date\", \"y[1]\": \"Open\", \"y[2]\": \"Close\", \"y[3]\": \"High\", \"y[4]\": \"Low\", \"color[1]\": \"green\", \"color[2]\": \"grey\", \"color[3]\": \"red\", \"color[4]\": \"yellow\", } df = pd.DataFrame([], columns=[\"Date\", \"High\", \"Low\", \"Open\", \"Close\"]) df_pred = pd.DataFrame([], columns = ['Date','Close_Prediction']) stock = \"\" stock_text = \"No Stock to Show\" chart_text = \"No Chart to Show\" stocks = [] page = \"\"\" # Stock Portfolio ### Choose the stock to show <|toggle|theme|> <|layout|columns=1 1| <| <|{stock_text}|> <|{stock}|selector|lov=MSFT;AAPL;GOOG;Reset|dropdown|> <|Press for Stock|button|on_action=on_button_action|> <|Get the future predictions|button|on_action=get_predictions|> |> <|{stock} <|{chart_text}|> <|{df}|chart|properties={property_chart}|> |> |> \"\"\" pages = { \"/\" : root_md, \"home\" : page, \"claim\": \"empty page\" } def on_button_action(state): if state.stock == \"Reset\": state.stock_text = \"No Stock to Show\" state.chart_text = \"No Chart to Show\" state.df = pd.DataFrame([], columns=[\"Date\", \"High\", \"Low\", \"Open\", \"Close\"]) state.df_pred = pd.DataFrame([], columns = ['Date','Close_Prediction']) state.pred_text = \"No Prediction to Show\" else: state.stock_text = f\"The stock is {state.stock}\" state.chart_text = f\"Monthly history of stock {state.stock}\" state.df = tickers.tickers[state.stock].history().reset_index() state.df.to_csv(f\"{stock}.csv\", index=False) def get_predictions(state): scenario_stock = tp.create_scenario(scenario_cfg) scenario_stock.initial_dataset.path = f\"{stock}\".csv notify(state, 'success', 'camehere') scenario_stock.write(state.df) tp.submit(scenario_stock) state.df_pred = scenario_stock.predictions.read() state.df_pred.to_csv(\"pred.csv\", index=False) tp.Core().run() # Gui(pages=pages).run(use_reloader=True) app = Flask(__name__) # app = Flask(__name__) app.secret_key = 
\"your_secret_key\" # Set a secret key for session management api = Api(app) class SignupResource(Resource): def get(self): return redirect(\"/signup.html\") def post(self): SIGNUP_API_URL = \"https://health-insurance-rest-apis.onrender.com/api/signup\" signup_data = { 'username': request.form['username'], 'password': request.form['password'], 'email': request.form['email'] } headers = { 'Content-Type': 'application/json' } print(signup_data) response = requests.post(SIGNUP_API_URL, headers=headers, json=signup_data) print(\"response\", response) if response.status_code == 200: return redirect(\"/login.html\") else: return 'Signup Failed' # Login Resource class LoginResource(Resource): def get(self): \"\"\" Return a simple login page HTML \"\"\" return redirect(\"/login.html\") def post(self): email = request.form['email'] password = request.form['password'] auth_data = { 'username': email, 'password': password } AUTH_API_URL = \"https://health-insurance-rest-apis.onrender.com/api/login\" response = requests.post(AUTH_API_URL, json=auth_data) if response.status_code == 200: auth_data = response.json() access_token = auth_data.get('access_token') refresh_token = auth_data.get('refresh_token') # Store tokens in the session session['access_token'] = access_token session['refresh_token'] = refresh_token return redirect(\"/home\") else: return 'Login failed', 401 # Protected Resource class ProtectedResource(Resource): def get(self): # Check if the JWT token is present in the session if 'jwt_token' in session: jwt_token = session['jwt_token'] # You can add logic here to verify the JWT token if needed # For simplicity, we assume the token is valid return {'message': 'Access granted for protected route', 'jwt_token': jwt_token}, 200 else: return {'message': 'Access denied'}, 401 print(\"registered the apis\") # Add resources to the API api.add_resource(LoginResource, '/login') api.add_resource(ProtectedResource, '/protected') api.add_resource(SignupResource, '/signup') @app.before_request def check_access_token(): # print ('access_token' in session, \"checkIt\") if request.endpoint != 'login' and 'access_token' not in session: # # Redirect to the login page if not on the login route and no access_token is in the session # print(request.endpoint, \"endpoint\") return redirect(\"/login\") gui = Gui(pages=pages, flask=app).run(debug=False) "} {"text": "from taipy import Config, Scope import pandas as pd from prophet import Prophet from functions import * # Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id=\"initial_dataset\", storage_type=\"csv\", default_path='df.csv') cleaned_dataset_cfg = Config.configure_data_node(id=\"cleaned_dataset\") clean_data_task_cfg = Config.configure_task(id=\"clean_data_task\", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg, skippable=True) model_training_cfg = Config.configure_data_node(id=\"model_output\") predictions_cfg = Config.configure_data_node(id=\"predictions\") model_training_task_cfg = Config.configure_task(id=\"model_retraining_task\", function=retrained_model, input=cleaned_dataset_cfg, output=model_training_cfg, skippable=True) predict_task_cfg = Config.configure_task(id=\"predict_task\", function=predict, input=model_training_cfg, output=predictions_cfg, skippable=True) # Create the first pipeline configuration # retraining_model_pipeline_cfg = Config.configure_pipeline( # id=\"model_retraining_pipeline\", # task_configs=[clean_data_task_cfg, model_training_task_cfg], # ) # Run the Taipy Core service # 
import taipy as tp # # Run of the Taipy Core service # tp.Core().run() # # Create the pipeline # retrain_pipeline = tp.create_pipeline(retraining_model_pipeline_cfg) # # Submit the pipeline # tp.submit(retrain_pipeline) # tp.Core().stop() scenario_cfg = Config.configure_scenario_from_tasks(id=\"stock\", task_configs=[clean_data_task_cfg, model_training_task_cfg, predict_task_cfg]) # tp.Core().run() # tp.submit(scenario_cfg) Config.export(\"config_model_train.toml\")"} {"text": "from taipy import Config from functions import build_message name_data_node_cfg = Config.configure_data_node(id=\"name\") message_data_node_cfg = Config.configure_data_node(id=\"message\") build_msg_task_cfg = Config.configure_task(\"build_msg\", build_message, name_data_node_cfg, message_data_node_cfg) scenario_cfg = Config.configure_scenario_from_tasks(\"scenario\", task_configs=[build_msg_task_cfg]) Config.export('my_config.toml')"} {"text": "from functools import wraps import jwt from flask import request, abort from flask import current_app def token_required(f): @wraps(f) def decorated(*args, **kwargs): token = None if \"Authorization\" in request.headers: token = request.headers[\"Authorization\"].split(\" \")[1] if not token: return { \"message\": \"Authentication Token is missing!\", \"data\": None, \"error\": \"Unauthorized\" }, 401 try: # data=jwt.decode(token, current_app.config[\"SECRET_KEY\"], algorithms=[\"RS256\"]) print(\"got the token\") # current_user=models.User().get_by_id(data[\"user_id\"]) current_user = 12 if current_user is None: return { \"message\": \"Invalid Authentication token!\", \"data\": None, \"error\": \"Unauthorized\" }, 401 if not current_user[\"active\"]: abort(403) except Exception as e: return { \"message\": \"Something went wrong\", \"data\": None, \"error\": str(e) }, 500 return f(current_user, *args, **kwargs) return decorated"} {"text": "from flask import Flask, request, session, jsonify from flask_restful import Api, Resource app = Flask(__name__) app.secret_key = \"your_secret_key\" # Set a secret key for session management api = Api(app) # Dummy user data for demonstration users = { 'maneesh': {'password': 'securepassword'} } # Login Resource class LoginResource(Resource): def post(self): data = request.get_json() username = data.get('username') password = data.get('password') print(\"hello\") # Check if user exists and password is correct if username in users and users[username]['password'] == password: # Simulate receiving a JWT token from a third-party API jwt_token = \"your_received_jwt_token\" # Store the JWT token in the session session['jwt_token'] = jwt_token return {'message': 'Login successful'}, 200 else: return {'message': 'Invalid credentials'}, 401 # Protected Resource class ProtectedResource(Resource): def get(self): # Check if the JWT token is present in the session if 'jwt_token' in session: jwt_token = session['jwt_token'] # You can add logic here to verify the JWT token if needed # For simplicity, we assume the token is valid return {'message': 'Access granted for protected route', 'jwt_token': jwt_token}, 200 else: return {'message': 'Access denied'}, 401 # Add resources to the API api.add_resource(LoginResource, '/login') api.add_resource(ProtectedResource, '/protected') if __name__ == '__main__': app.run(debug=True) "}
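{"text": "# Hypothetical requests-based client for the session login demo above (illustrative sketch, not part of the original app; assumes the Flask app is running locally on its default port 5000)\nimport requests\n\nwith requests.Session() as s:\n    # Log in with the demo credentials from the users dict; the signed session cookie then carries the stored JWT\n    rep = s.post(\"http://localhost:5000/login\", json={\"username\": \"maneesh\", \"password\": \"securepassword\"})\n    print(rep.status_code, rep.json())\n    # The same session can now reach the protected route\n    rep = s.get(\"http://localhost:5000/protected\")\n    print(rep.status_code, rep.json())"}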