{"text": "from taipy.core.config import Config, Scope, Frequency import taipy as tp import datetime as dt import pandas as pd import time Config.load('config_09.toml') Config.configure_job_executions(mode=\"standalone\", max_nb_of_workers=2) def filter_by_month(df, month): df['Date'] = pd.to_datetime(df['Date']) df = df[df['Date'].dt.month == month] return df def count_values(df): print(\"Wait 10 seconds\") time.sleep(10) return len(df) def callback_scenario_state(scenario, job): \"\"\"All the scenarios are subscribed to the callback_scenario_state function. It means whenever a job is done, it is called. Depending on the job and the status, it will update the message stored in a json that is then displayed on the GUI. Args: scenario (Scenario): the scenario of the job changed job (_type_): the job that has its status changed \"\"\" print(scenario.name) if job.status.value == 7: for data_node in job.task.output.values(): print(data_node.read()) if __name__==\"__main__\": # my_scenario is the id of the scenario configured scenario_cfg = Config.scenarios['my_scenario'] tp.Core().run() scenario_1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022,10,7), name=\"Scenario 2022/10/7\") scenario_1.subscribe(callback_scenario_state) scenario_1.submit(wait=True) scenario_1.submit(wait=True, timeout=5)"} {"text": "from taipy import Config import taipy as tp def double(nb): return nb * 2 Config.load('config_02.toml') if __name__ == '__main__': # my_scenario is the id of the scenario configured scenario_cfg = Config.scenarios['my_scenario'] scenario = tp.create_scenario(scenario_cfg, name=\"Scenario\") tp.submit(scenario) print(\"Output of First submit:\", scenario.output.read()) print(\"Before write\", scenario.input.read()) scenario.input.write(54) print(\"After write\",scenario.input.read()) tp.submit(scenario) print(\"Second submit\",scenario.output.read()) # Basic functions of Taipy Core print([s.name for s in tp.get_scenarios()]) scenario = tp.get(scenario.id) tp.delete(scenario.id) scenario = None data_node = None tp.Gui(\"\"\"<|{scenario}|scenario_selector|> <|{scenario}|scenario|> <|{scenario}|scenario_dag|> <|{data_node}|data_node_selector|>\"\"\").run() "} {"text": "from taipy.core.config import Config import taipy as tp import datetime as dt import pandas as pd def filter_current(df): current_month = dt.datetime.now().month df['Date'] = pd.to_datetime(df['Date']) df = df[df['Date'].dt.month == current_month] return df def count_values(df): return len(df) Config.load('config_03.toml') if __name__ == '__main__': # my_scenario is the id of the scenario configured scenario_cfg = Config.scenarios['my_scenario'] tp.Core().run() scenario = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022,10,7), name=\"Scenario 2022/10/7\") scenario.submit() print(\"Nb of values of scenario:\", scenario.nb_of_values.read()) data_node = None tp.Gui(\"\"\"<|{scenario}|scenario_selector|> <|{scenario}|scenario|> <|{scenario}|scenario_dag|> <|{data_node}|data_node_selector|>\"\"\").run() "} {"text": "from importlib import util, import_module from pathlib import Path import sys import inspect import os import json if len(sys.argv) < 3: print(\"Packages should be passed as arguments after the name of the searched file.\", file=sys.stderr) exit(1) else: errors = 0 file_name = sys.argv[1] result = dict() exit_code = 1 for package in sys.argv[2:]: parts = package.split(\".\") package_found = True for idx in range(len(parts)): if not util.find_spec(\".\".join(parts[0: idx+1])): package_found = 
False break if not package_found: print(f\"Package {package} not found.\", file=sys.stderr) errors += 1 else: module = import_module(package) found = False try: module_file = inspect.getfile(module) for root, dirs, files in os.walk(Path(module_file).parent.resolve()): root_path = Path(root) if file_name in files: result[package] = str((root_path / file_name).resolve()) found = True except Exception as e: print(f\"Error accessing {package}: {e}.\", file=sys.stderr) exit_code += 1 if not found: print(f\"File {file_name} not found in Package {package}.\", file=sys.stderr) errors += 1 if len(result): json.dump(result, sys.stdout) elif errors: exit(exit_code) "} {"text": "from taipy import Gui import cv2 face_cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\") number_of_faces_detected = 0 selected_file = None image = None def process_image(state): img = cv2.imread(state.selected_file, cv2.IMREAD_UNCHANGED) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) state.number_of_faces_detected = len(faces) # Draw a rectangle around faces for (x, y, w, h) in faces: cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2) state.image = cv2.imencode(\".jpg\", img)[1].tobytes() content = \"\"\" <|{selected_file}|file_selector|label=Upload File|on_action=process_image|extensions=.jpg,.gif,.png|drop_message=Drop Message|> <|{image}|image|width=300px|height=300px|> <|{number_of_faces_detected} face(s) detected|> \"\"\" if __name__ == \"__main__\": Gui(page=content).run(dark_mode=False, port=8080) "} {"text": "from setuptools import find_packages, setup setup( author=\"You Name\", author_email=\"your@email.domain\", python_requires=\">=3.8\", classifiers=[ \"Intended Audience :: Developers\", # \"License :: OSI Approved :: Apache Software License\", \"Natural Language :: English\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Programming Language :: Python :: 3.10\", ], # license=\"Apache License 2.0\", install_requires=[\"taipy-gui>=2.0\"], include_package_data=True, name=\"guiext-library\", description=\"My taipy-gui extension demo\", long_description=\"This package contains a demonstration of using the Taipy GUI Extension API.\", keywords=\"taipy\", packages=find_packages(include=[\"demo_lib\", \"demo_lib.*\"]), version=\"1.0.0\", zip_safe=False ) "} {"text": "from taipy.gui import Gui from library import Library page = \"\"\" # Extension library <|library.element|> \"\"\" gui = Gui(page=page) gui.add_library(Library()) if __name__ == \"__main__\": # Run main app gui.run() "} {"text": "from pathlib import Path from taipy.gui.extension import ElementLibrary, Element, ElementProperty, PropertyType class Library(ElementLibrary): elts = { # Declare the elements of the library here, as key/value pairs of # a dictionary. # - The key is used as the element name. # - The value must be an instance of taipy.gui.extension.Element # # Ex: # \"element_name\": Element( # \"default_property_name\" # { # \"property_name\": ElementProperty(...) # }, # react_component=\"ElementComponent\" # ), } def get_name(self) -> str: return \"library\" def get_elements(self) -> dict: return Library.elts def get_scripts(self) -> list[str]: # Only one JavaScript bundle for this library. 
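# (Taipy serves every file listed here to the client when the library is
# loaded; the single path below points at the bundle the front-end build emits.)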
return [\"library/frontend/dist/library.js\"] def get_resource(self, name: str) -> Path: return super().get_resource(name) "} {"text": "# Export the library class for easier access by developers using it from .library import Library "} {"text": "#Import modules import taipy as tp from taipy import Config, Scope, Gui import pandas as pd import numpy as np #Back-End Code #Filter function for best/worst colleges within 1 stat def filtering_college(initial_dataset: pd.DataFrame, selected_stat, ): completed_graph_dataset = initial_dataset[selected_stat] completed_graph_data = completed_graph_dataset.nlargest(10, selected_stat, keep = \"all\") return completed_graph_data #Data Node Creation initial_dataset_cfg = Config.configure_data_node(id=\"initial_dataset\",storage_type=\"csv\",path=\"College_Data.csv\",scope=Scope.GLOBAL) selected_stat_cfg = Config.configure_data_node(id = \"selected_stat\", default_data = \"Name\", slope = Scope.GLOBAL) completed_graph_data_cfg = Config.configure_data_node(id=\"completed_graph_data\", scope=Scope.GLOBAL) #Task Creation filtered_college_cfg = Config.configure_task(id = \"filtered_college\", function=filtering_college, input = [initial_dataset_cfg, selected_stat_cfg], output = [completed_graph_data_cfg]) #Pipeline Creation pipeline_cfg = Config.configure_scenario(id=\"pipeline\",task_configs=[filtered_college_cfg]) #Scenario Creation scenario_cfg = Config.configure_scenario(id = \"scenario\", pipeline_configs = [pipeline_cfg]) #scenario = tp.create_scenario(scenario_cfg) #Core creation if __name__ == \"__main__\": tp.Core().run() #Start of Front-End Code #Callback Function def modify_df(state): scenario.selected_node.write(state.selected_stat) tp.submit(scenario) state.df = scenario.completed_graph_data_cfg.read() list_stats = [\"Name\",\"Private\",\"Apps\",\"Accept\",\"Enroll\",\"Top10perc\",\"Top25perc\",\"F.Undergrad\",\"P.Undergrad\",\"Outstate\",\"Room.Board\",\"Books\",\"Personal\",\"PhD\",\"Terminal\",\"S.F.Ratio\",\"perc.alumni\",\"Expend\",\"Grad.Rate\"] selected_stat = \"Top10perc\" df = pd.DataFrame(columns = [\"Name\", selected_stat], copy = True) #Variable Instantiation #App Creation college_stat_app = \"\"\"<|{selected_stat}|selector|lov={list_stats}|on_change=modify_df|dropdown|> <|{df}|chart|x=Name|y=selected_stat|type=bar|title=College Stats|>\"\"\" #Runs the app (finally) print(selected_stat) Gui(page = college_stat_app).run() "} {"text": "import json def add_line(source, line, step): line = line.replace('Getting Started with Taipy GUI', 'Getting Started with Taipy GUI on Notebooks') line = line.replace('(../src/', '(https://docs.taipy.io/en/latest/getting_started/src/') if line.startswith('!['): if step != 'index': line = line.replace('(', '(https://github.com/Avaiga/taipy-getting-started-gui/blob/latest/' + step + '/') else: line = line.replace('(', '(https://github.com/Avaiga/taipy-getting-started-gui/blob/latest/') # conversion of Markdown image to HTML img_src = line.split('](')[1].split(')')[0] width = line.split('](')[1].split(')')[1].split(' ')[1] source.append('
<div align=\"center\">\\n') source.append(f'<img src=\"{img_src}\" width=\"{width}\"/>\\n') source.append('</div>
\\n') elif step == 'step_00' and line.startswith('from taipy'): source.append(\"from taipy.gui import Gui, Markdown\\n\") elif 'Notebook' in line and 'step' in step: pass else: source.append(line + '\\n') return source def detect_new_cell(notebook, source, cell, line, execution_count, force_creation=False): if line.startswith('```python') or line.startswith('```') and cell == 'code' or force_creation: source = source[:-1] if cell == 'code': notebook['cells'].append({ \"cell_type\": \"code\", \"metadata\": {}, \"outputs\": [], \"execution_count\": execution_count, \"source\": source }) cell = 'markdown' execution_count += 1 else: notebook['cells'].append({ \"cell_type\": \"markdown\", \"metadata\": {}, \"source\": source }) cell = 'code' source = [] return cell, source, notebook, execution_count def create_introduction(notebook, execution_count): with open('index.md', 'r') as f: text = f.read() split_text = text.split('\\n') source = [] for line in split_text: if not line.startswith('``` console'): add_line(source, line, 'index') else: break notebook['cells'].append({ \"cell_type\": \"markdown\", \"metadata\": {}, \"source\": source }) notebook['cells'].append({ \"cell_type\": \"code\", \"metadata\": {}, \"outputs\": [], \"execution_count\": execution_count, \"source\": ['# !pip install taipy\\n'] }) notebook['cells'].append({ \"cell_type\": \"markdown\", \"metadata\": {}, \"source\": ['## Using Notebooks\\n',] }) execution_count += 1 return notebook, execution_count def create_steps(notebook, execution_count): steps = ['step_0' + str(i) for i in range(1, 8)] source = [] for step in steps: if source != []: cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count, force_creation=True) with open(step + '/ReadMe.md', 'r') as f: text = f.read() split_text = text.split('\\n') cell = \"markdown\" for_studio = 0 for line in split_text: if cell == \"markdown\": line=line.replace(\" \",\"\") elif cell == \"code\" and (line[:4] == \" \" or len(line)<=1) and for_studio == 2: line=line[4:] else: for_studio = 0 if '=== \"Taipy Studio' in line: for_studio = 1 if '=== \"Python configuration\"' in line: for_studio = 2 if for_studio != 1: add_line(source, line, step) cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count) return notebook, execution_count if __name__ == '__main__': notebook = { \"cells\": [], \"metadata\": { \"language_info\": { \"codemirror_mode\": { \"name\": \"ipython\", \"version\": 3 }, \"file_extension\": \".py\", \"mimetype\": \"text/x-python\", \"name\": \"python\", \"nbconvert_exporter\": \"python\", \"pygments_lexer\": \"ipython3\" }, \"orig_nbformat\": 4 }, \"nbformat\": 4, \"nbformat_minor\": 2 } execution_count = 0 notebook, execution_count = create_introduction(notebook, execution_count) notebook, execution_count = create_steps(notebook, execution_count) with open('getting_started.ipynb', 'w', encoding='utf-8') as f: json.dump(notebook, f, indent=2) "} {"text": "from taipy.gui import Gui, notify text = \"Original text\" # Definition of the page page = \"\"\" # Getting started with Taipy GUI My text: <|{text}|> <|{text}|input|> <|Run local|button|on_action=on_button_action|> \"\"\" def on_button_action(state): notify(state, 'info', f'The text is: {state.text}') state.text = \"Button Pressed\" def on_change(state, var_name, var_value): if var_name == \"text\" and var_value == \"Reset\": state.text = \"\" return Gui(page).run() "} {"text": "from transformers import AutoTokenizer from 
transformers import AutoModelForSequenceClassification from scipy.special import softmax import numpy as np import pandas as pd from taipy.gui import Gui, notify text = \"Original text\" page = \"\"\" # Getting started with Taipy GUI <|layout|columns=1 1| <| My text: <|{text}|> Enter a word: <|{text}|input|> <|Analyze|button|on_action=local_callback|> |> <|Table|expandable| <|{dataframe}|table|width=100%|number_format=%.2f|> |> |> <|layout|columns=1 1 1| ## Positive <|{np.mean(dataframe['Score Pos'])}|text|format=%.2f|raw|> ## Neutral <|{np.mean(dataframe['Score Neu'])}|text|format=%.2f|raw|> ## Negative <|{np.mean(dataframe['Score Neg'])}|text|format=%.2f|raw|> |> <|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|> \"\"\" MODEL = f\"cardiffnlp/twitter-roberta-base-sentiment\" tokenizer = AutoTokenizer.from_pretrained(MODEL) model = AutoModelForSequenceClassification.from_pretrained(MODEL) dataframe = pd.DataFrame({\"Text\":[''], \"Score Pos\":[0.33], \"Score Neu\":[0.33], \"Score Neg\":[0.33], \"Overall\":[0]}) dataframe2 = dataframe.copy() def analyze_text(text): # Run for Roberta Model encoded_text = tokenizer(text, return_tensors='pt') output = model(**encoded_text) scores = output[0][0].detach().numpy() scores = softmax(scores) return {\"Text\":text[:50], \"Score Pos\":scores[2], \"Score Neu\":scores[1], \"Score Neg\":scores[0], \"Overall\":scores[2]-scores[0]} def local_callback(state): notify(state, 'Info', f'The text is: {state.text}', True) temp = state.dataframe.copy() scores = analyze_text(state.text) temp.loc[len(temp)] = scores state.dataframe = temp state.text = \"\" path = \"\" treatment = 0 page_file = \"\"\" <|{path}|file_selector|extensions=.txt|label=Upload .txt file|on_action=analyze_file|> <|{f'Downloading {treatment}%...'}|>
<|Table|expandable| <|{dataframe2}|table|width=100%|number_format=%.2f|> |>
<|{dataframe2}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|height=600px|> \"\"\" def analyze_file(state): state.dataframe2 = dataframe2 state.treatment = 0 with open(state.path,\"r\", encoding='utf-8') as f: data = f.read() # split lines and eliminates duplicates file_list = list(dict.fromkeys(data.replace('\\n', ' ').split(\".\")[:-1])) for i in range(len(file_list)): text = file_list[i] state.treatment = int((i+1)*100/len(file_list)) temp = state.dataframe2.copy() scores = analyze_text(text) temp.loc[len(temp)] = scores state.dataframe2 = temp state.path = None pages = {\"/\":\"<|toggle|theme|>\\n
<center>\\n<|navbar|>\\n</center>
\", \"line\":page, \"text\":page_file} Gui(pages=pages).run()"} {"text": "from transformers import AutoTokenizer from transformers import AutoModelForSequenceClassification from scipy.special import softmax import numpy as np import pandas as pd from taipy.gui import Gui, notify text = \"Original text\" page = \"\"\" # Getting started with Taipy GUI <|layout|columns=1 1| <| My text: <|{text}|> Enter a word: <|{text}|input|> <|Analyze|button|on_action=local_callback|> |> <|Table|expandable| <|{dataframe}|table|width=100%|number_format=%.2f|> |> |> <|layout|columns=1 1 1| ## Positive <|{float(np.mean(dataframe['Score Pos']))}|text|format=%.2f|raw|>% ## Neutral <|{float(np.mean(dataframe['Score Neu']))}|text|format=%.2f|raw|>% ## Negative <|{float(np.mean(dataframe['Score Neg']))}|text|format=%.2f|raw|>% |>
<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|> \"\"\" MODEL = f\"cardiffnlp/twitter-roberta-base-sentiment\" tokenizer = AutoTokenizer.from_pretrained(MODEL) model = AutoModelForSequenceClassification.from_pretrained(MODEL) dataframe = pd.DataFrame({\"Text\":[''], \"Score Pos\":[0.33], \"Score Neu\":[0.33], \"Score Neg\":[0.33], \"Overall\":[0]}) def analyze_text(text): # Run for Roberta Model encoded_text = tokenizer(text, return_tensors='pt') output = model(**encoded_text) scores = output[0][0].detach().numpy() scores = softmax(scores) return {\"Text\":text, \"Score Pos\":scores[2], \"Score Neu\":scores[1], \"Score Neg\":scores[0], \"Overall\":scores[2]-scores[0]} def local_callback(state): notify(state, 'Info', f'The text is: {state.text}', True) temp = state.dataframe.copy() scores = analyze_text(state.text) temp.loc[len(temp)] = scores state.dataframe = temp state.text = \"\" Gui(page).run()"} {"text": "from taipy.gui import Gui text = \"Original text\" page = \"\"\" # Getting started with Taipy GUI My text: <|{text}|> <|{text}|input|> \"\"\" Gui(page).run()"} {"text": " from transformers import AutoTokenizer from transformers import AutoModelForSequenceClassification from scipy.special import softmax import numpy as np import pandas as pd from taipy.gui import Gui, notify text = \"Original text\" MODEL = f\"cardiffnlp/twitter-roberta-base-sentiment\" tokenizer = AutoTokenizer.from_pretrained(MODEL) model = AutoModelForSequenceClassification.from_pretrained(MODEL) dataframe = pd.DataFrame({\"Text\":[''], \"Score Pos\":[0.33], \"Score Neu\":[0.33], \"Score Neg\":[0.33], \"Overall\":[0]}) # Torch is, for now, only available for the Python version between 3.8 and 3.10. 
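# As the note just below suggests, a hedged stand-in is to return random scores
# in the same dict shape the rest of the app expects (illustrative only, not the real model):
# def analyze_text(text):
#     scores = np.random.dirichlet(np.ones(3))  # three scores summing to 1
#     return {\"Text\": text, \"Score Pos\": scores[2], \"Score Neu\": scores[1],
#             \"Score Neg\": scores[0], \"Overall\": scores[2] - scores[0]}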
# If you cannot install these packages, just return a dictionary of random numbers for the `analyze_text(text).` def analyze_text(text): # Run for Roberta Model encoded_text = tokenizer(text, return_tensors='pt') output = model(**encoded_text) scores = output[0][0].detach().numpy() scores = softmax(scores) return {\"Text\":text, \"Score Pos\":scores[2], \"Score Neu\":scores[1], \"Score Neg\":scores[0], \"Overall\":scores[2]-scores[0]} def local_callback(state): notify(state, 'Info', f'The text is: {state.text}', True) temp = state.dataframe.copy() scores = analyze_text(state.text) temp.loc[len(temp)] = scores state.dataframe = temp state.text = \"\" page = \"\"\" <|toggle|theme|> # Getting started with Taipy GUI My text: <|{text}|> Enter a word: <|{text}|input|> <|Analyze|button|on_action=local_callback|> ## Positive <|{float(np.mean(dataframe['Score Pos']))}|text|format=%.2f|>% ## Neutral <|{float(np.mean(dataframe['Score Neu']))}|text|format=%.2f|>% ## Negative <|{float(np.mean(dataframe['Score Neg']))}|text|format=%.2f|>% <|{dataframe}|table|number_format=%.2f|> <|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|> \"\"\" Gui(page).run()"} {"text": "from taipy import Gui Gui(page=\"# Getting started with *Taipy*\").run() "} {"text": "import pandas as pd from taipy.gui import Gui, notify text = \"Original text\" page = \"\"\" <|toggle|theme|> # Getting started with Taipy GUI My text: <|{text}|> <|{text}|input|> <|Analyze|button|on_action=local_callback|> <|{dataframe}|table|number_format=%.2f|> <|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|> \"\"\" dataframe = pd.DataFrame({\"Text\":['Test', 'Other', 'Love'], \"Score Pos\":[1, 1, 4], \"Score Neu\":[2, 3, 1], \"Score Neg\":[1, 2, 0], \"Overall\":[0, -1, 4]}) def local_callback(state): notify(state, 'info', f'The text is: {state.text}') temp = state.dataframe.copy() temp.loc[len(temp)] = {\"Text\":state.text, \"Score Pos\":0, \"Score Neu\":0, \"Score Neg\":0, \"Overall\":0} state.dataframe = temp state.text = \"\" Gui(page).run()"} {"text": " from taipy.gui import Gui, Markdown input_pid = None navigation = [(\"/add_product\", \"Add Prodcut\"), \"/\", \"Home\"] page = \"\"\" # Admin Panel ## Add Product def submit_button(state): state. <|{input_pid}|input|> <|submit|button|on_action = submit_button> \"\"\" page1 = \"\"\" ii \"\"\" Gui(page=page+page1).run(title=\"Go To Mall | Admin Panel\", port=4000) "} {"text": "from taipy import Config from taipy import Core, Gui from taipy.gui import Markdown import taipy as tp from pages.home import home_md from pages.temp import temp_page def build_message(name: str): return f\"Hello! 
{name}\" input_name_data_node_cfg = Config.configure_data_node(id=\"input_name\") message_data_node_cfg = Config.configure_data_node(id=\"message\") build_msg_task_cfg = Config.configure_task( \"buil_msg\", build_message, input_name_data_node_cfg, message_data_node_cfg) scenario_cfg = Config.configure_scenario( \"scenariod\", task_configs=[build_msg_task_cfg]) # making GUI input_name = \"M ahi\" message = None def submit_scenario(state): state.scenario.input_name.write(state.input_name) state.scenario.submit() state.message = scenario.message.read() love = \"sazia\" page = \"\"\" Name: <|{input_name}|input|> <|submit|button|on_action=submit_scenario|> Message: <|{message}|text|> Kima : <|All world are need to safe|text|> \"\"\" pages = { \"/\": home_md, \"temp\": temp_page, } if __name__ == \"__main__\": Core().run() # mange scenarios and data scenario = tp.create_scenario(scenario_cfg) # instance of run gui Gui(pages=pages).run(title=\"Mahi Template\", port=5000, favicon=\"https://www.youtube.com/s/gaming/emoji/7ff574f2/emoji_u1f602.png\", ) "} {"text": "from taipy.gui import Gui, Markdown name = \"maho\" ... page = \"\"\" ... <|{dialog_is_visible}|dialog| Enter a name: <|{name}|input|> |> ... \"\"\" ... pages = { \"/\" : page, 'page1': Markdown(\"# My first page\"), 'page2': Markdown(\"# My second page\") } Gui(pages=pages).run(title=\"Mahi App\",port=5001 )"} {"text": "from taipy.gui import Markdown, Gui text = \"Welcome to home page\" mahi_text = \"So how are you\" home_md = Markdown(\"\"\" # **Home** <|{text}|>
<|{mahi_text}|> \"\"\") "} {"text": "from taipy.gui import Gui, Markdown def fahren_to_celcius(fahr): return (fahr-32)*5/9 fahr = 100 celcious = fahren_to_celcius(fahr) temp_page = Markdown(\"\"\" # **Home** Fahrenheit : <|{fahr}|> Converted Celcius : <|{celcious}|> \"\"\") "} {"text": "from taipy.gui import Gui, Markdown, notify value = 0 single_page = Markdown(\"\"\" # Taipy Application Check the documentation [here](https://docs.taipy.io/en/latest/manuals/about/). <|{value}|slider|on_change=on_slider|> <|Push|button|on_action=on_push|> \"\"\") def on_push(state): ... def on_slider(state): if state.value == 100: notify(state, \"success\", \"Taipy is running!\") def on_change(state, var_name:str, var_value): ... if __name__ == \"__main__\": gui = Gui(single_page) gui.run() "} {"text": "from taipy.gui import Gui from taipy.config import Config from pages.root.root import * from pages.page_1.page_1 import page_1_md from pages.page_2.page_2 import page_2_md Config.load(\"config/config.toml\") def on_change(state, var_name:str, var_value): ... pages = {\"/\":root_md, \"page_1\":page_1_md, \"page_2\":page_2_md} if __name__ == \"__main__\": gui = Gui(pages=pages) gui.run() "} {"text": "from taipy.config import Config"} {"text": "import pandas as pd import numpy as np from sklearn.linear_model import LinearRegression def clean_data(data): ... return data.dropna().drop_duplicates() def predict(data): model = LinearRegression() model.fit(data[[\"x\"]], data[[\"y\"]]) data[\"y_pred\"] = model.predict(data[[\"x\"]]) return data def evaluate(data): ... return np.random.rand()"} {"text": "from taipy.gui import Markdown import pandas as pd scenario = None results = None def show_results(state): state.results = state.scenario.predictions.read() page_1_md = Markdown(\"pages/page_1/page_1.md\")"} {"text": "from taipy.gui import Markdown root_md = Markdown(\"pages/root/root.md\")"} {"text": "from taipy.gui import Markdown import pandas as pd path = None data = None def drop_csv(state): state.data = pd.read_csv(state.path) page_2_md = Markdown(\"pages/page_2/page_2.md\")"} {"text": "from taipy.gui import Gui, Markdown, notify from pages.root.root import * from pages.page_1.page_1 import page_1_md from pages.page_2.page_2 import page_2_md def on_change(state, var_name:str, var_value): ... 
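# A hedged sketch of what this global on_change hook could do -- dispatch on the
# name of the state variable that changed (the variable names are illustrative):
# def on_change(state, var_name: str, var_value):
#     if var_name == \"path\" and var_value:
#         state.data = pd.read_csv(var_value)  # reload when a new file is picked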
pages = {\"/\":root_md, \"page_1\":page_1_md, \"page_2\":page_2_md} if __name__ == \"__main__\": gui = Gui(pages=pages) gui.run() "} {"text": "from taipy.gui import Markdown page_1_md = Markdown(\"pages/page_1/page_1.md\")"} {"text": "from taipy.gui import Markdown root_md = Markdown(\"pages/root/root.md\")"} {"text": "from taipy.gui import Markdown page_2_md = Markdown(\"pages/page_2/page_2.md\")"} {"text": "from taipy.gui import Gui from pages import home import os gui = Gui(page=home.page).run( title=\"Demo Logistic Regression\", port=os.environ.get(\"PORT\", \"8000\"), ) "} {"text": "from config.nodes import ( node_initial_dataset, node_prediction, node_prediction_model, node_X, node_Y, ) from models.data import make_X, make_Y from models.predict import train, predict from taipy import Config task_make_X = Config.configure_task( id=\"make_X\", input=[node_initial_dataset], output=node_X, function=make_X, ) task_make_Y = Config.configure_task( id=\"make_Y\", input=[node_initial_dataset], output=node_Y, function=make_Y, ) task_train = Config.configure_task( id=\"train\", input=[node_X, node_Y], output=node_prediction_model, function=train ) task_predict = Config.configure_task( id=\"predict\", input=[node_X, node_Y], output=node_prediction, function=predict ) "} {"text": ""} {"text": "from taipy import Config node_initial_dataset = Config.configure_data_node(id=\"initial_dataset\") node_X = Config.configure_data_node(id=\"X\") node_Y = Config.configure_data_node(id=\"Y\") node_prediction_model = Config.configure_data_node(id=\"prediction_model\") node_prediction = Config.configure_data_node(id=\"prediction\") "} {"text": "from taipy import Config from config.tasks import task_make_X, task_make_Y, task_train, task_predict pipeline_train = Config.configure_pipeline( id=\"train\", task_configs=[task_make_X, task_make_Y, task_train] ) pipeline_predict = Config.configure_pipeline(id=\"predict\", task_configs=[task_predict]) "} {"text": "from sklearn.linear_model import LogisticRegression def train(X, Y): X_train, Y_train = X[:50], Y[:50] X_test, Y_test = X[50:], Y[50:] # Using scikit-learn default regression = LogisticRegression(random_state=0).fit(X_train, Y_train) print(f\"intercept: {regression.intercept_} coefficients: {regression.coef_}\") print(f\"train accuracy: {regression.score(X_train, Y_train)}\") print(f\"test accuracy: {regression.score(X_test, Y_test)}\") return regression def predict(x, regression: LogisticRegression): return regression.predict(x) "} {"text": ""} {"text": "import numpy as np # Set seed for random number generator rg = np.random.default_rng(seed=0) # Create an array with 500 rows and 3 columns. 
# This will serve as initial data node initial_dataset = rg.normal(size=(500, 3)) def make_X(dataset): # Remove the first column which can be considered as noise X1 = np.delete(dataset, 0, axis=1) # Now create two more columns correlated with X1 X2 = X1 + 0.1 * np.random.normal(size=(500, 2)) X = np.concatenate((X1, X2), axis=1) return X def make_Y(dataset): P = 1 / (1 + np.e ** (-np.matmul(dataset, [1, 1, 1]))) Y = P > 0.5 return Y "} {"text": "from taipy.gui import Markdown import taipy as tp from taipy.core.job.job import Job from config.pipelines import pipeline_train from models.data import initial_dataset def job_status_changed(pipeline, job: Job): print(job.status) def training_button_clicked(state, id, action): pipeline = tp.create_pipeline(pipeline_train) # Set initial dataset: pipeline.initial_dataset.write(initial_dataset) tp.subscribe_pipeline( pipeline=pipeline, callback=job_status_changed, ) tp.submit(pipeline) page = Markdown(\"src/pages/home.md\") "} {"text": ""} {"text": "from taipy import Gui page = \"\"\" # Hello World \ud83c\udf0d with *Taipy*This is my first Taipy test app. And it is running fine! \"\"\" Gui(page).run(use_reloader=True) # use_reloader=True if you are in development "} {"text": "from taipy import Gui from page.dashboard_fossil_fuels_consumption import * if __name__ == \"__main__\": Gui(page).run( use_reloader=True, title=\"Test\", dark_mode=False, ) # use_reloader=True if you are in development"} {"text": "import pandas as pd import taipy as tp from data.data import dataset_fossil_fuels_gdp country = \"Spain\" region = \"Europe\" lov_region = list(dataset_fossil_fuels_gdp.Entity.unique()) def load_dataset(_country): \"\"\"Load dataset for a specific country. Args: _country (str): The name of the country. Returns: pandas.DataFrame: A DataFrame containing the fossil fuels GDP data for the specified country. \"\"\" dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp.reset_index() dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp_cp[ dataset_fossil_fuels_gdp[\"Entity\"] == _country ] return dataset_fossil_fuels_gdp_cp dataset_fossil_fuels_gdp_cp = load_dataset(country) def on_change_country(state): \"\"\"Update the dataset based on the selected country. 
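Reads the newly selected country from the GUI state, rebuilds the filtered dataset with load_dataset, and assigns it back onto the state so the bound chart and table refresh.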
Args: state (object): The \"state\" of the variables ran by the program (value changes through selectors) Returns: None \"\"\" print(\"country is:\", state.country) _country = state.country dataset_fossil_fuels_gdp_cp = load_dataset(_country) state.dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp_cp layout = {\"yaxis\": {\"range\": [0, 100000]}, \"xaxis\": {\"range\": [1965, 2021]}} page = \"\"\" # Fossil Fuel consumption by per capita by country* Data comes from Our World in Data <|{country}|selector|lov={lov_region}|on_change=on_change_country|dropdown|label=Country/Region|> <|{dataset_fossil_fuels_gdp_cp}|chart|type=plot|x=Year|y=Fossil fuels per capita (kWh)|height=200%|layout={layout}|> ## Fossil fuel per capita for <|{country}|>: <|{dataset_fossil_fuels_gdp_cp}|table|height=400px|width=95%|> \"\"\" "} {"text": "import pandas as pd dataset_fossil_fuels_gdp = pd.read_csv(\"data/per-capita-fossil-energy-vs-gdp.csv\") country_codes = pd.read_csv(\"./data/country_codes.csv\") dataset_fossil_fuels_gdp = dataset_fossil_fuels_gdp.merge( country_codes[[\"alpha-3\", \"region\"]], how=\"left\", left_on=\"Code\", right_on=\"alpha-3\" ) dataset_fossil_fuels_gdp = dataset_fossil_fuels_gdp[ ~dataset_fossil_fuels_gdp[\"Fossil fuels per capita (kWh)\"].isnull() ].reset_index() dataset_fossil_fuels_gdp[\"Fossil fuels per capita (kWh)\"] = ( dataset_fossil_fuels_gdp[\"Fossil fuels per capita (kWh)\"] * 1000 ) "} {"text": "# This is a sample Python script. # Press Maj+F10 to execute it or replace it with your code. # Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings. def print_hi(name): # Use a breakpoint in the code line below to debug your script. print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint. # Press the green button in the gutter to run the script. 
if __name__ == '__main__': print_hi('PyCharm') # See PyCharm help at https://www.jetbrains.com/help/pycharm/ "} {"text": "from taipy.gui import Gui, notify from taipy import Config import pandas as pd tasks = pd.DataFrame({ \"Type\":[], \"Name\":[], \"Completed\":[] }) tasks[\"Completed\"] = tasks[\"Completed\"].astype(\"bool\") task_name=\"\" task_type=\"\" page = \"\"\" # TODO Schedular Enter Task: <|{task_name}|input|> Type: <|{task_type}|selector|lov=Personal;Home;Work|dropdown|> <|Add Task|button|on_action=on_task_add|> <|{tasks}|table|filter|editable|editable[Type]=False|on_edit=on_task_edit|on_delete=on_task_delete|style=style_completed|> \"\"\" def style_completed(_1, _2, values): if(values[\"Completed\"]): return \"strikeout\" def on_task_edit(state, var_name, payload): if(var_name == \"tasks\"): index = payload[\"index\"] col = payload[\"col\"] value = payload[\"user_value\"] new_tasks = state.tasks.copy() new_tasks.loc[index, col] = value state.tasks = new_tasks notify(state, \"I\", \"Task Updated.\") def on_task_delete(state, var_name, payload): if(var_name == \"tasks\"): index = payload[\"index\"] state.tasks = state.tasks.drop(index=index) notify(state, \"E\", \"Task Deleted.\") def on_task_add(state, var_name, payload): if(state.task_name == \"\" or state.task_type == \"\"): notify(state, \"E\", \"Task Name or Task Type Not Set.\") return False _task_type = state.task_type _task_name = state.task_name _isCompleted = False new_data = pd.DataFrame([[_task_type, _task_name, _isCompleted]], columns=state.tasks.columns) state.tasks = pd.concat([new_data, state.tasks], axis=0, ignore_index=True) notify(state, \"S\", \"New Task Added Successfully.\") Gui(page, css_file=\"todo.css\").run(use_reloader=True)"} {"text": "#!/usr/bin/env python \"\"\"The setup script.\"\"\" import json import os from setuptools import find_namespace_packages, find_packages, setup with open(\"README.md\") as readme_file: readme = readme_file.read() with open(f\"src{os.sep}taipy{os.sep}config{os.sep}version.json\") as version_file: version = json.load(version_file) version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}' if vext := version.get(\"ext\"): version_string = f\"{version_string}.{vext}\" requirements = [\"toml>=0.10,<0.11\", \"deepdiff>=6.2,<6.3\"] test_requirements = [\"pytest>=3.8\"] setup( author=\"Avaiga\", author_email=\"dev@taipy.io\", python_requires=\">=3.8\", classifiers=[ \"Intended Audience :: Developers\", \"License :: OSI Approved :: Apache Software License\", \"Natural Language :: English\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Programming Language :: Python :: 3.10\", ], description=\"A Taipy package dedicated to easily configure a Taipy application.\", install_requires=requirements, long_description=readme, long_description_content_type=\"text/markdown\", include_package_data=True, license=\"Apache License 2.0\", keywords=\"taipy-config\", name=\"taipy-config\", package_dir={\"\": \"src\"}, packages=find_namespace_packages(where=\"src\") + find_packages(include=[\"taipy\", \"taipy.config\", \"taipy.config.*\", \"taipy.logger\", \"taipy.logger.*\"]), test_suite=\"tests\", tests_require=test_requirements, url=\"https://github.com/avaiga/taipy-config\", version=version_string, zip_safe=False, ) "} {"text": "import ast import re from pathlib import Path from typing import List def _get_function_delimiters(initial_line, lines): begin = end = 
initial_line while True: if lines[begin - 1] == \"\\n\": break begin -= 1 if lines[end].endswith(\"(\\n\"): while \":\\n\" not in lines[end]: end += 1 if '\"\"\"' in lines[end + 1]: while True: if '\"\"\"\\n' in lines[end]: break end += 1 return begin, end + 1 def _get_file_lines(filename: str) -> List[str]: # Get file lines for later with open(filename) as f: return f.readlines() def _get_file_ast(filename: str): # Get raw text and build ast _config = Path(filename) _tree = _config.read_text() return ast.parse(_tree) def _build_base_config_pyi(filename, base_pyi): lines = _get_file_lines(filename) tree = _get_file_ast(filename) class_lineno = [f.lineno for f in ast.walk(tree) if isinstance(f, ast.ClassDef) and f.name == \"Config\"] begin_class, end_class = _get_function_delimiters(class_lineno[0] - 1, lines) base_pyi += \"\".join(lines[begin_class:end_class]) functions = [f.lineno for f in ast.walk(tree) if isinstance(f, ast.FunctionDef) and not f.name.startswith(\"__\")] for ln in functions: begin_line, end_line = _get_function_delimiters(ln - 1, lines) base_pyi += \"\".join(lines[begin_line:end_line]) base_pyi = __add_docstring(base_pyi, lines, end_line) base_pyi += \"\\n\" return base_pyi def __add_docstring(base_pyi, lines, end_line): if '\"\"\"' not in lines[end_line - 1]: base_pyi += '\\t\\t\"\"\"\"\"\"\\n'.replace(\"\\t\", \" \") return base_pyi def _build_entity_config_pyi(base_pyi, filename, entity_map): lines = _get_file_lines(filename) tree = _get_file_ast(filename) functions = {} for f in ast.walk(tree): if isinstance(f, ast.FunctionDef): if \"_configure\" in f.name and not f.name.startswith(\"__\"): functions[f.name] = f.lineno elif \"_set_default\" in f.name and not f.name.startswith(\"__\"): functions[f.name] = f.lineno elif \"_add\" in f.name and not f.name.startswith(\"__\"): functions[f.name] = f.lineno for k, v in functions.items(): begin_line, end_line = _get_function_delimiters(v - 1, lines) try: func = \"\".join(lines[begin_line:end_line]) func = func if not k.startswith(\"_\") else func.replace(k, entity_map.get(k)) func = __add_docstring(func, lines, end_line) + \"\\n\" base_pyi += func except Exception: print(f\"key={k}\") raise return base_pyi def _generate_entity_and_property_maps(filename): entities_map = {} property_map = {} entity_tree = _get_file_ast(filename) functions = [ f for f in ast.walk(entity_tree) if isinstance(f, ast.Call) and getattr(f.func, \"id\", \"\") == \"_inject_section\" ] for f in functions: entity = ast.unparse(f.args[0]) entities_map[entity] = {} property_map[eval(ast.unparse(f.args[1]))] = entity # Remove class name from function map text = ast.unparse(f.args[-1]).replace(f\"{entity}.\", \"\") matches = re.findall(r\"\\((.*?)\\)\", text) for m in matches: v, k = m.replace(\"'\", \"\").split(\",\") entities_map[entity][k.strip()] = v return entities_map, property_map def _generate_acessors(base_pyi, property_map): for property, cls in property_map.items(): return_template = f\"Dict[str, {cls}]\" if property != \"job_config\" else f\"{cls}\" template = (\"\\t@_Classproperty\\n\" + f'\\tdef {property}(cls) -> {return_template}:\\n\\t\\t\"\"\"\"\"\"\\n').replace( \"\\t\", \" \" ) base_pyi += template + \"\\n\" return base_pyi def _build_header(filename): _file = Path(filename) return _file.read_text() + \"\\n\\n\" if __name__ == \"__main__\": header_file = \"stubs/pyi_header.py\" config_init = Path(\"taipy-core/src/taipy/core/config/__init__.py\") base_config = \"src/taipy/config/config.py\" dn_filename = 
\"taipy-core/src/taipy/core/config/data_node_config.py\" job_filename = \"taipy-core/src/taipy/core/config/job_config.py\" scenario_filename = \"taipy-core/src/taipy/core/config/scenario_config.py\" task_filename = \"taipy-core/src/taipy/core/config/task_config.py\" migration_filename = \"taipy-core/src/taipy/core/config/migration_config.py\" core_filename = \"taipy-core/src/taipy/core/config/core_section.py\" entities_map, property_map = _generate_entity_and_property_maps(config_init) pyi = _build_header(header_file) pyi = _build_base_config_pyi(base_config, pyi) pyi = _generate_acessors(pyi, property_map) pyi = _build_entity_config_pyi(pyi, scenario_filename, entities_map[\"ScenarioConfig\"]) pyi = _build_entity_config_pyi(pyi, dn_filename, entities_map[\"DataNodeConfig\"]) pyi = _build_entity_config_pyi(pyi, task_filename, entities_map[\"TaskConfig\"]) pyi = _build_entity_config_pyi(pyi, job_filename, entities_map[\"JobConfig\"]) pyi = _build_entity_config_pyi(pyi, migration_filename, entities_map[\"MigrationConfig\"]) pyi = _build_entity_config_pyi(pyi, core_filename, entities_map[\"CoreSection\"]) with open(\"src/taipy/config/config.pyi\", \"w\") as f: f.writelines(pyi) "} {"text": "import json from typing import Any, Callable, Dict, List, Optional, Union from datetime import timedelta from taipy.core.config import DataNodeConfig, JobConfig, ScenarioConfig, TaskConfig, MigrationConfig, CoreSection from .checker.issue_collector import IssueCollector from .common._classproperty import _Classproperty from .common._config_blocker import _ConfigBlocker from .common.frequency import Frequency from .common.scope import Scope from .global_app.global_app_config import GlobalAppConfig from .section import Section from .unique_section import UniqueSection "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import os import pathlib from unittest import TestCase, mock from src.taipy.logger._taipy_logger import _TaipyLogger class TestTaipyLogger(TestCase): def test_taipy_logger(self): _TaipyLogger._get_logger().info(\"baz\") _TaipyLogger._get_logger().debug(\"qux\") def test_taipy_logger_configured_by_file(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), \"logger.conf\") with mock.patch.dict(os.environ, {\"TAIPY_LOGGER_CONFIG_PATH\": path}): _TaipyLogger._get_logger().info(\"baz\") _TaipyLogger._get_logger().debug(\"qux\") "} {"text": "import os import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked from tests.config.utils.named_temporary_file import NamedTemporaryFile config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] custom_property_not_overwritten = true custom_property_overwritten = 10 \"\"\" ) config_from_environment = NamedTemporaryFile( \"\"\" [TAIPY] custom_property_overwritten = 11 \"\"\" ) def test_load_from_environment_overwrite_load_from_filename(): os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename Config.load(config_from_filename.filename) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 11 os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH) def test_block_load_from_environment_overwrite_load_from_filename(): Config.load(config_from_filename.filename) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 10 Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename Config.load(config_from_filename.filename) os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 10 # The Config.load is failed to override "} {"text": "import pytest from src.taipy.config._config import _Config from src.taipy.config._config_comparator._config_comparator import _ConfigComparator from src.taipy.config._serializer._toml_serializer import _TomlSerializer from src.taipy.config.checker.issue_collector import IssueCollector from src.taipy.config.config import Config from src.taipy.config.section import Section from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest @pytest.fixture(scope=\"function\", autouse=True) def reset(): reset_configuration_singleton() register_test_sections() def reset_configuration_singleton(): Config.unblock_update() Config._default_config = _Config()._default_config() Config._python_config = _Config() Config._file_config = _Config() Config._env_file_config = _Config() Config._applied_config = _Config() Config._collector = IssueCollector() Config._serializer = _TomlSerializer() Config._comparator = _ConfigComparator() def register_test_sections(): Config._register_default(UniqueSectionForTest(\"default_attribute\")) Config.configure_unique_section_for_tests = UniqueSectionForTest._configure Config.unique_section_name = Config.unique_sections[UniqueSectionForTest.name] Config._register_default(SectionForTest(Section._DEFAULT_KEY, \"default_attribute\", prop=\"default_prop\", prop_int=0)) Config.configure_section_for_tests = 
SectionForTest._configure Config.section_name = Config.sections[SectionForTest.name] "} {"text": "from unittest import mock from src.taipy.config import Config from src.taipy.config._config import _Config from src.taipy.config._config_comparator._comparator_result import _ComparatorResult from src.taipy.config.global_app.global_app_config import GlobalAppConfig from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest class TestConfigComparator: unique_section_1 = UniqueSectionForTest(attribute=\"unique_attribute_1\", prop=\"unique_prop_1\") unique_section_1b = UniqueSectionForTest(attribute=\"unique_attribute_1\", prop=\"unique_prop_1b\") section_1 = SectionForTest(\"section_1\", attribute=\"attribute_1\", prop=\"prop_1\") section_2 = SectionForTest(\"section_2\", attribute=2, prop=\"prop_2\") section_2b = SectionForTest(\"section_2\", attribute=\"attribute_2\", prop=\"prop_2b\") section_3 = SectionForTest(\"section_3\", attribute=[1, 2, 3, 4], prop=[\"prop_1\"]) section_3b = SectionForTest(\"section_3\", attribute=[1, 2], prop=[\"prop_1\", \"prop_2\", \"prop_3\"]) section_3c = SectionForTest(\"section_3\", attribute=[2, 1], prop=[\"prop_3\", \"prop_1\", \"prop_2\"]) def test_comparator_compare_method_call(self): _config_1 = _Config._default_config() _config_2 = _Config._default_config() with mock.patch( \"src.taipy.config._config_comparator._config_comparator._ConfigComparator._find_conflict_config\" ) as mck: Config._comparator._find_conflict_config(_config_1, _config_2) mck.assert_called_once_with(_config_1, _config_2) def test_comparator_without_diff(self): _config_1 = _Config._default_config() _config_2 = _Config._default_config() config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) assert isinstance(config_diff, _ComparatorResult) assert config_diff == {} def test_comparator_with_updated_global_config(self): _config_1 = _Config._default_config() _config_1._global_config = GlobalAppConfig(foo=\"bar\") _config_2 = _Config._default_config() _config_2._global_config = GlobalAppConfig(foo=\"baz\", bar=\"foo\") config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) assert config_diff.get(\"unconflicted_sections\") is None assert config_diff.get(\"conflicted_sections\") is not None conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"modified_items\"]) == 1 assert conflicted_config_diff[\"modified_items\"][0] == ( (\"Global Configuration\", \"foo\", None), (\"bar\", \"baz\"), ) assert len(conflicted_config_diff[\"added_items\"]) == 1 assert conflicted_config_diff[\"added_items\"][0] == ( (\"Global Configuration\", \"bar\", None), \"foo\", ) def test_comparator_with_new_section(self): _config_1 = _Config._default_config() # The first \"section_name\" is added to the Config _config_2 = _Config._default_config() _config_2._sections[SectionForTest.name] = {\"section_1\": self.section_1} config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"added_items\"]) == 1 assert conflicted_config_diff[\"added_items\"][0] == ( (\"section_name\", None, None), {\"section_1\": {\"attribute\": \"attribute_1\", \"prop\": \"prop_1\"}}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"removed_items\") is None # A new \"section_name\" is added to the Config _config_3 = 
_Config._default_config() _config_3._sections[SectionForTest.name] = {\"section_1\": self.section_1, \"section_2\": self.section_2} config_diff = Config._comparator._find_conflict_config(_config_2, _config_3) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"added_items\"]) == 1 assert conflicted_config_diff[\"added_items\"][0] == ( (\"section_name\", \"section_2\", None), {\"attribute\": \"2:int\", \"prop\": \"prop_2\"}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"removed_items\") is None def test_comparator_with_removed_section(self): _config_1 = _Config._default_config() # All \"section_name\" sections are removed from the Config _config_2 = _Config._default_config() _config_2._sections[SectionForTest.name] = {\"section_1\": self.section_1} config_diff = Config._comparator._find_conflict_config(_config_2, _config_1) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"removed_items\"]) == 1 assert conflicted_config_diff[\"removed_items\"][0] == ( (\"section_name\", None, None), {\"section_1\": {\"attribute\": \"attribute_1\", \"prop\": \"prop_1\"}}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"added_items\") is None # Section \"section_1\" is removed from the Config _config_3 = _Config._default_config() _config_3._sections[SectionForTest.name] = {\"section_1\": self.section_1, \"section_2\": self.section_2} config_diff = Config._comparator._find_conflict_config(_config_3, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"removed_items\"]) == 1 assert conflicted_config_diff[\"removed_items\"][0] == ( (\"section_name\", \"section_2\", None), {\"attribute\": \"2:int\", \"prop\": \"prop_2\"}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"added_items\") is None def test_comparator_with_modified_section(self): _config_1 = _Config._default_config() _config_1._sections[SectionForTest.name] = {\"section_2\": self.section_2} # All \"section_name\" sections are removed from the Config _config_2 = _Config._default_config() _config_2._sections[SectionForTest.name] = {\"section_2\": self.section_2b} config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"modified_items\"]) == 2 assert conflicted_config_diff[\"modified_items\"][0] == ( (\"section_name\", \"section_2\", \"attribute\"), (\"2:int\", \"attribute_2\"), ) assert conflicted_config_diff[\"modified_items\"][1] == ( (\"section_name\", \"section_2\", \"prop\"), (\"prop_2\", \"prop_2b\"), ) assert conflicted_config_diff.get(\"removed_items\") is None assert conflicted_config_diff.get(\"added_items\") is None def test_comparator_with_modified_list_attribute(self): _config_1 = _Config._default_config() _config_1._sections[SectionForTest.name] = {\"section_3\": self.section_3} # All \"section_name\" sections are removed from the Config _config_2 = _Config._default_config() _config_2._sections[SectionForTest.name] = {\"section_3\": self.section_3b} config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"modified_items\"]) == 2 assert conflicted_config_diff[\"modified_items\"][0] == ( (\"section_name\", 
\"section_3\", \"prop\"), ([\"prop_1\"], [\"prop_1\", \"prop_2\", \"prop_3\"]), ) assert conflicted_config_diff[\"modified_items\"][1] == ( (\"section_name\", \"section_3\", \"attribute\"), ([\"1:int\", \"2:int\", \"3:int\", \"4:int\"], [\"1:int\", \"2:int\"]), ) assert conflicted_config_diff.get(\"removed_items\") is None assert conflicted_config_diff.get(\"added_items\") is None def test_comparator_with_different_order_list_attributes(self): _config_1 = _Config._default_config() _config_1._unique_sections _config_1._sections[SectionForTest.name] = {\"section_3\": self.section_3b} # Create _config_2 with different order of list attributes _config_2 = _Config._default_config() _config_2._sections[SectionForTest.name] = {\"section_3\": self.section_3c} config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) # There should be no difference since the order of list attributes is ignored assert config_diff == {} def test_comparator_with_new_unique_section(self): _config_1 = _Config._default_config() _config_2 = _Config._default_config() _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1 config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"added_items\"]) == 1 assert conflicted_config_diff[\"added_items\"][0] == ( (\"unique_section_name\", None, None), {\"attribute\": \"unique_attribute_1\", \"prop\": \"unique_prop_1\"}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"removed_items\") is None def test_comparator_with_removed_unique_section(self): _config_1 = _Config._default_config() _config_2 = _Config._default_config() _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1 config_diff = Config._comparator._find_conflict_config(_config_2, _config_1) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"removed_items\"]) == 1 assert conflicted_config_diff[\"removed_items\"][0] == ( (\"unique_section_name\", None, None), {\"attribute\": \"unique_attribute_1\", \"prop\": \"unique_prop_1\"}, ) assert conflicted_config_diff.get(\"modified_items\") is None assert conflicted_config_diff.get(\"added_items\") is None def test_comparator_with_modified_unique_section(self): _config_1 = _Config._default_config() _config_1._unique_sections[UniqueSectionForTest.name] = self.unique_section_1 # All \"section_name\" sections are removed from the Config _config_2 = _Config._default_config() _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1b config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) conflicted_config_diff = config_diff[\"conflicted_sections\"] assert len(conflicted_config_diff[\"modified_items\"]) == 1 assert conflicted_config_diff[\"modified_items\"][0] == ( (\"unique_section_name\", \"prop\", None), (\"unique_prop_1\", \"unique_prop_1b\"), ) assert conflicted_config_diff.get(\"removed_items\") is None assert conflicted_config_diff.get(\"added_items\") is None def test_unconflicted_section_name_store_statically(self): Config._comparator._add_unconflicted_section(\"section_name_1\") assert Config._comparator._unconflicted_sections == {\"section_name_1\"} Config._comparator._add_unconflicted_section(\"section_name_2\") assert Config._comparator._unconflicted_sections == {\"section_name_1\", \"section_name_2\"} 
Config._comparator._add_unconflicted_section(\"section_name_1\") assert Config._comparator._unconflicted_sections == {\"section_name_1\", \"section_name_2\"} def test_unconflicted_diff_is_stored_separated_from_conflicted_ones(self): _config_1 = _Config._default_config() _config_1._unique_sections[UniqueSectionForTest.name] = self.unique_section_1 _config_1._sections[SectionForTest.name] = {\"section_2\": self.section_2} _config_2 = _Config._default_config() _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1b _config_2._sections[SectionForTest.name] = {\"section_2\": self.section_2b} # Compare 2 Configuration config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) assert config_diff.get(\"unconflicted_sections\") is None assert config_diff.get(\"conflicted_sections\") is not None assert len(config_diff[\"conflicted_sections\"][\"modified_items\"]) == 3 # Ignore any diff of \"section_name\" and compare Config._comparator._add_unconflicted_section(\"section_name\") config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) assert config_diff.get(\"unconflicted_sections\") is not None assert len(config_diff[\"unconflicted_sections\"][\"modified_items\"]) == 2 assert config_diff.get(\"conflicted_sections\") is not None assert len(config_diff[\"conflicted_sections\"][\"modified_items\"]) == 1 # Ignore any diff of Global Config and compare Config._comparator._add_unconflicted_section([\"unique_section_name\"]) config_diff = Config._comparator._find_conflict_config(_config_1, _config_2) assert config_diff.get(\"unconflicted_sections\") is not None assert len(config_diff[\"unconflicted_sections\"][\"modified_items\"]) == 3 assert config_diff.get(\"conflicted_sections\") is None def test_comparator_log_message(self, caplog): _config_1 = _Config._default_config() _config_1._unique_sections[UniqueSectionForTest.name] = self.unique_section_1 _config_1._sections[SectionForTest.name] = {\"section_2\": self.section_2} _config_2 = _Config._default_config() _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1b _config_2._sections[SectionForTest.name] = {\"section_2\": self.section_2b} # Ignore any diff of \"section_name\" and compare Config._comparator._add_unconflicted_section(\"section_name\") Config._comparator._find_conflict_config(_config_1, _config_2) error_messages = caplog.text.strip().split(\"\\n\") assert len(error_messages) == 5 assert all( t in error_messages[0] for t in [ \"INFO\", \"There are non-conflicting changes between the current configuration and the current configuration:\", ] ) assert 'section_name \"section_2\" has attribute \"attribute\" modified: 2:int -> attribute_2' in error_messages[1] assert 'section_name \"section_2\" has attribute \"prop\" modified: prop_2 -> prop_2b' in error_messages[2] assert all( t in error_messages[3] for t in [ \"ERROR\", \"The current configuration conflicts with the current configuration:\", ] ) assert 'unique_section_name \"prop\" was modified: unique_prop_1 -> unique_prop_1b' in error_messages[4] caplog.clear() Config._comparator._find_conflict_config(_config_1, _config_2, old_version_number=\"1.0\") error_messages = caplog.text.strip().split(\"\\n\") assert len(error_messages) == 5 assert all( t in error_messages[0] for t in [ \"INFO\", \"There are non-conflicting changes between the configuration for version 1.0 and the current configuration:\", ] ) assert all( t in error_messages[3] for t in [ \"ERROR\", \"The configuration for version 1.0 conflicts with 
the current configuration:\", ] ) caplog.clear() Config._comparator._compare( _config_1, _config_2, version_number_1=\"1.0\", version_number_2=\"2.0\", ) error_messages = caplog.text.strip().split(\"\\n\") assert len(error_messages) == 3 assert all( t in error_messages[0] for t in [\"INFO\", \"Differences between version 1.0 Configuration and version 2.0 Configuration:\"] ) caplog.clear() "} {"text": "import os from unittest import mock import pytest from src.taipy.config.exceptions.exceptions import InvalidConfigurationId from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest class WrongUniqueSection(UniqueSectionForTest): name = \"1wrong_id\" class WrongSection(SectionForTest): name = \"correct_name\" def test_section_uses_valid_id(): with pytest.raises(InvalidConfigurationId): WrongUniqueSection(attribute=\"foo\") with pytest.raises(InvalidConfigurationId): WrongSection(\"wrong id\", attribute=\"foo\") with pytest.raises(InvalidConfigurationId): WrongSection(\"1wrong_id\", attribute=\"foo\") with pytest.raises(InvalidConfigurationId): WrongSection(\"wrong_@id\", attribute=\"foo\") def test_templated_properties_are_replaced(): with mock.patch.dict(os.environ, {\"foo\": \"bar\", \"baz\": \"1\"}): u_sect = UniqueSectionForTest(attribute=\"attribute\", tpl_property=\"ENV[foo]\") assert u_sect.tpl_property == \"bar\" sect = SectionForTest(id=\"my_id\", attribute=\"attribute\", tpl_property=\"ENV[baz]:int\") assert sect.tpl_property == 1 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "from src.taipy.config.config import Config from src.taipy.config.global_app.global_app_config import GlobalAppConfig from src.taipy.config.section import Section from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest def _test_default_global_app_config(global_config: GlobalAppConfig): assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_default_configuration(): default_config = Config._default_config assert default_config._unique_sections is not None assert len(default_config._unique_sections) == 1 assert default_config._unique_sections[UniqueSectionForTest.name] is not None assert default_config._unique_sections[UniqueSectionForTest.name].attribute == \"default_attribute\" assert default_config._sections is not None assert len(default_config._sections) == 1 _test_default_global_app_config(default_config._global_config) _test_default_global_app_config(Config.global_config) _test_default_global_app_config(GlobalAppConfig().default_config()) def test_register_default_configuration(): Config._register_default(SectionForTest(Section._DEFAULT_KEY, \"default_attribute\", prop1=\"prop1\")) # Replace the first default section Config._register_default(SectionForTest(Section._DEFAULT_KEY, \"default_attribute\", prop2=\"prop2\")) default_section = Config.sections[SectionForTest.name][Section._DEFAULT_KEY] assert len(default_section.properties) == 1 assert default_section.prop2 == \"prop2\" assert default_section.prop1 is None "} {"text": "import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import LoadingError from tests.config.utils.named_temporary_file import NamedTemporaryFile def test_node_can_not_appear_twice(): config = NamedTemporaryFile( \"\"\" [unique_section_name] attribute = \"my_attribute\" [unique_section_name] attribute = \"other_attribute\" \"\"\" ) with pytest.raises(LoadingError, match=\"Can not load configuration\"): Config.load(config.filename) def test_skip_configuration_outside_nodes(): config = NamedTemporaryFile( \"\"\" foo = \"bar\" \"\"\" ) Config.load(config.filename) assert Config.global_config.foo is None "} {"text": "import os from unittest import mock import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import InconsistentEnvVariableError, MissingEnvVariableError from tests.config.utils.named_temporary_file import NamedTemporaryFile def test_override_default_configuration_with_code_configuration(): assert not Config.global_config.root_folder == \"foo\" assert len(Config.unique_sections) == 1 assert Config.unique_sections[\"unique_section_name\"] is not None assert Config.unique_sections[\"unique_section_name\"].attribute == \"default_attribute\" assert Config.unique_sections[\"unique_section_name\"].prop is None assert len(Config.sections) == 1 assert len(Config.sections[\"section_name\"]) == 1 assert Config.sections[\"section_name\"] is not None assert Config.sections[\"section_name\"][\"default\"].attribute == \"default_attribute\" Config.configure_global_app(root_folder=\"foo\") assert Config.global_config.root_folder == \"foo\" Config.configure_unique_section_for_tests(\"foo\", prop=\"bar\") assert len(Config.unique_sections) == 1 assert Config.unique_sections[\"unique_section_name\"] is not None assert Config.unique_sections[\"unique_section_name\"].attribute == \"foo\" assert 
Config.unique_sections[\"unique_section_name\"].prop == \"bar\" Config.configure_section_for_tests(\"my_id\", \"baz\", prop=\"qux\") assert len(Config.unique_sections) == 1 assert Config.sections[\"section_name\"] is not None assert Config.sections[\"section_name\"][\"my_id\"].attribute == \"baz\" assert Config.sections[\"section_name\"][\"my_id\"].prop == \"qux\" def test_override_default_config_with_code_config_including_env_variable_values(): Config.configure_global_app() assert Config.global_config.foo is None Config.configure_global_app(foo=\"bar\") assert Config.global_config.foo == \"bar\" with mock.patch.dict(os.environ, {\"FOO\": \"foo\"}): Config.configure_global_app(foo=\"ENV[FOO]\") assert Config.global_config.foo == \"foo\" def test_override_default_configuration_with_file_configuration(): tf = NamedTemporaryFile( \"\"\" [TAIPY] foo = \"bar\" \"\"\" ) assert Config.global_config.foo is None Config.load(tf.filename) assert Config.global_config.foo == \"bar\" def test_override_default_config_with_file_config_including_env_variable_values(): tf = NamedTemporaryFile( \"\"\" [TAIPY] foo_attribute = \"ENV[FOO]:int\" bar_attribute = \"ENV[BAR]:bool\" \"\"\" ) assert Config.global_config.foo_attribute is None assert Config.global_config.bar_attribute is None with mock.patch.dict(os.environ, {\"FOO\": \"foo\", \"BAR\": \"true\"}): with pytest.raises(InconsistentEnvVariableError): Config.load(tf.filename) Config.global_config.foo_attribute with mock.patch.dict(os.environ, {\"FOO\": \"5\"}): with pytest.raises(MissingEnvVariableError): Config.load(tf.filename) Config.global_config.bar_attribute with mock.patch.dict(os.environ, {\"FOO\": \"6\", \"BAR\": \"TRUe\"}): Config.load(tf.filename) assert Config.global_config.foo_attribute == 6 assert Config.global_config.bar_attribute def test_code_configuration_does_not_override_file_configuration(): config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] foo = 2 \"\"\" ) Config.override(config_from_filename.filename) Config.configure_global_app(foo=21) assert Config.global_config.foo == 2 # From file config def test_code_configuration_does_not_override_file_configuration_including_env_variable_values(): config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] foo = 2 \"\"\" ) Config.override(config_from_filename.filename) with mock.patch.dict(os.environ, {\"FOO\": \"21\"}): Config.configure_global_app(foo=\"ENV[FOO]\") assert Config.global_config.foo == 2 # From file config def test_file_configuration_overrides_code_configuration(): config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] foo = 2 \"\"\" ) Config.configure_global_app(foo=21) Config.load(config_from_filename.filename) assert Config.global_config.foo == 2 # From file config def test_file_configuration_overrides_code_configuration_including_env_variable_values(): config_from_filename = NamedTemporaryFile( \"\"\" [TAIPY] foo = \"ENV[FOO]:int\" \"\"\" ) Config.configure_global_app(foo=21) with mock.patch.dict(os.environ, {\"FOO\": \"2\"}): Config.load(config_from_filename.filename) assert Config.global_config.foo == 2 # From file config def test_override_default_configuration_with_multiple_configurations(): file_config = NamedTemporaryFile( \"\"\" [TAIPY] foo = 10 bar = \"baz\" \"\"\" ) # Default config is applied assert Config.global_config.foo is None assert Config.global_config.bar is None # Code config is applied Config.configure_global_app(foo=\"bar\") assert Config.global_config.foo == \"bar\" assert Config.global_config.bar is None # File config is applied 
Config.load(file_config.filename) assert Config.global_config.foo == 10 assert Config.global_config.bar == \"baz\" def test_override_default_configuration_with_multiple_configurations_including_environment_variable_values(): file_config = NamedTemporaryFile( \"\"\" [TAIPY] att = \"ENV[BAZ]\" \"\"\" ) with mock.patch.dict(os.environ, {\"FOO\": \"bar\", \"BAZ\": \"qux\"}): # Default config is applied assert Config.global_config.att is None # Code config is applied Config.configure_global_app(att=\"ENV[FOO]\") assert Config.global_config.att == \"bar\" # File config is applied Config.load(file_config.filename) assert Config.global_config.att == \"qux\" "} {"text": "import pytest from src.taipy.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest def test_unique_section_registration_and_usage(): assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == \"default_attribute\" assert Config.unique_sections[UniqueSectionForTest.name].prop is None mySection = Config.configure_unique_section_for_tests(attribute=\"my_attribute\", prop=\"my_prop\") assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert mySection is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == \"my_attribute\" assert mySection.attribute == \"my_attribute\" assert Config.unique_sections[UniqueSectionForTest.name].prop == \"my_prop\" assert mySection.prop == \"my_prop\" myNewSection = Config.configure_unique_section_for_tests(attribute=\"my_new_attribute\", prop=\"my_new_prop\") assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert myNewSection is not None assert mySection is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == \"my_new_attribute\" assert myNewSection.attribute == \"my_new_attribute\" assert mySection.attribute == \"my_new_attribute\" assert Config.unique_sections[UniqueSectionForTest.name].prop == \"my_new_prop\" assert myNewSection.prop == \"my_new_prop\" assert mySection.prop == \"my_new_prop\" def test_sections_exposed_as_attribute(): assert Config.unique_section_name.attribute == \"default_attribute\" Config.configure_unique_section_for_tests(\"my_attribute\") assert Config.unique_section_name.attribute == \"my_attribute\" assert Config.section_name[\"default\"].attribute == \"default_attribute\" Config.configure_section_for_tests(id=\"my_id\", attribute=\"my_attribute\") assert Config.section_name[\"my_id\"].attribute == \"my_attribute\" def test_section_registration_and_usage(): assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 1 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].foo is None myFirstSection = Config.configure_section_for_tests(id=\"first\", attribute=\"my_attribute\", prop=\"my_prop\", foo=\"bar\") assert Config.sections is not None assert 
len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 2 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].foo is None assert Config.sections[SectionForTest.name][\"first\"] is not None assert Config.sections[SectionForTest.name][\"first\"].attribute == \"my_attribute\" assert Config.sections[SectionForTest.name][\"first\"].prop == \"my_prop\" assert Config.sections[SectionForTest.name][\"first\"].foo == \"bar\" assert myFirstSection.attribute == \"my_attribute\" assert myFirstSection.prop == \"my_prop\" assert myFirstSection.foo == \"bar\" myNewSection = Config.configure_section_for_tests(id=\"second\", attribute=\"my_new_attribute\", prop=\"my_new_prop\") assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 3 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].foo is None assert Config.sections[SectionForTest.name][\"first\"] is not None assert Config.sections[SectionForTest.name][\"first\"].attribute == \"my_attribute\" assert Config.sections[SectionForTest.name][\"first\"].prop == \"my_prop\" assert Config.sections[SectionForTest.name][\"first\"].foo == \"bar\" assert Config.sections[SectionForTest.name][\"second\"] is not None assert Config.sections[SectionForTest.name][\"second\"].attribute == \"my_new_attribute\" assert Config.sections[SectionForTest.name][\"second\"].prop == \"my_new_prop\" assert Config.sections[SectionForTest.name][\"second\"].foo is None assert myFirstSection.attribute == \"my_attribute\" assert myFirstSection.prop == \"my_prop\" assert myFirstSection.foo == \"bar\" assert myNewSection.attribute == \"my_new_attribute\" assert myNewSection.prop == \"my_new_prop\" assert myNewSection.foo is None my2ndSection = Config.configure_section_for_tests(id=\"second\", attribute=\"my_2nd_attribute\", prop=\"my_2nd_prop\") assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 3 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].foo is None assert Config.sections[SectionForTest.name][\"first\"] is not None assert Config.sections[SectionForTest.name][\"first\"].attribute == \"my_attribute\" assert Config.sections[SectionForTest.name][\"first\"].prop == \"my_prop\" assert Config.sections[SectionForTest.name][\"first\"].foo == \"bar\" assert Config.sections[SectionForTest.name][\"second\"] is not None assert Config.sections[SectionForTest.name][\"second\"].attribute == \"my_2nd_attribute\" assert Config.sections[SectionForTest.name][\"second\"].prop == \"my_2nd_prop\" assert Config.sections[SectionForTest.name][\"second\"].foo 
is None assert myFirstSection.attribute == \"my_attribute\" assert myFirstSection.prop == \"my_prop\" assert myFirstSection.foo == \"bar\" assert myNewSection.attribute == \"my_2nd_attribute\" assert myNewSection.prop == \"my_2nd_prop\" assert myNewSection.foo is None assert my2ndSection.attribute == \"my_2nd_attribute\" assert my2ndSection.prop == \"my_2nd_prop\" assert my2ndSection.foo is None def test_block_registration(): myUniqueSection = Config.configure_unique_section_for_tests(attribute=\"my_unique_attribute\", prop=\"my_unique_prop\") mySection = Config.configure_section_for_tests(id=\"section_id\", attribute=\"my_attribute\", prop=\"my_prop\", foo=\"bar\") Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_unique_section_for_tests(attribute=\"my_new_unique_attribute\", prop=\"my_new_unique_prop\") with pytest.raises(ConfigurationUpdateBlocked): Config.configure_section_for_tests(id=\"new\", attribute=\"my_attribute\", prop=\"my_prop\", foo=\"bar\") with pytest.raises(ConfigurationUpdateBlocked): myUniqueSection.attribute = \"foo\" with pytest.raises(ConfigurationUpdateBlocked): myUniqueSection.properties = {\"foo\": \"bar\"} # myUniqueSection stay the same assert myUniqueSection.attribute == \"my_unique_attribute\" assert myUniqueSection.properties == {\"prop\": \"my_unique_prop\"} with pytest.raises(ConfigurationUpdateBlocked): mySection.attribute = \"foo\" with pytest.raises(ConfigurationUpdateBlocked): mySection.properties = {\"foo\": \"foo\"} # mySection stay the same assert mySection.attribute == \"my_attribute\" assert mySection.properties == {\"prop\": \"my_prop\", \"foo\": \"bar\", \"prop_int\": 0} "} {"text": "import pytest from src.taipy.config.config import Config from src.taipy.config.section import Section from tests.config.utils.named_temporary_file import NamedTemporaryFile from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.section_of_sections_list_for_tests import SectionOfSectionsListForTest @pytest.fixture def _init_list_section_for_test(): Config._register_default(SectionOfSectionsListForTest(Section._DEFAULT_KEY, [], prop=\"default_prop\", prop_int=0)) Config.configure_list_section_for_tests = SectionOfSectionsListForTest._configure Config.list_section_name = Config.sections[SectionOfSectionsListForTest.name] def test_applied_config_compilation_does_not_change_other_configs(): assert len(Config._default_config._unique_sections) == 1 assert Config._default_config._unique_sections[\"unique_section_name\"] is not None assert Config._default_config._unique_sections[\"unique_section_name\"].attribute == \"default_attribute\" assert Config._default_config._unique_sections[\"unique_section_name\"].prop is None assert len(Config._python_config._unique_sections) == 0 assert len(Config._file_config._unique_sections) == 0 assert len(Config._env_file_config._unique_sections) == 0 assert len(Config._applied_config._unique_sections) == 1 assert Config._applied_config._unique_sections[\"unique_section_name\"] is not None assert Config._applied_config._unique_sections[\"unique_section_name\"].attribute == \"default_attribute\" assert Config._applied_config._unique_sections[\"unique_section_name\"].prop is None assert len(Config.unique_sections) == 1 assert Config.unique_sections[\"unique_section_name\"] is not None assert Config.unique_sections[\"unique_section_name\"].attribute == \"default_attribute\" assert Config.unique_sections[\"unique_section_name\"].prop is None assert ( 
Config._applied_config._unique_sections[\"unique_section_name\"] is not Config._default_config._unique_sections[\"unique_section_name\"] ) Config.configure_unique_section_for_tests(\"qwe\", prop=\"rty\") assert len(Config._default_config._unique_sections) == 1 assert Config._default_config._unique_sections[\"unique_section_name\"] is not None assert Config._default_config._unique_sections[\"unique_section_name\"].attribute == \"default_attribute\" assert Config._default_config._unique_sections[\"unique_section_name\"].prop is None assert len(Config._python_config._unique_sections) == 1 assert Config._python_config._unique_sections[\"unique_section_name\"] is not None assert Config._python_config._unique_sections[\"unique_section_name\"].attribute == \"qwe\" assert Config._python_config._unique_sections[\"unique_section_name\"].prop == \"rty\" assert ( Config._python_config._unique_sections[\"unique_section_name\"] != Config._default_config._unique_sections[\"unique_section_name\"] ) assert len(Config._file_config._unique_sections) == 0 assert len(Config._env_file_config._unique_sections) == 0 assert len(Config._applied_config._unique_sections) == 1 assert Config._applied_config._unique_sections[\"unique_section_name\"] is not None assert Config._applied_config._unique_sections[\"unique_section_name\"].attribute == \"qwe\" assert Config._applied_config._unique_sections[\"unique_section_name\"].prop == \"rty\" assert ( Config._python_config._unique_sections[\"unique_section_name\"] != Config._applied_config._unique_sections[\"unique_section_name\"] ) assert ( Config._default_config._unique_sections[\"unique_section_name\"] != Config._applied_config._unique_sections[\"unique_section_name\"] ) assert len(Config.unique_sections) == 1 assert Config.unique_sections[\"unique_section_name\"] is not None assert Config.unique_sections[\"unique_section_name\"].attribute == \"qwe\" assert Config.unique_sections[\"unique_section_name\"].prop == \"rty\" def test_nested_section_instance_in_python(_init_list_section_for_test): s1_cfg = Config.configure_section_for_tests(\"s1\", attribute=\"foo\") s2_cfg = Config.configure_section_for_tests(\"s2\", attribute=\"bar\") ss_cfg = Config.configure_list_section_for_tests(\"ss\", attribute=\"foo\", sections_list=[s1_cfg, s2_cfg]) s1_config_applied_instance = Config.section_name[\"s1\"] s1_config_python_instance = Config._python_config._sections[SectionForTest.name][\"s1\"] s2_config_applied_instance = Config.section_name[\"s2\"] s2_config_python_instance = Config._python_config._sections[SectionForTest.name][\"s2\"] assert ss_cfg.sections_list[0] is s1_config_applied_instance assert ss_cfg.sections_list[0] is not s1_config_python_instance assert ss_cfg.sections_list[1] is s2_config_applied_instance assert ss_cfg.sections_list[1] is not s2_config_python_instance def _configure_in_toml(): return NamedTemporaryFile( content=\"\"\" [TAIPY] [section_name.s1] attribute = \"foo\" [section_name.s2] attribute = \"bar\" [list_section_name.ss] sections_list = [ \"foo\", \"s1:SECTION\", \"s2:SECTION\"] \"\"\" ) def test_nested_section_instance_load_toml(_init_list_section_for_test): toml_config = _configure_in_toml() Config.load(toml_config) s1_config_applied_instance = Config.section_name[\"s1\"] s1_config_python_instance = Config._python_config._sections[SectionForTest.name][\"s1\"] s2_config_applied_instance = Config.section_name[\"s2\"] s2_config_python_instance = Config._python_config._sections[SectionForTest.name][\"s2\"] ss_cfg = Config.list_section_name[\"ss\"] assert 
ss_cfg.sections_list[0] == \"foo\" assert ss_cfg.sections_list[1] is s1_config_applied_instance assert ss_cfg.sections_list[1] is not s1_config_python_instance assert ss_cfg.sections_list[2] is s2_config_applied_instance assert ss_cfg.sections_list[2] is not s2_config_python_instance
def test_nested_section_instance_override_toml(_init_list_section_for_test): toml_config = _configure_in_toml() Config.override(toml_config) s1_config_applied_instance = Config.section_name[\"s1\"] s1_config_python_instance = Config._file_config._sections[SectionForTest.name][\"s1\"] s2_config_applied_instance = Config.section_name[\"s2\"] s2_config_python_instance = Config._file_config._sections[SectionForTest.name][\"s2\"] ss_cfg = Config.list_section_name[\"ss\"] assert ss_cfg.sections_list[0] == \"foo\" assert ss_cfg.sections_list[1] is s1_config_applied_instance assert ss_cfg.sections_list[1] is not s1_config_python_instance assert ss_cfg.sections_list[2] is s2_config_applied_instance assert ss_cfg.sections_list[2] is not s2_config_python_instance
"} {"text": "import datetime import json import os from unittest import mock from src.taipy.config import Config from src.taipy.config._serializer._json_serializer import _JsonSerializer from src.taipy.config.common.frequency import Frequency from src.taipy.config.common.scope import Scope from tests.config.utils.named_temporary_file import NamedTemporaryFile from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest def add(a, b): return a + b class CustomClass: a = None b = None class CustomEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, datetime.datetime): result = {\"__type__\": \"Datetime\", \"__value__\": o.isoformat()} else: result = json.JSONEncoder.default(self, o) return result class CustomDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def object_hook(self, source): if source.get(\"__type__\") == \"Datetime\": return datetime.datetime.fromisoformat(source.get(\"__value__\")) else: return source
def test_write_toml_configuration_file(): expected_toml_config = \"\"\" [TAIPY] [unique_section_name] attribute = \"my_attribute\" prop = \"my_prop\" prop_int = \"1:int\" prop_bool = \"False:bool\" prop_list = [ \"p1\", \"1991-01-01T00:00:00:datetime\", \"1d0h0m0s:timedelta\",] prop_scope = \"SCENARIO:SCOPE\" prop_freq = \"QUARTERLY:FREQUENCY\" baz = \"ENV[QUX]\" quux = \"ENV[QUUZ]:bool\" corge = [ \"grault\", \"ENV[GARPLY]\", \"ENV[WALDO]:int\", \"3.0:float\",] [section_name.default] attribute = \"default_attribute\" prop = \"default_prop\" prop_int = \"0:int\" [section_name.my_id] attribute = \"my_attribute\" prop = \"default_prop\" prop_int = \"1:int\" prop_bool = \"False:bool\" prop_list = [ \"unique_section_name:SECTION\",] prop_scope = \"SCENARIO\" baz = \"ENV[QUX]\" \"\"\".strip() tf = NamedTemporaryFile() with mock.patch.dict( os.environ, {\"FOO\": \"in_memory\", \"QUX\": \"qux\", \"QUUZ\": \"true\", \"GARPLY\": \"garply\", \"WALDO\": \"17\"} ): unique_section = Config.configure_unique_section_for_tests( attribute=\"my_attribute\", prop=\"my_prop\", prop_int=1, prop_bool=False, prop_list=[\"p1\", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1)], prop_scope=Scope.SCENARIO, prop_freq=Frequency.QUARTERLY, baz=\"ENV[QUX]\", quux=\"ENV[QUUZ]:bool\", corge=(\"grault\", \"ENV[GARPLY]\", \"ENV[WALDO]:int\", 3.0), ) Config.configure_section_for_tests( \"my_id\",
\"my_attribute\", prop_int=1, prop_bool=False, prop_list=[unique_section], prop_scope=\"SCENARIO\", baz=\"ENV[QUX]\", ) Config.backup(tf.filename) actual_config = tf.read().strip() assert actual_config == expected_toml_config def test_read_toml_configuration_file(): toml_config = \"\"\" [TAIPY] foo = \"bar\" [unique_section_name] attribute = \"my_attribute\" prop = \"my_prop\" prop_int = \"1:int\" prop_bool = \"False:bool\" prop_list = [ \"p1\", \"1991-01-01T00:00:00:datetime\", \"1d0h0m0s:timedelta\",] prop_scope = \"SCENARIO:SCOPE\" prop_freq = \"QUARTERLY:FREQUENCY\" baz = \"ENV[QUX]\" quux = \"ENV[QUUZ]:bool\" corge = [ \"grault\", \"ENV[GARPLY]\", \"ENV[WALDO]:int\", \"3.0:float\",] [TAIPY.custom_properties] bar = \"baz\" [section_name.default] attribute = \"default_attribute\" prop = \"default_prop\" prop_int = \"0:int\" [section_name.my_id] attribute = \"my_attribute\" prop = \"default_prop\" prop_int = \"1:int\" prop_bool = \"False:bool\" prop_list = [ \"unique_section_name\", \"section_name.my_id\",] prop_scope = \"SCENARIO:SCOPE\" baz = \"ENV[QUX]\" \"\"\".strip() tf = NamedTemporaryFile(toml_config) with mock.patch.dict( os.environ, {\"FOO\": \"in_memory\", \"QUX\": \"qux\", \"QUUZ\": \"true\", \"GARPLY\": \"garply\", \"WALDO\": \"17\"} ): Config.override(tf.filename) assert Config.global_config.foo == \"bar\" assert Config.global_config.custom_properties.get(\"bar\") == \"baz\" assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == \"my_attribute\" assert Config.unique_sections[UniqueSectionForTest.name].prop == \"my_prop\" assert Config.unique_sections[UniqueSectionForTest.name].prop_int == 1 assert Config.unique_sections[UniqueSectionForTest.name].prop_bool is False assert Config.unique_sections[UniqueSectionForTest.name].prop_list == [ \"p1\", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1), ] assert Config.unique_sections[UniqueSectionForTest.name].prop_scope == Scope.SCENARIO assert Config.unique_sections[UniqueSectionForTest.name].prop_freq == Frequency.QUARTERLY assert Config.unique_sections[UniqueSectionForTest.name].baz == \"qux\" assert Config.unique_sections[UniqueSectionForTest.name].quux is True assert Config.unique_sections[UniqueSectionForTest.name].corge == [ \"grault\", \"garply\", 17, 3.0, ] assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 2 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].prop_int == 0 assert Config.sections[SectionForTest.name][\"my_id\"] is not None assert Config.sections[SectionForTest.name][\"my_id\"].attribute == \"my_attribute\" assert Config.sections[SectionForTest.name][\"my_id\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"my_id\"].prop_int == 1 assert Config.sections[SectionForTest.name][\"my_id\"].prop_bool is False assert Config.sections[SectionForTest.name][\"my_id\"].prop_list == [\"unique_section_name\", \"section_name.my_id\"] assert Config.sections[SectionForTest.name][\"my_id\"].prop_scope == Scope.SCENARIO assert Config.sections[SectionForTest.name][\"my_id\"].baz == \"qux\" tf2 = 
NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == toml_config def test_read_write_toml_configuration_file_with_function_and_class(): expected_toml_config = \"\"\" [TAIPY] [unique_section_name] attribute = \"my_attribute\" prop = \"my_prop\" prop_list = [ \"tests.config.test_section_serialization.CustomEncoder:class\", \"tests.config.test_section_serialization.CustomDecoder:class\",] [section_name.default] attribute = \"default_attribute\" prop = \"default_prop\" prop_int = \"0:int\" [section_name.my_id] attribute = \"my_attribute\" prop = \"default_prop\" prop_int = \"0:int\" prop_fct_list = [ \"tests.config.test_section_serialization.add:function\",] prop_class_list = [ \"tests.config.test_section_serialization.CustomClass:class\",] [section_name.my_id_2] attribute = \"my_attribute_2\" prop = \"default_prop\" prop_int = \"0:int\" prop_fct_list = [ \"builtins.print:function\", \"builtins.pow:function\",] \"\"\".strip() tf = NamedTemporaryFile() Config.configure_unique_section_for_tests( attribute=\"my_attribute\", prop=\"my_prop\", prop_list=[CustomEncoder, CustomDecoder], ) Config.configure_section_for_tests( \"my_id\", \"my_attribute\", prop_fct_list=[add], prop_class_list=[CustomClass], ) Config.configure_section_for_tests( \"my_id_2\", \"my_attribute_2\", prop_fct_list=[print, pow], ) Config.backup(tf.filename) actual_exported_toml = tf.read().strip() assert actual_exported_toml == expected_toml_config Config.override(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_exported_toml_2 = tf2.read().strip() assert actual_exported_toml_2 == expected_toml_config def test_write_json_configuration_file(): expected_json_config = \"\"\" { \"TAIPY\": {}, \"unique_section_name\": { \"attribute\": \"my_attribute\", \"prop\": \"my_prop\", \"prop_int\": \"1:int\", \"prop_bool\": \"False:bool\", \"prop_list\": [ \"p1\", \"1991-01-01T00:00:00:datetime\", \"1d0h0m0s:timedelta\" ], \"prop_scope\": \"SCENARIO:SCOPE\", \"prop_freq\": \"QUARTERLY:FREQUENCY\" }, \"section_name\": { \"default\": { \"attribute\": \"default_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"0:int\" }, \"my_id\": { \"attribute\": \"my_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"1:int\", \"prop_bool\": \"False:bool\", \"prop_list\": [ \"unique_section_name:SECTION\" ], \"prop_scope\": \"SCENARIO\", \"baz\": \"ENV[QUX]\" } } } \"\"\".strip() tf = NamedTemporaryFile() Config._serializer = _JsonSerializer() unique_section = Config.configure_unique_section_for_tests( attribute=\"my_attribute\", prop=\"my_prop\", prop_int=1, prop_bool=False, prop_list=[\"p1\", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1)], prop_scope=Scope.SCENARIO, prop_freq=Frequency.QUARTERLY, ) Config.configure_section_for_tests( \"my_id\", \"my_attribute\", prop_int=1, prop_bool=False, prop_list=[unique_section], prop_scope=\"SCENARIO\", baz=\"ENV[QUX]\", ) Config.backup(tf.filename) actual_config = tf.read() assert actual_config == expected_json_config def test_read_json_configuration_file(): json_config = \"\"\" { \"TAIPY\": { \"root_folder\": \"./taipy/\", \"storage_folder\": \".data/\", \"repository_type\": \"filesystem\" }, \"unique_section_name\": { \"attribute\": \"my_attribute\", \"prop\": \"my_prop\", \"prop_int\": \"1:int\", \"prop_bool\": \"False:bool\", \"prop_list\": [ \"p1\", \"1991-01-01T00:00:00:datetime\", \"1d0h0m0s:timedelta\" ], \"prop_scope\": \"SCENARIO:SCOPE\", \"prop_freq\": \"QUARTERLY:FREQUENCY\" }, \"section_name\": { 
\"default\": { \"attribute\": \"default_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"0:int\" }, \"my_id\": { \"attribute\": \"my_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"1:int\", \"prop_bool\": \"False:bool\", \"prop_list\": [ \"unique_section_name\" ], \"prop_scope\": \"SCENARIO\" } } } \"\"\".strip() Config._serializer = _JsonSerializer() tf = NamedTemporaryFile(json_config) Config.override(tf.filename) assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == \"my_attribute\" assert Config.unique_sections[UniqueSectionForTest.name].prop == \"my_prop\" assert Config.unique_sections[UniqueSectionForTest.name].prop_int == 1 assert Config.unique_sections[UniqueSectionForTest.name].prop_bool is False assert Config.unique_sections[UniqueSectionForTest.name].prop_list == [ \"p1\", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1), ] assert Config.unique_sections[UniqueSectionForTest.name].prop_scope == Scope.SCENARIO assert Config.unique_sections[UniqueSectionForTest.name].prop_freq == Frequency.QUARTERLY assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 2 assert Config.sections[SectionForTest.name][\"default\"] is not None assert Config.sections[SectionForTest.name][\"default\"].attribute == \"default_attribute\" assert Config.sections[SectionForTest.name][\"default\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"default\"].prop_int == 0 assert Config.sections[SectionForTest.name][\"my_id\"] is not None assert Config.sections[SectionForTest.name][\"my_id\"].attribute == \"my_attribute\" assert Config.sections[SectionForTest.name][\"my_id\"].prop == \"default_prop\" assert Config.sections[SectionForTest.name][\"my_id\"].prop_int == 1 assert Config.sections[SectionForTest.name][\"my_id\"].prop_bool is False assert Config.sections[SectionForTest.name][\"my_id\"].prop_list == [\"unique_section_name\"] tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == json_config def test_read_write_json_configuration_file_with_function_and_class(): expected_json_config = \"\"\" { \"TAIPY\": {}, \"unique_section_name\": { \"attribute\": \"my_attribute\", \"prop\": \"my_prop\", \"prop_list\": [ \"tests.config.test_section_serialization.CustomEncoder:class\", \"tests.config.test_section_serialization.CustomDecoder:class\" ] }, \"section_name\": { \"default\": { \"attribute\": \"default_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"0:int\" }, \"my_id\": { \"attribute\": \"my_attribute\", \"prop\": \"default_prop\", \"prop_int\": \"0:int\", \"prop_fct_list\": [ \"tests.config.test_section_serialization.add:function\" ], \"prop_class_list\": [ \"tests.config.test_section_serialization.CustomClass:class\" ] }, \"my_id_2\": { \"attribute\": \"my_attribute_2\", \"prop\": \"default_prop\", \"prop_int\": \"0:int\", \"prop_fct_list\": [ \"builtins.print:function\", \"builtins.pow:function\" ] } } } \"\"\".strip() Config._serializer = _JsonSerializer() tf = NamedTemporaryFile() Config.configure_unique_section_for_tests( attribute=\"my_attribute\", prop=\"my_prop\", prop_list=[CustomEncoder, CustomDecoder], ) Config.configure_section_for_tests( \"my_id\", \"my_attribute\", prop_fct_list=[add], prop_class_list=[CustomClass], ) 
Config.configure_section_for_tests( \"my_id_2\", \"my_attribute_2\", prop_fct_list=[print, pow], ) Config.backup(tf.filename) actual_exported_json = tf.read().strip() assert actual_exported_json == expected_json_config Config.override(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_exported_json_2 = tf2.read().strip() assert actual_exported_json_2 == expected_json_config "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from src.taipy.config._config import _Config from src.taipy.config.checker._checker import _Checker class TestDefaultConfigChecker: def test_check_default_config(self): config = _Config._default_config() collector = _Checker._check(config) assert len(collector._errors) == 0 assert len(collector._infos) == 0 assert len(collector._warnings) == 0 "} {"text": "from src.taipy.config.checker.issue import Issue from src.taipy.config.checker.issue_collector import IssueCollector class TestIssueCollector: def test_add_error(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_error(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 1 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") collector._add_error(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 2 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") def test_add_warning(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_warning(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 1 assert len(collector.infos) == 0 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", \"value\", \"message\", \"checker\") collector._add_warning(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 2 assert len(collector.infos) == 0 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, \"field\", \"value\", \"message\", \"checker\") def test_add_info(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_info(\"field\", 
\"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 1 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") collector._add_info(\"field\", \"value\", \"message\", \"checker\") assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 2 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") def test_all(self): collector = IssueCollector() collector._add_info(\"foo\", \"bar\", \"baz\", \"qux\") assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") collector._add_warning(\"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[1] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") collector._add_warning(\"foo3\", \"bar3\", \"baz3\", \"qux3\") assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, \"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, \"foo3\", \"bar3\", \"baz3\", \"qux3\") assert collector.all[2] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") collector._add_info(\"field\", \"value\", \"message\", \"checker\") collector._add_error(\"field\", \"value\", \"message\", \"checker\") assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", \"value\", \"message\", \"checker\") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, \"foo2\", \"bar2\", \"baz2\", \"qux2\") assert collector.all[2] == Issue(IssueCollector._WARNING_LEVEL, \"foo3\", \"bar3\", \"baz3\", \"qux3\") assert collector.all[3] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"qux\") assert collector.all[4] == Issue(IssueCollector._INFO_LEVEL, \"field\", \"value\", \"message\", \"checker\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import os from unittest import mock from unittest.mock import MagicMock from src.taipy.config import Config from src.taipy.config.checker._checker import _Checker from src.taipy.config.checker.issue_collector import IssueCollector from tests.config.utils.checker_for_tests import CheckerForTest def test_register_checker(): checker = CheckerForTest checker._check = MagicMock() _Checker.add_checker(checker) Config.check() checker._check.assert_called_once() "} {"text": "import logging from unittest import mock from src.taipy.config._config import _Config from src.taipy.config.checker._checkers._config_checker import _ConfigChecker from src.taipy.config.checker.issue import Issue from src.taipy.config.checker.issue_collector import IssueCollector class MyCustomChecker(_ConfigChecker): def _check(self) -> IssueCollector: pass def test__error(): with mock.patch.object(logging.Logger, \"error\"): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._error(\"field\", 17, \"my message\") assert len(collector.all) == 1 assert len(collector.errors) == 1 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") MyCustomChecker(_Config(), collector)._error(\"foo\", \"bar\", \"baz\") assert len(collector.all) == 2 assert len(collector.errors) == 2 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") assert collector.errors[1] == Issue(IssueCollector._ERROR_LEVEL, \"foo\", \"bar\", \"baz\", \"MyCustomChecker\") def test__warning(): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._warning(\"field\", 17, \"my message\") assert len(collector.all) == 1 assert len(collector.warnings) == 1 assert len(collector.errors) == 0 assert len(collector.infos) == 0 assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") MyCustomChecker(_Config(), collector)._warning(\"foo\", \"bar\", \"baz\") assert len(collector.all) == 2 assert len(collector.warnings) == 2 assert len(collector.errors) == 0 assert len(collector.infos) == 0 assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") assert collector.warnings[1] == Issue(IssueCollector._WARNING_LEVEL, \"foo\", \"bar\", \"baz\", \"MyCustomChecker\") def test__info(): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._info(\"field\", 17, \"my message\") assert len(collector.all) == 1 assert len(collector.infos) == 1 assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") MyCustomChecker(_Config(), collector)._info(\"foo\", \"bar\", \"baz\") assert len(collector.all) == 2 assert len(collector.infos) == 2 assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, \"field\", 17, \"my message\", \"_ConfigChecker\") assert collector.infos[1] == Issue(IssueCollector._INFO_LEVEL, \"foo\", \"bar\", \"baz\", \"MyCustomChecker\") "} {"text": "from src.taipy.config import IssueCollector from src.taipy.config.checker._checkers._config_checker import 
_ConfigChecker class CheckerForTest(_ConfigChecker): def _check(self) -> IssueCollector: return self._collector "} {"text": "from copy import copy from typing import Any, Dict, List, Optional from src.taipy.config import Config, Section from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker from .section_for_tests import SectionForTest class SectionOfSectionsListForTest(Section): name = \"list_section_name\" _MY_ATTRIBUTE_KEY = \"attribute\" _SECTIONS_LIST_KEY = \"sections_list\" def __init__(self, id: str, attribute: Any = None, sections_list: List = None, **properties): self._attribute = attribute self._sections_list = sections_list if sections_list else [] super().__init__(id, **properties) def __copy__(self): return SectionOfSectionsListForTest( self.id, self._attribute, copy(self._sections_list), **copy(self._properties) ) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val @property def sections_list(self): return list(self._sections_list) @sections_list.setter # type: ignore @_ConfigBlocker._check() def sections_list(self, val): self._sections_list = val def _clean(self): self._attribute = None self._sections_list = [] self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute if self._sections_list: as_dict[self._SECTIONS_LIST_KEY] = self._sections_list as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, id) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) section_configs = config._sections.get(SectionForTest.name, None) or [] # type: ignore sections_list = [] if inputs_as_str := as_dict.pop(cls._SECTIONS_LIST_KEY, None): for section_id in inputs_as_str: if section_id in section_configs: sections_list.append(section_configs[section_id]) else: sections_list.append(section_id) return SectionOfSectionsListForTest(id=id, attribute=attribute, sections_list=sections_list, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._sections_list = as_dict.pop(self._SECTIONS_LIST_KEY, self._sections_list) if self._sections_list is None and default_section: self._sections_list = default_section._sections_list self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(id: str, attribute: str, sections_list: List = None, **properties): section = SectionOfSectionsListForTest(id, attribute, sections_list, **properties) Config._register(section) return Config.sections[SectionOfSectionsListForTest.name][id] "} {"text": ""} {"text": "import os import tempfile class NamedTemporaryFile: def __init__(self, content=None): with tempfile.NamedTemporaryFile(\"w\", delete=False) as fd: if content: fd.write(content) self.filename = fd.name def read(self): with open(self.filename, \"r\") as fp: return fp.read() def __del__(self): os.unlink(self.filename) "} {"text": "from copy import copy from typing import Any, Dict, Optional from src.taipy.config import Config, Section from src.taipy.config._config import _Config from 
src.taipy.config.common._config_blocker import _ConfigBlocker class SectionForTest(Section): name = \"section_name\" _MY_ATTRIBUTE_KEY = \"attribute\" def __init__(self, id: str, attribute: Any = None, **properties): self._attribute = attribute super().__init__(id, **properties) def __copy__(self): return SectionForTest(self.id, self._attribute, **copy(self._properties)) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val def _clean(self): self._attribute = None self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, id) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) return SectionForTest(id=id, attribute=attribute, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(id: str, attribute: str, **properties): section = SectionForTest(id, attribute, **properties) Config._register(section) return Config.sections[SectionForTest.name][id] "} {"text": "from copy import copy from typing import Any, Dict, Optional from src.taipy.config import Config from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker from src.taipy.config.unique_section import UniqueSection class UniqueSectionForTest(UniqueSection): name = \"unique_section_name\" _MY_ATTRIBUTE_KEY = \"attribute\" def __init__(self, attribute: str = None, **properties): self._attribute = attribute super().__init__(**properties) def __copy__(self): return UniqueSectionForTest(self._attribute, **copy(self._properties)) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val def _clean(self): self._attribute = None self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id=None, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, None) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) return UniqueSectionForTest(attribute=attribute, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(attribute: str, **properties): section = UniqueSectionForTest(attribute, **properties) Config._register(section) return Config.unique_sections[UniqueSectionForTest.name] "} {"text": "import pytest from src.taipy.config.common._validate_id import _validate_id from src.taipy.config.exceptions.exceptions import InvalidConfigurationId 
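# _validate_id is expected to accept plain identifiers and to reject ids that are empty, start with a digit, contain spaces or special characters, or collide with Python keywords or Taipy entity names.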
class TestId: def test_validate_id(self): s = _validate_id(\"foo\") assert s == \"foo\" with pytest.raises(InvalidConfigurationId): _validate_id(\"1foo\") with pytest.raises(InvalidConfigurationId): _validate_id(\"foo bar\") with pytest.raises(InvalidConfigurationId): _validate_id(\"foo/foo$\") with pytest.raises(InvalidConfigurationId): _validate_id(\"\") with pytest.raises(InvalidConfigurationId): _validate_id(\" \") with pytest.raises(InvalidConfigurationId): _validate_id(\"class\") with pytest.raises(InvalidConfigurationId): _validate_id(\"def\") with pytest.raises(InvalidConfigurationId): _validate_id(\"with\") with pytest.raises(InvalidConfigurationId): _validate_id(\"CYCLE\") with pytest.raises(InvalidConfigurationId): _validate_id(\"SCENARIO\") with pytest.raises(InvalidConfigurationId): _validate_id(\"SEQUENCE\") with pytest.raises(InvalidConfigurationId): _validate_id(\"TASK\") with pytest.raises(InvalidConfigurationId): _validate_id(\"DATANODE\") "} {"text": "import pytest from src.taipy.config.common.scope import Scope def test_scope(): # Test __ge__ method assert Scope.GLOBAL >= Scope.GLOBAL assert Scope.GLOBAL >= Scope.CYCLE assert Scope.CYCLE >= Scope.CYCLE assert Scope.GLOBAL >= Scope.SCENARIO assert Scope.CYCLE >= Scope.SCENARIO assert Scope.SCENARIO >= Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO >= \"testing string\" # Test __gt__ method assert Scope.GLOBAL > Scope.CYCLE assert Scope.GLOBAL > Scope.SCENARIO assert Scope.CYCLE > Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO > \"testing string\" # Test __le__ method assert Scope.GLOBAL <= Scope.GLOBAL assert Scope.CYCLE <= Scope.GLOBAL assert Scope.CYCLE <= Scope.CYCLE assert Scope.SCENARIO <= Scope.GLOBAL assert Scope.SCENARIO <= Scope.CYCLE assert Scope.SCENARIO <= Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO <= \"testing string\" # Test __lt__ method assert Scope.SCENARIO < Scope.GLOBAL assert Scope.SCENARIO < Scope.GLOBAL assert Scope.SCENARIO < Scope.CYCLE with pytest.raises(TypeError): assert Scope.SCENARIO < \"testing string\" "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import argparse import re import sys import pytest from src.taipy._cli._base_cli import _CLI if sys.version_info >= (3, 10): argparse_options_str = \"options:\" else: argparse_options_str = \"optional arguments:\" def preprocess_stdout(stdout): stdout = stdout.replace(\"\\n\", \" \").replace(\"\\t\", \" \") return re.sub(\" +\", \" \", stdout) def remove_subparser(name: str): \"\"\"Remove a subparser from argparse.\"\"\" _CLI._sub_taipyparsers.pop(name, None) if _CLI._subparser_action: _CLI._subparser_action._name_parser_map.pop(name, None) for action in _CLI._subparser_action._choices_actions: if action.dest == name: _CLI._subparser_action._choices_actions.remove(action) @pytest.fixture(autouse=True, scope=\"function\") def clean_argparser(): _CLI._parser = argparse.ArgumentParser(conflict_handler=\"resolve\") _CLI._arg_groups = {} subcommands = list(_CLI._sub_taipyparsers.keys()) for subcommand in subcommands: remove_subparser(subcommand) yield def test_subparser(capfd): subcommand_1 = _CLI._add_subparser(\"subcommand_1\", help=\"subcommand_1 help\") subcommand_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") subcommand_1.add_argument(\"--bar\", \"-b\", help=\"bar help\") subcommand_2 = _CLI._add_subparser(\"subcommand_2\", help=\"subcommand_2 help\") subcommand_2.add_argument(\"--doo\", \"-d\", help=\"doo help\") subcommand_2.add_argument(\"--baz\", \"-z\", help=\"baz help\") expected_subcommand_1_help_message = f\"\"\"subcommand_1 [-h] [--foo FOO] [--bar BAR] {argparse_options_str} -h, --help show this help message and exit --foo FOO, -f FOO foo help --bar BAR, -b BAR bar help \"\"\" subcommand_1.print_help() stdout, _ = capfd.readouterr() assert preprocess_stdout(expected_subcommand_1_help_message) in preprocess_stdout(stdout) expected_subcommand_2_help_message = f\"\"\"subcommand_2 [-h] [--doo DOO] [--baz BAZ] {argparse_options_str} -h, --help show this help message and exit --doo DOO, -d DOO doo help --baz BAZ, -z BAZ baz help \"\"\" subcommand_2.print_help() stdout, _ = capfd.readouterr() assert preprocess_stdout(expected_subcommand_2_help_message) in preprocess_stdout(stdout) def test_duplicate_subcommand(): subcommand_1 = _CLI._add_subparser(\"subcommand_1\", help=\"subcommand_1 help\") subcommand_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") subcommand_2 = _CLI._add_subparser(\"subcommand_1\", help=\"subcommand_2 help\") subcommand_2.add_argument(\"--bar\", \"-b\", help=\"bar help\") # The title of subcommand_2 is duplicated with subcommand_1, and therefore # there will be no new subcommand created assert len(_CLI._sub_taipyparsers) == 1 def test_groupparser(capfd): group_1 = _CLI._add_groupparser(\"group_1\", \"group_1 desc\") group_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") group_1.add_argument(\"--bar\", \"-b\", help=\"bar help\") group_2 = _CLI._add_groupparser(\"group_2\", \"group_2 desc\") group_2.add_argument(\"--doo\", \"-d\", help=\"doo help\") group_2.add_argument(\"--baz\", \"-z\", help=\"baz help\") expected_help_message = \"\"\" group_1: group_1 desc --foo FOO, -f FOO foo help --bar BAR, -b BAR bar help group_2: group_2 desc --doo DOO, -d DOO doo help --baz BAZ, -z BAZ baz help \"\"\".strip() _CLI._parser.print_help() stdout, _ = capfd.readouterr() assert expected_help_message in stdout def test_duplicate_group(): group_1 = _CLI._add_groupparser(\"group_1\", \"group_1 desc\") group_1.add_argument(\"--foo\", \"-f\", help=\"foo help\") group_2 = _CLI._add_groupparser(\"group_1\", \"group_2 desc\") group_2.add_argument(\"--bar\", 
\"-b\", help=\"bar help\") # The title of group_2 is duplicated with group_1, and therefore # there will be no new group created assert len(_CLI._arg_groups) == 1 "} {"text": "import datetime import os from unittest import mock import pytest from src.taipy.config.common.frequency import Frequency from src.taipy.config.common.scope import Scope from src.taipy.config.common._template_handler import _TemplateHandler from src.taipy.config.exceptions.exceptions import InconsistentEnvVariableError def test_replace_if_template(): assert_does_not_change(\"123\") assert_does_not_change(\"foo\") assert_does_not_change(\"_foo\") assert_does_not_change(\"_foo_\") assert_does_not_change(\"foo_\") assert_does_not_change(\"foo\") assert_does_not_change(\"foo_1\") assert_does_not_change(\"1foo_1\") assert_does_not_change(\"env(foo)\") assert_does_not_change(\"env\") assert_does_not_change(\"env[foo]\") assert_does_not_change(\"Env[foo]\") assert_does_not_change(\"ENV[1foo]\") assert_does_not_change(\"123:bool\") assert_does_not_change(\"foo:bool\") assert_does_not_change(\"_foo:bool\") assert_does_not_change(\"_foo_:bool\") assert_does_not_change(\"foo_:bool\") assert_does_not_change(\"foo:bool\") assert_does_not_change(\"foo_1:bool\") assert_does_not_change(\"1foo_1:bool\") assert_does_not_change(\"env(foo):bool\") assert_does_not_change(\"env:bool\") assert_does_not_change(\"env[foo]:bool\") assert_does_not_change(\"Env[foo]:bool\") assert_does_not_change(\"ENV[1foo]:bool\") assert_does_not_change(\"ENV[foo]:\") assert_does_not_change(\"ENV[_foo]:\") assert_does_not_change(\"ENV[foo_]:\") assert_does_not_change(\"ENV[foo0]:\") assert_does_not_change(\"ENV[foo_0]:\") assert_does_not_change(\"ENV[_foo_0]:\") assert_does_not_change(\"ENV[foo]:foo\") assert_does_not_change(\"ENV[_foo]:foo\") assert_does_not_change(\"ENV[foo_]:foo\") assert_does_not_change(\"ENV[foo0]:foo\") assert_does_not_change(\"ENV[foo_0]:foo\") assert_does_not_change(\"ENV[_foo_0]:foo\") assert_does_replace(\"ENV[foo]\", \"foo\", \"VALUE\", str) assert_does_replace(\"ENV[_foo]\", \"_foo\", \"VALUE\", str) assert_does_replace(\"ENV[foo_]\", \"foo_\", \"VALUE\", str) assert_does_replace(\"ENV[foo0]\", \"foo0\", \"VALUE\", str) assert_does_replace(\"ENV[foo_0]\", \"foo_0\", \"VALUE\", str) assert_does_replace(\"ENV[_foo_0]\", \"_foo_0\", \"VALUE\", str) assert_does_replace(\"ENV[foo]:str\", \"foo\", \"VALUE\", str) assert_does_replace(\"ENV[_foo]:str\", \"_foo\", \"VALUE\", str) assert_does_replace(\"ENV[foo_]:str\", \"foo_\", \"VALUE\", str) assert_does_replace(\"ENV[foo0]:str\", \"foo0\", \"VALUE\", str) assert_does_replace(\"ENV[foo_0]:str\", \"foo_0\", \"VALUE\", str) assert_does_replace(\"ENV[_foo_0]:str\", \"_foo_0\", \"VALUE\", str) assert_does_replace(\"ENV[foo]:int\", \"foo\", \"1\", int) assert_does_replace(\"ENV[_foo]:int\", \"_foo\", \"1\", int) assert_does_replace(\"ENV[foo_]:int\", \"foo_\", \"1\", int) assert_does_replace(\"ENV[foo0]:int\", \"foo0\", \"1\", int) assert_does_replace(\"ENV[foo_0]:int\", \"foo_0\", \"1\", int) assert_does_replace(\"ENV[_foo_0]:int\", \"_foo_0\", \"1\", int) assert_does_replace(\"ENV[foo]:float\", \"foo\", \"1.\", float) assert_does_replace(\"ENV[_foo]:float\", \"_foo\", \"1.\", float) assert_does_replace(\"ENV[foo_]:float\", \"foo_\", \"1.\", float) assert_does_replace(\"ENV[foo0]:float\", \"foo0\", \"1.\", float) assert_does_replace(\"ENV[foo_0]:float\", \"foo_0\", \"1.\", float) assert_does_replace(\"ENV[_foo_0]:float\", \"_foo_0\", \"1.\", float) assert_does_replace(\"ENV[foo]:bool\", 
\"foo\", \"True\", bool) assert_does_replace(\"ENV[_foo]:bool\", \"_foo\", \"True\", bool) assert_does_replace(\"ENV[foo_]:bool\", \"foo_\", \"True\", bool) assert_does_replace(\"ENV[foo0]:bool\", \"foo0\", \"True\", bool) assert_does_replace(\"ENV[foo_0]:bool\", \"foo_0\", \"True\", bool) assert_does_replace(\"ENV[_foo_0]:bool\", \"_foo_0\", \"True\", bool) def assert_does_replace(template, env_variable_name, replaced_by, as_type): with mock.patch.dict(os.environ, {env_variable_name: replaced_by}): tpl = _TemplateHandler() assert tpl._replace_templates(template) == as_type(replaced_by) def assert_does_not_change(template): tpl = _TemplateHandler() assert tpl._replace_templates(template) == template def test_replace_tuple_list_dict(): with mock.patch.dict(os.environ, {\"FOO\": \"true\", \"BAR\": \"3\", \"BAZ\": \"qux\"}): tpl = _TemplateHandler() now = datetime.datetime.now() actual = tpl._replace_templates((\"ENV[FOO]:bool\", now, \"ENV[BAR]:int\", \"ENV[BAZ]\", \"quz\")) assert actual == (True, now, 3, \"qux\", \"quz\") actual = tpl._replace_templates((\"ENV[FOO]:bool\", now, \"ENV[BAR]:int\", \"ENV[BAZ]\", \"quz\")) assert actual == (True, now, 3, \"qux\", \"quz\") def test_to_bool(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool(\"okhds\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool(\"no\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool(\"tru\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool(\"tru_e\") assert _TemplateHandler._to_bool(\"true\") assert _TemplateHandler._to_bool(\"True\") assert _TemplateHandler._to_bool(\"TRUE\") assert _TemplateHandler._to_bool(\"TruE\") assert _TemplateHandler._to_bool(\"TrUE\") assert not _TemplateHandler._to_bool(\"false\") assert not _TemplateHandler._to_bool(\"False\") assert not _TemplateHandler._to_bool(\"FALSE\") assert not _TemplateHandler._to_bool(\"FalSE\") assert not _TemplateHandler._to_bool(\"FalSe\") def test_to_int(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_int(\"okhds\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_int(\"_45\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_int(\"12.5\") assert 12 == _TemplateHandler._to_int(\"12\") assert 0 == _TemplateHandler._to_int(\"0\") assert -2 == _TemplateHandler._to_int(\"-2\") assert 156165 == _TemplateHandler._to_int(\"156165\") def test_to_float(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_float(\"okhds\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_float(\"_45\") assert 12.5 == _TemplateHandler._to_float(\"12.5\") assert 2.0 == _TemplateHandler._to_float(\"2\") assert 0.0 == _TemplateHandler._to_float(\"0\") assert -2.1 == _TemplateHandler._to_float(\"-2.1\") assert 156165.3 == _TemplateHandler._to_float(\"156165.3\") def test_to_scope(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_scope(\"okhds\") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_scope(\"plop\") assert Scope.GLOBAL == _TemplateHandler._to_scope(\"global\") assert Scope.GLOBAL == _TemplateHandler._to_scope(\"GLOBAL\") assert Scope.SCENARIO == _TemplateHandler._to_scope(\"SCENARIO\") assert Scope.CYCLE == _TemplateHandler._to_scope(\"cycle\") def test_to_frequency(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_frequency(\"okhds\") with pytest.raises(InconsistentEnvVariableError): 
_TemplateHandler._to_frequency(\"plop\") assert Frequency.DAILY == _TemplateHandler._to_frequency(\"DAILY\") assert Frequency.DAILY == _TemplateHandler._to_frequency(\"Daily\") assert Frequency.WEEKLY == _TemplateHandler._to_frequency(\"weekly\") assert Frequency.WEEKLY == _TemplateHandler._to_frequency(\"WEEKLY\") assert Frequency.MONTHLY == _TemplateHandler._to_frequency(\"Monthly\") assert Frequency.MONTHLY == _TemplateHandler._to_frequency(\"MONThLY\") assert Frequency.QUARTERLY == _TemplateHandler._to_frequency(\"QuaRtERlY\") assert Frequency.YEARLY == _TemplateHandler._to_frequency(\"Yearly\") "} {"text": "import pytest from src.taipy.config.common._classproperty import _Classproperty class TestClassProperty: def test_class_property(self): class TestClass: @_Classproperty def test_property(cls): return \"test_property\" assert TestClass.test_property == \"test_property\" assert TestClass().test_property == \"test_property\" with pytest.raises(TypeError): TestClass.test_property() "} {"text": "import os from unittest import mock import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked def test_global_config_with_env_variable_value(): with mock.patch.dict(os.environ, {\"FOO\": \"bar\", \"BAZ\": \"qux\"}): Config.configure_global_app(foo=\"ENV[FOO]\", bar=\"ENV[BAZ]\") assert Config.global_config.foo == \"bar\" assert Config.global_config.bar == \"qux\" def test_default_global_app_config(): global_config = Config.global_config assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_block_update_global_app_config(): Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_global_app(foo=\"bar\") with pytest.raises(ConfigurationUpdateBlocked): Config.global_config.properties = {\"foo\": \"bar\"} # Test if the global_config stay as default assert Config.global_config.foo is None assert len(Config.global_config.properties) == 0 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from importlib.util import find_spec if find_spec(\"taipy\"): if find_spec(\"taipy.config\"): from taipy.config._init import * # type: ignore if find_spec(\"taipy.gui\"): from taipy.gui._init import * # type: ignore if find_spec(\"taipy.core\"): from taipy.core._init import * # type: ignore if find_spec(\"taipy.rest\"): from taipy.rest._init import * # type: ignore if find_spec(\"taipy.gui_core\"): from taipy.gui_core._init import * # type: ignore if find_spec(\"taipy.enterprise\"): from taipy.enterprise._init import * # type: ignore if find_spec(\"taipy._run\"): from taipy._run import _run as run # type: ignore "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import logging.config import os import sys class _TaipyLogger: _ENVIRONMENT_VARIABLE_NAME_WITH_LOGGER_CONFIG_PATH = \"TAIPY_LOGGER_CONFIG_PATH\" __logger = None @classmethod def _get_logger(cls): if cls.__logger: return cls.__logger if config_filename := os.environ.get(cls._ENVIRONMENT_VARIABLE_NAME_WITH_LOGGER_CONFIG_PATH): logging.config.fileConfig(config_filename) cls.__logger = logging.getLogger(\"Taipy\") else: cls.__logger = logging.getLogger(\"Taipy\") cls.__logger.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter(\"[%(asctime)s][%(name)s][%(levelname)s] %(message)s\", \"%Y-%m-%d %H:%M:%S\") ch.setFormatter(formatter) cls.__logger.addHandler(ch) return cls.__logger "} {"text": "import os from typing import Dict from ..logger._taipy_logger import _TaipyLogger from ._config import _Config from ._config_comparator._config_comparator import _ConfigComparator from ._serializer._json_serializer import _JsonSerializer from ._serializer._toml_serializer import _TomlSerializer from .checker._checker import _Checker from .checker.issue_collector import IssueCollector from .common._classproperty import _Classproperty from .common._config_blocker import _ConfigBlocker from .global_app.global_app_config import GlobalAppConfig from .section import Section from .unique_section import UniqueSection class Config: \"\"\"Configuration singleton.\"\"\" _ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH = \"TAIPY_CONFIG_PATH\" __logger = _TaipyLogger._get_logger() _default_config = _Config._default_config() _python_config = _Config() _file_config = _Config() _env_file_config = _Config() _applied_config = _Config() _collector = IssueCollector() _serializer = _TomlSerializer() __json_serializer = _JsonSerializer() _comparator: _ConfigComparator = _ConfigComparator() @_Classproperty def unique_sections(cls) -> Dict[str, UniqueSection]: \"\"\"Return all unique sections.\"\"\" return cls._applied_config._unique_sections @_Classproperty def sections(cls) -> Dict[str, Dict[str, Section]]: \"\"\"Return all non-unique sections.\"\"\" return cls._applied_config._sections @_Classproperty def global_config(cls) -> GlobalAppConfig: \"\"\"Return configuration values related to the global application as a `GlobalAppConfig^`.\"\"\" return cls._applied_config._global_config @classmethod @_ConfigBlocker._check() def load(cls, filename): \"\"\"Load a configuration file. The current Python configuration is replaced and the Config compilation is triggered. Parameters: filename (Union[str, Path]): The path of the toml configuration file to load. \"\"\" cls.__logger.info(f\"Loading configuration. Filename: '{filename}'\") cls._python_config = cls._serializer._read(filename) cls._compile_configs() cls.__logger.info(f\"Configuration '{filename}' successfully loaded.\") @classmethod def export(cls, filename): \"\"\"Export a configuration. The export is done in a toml file. The exported configuration is taken from the Python code configuration. 
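For example (an illustrative file name), `Config.export(\"my_config.toml\")` writes the Python code configuration, and only that configuration, to my_config.toml; file and environment overrides are not included (see `backup()` for the fully compiled configuration).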
Parameters: filename (Union[str, Path]): The path of the file to export. Note: If *filename* already exists, it is overwritten. \"\"\" cls._serializer._write(cls._python_config, filename) @classmethod def backup(cls, filename): \"\"\"Backup a configuration. The backup is done in a toml file. The backed up configuration is a compilation from the three possible methods to configure the application: the Python code configuration, the file configuration and the environment configuration. Parameters: filename (Union[str, Path]): The path of the file to export. Note: If *filename* already exists, it is overwritten. \"\"\" cls._serializer._write(cls._applied_config, filename) @classmethod @_ConfigBlocker._check() def restore(cls, filename): \"\"\"Restore a configuration file and replace the current applied configuration. Parameters: filename (Union[str, Path]): The path of the toml configuration file to load. \"\"\" cls.__logger.info(f\"Restoring configuration. Filename: '{filename}'\") cls._applied_config = cls._serializer._read(filename) cls.__logger.info(f\"Configuration '{filename}' successfully restored.\") @classmethod @_ConfigBlocker._check() def override(cls, filename): \"\"\"Load a configuration from a file and override the current config. Parameters: filename (Union[str, Path]): The path of the toml configuration file to load. \"\"\" cls.__logger.info(f\"Loading configuration. Filename: '{filename}'\") cls._file_config = cls._serializer._read(filename) cls.__logger.info(\"Overriding configuration.\") cls._compile_configs() cls.__logger.info(f\"Configuration '{filename}' successfully loaded.\") @classmethod def block_update(cls): \"\"\"Block update on the configuration singleton.\"\"\" _ConfigBlocker._block() @classmethod def unblock_update(cls): \"\"\"Unblock update on the configuration singleton.\"\"\" _ConfigBlocker._unblock() @classmethod @_ConfigBlocker._check() def configure_global_app(cls, **properties) -> GlobalAppConfig: \"\"\"Configure the global application. Parameters: **properties (Dict[str, Any]): A dictionary of additional properties. Returns: The global application configuration. \"\"\" glob_cfg = GlobalAppConfig(**properties) if cls._python_config._global_config is None: cls._python_config._global_config = glob_cfg else: cls._python_config._global_config._update(glob_cfg._to_dict()) cls._compile_configs() return cls._applied_config._global_config @classmethod def check(cls) -> IssueCollector: \"\"\"Check configuration. This method logs issue messages and returns an issue collector. Returns: Collector containing the info, warning and error issues. 
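Example: a typical startup sequence configures the application, then calls `Config.check()` followed by `Config.block_update()`; if any ERROR issue is collected, the check raises `SystemExit` after logging the issues.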
\"\"\" cls._collector = _Checker._check(cls._applied_config) cls.__log_message(cls) return cls._collector @classmethod @_ConfigBlocker._check() def _register_default(cls, default_section: Section): if isinstance(default_section, UniqueSection): if cls._default_config._unique_sections.get(default_section.name, None): cls._default_config._unique_sections[default_section.name]._update(default_section._to_dict()) else: cls._default_config._unique_sections[default_section.name] = default_section else: if def_sections := cls._default_config._sections.get(default_section.name, None): def_sections[default_section.id] = default_section else: cls._default_config._sections[default_section.name] = {default_section.id: default_section} cls._serializer._section_class[default_section.name] = default_section.__class__ # type: ignore cls.__json_serializer._section_class[default_section.name] = default_section.__class__ # type: ignore cls._compile_configs() @classmethod @_ConfigBlocker._check() def _register(cls, section): if isinstance(section, UniqueSection): if cls._python_config._unique_sections.get(section.name, None): cls._python_config._unique_sections[section.name]._update(section._to_dict()) else: cls._python_config._unique_sections[section.name] = section else: if sections := cls._python_config._sections.get(section.name, None): if sections.get(section.id, None): sections[section.id]._update(section._to_dict()) else: sections[section.id] = section else: cls._python_config._sections[section.name] = {section.id: section} cls._serializer._section_class[section.name] = section.__class__ cls.__json_serializer._section_class[section.name] = section.__class__ cls._compile_configs() @classmethod def _override_env_file(cls): if config_filename := os.environ.get(cls._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH): cls.__logger.info(f\"Loading configuration provided by environment variable. Filename: '{config_filename}'\") cls._env_file_config = cls._serializer._read(config_filename) cls.__logger.info(f\"Configuration '{config_filename}' successfully loaded.\") @classmethod def _compile_configs(cls): Config._override_env_file() cls._applied_config._clean() if cls._default_config: cls._applied_config._update(cls._default_config) if cls._python_config: cls._applied_config._update(cls._python_config) if cls._file_config: cls._applied_config._update(cls._file_config) if cls._env_file_config: cls._applied_config._update(cls._env_file_config) @classmethod def __log_message(cls, config): for issue in config._collector._warnings: cls.__logger.warning(str(issue)) for issue in config._collector._infos: cls.__logger.info(str(issue)) for issue in config._collector._errors: cls.__logger.error(str(issue)) if len(config._collector._errors) != 0: raise SystemExit(\"Configuration errors found. 
Please check the error log for more information.\") @classmethod def _to_json(cls, _config: _Config) -> str: return cls.__json_serializer._serialize(_config) @classmethod def _from_json(cls, config_as_str: str) -> _Config: return cls.__json_serializer._deserialize(config_as_str) Config._override_env_file() "} {"text": "import json import os def _get_version(): with open(f\"{os.path.dirname(os.path.abspath(__file__))}{os.sep}version.json\") as version_file: version = json.load(version_file) version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}' if vext := version.get(\"ext\"): version_string = f\"{version_string}.{vext}\" return version_string "} {"text": "from .config import Config from .common.frequency import Frequency from .common.scope import Scope "} {"text": "\"\"\"# Taipy Config The Taipy Config package is a Python library designed to configure a Taipy application. The main entrypoint is the `Config^` singleton class. It exposes some methods to configure the Taipy application and some attributes to retrieve the configuration values. \"\"\" from ._init import * from typing import List from .checker.issue import Issue from .checker.issue_collector import IssueCollector from .global_app.global_app_config import GlobalAppConfig from .section import Section from .unique_section import UniqueSection from .version import _get_version __version__ = _get_version() def _config_doc(func): def func_with_doc(section, attribute_name, default, configuration_methods, add_to_unconflicted_sections=False): import os if os.environ.get(\"GENERATING_TAIPY_DOC\", None) and os.environ[\"GENERATING_TAIPY_DOC\"] == \"true\": with open(\"config_doc.txt\", \"a\") as f: from inspect import signature for exposed_configuration_method, configuration_method in configuration_methods: annotation = \" @staticmethod\\n\" sign = \" def \" + exposed_configuration_method + str(signature(configuration_method)) + \":\\n\" doc = ' \"\"\"' + configuration_method.__doc__ + '\"\"\"\\n' content = \" pass\\n\\n\" f.write(annotation + sign + doc + content) return func(section, attribute_name, default, configuration_methods, add_to_unconflicted_sections) return func_with_doc @_config_doc def _inject_section( section_clazz, attribute_name: str, default: Section, configuration_methods: List[tuple], add_to_unconflicted_sections: bool = False, ): Config._register_default(default) if issubclass(section_clazz, UniqueSection): setattr(Config, attribute_name, Config.unique_sections[section_clazz.name]) elif issubclass(section_clazz, Section): setattr(Config, attribute_name, Config.sections[section_clazz.name]) else: raise TypeError if add_to_unconflicted_sections: Config._comparator._add_unconflicted_section(section_clazz.name) for exposed_configuration_method, configuration_method in configuration_methods: setattr(Config, exposed_configuration_method, configuration_method) "} {"text": "from abc import abstractmethod from typing import Any, Dict, Optional from .common._config_blocker import _ConfigBlocker from .common._template_handler import _TemplateHandler as _tpl from .common._validate_id import _validate_id class Section: \"\"\"A Section as a consistent part of the Config. A section is defined by the section name (representing the type of objects that are configured) and a section id. 
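Any additional property passed to the constructor as a keyword argument can be read back as an attribute of the section, and templated values such as \"ENV[VAR]\" are resolved on access.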
\"\"\" _DEFAULT_KEY = \"default\" _ID_KEY = \"id\" def __init__(self, id, **properties): self.id = _validate_id(id) self._properties = properties or dict() @abstractmethod def __copy__(self): raise NotImplementedError @property @abstractmethod def name(self): raise NotImplementedError @abstractmethod def _clean(self): raise NotImplementedError @abstractmethod def _to_dict(self): raise NotImplementedError @classmethod @abstractmethod def _from_dict(cls, config_as_dict: Dict[str, Any], id, config): raise NotImplementedError @abstractmethod def _update(self, config_as_dict, default_section=None): raise NotImplementedError def __getattr__(self, item: str) -> Optional[Any]: return self._replace_templates(self._properties.get(item, None)) @property def properties(self): return {k: _tpl._replace_templates(v) for k, v in self._properties.items()} @properties.setter # type: ignore @_ConfigBlocker._check() def properties(self, val): self._properties = val def _replace_templates(self, value): return _tpl._replace_templates(value) "} {"text": "from abc import ABC from .common._validate_id import _validate_id from .section import Section class UniqueSection(Section, ABC): \"\"\"A UniqueSection is a configuration `Section^` that can have only one instance. A UniqueSection is only defined by the section name. \"\"\" def __init__(self, **properties): super().__init__(self.name, **properties) "} {"text": "from copy import copy from typing import Dict from .global_app.global_app_config import GlobalAppConfig from .section import Section from .unique_section import UniqueSection class _Config: DEFAULT_KEY = \"default\" def __init__(self): self._sections: Dict[str, Dict[str, Section]] = {} self._unique_sections: Dict[str, UniqueSection] = {} self._global_config: GlobalAppConfig = GlobalAppConfig() def _clean(self): self._global_config._clean() for unique_section in self._unique_sections.values(): unique_section._clean() for sections in self._sections.values(): for section in sections.values(): section._clean() @classmethod def _default_config(cls): config = _Config() config._global_config = GlobalAppConfig.default_config() return config def _update(self, other_config): self._global_config._update(other_config._global_config._to_dict()) if other_config._unique_sections: for section_name, other_section in other_config._unique_sections.items(): if section := self._unique_sections.get(section_name, None): section._update(other_section._to_dict()) else: self._unique_sections[section_name] = copy(other_config._unique_sections[section_name]) if other_config._sections: for section_name, other_non_unique_sections in other_config._sections.items(): if non_unique_sections := self._sections.get(section_name, None): self.__update_sections(non_unique_sections, other_non_unique_sections) else: self._sections[section_name] = {} self.__add_sections(self._sections[section_name], other_non_unique_sections) def __add_sections(self, entity_config, other_entity_configs): for cfg_id, sub_config in other_entity_configs.items(): entity_config[cfg_id] = copy(sub_config) self.__point_nested_section_to_self(sub_config) def __update_sections(self, entity_config, other_entity_configs): if self.DEFAULT_KEY in other_entity_configs: if self.DEFAULT_KEY in entity_config: entity_config[self.DEFAULT_KEY]._update(other_entity_configs[self.DEFAULT_KEY]._to_dict()) else: entity_config[self.DEFAULT_KEY] = other_entity_configs[self.DEFAULT_KEY] for cfg_id, sub_config in other_entity_configs.items(): if cfg_id != self.DEFAULT_KEY: if cfg_id in 
entity_config: entity_config[cfg_id]._update(sub_config._to_dict(), entity_config.get(self.DEFAULT_KEY)) else: entity_config[cfg_id] = copy(sub_config) entity_config[cfg_id]._update(sub_config._to_dict(), entity_config.get(self.DEFAULT_KEY)) self.__point_nested_section_to_self(sub_config) def __point_nested_section_to_self(self, section): \"\"\"Loop through attributes of a Section to find if any attribute has a list of Section as value. If there is, update each nested Section by the corresponding instance in self. Args: section (Section): The Section to search for nested sections. \"\"\" for _, attr_value in vars(section).items(): # ! This will fail if an attribute is a dictionary, or nested list of Sections. if not isinstance(attr_value, list): continue for index, item in enumerate(attr_value): if not isinstance(item, Section): continue if sub_item := self._sections.get(item.name, {}).get(item.id, None): attr_value[index] = sub_item "} {"text": "import toml # type: ignore from .._config import _Config from ..exceptions.exceptions import LoadingError from ._base_serializer import _BaseSerializer class _TomlSerializer(_BaseSerializer): \"\"\"Convert configuration from TOML representation to Python Dict and reciprocally.\"\"\" @classmethod def _write(cls, configuration: _Config, filename: str): with open(filename, \"w\") as fd: toml.dump(cls._str(configuration), fd) @classmethod def _read(cls, filename: str) -> _Config: try: config_as_dict = cls._pythonify(dict(toml.load(filename))) return cls._from_dict(config_as_dict) except toml.TomlDecodeError as e: error_msg = f\"Can not load configuration {e}\" raise LoadingError(error_msg) @classmethod def _serialize(cls, configuration: _Config) -> str: return toml.dumps(cls._str(configuration)) @classmethod def _deserialize(cls, config_as_string: str) -> _Config: return cls._from_dict(cls._pythonify(dict(toml.loads(config_as_string)))) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
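# Hedged round-trip sketch for the _TomlSerializer above; the file name is
# illustrative, the private API is used only for exposition, and the
# src-layout import path mirrors the tests earlier in this collection.
from src.taipy.config._config import _Config
from src.taipy.config._serializer._toml_serializer import _TomlSerializer

config = _Config._default_config()
_TomlSerializer._write(config, \"demo_config.toml\")    # dump to TOML
restored = _TomlSerializer._read(\"demo_config.toml\")  # parse it back
assert isinstance(restored, _Config)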
"} {"text": "import inspect import re import types from abc import abstractmethod from datetime import datetime, timedelta from typing import Any, Dict, Optional from .._config import _Config from ..common._template_handler import _TemplateHandler from ..common._validate_id import _validate_id from ..common.frequency import Frequency from ..common.scope import Scope from ..exceptions.exceptions import LoadingError from ..global_app.global_app_config import GlobalAppConfig from ..section import Section from ..unique_section import UniqueSection class _BaseSerializer(object): \"\"\"Base serializer class for taipy configuration.\"\"\" _GLOBAL_NODE_NAME = \"TAIPY\" _section_class = {_GLOBAL_NODE_NAME: GlobalAppConfig} @classmethod @abstractmethod def _write(cls, configuration: _Config, filename: str): raise NotImplementedError @classmethod def _str(cls, configuration: _Config): config_as_dict = {cls._GLOBAL_NODE_NAME: configuration._global_config._to_dict()} for u_sect_name, u_sect in configuration._unique_sections.items(): config_as_dict[u_sect_name] = u_sect._to_dict() for sect_name, sections in configuration._sections.items(): config_as_dict[sect_name] = cls._to_dict(sections) return cls._stringify(config_as_dict) @classmethod def _to_dict(cls, sections: Dict[str, Any]): return {section_id: section._to_dict() for section_id, section in sections.items()} @classmethod def _stringify(cls, as_dict): if as_dict is None: return None if isinstance(as_dict, Section): return as_dict.id + \":SECTION\" if isinstance(as_dict, Scope): return as_dict.name + \":SCOPE\" if isinstance(as_dict, Frequency): return as_dict.name + \":FREQUENCY\" if isinstance(as_dict, bool): return str(as_dict) + \":bool\" if isinstance(as_dict, int): return str(as_dict) + \":int\" if isinstance(as_dict, float): return str(as_dict) + \":float\" if isinstance(as_dict, datetime): return as_dict.isoformat() + \":datetime\" if isinstance(as_dict, timedelta): return cls._timedelta_to_str(as_dict) + \":timedelta\" if inspect.isfunction(as_dict) or isinstance(as_dict, types.BuiltinFunctionType): return as_dict.__module__ + \".\" + as_dict.__name__ + \":function\" if inspect.isclass(as_dict): return as_dict.__module__ + \".\" + as_dict.__qualname__ + \":class\" if isinstance(as_dict, dict): return {str(key): cls._stringify(val) for key, val in as_dict.items()} if isinstance(as_dict, list): return [cls._stringify(val) for val in as_dict] if isinstance(as_dict, tuple): return [cls._stringify(val) for val in as_dict] return as_dict @staticmethod def _extract_node(config_as_dict, cls_config, node, config: Optional[Any]) -> Dict[str, Section]: res = {} for key, value in config_as_dict.get(node, {}).items(): # my_task, {input=[], output=[my_data_node], ...} key = _validate_id(key) res[key] = cls_config._from_dict(value, key, config) # if config is None else cls_config._from_dict(key, # value, config) return res @classmethod def _from_dict(cls, as_dict) -> _Config: config = _Config() config._global_config = GlobalAppConfig._from_dict(as_dict.get(cls._GLOBAL_NODE_NAME, {})) for section_name, sect_as_dict in as_dict.items(): if section_class := cls._section_class.get(section_name, None): if issubclass(section_class, UniqueSection): config._unique_sections[section_name] = section_class._from_dict( sect_as_dict, None, None ) # type: ignore elif issubclass(section_class, Section): config._sections[section_name] = cls._extract_node(as_dict, section_class, section_name, config) return config @classmethod def _pythonify(cls, val): match = 
re.fullmatch(_TemplateHandler._PATTERN, str(val)) if not match: if isinstance(val, str): TYPE_PATTERN = ( r\"^(.+):(\\bbool\\b|\\bstr\\b|\\bint\\b|\\bfloat\\b|\\bdatetime\\b|\\btimedelta\\b|\" r\"\\bfunction\\b|\\bclass\\b|\\bSCOPE\\b|\\bFREQUENCY\\b|\\bSECTION\\b)?$\" ) match = re.fullmatch(TYPE_PATTERN, str(val)) if match: actual_val = match.group(1) dynamic_type = match.group(2) if dynamic_type == \"SECTION\": return actual_val if dynamic_type == \"FREQUENCY\": return Frequency[actual_val] if dynamic_type == \"SCOPE\": return Scope[actual_val] if dynamic_type == \"bool\": return _TemplateHandler._to_bool(actual_val) elif dynamic_type == \"int\": return _TemplateHandler._to_int(actual_val) elif dynamic_type == \"float\": return _TemplateHandler._to_float(actual_val) elif dynamic_type == \"datetime\": return _TemplateHandler._to_datetime(actual_val) elif dynamic_type == \"timedelta\": return _TemplateHandler._to_timedelta(actual_val) elif dynamic_type == \"function\": return _TemplateHandler._to_function(actual_val) elif dynamic_type == \"class\": return _TemplateHandler._to_class(actual_val) elif dynamic_type == \"str\": return actual_val else: error_msg = f\"Error loading toml configuration at {val}. {dynamic_type} type is not supported.\" raise LoadingError(error_msg) if isinstance(val, dict): return {str(k): cls._pythonify(v) for k, v in val.items()} if isinstance(val, list): return [cls._pythonify(v) for v in val] return val @classmethod def _timedelta_to_str(cls, obj: timedelta) -> str: total_seconds = obj.total_seconds() return ( f\"{int(total_seconds // 86400)}d\" f\"{int(total_seconds % 86400 // 3600)}h\" f\"{int(total_seconds % 3600 // 60)}m\" f\"{int(total_seconds % 60)}s\" ) "} {"text": "import json # type: ignore from .._config import _Config from ..exceptions.exceptions import LoadingError from ._base_serializer import _BaseSerializer class _JsonSerializer(_BaseSerializer): \"\"\"Convert configuration from JSON representation to Python Dict and reciprocally.\"\"\" @classmethod def _write(cls, configuration: _Config, filename: str): with open(filename, \"w\") as fd: json.dump(cls._str(configuration), fd, ensure_ascii=False, indent=0, check_circular=False) @classmethod def _read(cls, filename: str) -> _Config: try: with open(filename) as f: config_as_dict = cls._pythonify(json.load(f)) return cls._from_dict(config_as_dict) except json.JSONDecodeError as e: error_msg = f\"Can not load configuration {e}\" raise LoadingError(error_msg) @classmethod def _serialize(cls, configuration: _Config) -> str: return json.dumps(cls._str(configuration), ensure_ascii=False, indent=0, check_circular=False) @classmethod def _deserialize(cls, config_as_string: str) -> _Config: return cls._from_dict(cls._pythonify(dict(json.loads(config_as_string)))) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from typing import Any, List from .issue import Issue class IssueCollector: \"\"\" A collection of issues (instances of class `Issue^`). 
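An IssueCollector is returned by `Config.check()`, so issues can be inspected programmatically (for example, by iterating over `collector.errors`) in addition to being logged.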
Attributes: errors (List[Issue^]): List of ERROR issues collected. warnings (List[Issue^]): List of WARNING issues collected. infos (List[Issue^]): List of INFO issues collected. all (List[Issue^]): List of all issues collected ordered by decreasing level (ERROR, WARNING and INFO). \"\"\" _ERROR_LEVEL = \"ERROR\" _WARNING_LEVEL = \"WARNING\" _INFO_LEVEL = \"INFO\" def __init__(self): self._errors: List[Issue] = [] self._warnings: List[Issue] = [] self._infos: List[Issue] = [] @property def all(self) -> List[Issue]: return self._errors + self._warnings + self._infos @property def infos(self) -> List[Issue]: return self._infos @property def warnings(self) -> List[Issue]: return self._warnings @property def errors(self) -> List[Issue]: return self._errors def _add_error(self, field: str, value: Any, message: str, checker_name: str): self._errors.append(Issue(self._ERROR_LEVEL, field, value, message, checker_name)) def _add_warning(self, field: str, value: Any, message: str, checker_name: str): self._warnings.append(Issue(self._WARNING_LEVEL, field, value, message, checker_name)) def _add_info(self, field: str, value: Any, message: str, checker_name: str): self._infos.append(Issue(self._INFO_LEVEL, field, value, message, checker_name)) "} {"text": "from dataclasses import dataclass from typing import Any, Optional @dataclass class Issue: \"\"\" An issue detected in the configuration. Attributes: level (str): Level of the issue among ERROR, WARNING, INFO. field (str): Configuration field on which the issue has been detected. value (Any): Value of the field on which the issue has been detected. message (str): Human readable message to help the user fix the issue. tag (Optional[str]): Optional tag to be used to filter issues. \"\"\" level: str field: str value: Any message: str tag: Optional[str] def __str__(self) -> str: message = self.message if self.value: current_value_str = f'\"{self.value}\"' if isinstance(self.value, str) else f\"{self.value}\" message += f\" Current value of property `{self.field}` is {current_value_str}.\" return message "} {"text": "from typing import List, Type from ._checkers._config_checker import _ConfigChecker from .issue_collector import IssueCollector class _Checker: \"\"\"Holds the various checkers to perform on the config.\"\"\" _checkers: List[Type[_ConfigChecker]] = [] @classmethod def _check(cls, _applied_config): collector = IssueCollector() for checker in cls._checkers: checker(_applied_config, collector)._check() return collector @classmethod def add_checker(cls, checker_class: Type[_ConfigChecker]): cls._checkers.append(checker_class) "} {"text": "import abc from typing import Any, List, Optional, Set from ..._config import _Config from ..issue_collector import IssueCollector class _ConfigChecker: _PREDEFINED_PROPERTIES_KEYS = [\"_entity_owner\"] def __init__(self, config: _Config, collector): self._collector = collector self._config = config @abc.abstractmethod def _check(self) -> IssueCollector: raise NotImplementedError def _error(self, field: str, value: Any, message: str): self._collector._add_error(field, value, message, self.__class__.__name__) def _warning(self, field: str, value: Any, message: str): self._collector._add_warning(field, value, message, self.__class__.__name__) def _info(self, field: str, value: Any, message: str): self._collector._add_info(field, value, message, self.__class__.__name__) def _check_children( self, parent_config_class, config_id: str, config_key: str, config_value, child_config_class, can_be_empty: Optional[bool] = False, ): if not 
config_value and not can_be_empty: self._warning( config_key, config_value, f\"{config_key} field of {parent_config_class.__name__} `{config_id}` is empty.\", ) else: if not ( (isinstance(config_value, List) or isinstance(config_value, Set)) and all(map(lambda x: isinstance(x, child_config_class), config_value)) ): self._error( config_key, config_value, f\"{config_key} field of {parent_config_class.__name__} `{config_id}` must be populated with a list \" f\"of {child_config_class.__name__} objects.\", ) def _check_existing_config_id(self, config): if not config.id: self._error( \"config_id\", config.id, f\"config_id of {config.__class__.__name__} `{config.id}` is empty.\", ) def _check_if_entity_property_key_used_is_predefined(self, config): for key, value in config._properties.items(): if key in self._PREDEFINED_PROPERTIES_KEYS: self._error( key, value, f\"Properties of {config.__class__.__name__} `{config.id}` cannot have `{key}` as its property.\", ) "} {"text": "from ..._config import _Config from ..issue_collector import IssueCollector from ._config_checker import _ConfigChecker class _AuthConfigChecker(_ConfigChecker): def __init__(self, config: _Config, collector: IssueCollector): super().__init__(config, collector) def _check(self) -> IssueCollector: auth_config = self._config._auth_config self._check_predefined_protocol(auth_config) return self._collector def _check_predefined_protocol(self, auth_config): if auth_config.protocol == auth_config._PROTOCOL_LDAP: self.__check_ldap(auth_config) if auth_config.protocol == auth_config._PROTOCOL_TAIPY: self.__check_taipy(auth_config) def __check_taipy(self, auth_config): if auth_config._TAIPY_ROLES not in auth_config.properties: self._error( \"properties\", auth_config._TAIPY_ROLES, f\"`{auth_config._TAIPY_ROLES}` property must be populated when {auth_config._PROTOCOL_TAIPY} is used.\", ) if auth_config._TAIPY_PWD not in auth_config.properties: self._warning( \"properties\", auth_config._TAIPY_PWD, f\"In order to protect authentication with passwords using the {auth_config._PROTOCOL_TAIPY} protocol,\" f\" the `{auth_config._TAIPY_PWD}` property can be populated.\", ) def __check_ldap(self, auth_config): if auth_config._LDAP_SERVER not in auth_config.properties: self._error( \"properties\", auth_config._LDAP_SERVER, f\"`{auth_config._LDAP_SERVER}` property must be populated when {auth_config._PROTOCOL_LDAP} is used.\", ) if auth_config._LDAP_BASE_DN not in auth_config.properties: self._error( \"properties\", auth_config._LDAP_BASE_DN, f\"`{auth_config._LDAP_BASE_DN}` property must be populated when {auth_config._PROTOCOL_LDAP} is used.\", ) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from .exceptions import * "} {"text": " class LoadingError(Exception): \"\"\"Raised if an error occurs while loading the configuration file.\"\"\" class InconsistentEnvVariableError(Exception): \"\"\"An inconsistent value has been detected in an environment variable referenced by the configuration.\"\"\" class MissingEnvVariableError(Exception): \"\"\"An environment variable referenced in the configuration is missing.\"\"\" class InvalidConfigurationId(Exception): \"\"\"The configuration id is not valid.\"\"\" class ConfigurationUpdateBlocked(Exception): \"\"\"The configuration is blocked from updates by other running Taipy services.\"\"\" "} {"text": "from ..common._repr_enum import _ReprEnum class Frequency(_ReprEnum): \"\"\"Frequency of the recurrence of `Cycle^` and `Scenario^` objects. The frequency must be provided in the `ScenarioConfig^`. Each recurrent scenario is attached to the cycle corresponding to the creation date and the frequency. In other words, each cycle represents an iteration and contains the various scenarios created during this iteration. For instance, when scenarios have a _MONTHLY_ frequency, one cycle will be created for each month (January, February, March, etc.). A new scenario created on February 10th gets attached to the _February_ cycle. The frequency is implemented as an enumeration with the following possible values: - With a _DAILY_ frequency, a new cycle is created for each day. - With a _WEEKLY_ frequency, a new cycle is created for each week (from Monday to Sunday). - With a _MONTHLY_ frequency, a new cycle is created for each month. - With a _QUARTERLY_ frequency, a new cycle is created for each quarter. - With a _YEARLY_ frequency, a new cycle is created for each year. \"\"\" DAILY = 1 WEEKLY = 2 MONTHLY = 3 QUARTERLY = 4 YEARLY = 5 "} {"text": "class _Classproperty(object): def __init__(self, f): self.f = f def __get__(self, obj, owner): return self.f(owner) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import functools from enum import Enum class _ReprEnum(Enum): @classmethod @functools.lru_cache def _from_repr(cls, repr_: str): return next(filter(lambda e: repr(e) == repr_, cls)) # type: ignore "} {"text": "import keyword from ..exceptions.exceptions import InvalidConfigurationId __INVALID_TAIPY_ID_TERMS = [\"CYCLE\", \"SCENARIO\", \"SEQUENCE\", \"TASK\", \"DATANODE\"] def _validate_id(name: str): for invalid_taipy_id_term in __INVALID_TAIPY_ID_TERMS: if invalid_taipy_id_term in name: raise InvalidConfigurationId(f\"{name} is not a valid identifier. 
{invalid_taipy_id_term} is restricted.\") if name.isidentifier() and not keyword.iskeyword(name): return name raise InvalidConfigurationId(f\"{name} is not a valid identifier.\") "} {"text": "import functools from ...logger._taipy_logger import _TaipyLogger from ..exceptions.exceptions import ConfigurationUpdateBlocked class _ConfigBlocker: \"\"\"Configuration blocker singleton.\"\"\" __logger = _TaipyLogger._get_logger() __block_config_update = False @classmethod def _block(cls): cls.__block_config_update = True @classmethod def _unblock(cls): cls.__block_config_update = False @classmethod def _check(cls): def inner(f): @functools.wraps(f) def _check_if_is_blocking(*args, **kwargs): if cls.__block_config_update: error_message = ( \"The Core service should be stopped by running core.stop() before\" \" modifying the Configuration. For more information, please refer to:\" \" https://docs.taipy.io/en/latest/manuals/running_services/#running-core.\" ) cls.__logger.error(\"ConfigurationUpdateBlocked: \" + error_message) raise ConfigurationUpdateBlocked(error_message) return f(*args, **kwargs) return _check_if_is_blocking return inner "} {"text": "from ..common._repr_enum import _ReprEnum class _OrderedEnum(_ReprEnum): def __ge__(self, other): if self.__class__ is other.__class__: return self.value >= other.value return NotImplemented def __gt__(self, other): if self.__class__ is other.__class__: return self.value > other.value return NotImplemented def __le__(self, other): if self.__class__ is other.__class__: return self.value <= other.value return NotImplemented def __lt__(self, other): if self.__class__ is other.__class__: return self.value < other.value return NotImplemented class Scope(_OrderedEnum): \"\"\"Scope of a `DataNode^`. This enumeration can have the following values: - `GLOBAL` - `CYCLE` - `SCENARIO` \"\"\" GLOBAL = 3 CYCLE = 2 SCENARIO = 1 "} {"text": "import os import re from collections import UserDict from datetime import datetime, timedelta from importlib import import_module from operator import attrgetter from pydoc import locate from ..exceptions.exceptions import InconsistentEnvVariableError, MissingEnvVariableError from .frequency import Frequency from .scope import Scope class _TemplateHandler: \"\"\"Factory to handle actions related to config value templating.\"\"\" _PATTERN = r\"^ENV\\[([a-zA-Z_]\\w*)\\](:(\\bbool\\b|\\bstr\\b|\\bfloat\\b|\\bint\\b))?$\" @classmethod def _replace_templates(cls, template, type=str, required=True, default=None): if isinstance(template, tuple): return tuple(cls._replace_template(item, type, required, default) for item in template) if isinstance(template, list): return [cls._replace_template(item, type, required, default) for item in template] if isinstance(template, dict): return {str(k): cls._replace_template(v, type, required, default) for k, v in template.items()} if isinstance(template, UserDict): return {str(k): cls._replace_template(v, type, required, default) for k, v in template.items()} return cls._replace_template(template, type, required, default) @classmethod def _replace_template(cls, template, type, required, default): if \"ENV\" not in str(template): return template match = re.fullmatch(cls._PATTERN, str(template)) if match: var = match.group(1) dynamic_type = match.group(3) val = os.environ.get(var) if val is None: if required: raise MissingEnvVariableError(f\"Environment variable {var} is not set.\") return default if type == bool: return cls._to_bool(val) elif type == int: return cls._to_int(val) elif type == float: 
return cls._to_float(val) elif type == Scope: return cls._to_scope(val) elif type == Frequency: return cls._to_frequency(val) else: if dynamic_type == \"bool\": return cls._to_bool(val) elif dynamic_type == \"int\": return cls._to_int(val) elif dynamic_type == \"float\": return cls._to_float(val) return val return template @staticmethod def _to_bool(val: str) -> bool: possible_values = [\"true\", \"false\"] if str.lower(val) not in possible_values: raise InconsistentEnvVariableError(f\"{val} is not a Boolean.\") return str.lower(val) == \"true\" @staticmethod def _to_int(val: str) -> int: try: return int(val) except ValueError: raise InconsistentEnvVariableError(f\"{val} is not an integer.\") @staticmethod def _to_float(val: str) -> float: try: return float(val) except ValueError: raise InconsistentEnvVariableError(f\"{val} is not a float.\") @staticmethod def _to_datetime(val: str) -> datetime: try: return datetime.fromisoformat(val) except ValueError: raise InconsistentEnvVariableError(f\"{val} is not a valid datetime.\") @staticmethod def _to_timedelta(val: str) -> timedelta: \"\"\" Parse a duration string (e.g. \"2h13m\") into a timedelta object. :param val: A string identifying a duration (e.g. \"2h13m\"). :return datetime.timedelta: The parsed datetime.timedelta object. \"\"\" regex = re.compile( r\"^((?P<days>[\\.\\d]+?)d)? *\" r\"((?P<hours>[\\.\\d]+?)h)? *\" r\"((?P<minutes>[\\.\\d]+?)m)? *\" r\"((?P<seconds>[\\.\\d]+?)s)?$\" ) parts = regex.match(val) if not parts: raise InconsistentEnvVariableError(f\"{val} is not a valid timedelta.\") time_params = {name: float(param) for name, param in parts.groupdict().items() if param} return timedelta(**time_params) # type: ignore @staticmethod def _to_scope(val: str) -> Scope: try: return Scope[str.upper(val)] except Exception: raise InconsistentEnvVariableError(f\"{val} is not a valid scope.\") @staticmethod def _to_frequency(val: str) -> Frequency: try: return Frequency[str.upper(val)] except Exception: raise InconsistentEnvVariableError(f\"{val} is not a valid frequency.\") @staticmethod def _to_function(val: str): module_name, fct_name = val.rsplit(\".\", 1) try: module = import_module(module_name) return attrgetter(fct_name)(module) except Exception: raise InconsistentEnvVariableError(f\"{val} is not a valid function.\") @staticmethod def _to_class(val: str): try: return locate(val) except Exception: raise InconsistentEnvVariableError(f\"{val} is not a valid class.\") "} {"text": ""} {"text": "from __future__ import annotations from typing import Any, Dict, Optional, Union from ..common._config_blocker import _ConfigBlocker from ..common._template_handler import _TemplateHandler as _tpl class GlobalAppConfig: \"\"\" Configuration fields related to the global application. Attributes: **properties (Dict[str, Any]): A dictionary of additional properties. 
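Example: properties are stored raw and resolved when read, so with `GlobalAppConfig(root_folder=\"ENV[ROOT]\")` (an illustrative property), reading `root_folder` returns the current value of the ROOT environment variable.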
\"\"\" def __init__(self, **properties): self._properties = properties @property def properties(self): return {k: _tpl._replace_templates(v) for k, v in self._properties.items()} @properties.setter # type: ignore @_ConfigBlocker._check() def properties(self, val): self._properties = val def __getattr__(self, item: str) -> Optional[Any]: return _tpl._replace_templates(self._properties.get(item)) @classmethod def default_config(cls) -> GlobalAppConfig: return GlobalAppConfig() def _clean(self): self._properties.clear() def _to_dict(self): as_dict = {} as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, config_as_dict: Dict[str, Any]): config = GlobalAppConfig() config._properties = config_as_dict return config def _update(self, config_as_dict): self._properties.update(config_as_dict) "} {"text": "import re from typing import Dict, List, Set from .._serializer._json_serializer import _JsonSerializer class _ComparatorResult(dict): ADDED_ITEMS_KEY = \"added_items\" REMOVED_ITEMS_KEY = \"removed_items\" MODIFIED_ITEMS_KEY = \"modified_items\" CONFLICTED_SECTION_KEY = \"conflicted_sections\" UNCONFLICTED_SECTION_KEY = \"unconflicted_sections\" def __init__(self, unconflicted_sections: Set[str]): super().__init__() self._unconflicted_sections = unconflicted_sections def _sort_by_section(self): if self.get(self.CONFLICTED_SECTION_KEY): for key in self[self.CONFLICTED_SECTION_KEY].keys(): self[self.CONFLICTED_SECTION_KEY][key].sort(key=lambda x: x[0][0]) if self.get(self.UNCONFLICTED_SECTION_KEY): for key in self[self.UNCONFLICTED_SECTION_KEY].keys(): self[self.UNCONFLICTED_SECTION_KEY][key].sort(key=lambda x: x[0][0]) def _check_added_items(self, config_deepdiff, new_json_config): if dictionary_item_added := config_deepdiff.get(\"dictionary_item_added\"): for item_added in dictionary_item_added: section_name, config_id, attribute = self.__get_changed_entity_attribute(item_added) diff_sections = self.__get_section(section_name) if attribute: value_added = new_json_config[section_name][config_id][attribute] elif config_id: value_added = new_json_config[section_name][config_id] else: value_added = new_json_config[section_name] section_name = self.__rename_global_node_name(section_name) self.__create_or_append_list( diff_sections, self.ADDED_ITEMS_KEY, ((section_name, config_id, attribute), (value_added)), ) def _check_removed_items(self, config_deepdiff, old_json_config): if dictionary_item_removed := config_deepdiff.get(\"dictionary_item_removed\"): for item_removed in dictionary_item_removed: section_name, config_id, attribute = self.__get_changed_entity_attribute(item_removed) diff_sections = self.__get_section(section_name) if attribute: value_removed = old_json_config[section_name][config_id][attribute] elif config_id: value_removed = old_json_config[section_name][config_id] else: value_removed = old_json_config[section_name] section_name = self.__rename_global_node_name(section_name) self.__create_or_append_list( diff_sections, self.REMOVED_ITEMS_KEY, ((section_name, config_id, attribute), (value_removed)), ) def _check_modified_items(self, config_deepdiff, old_json_config, new_json_config): if values_changed := config_deepdiff.get(\"values_changed\"): for item_changed, value_changed in values_changed.items(): section_name, config_id, attribute = self.__get_changed_entity_attribute(item_changed) diff_sections = self.__get_section(section_name) section_name = self.__rename_global_node_name(section_name) self.__create_or_append_list( diff_sections, 
self.MODIFIED_ITEMS_KEY, ((section_name, config_id, attribute), (value_changed[\"old_value\"], value_changed[\"new_value\"])), ) # Iterable item added will be considered a modified item if iterable_item_added := config_deepdiff.get(\"iterable_item_added\"): self.__check_modified_iterable(iterable_item_added, old_json_config, new_json_config) # Iterable item removed will be considered a modified item if iterable_item_removed := config_deepdiff.get(\"iterable_item_removed\"): self.__check_modified_iterable(iterable_item_removed, old_json_config, new_json_config) def __check_modified_iterable(self, iterable_items, old_json_config, new_json_config): for item in iterable_items: section_name, config_id, attribute = self.__get_changed_entity_attribute(item) diff_sections = self.__get_section(section_name) if attribute: new_value = new_json_config[section_name][config_id][attribute] old_value = old_json_config[section_name][config_id][attribute] else: new_value = new_json_config[section_name][config_id] old_value = old_json_config[section_name][config_id] section_name = self.__rename_global_node_name(section_name) modified_value = ((section_name, config_id, attribute), (old_value, new_value)) if ( not diff_sections.get(self.MODIFIED_ITEMS_KEY) or modified_value not in diff_sections[self.MODIFIED_ITEMS_KEY] ): self.__create_or_append_list( diff_sections, self.MODIFIED_ITEMS_KEY, modified_value, ) def __get_section(self, section_name: str) -> Dict[str, List]: if section_name in self._unconflicted_sections: if not self.get(self.UNCONFLICTED_SECTION_KEY): self[self.UNCONFLICTED_SECTION_KEY] = {} return self[self.UNCONFLICTED_SECTION_KEY] if not self.get(self.CONFLICTED_SECTION_KEY): self[self.CONFLICTED_SECTION_KEY] = {} return self[self.CONFLICTED_SECTION_KEY] def __create_or_append_list(self, diff_dict, key, value): if diff_dict.get(key): diff_dict[key].append(value) else: diff_dict[key] = [value] def __get_changed_entity_attribute(self, attribute_bracket_notation): \"\"\"Split the section name, the config id (if exists), and the attribute name (if exists) from JSON bracket notation. \"\"\" try: section_name, config_id, attribute = re.findall(r\"\\[\\'(.*?)\\'\\]\", attribute_bracket_notation) except ValueError: try: section_name, config_id = re.findall(r\"\\[\\'(.*?)\\'\\]\", attribute_bracket_notation) attribute = None except ValueError: section_name = re.findall(r\"\\[\\'(.*?)\\'\\]\", attribute_bracket_notation)[0] config_id = None attribute = None return section_name, config_id, attribute def __rename_global_node_name(self, node_name): if node_name == _JsonSerializer._GLOBAL_NODE_NAME: return \"Global Configuration\" return node_name "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import json from copy import copy from typing import Optional, Set, Union from deepdiff import DeepDiff from ...logger._taipy_logger import _TaipyLogger from .._config import _Config from .._serializer._json_serializer import _JsonSerializer from ._comparator_result import _ComparatorResult class _ConfigComparator: def __init__(self): self._unconflicted_sections: Set[str] = set() self.__logger = _TaipyLogger._get_logger() def _add_unconflicted_section(self, section_name: Union[str, Set[str]]): if isinstance(section_name, str): section_name = {section_name} self._unconflicted_sections.update(section_name) def _find_conflict_config( self, old_config: _Config, new_config: _Config, old_version_number: Optional[str] = None, new_version_number: Optional[str] = None, ): \"\"\"Compare between 2 _Config object to check for compatibility. Args: old_config (_Config): The old _Config. new_config (_Config): The new _Config. old_version_number (str, optional): The old version number for logging. Defaults to None. new_version_number (str, optional): The new version number for logging. Defaults to None. Returns: _ComparatorResult: Return a _ComparatorResult dictionary with the following format: ```python { \"added_items\": [ ((section_name_1, config_id_1, attribute_1), added_object_1), ((section_name_2, config_id_2, attribute_2), added_object_2), ], \"removed_items\": [ ((section_name_1, config_id_1, attribute_1), removed_object_1), ((section_name_2, config_id_2, attribute_2), removed_object_2), ], \"modified_items\": [ ((section_name_1, config_id_1, attribute_1), (old_value_1, new_value_1)), ((section_name_2, config_id_2, attribute_2), (old_value_2, new_value_2)), ], } ``` \"\"\" comparator_result = self.__get_config_diff(old_config, new_config) self.__log_find_conflict_message(comparator_result, old_version_number, new_version_number) return comparator_result def _compare( self, config_1: _Config, config_2: _Config, version_number_1: str, version_number_2: str, ): \"\"\"Compare between 2 _Config object to check for compatibility. Args: config_1 (_Config): The old _Config. config_2 (_Config): The new _Config. version_number_1 (str): The old version number for logging. version_number_2 (str): The new version number for logging. 
\"\"\" comparator_result = self.__get_config_diff(config_1, config_2) self.__log_comparison_message(comparator_result, version_number_1, version_number_2) return comparator_result def __get_config_diff(self, config_1, config_2): json_config_1 = json.loads(_JsonSerializer._serialize(config_1)) json_config_2 = json.loads(_JsonSerializer._serialize(config_2)) config_deepdiff = DeepDiff(json_config_1, json_config_2, ignore_order=True) comparator_result = _ComparatorResult(copy(self._unconflicted_sections)) comparator_result._check_added_items(config_deepdiff, json_config_2) comparator_result._check_removed_items(config_deepdiff, json_config_1) comparator_result._check_modified_items(config_deepdiff, json_config_1, json_config_2) comparator_result._sort_by_section() return comparator_result def __log_comparison_message( self, comparator_result: _ComparatorResult, version_number_1: str, version_number_2: str, ): config_str_1 = f\"version {version_number_1} Configuration\" config_str_2 = f\"version {version_number_2} Configuration\" diff_messages = [] for _, sections in comparator_result.items(): diff_messages = self.__get_messages(sections) if diff_messages: self.__logger.info( f\"Differences between {config_str_1} and {config_str_2}:\\n\\t\" + \"\\n\\t\".join(diff_messages) ) else: self.__logger.info(f\"There is no difference between {config_str_1} and {config_str_2}.\") def __log_find_conflict_message( self, comparator_result: _ComparatorResult, old_version_number: Optional[str] = None, new_version_number: Optional[str] = None, ): old_config_str = ( f\"configuration for version {old_version_number}\" if old_version_number else \"current configuration\" ) new_config_str = ( f\"configuration for version {new_version_number}\" if new_version_number else \"current configuration\" ) if unconflicted_sections := comparator_result.get(_ComparatorResult.UNCONFLICTED_SECTION_KEY): unconflicted_messages = self.__get_messages(unconflicted_sections) self.__logger.info( f\"There are non-conflicting changes between the {old_config_str}\" f\" and the {new_config_str}:\\n\\t\" + \"\\n\\t\".join(unconflicted_messages) ) if conflicted_sections := comparator_result.get(_ComparatorResult.CONFLICTED_SECTION_KEY): conflicted_messages = self.__get_messages(conflicted_sections) self.__logger.error( f\"The {old_config_str} conflicts with the {new_config_str}:\\n\\t\" + \"\\n\\t\".join(conflicted_messages) ) def __get_messages(self, diff_sections): dq = '\"' messages = [] if added_items := diff_sections.get(_ComparatorResult.ADDED_ITEMS_KEY): for diff in added_items: ((section_name, config_id, attribute), added_object) = diff messages.append( f\"{section_name} {dq}{config_id}{dq} \" f\"{f'has attribute {dq}{attribute}{dq}' if attribute else 'was'} added: {added_object}\" ) if removed_items := diff_sections.get(_ComparatorResult.REMOVED_ITEMS_KEY): for diff in removed_items: ((section_name, config_id, attribute), removed_object) = diff messages.append( f\"{section_name} {dq}{config_id}{dq} \" f\"{f'has attribute {dq}{attribute}{dq}' if attribute else 'was'} removed\" ) if modified_items := diff_sections.get(_ComparatorResult.MODIFIED_ITEMS_KEY): for diff in modified_items: ((section_name, config_id, attribute), (old_value, new_value)) = diff messages.append( f\"{section_name} {dq}{config_id}{dq} \" f\"{f'has attribute {dq}{attribute}{dq}' if attribute else 'was'} modified: \" f\"{old_value} -> {new_value}\" ) return messages "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not 
use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import argparse from typing import Dict class _CLI: \"\"\"Argument parser for Taipy application.\"\"\" # The conflict_handler is set to \"resolve\" to override conflicting arguments _subparser_action = None _parser = argparse.ArgumentParser(conflict_handler=\"resolve\") _sub_taipyparsers: Dict[str, argparse.ArgumentParser] = {} _arg_groups: Dict[str, argparse._ArgumentGroup] = {} @classmethod def _add_subparser(cls, name: str, **kwargs) -> argparse.ArgumentParser: \"\"\"Create a new subparser and return an argparse handler.\"\"\" if subparser := cls._sub_taipyparsers.get(name): return subparser if not cls._subparser_action: cls._subparser_action = cls._parser.add_subparsers() subparser = cls._subparser_action.add_parser( name=name, conflict_handler=\"resolve\", **kwargs, ) cls._sub_taipyparsers[name] = subparser subparser.set_defaults(which=name) return subparser @classmethod def _add_groupparser(cls, title: str, description: str = \"\") -> argparse._ArgumentGroup: \"\"\"Create a new argument group and return an argparse handler.\"\"\" if groupparser := cls._arg_groups.get(title): return groupparser groupparser = cls._parser.add_argument_group(title=title, description=description) cls._arg_groups[title] = groupparser return groupparser @classmethod def _parse(cls): \"\"\"Parse and return only known arguments.\"\"\" args, _ = cls._parser.parse_known_args() return args @classmethod def _remove_argument(cls, arg: str): \"\"\"Remove an argument from the parser. Note that the `arg` must be without --. 
Source: https://stackoverflow.com/questions/32807319/disable-remove-argument-in-argparse \"\"\" for action in cls._parser._actions: opts = action.option_strings if (opts and opts[0] == arg) or action.dest == arg: cls._parser._remove_action(action) break for argument_group in cls._parser._action_groups: for group_action in argument_group._group_actions: opts = group_action.option_strings if (opts and opts[0] == arg) or group_action.dest == arg: argument_group._group_actions.remove(group_action) return "} {"text": "from ._cli import _CLI "} {"text": "\"\"\"The setup script.\"\"\" import json import os from setuptools import find_namespace_packages, find_packages, setup with open(\"README.md\", \"rb\") as readme_file: readme = readme_file.read().decode(\"UTF-8\") with open(f\"src{os.sep}taipy{os.sep}templates{os.sep}version.json\") as version_file: version = json.load(version_file) version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}' if vext := version.get(\"ext\"): version_string = f\"{version_string}.{vext}\" test_requirements = [\"pytest>=3.8\"] setup( author=\"Avaiga\", author_email=\"dev@taipy.io\", python_requires=\">=3.8\", classifiers=[ \"Intended Audience :: Developers\", \"License :: OSI Approved :: Apache Software License\", \"Natural Language :: English\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Programming Language :: Python :: 3.10\", \"Programming Language :: Python :: 3.11\", ], description=\"An open-source package holding Taipy application templates.\", license=\"Apache License 2.0\", long_description=readme, long_description_content_type=\"text/markdown\", keywords=\"taipy-templates\", name=\"taipy-templates\", package_dir={\"\": \"src\"}, packages=find_namespace_packages(where=\"src\") + find_packages(include=[\"taipy\"]), include_package_data=True, test_suite=\"tests\", url=\"https://github.com/avaiga/taipy-templates\", version=version_string, zip_safe=False, ) "} {"text": "import os import pytest from cookiecutter.exceptions import FailedHookException from cookiecutter.main import cookiecutter from .utils import _run_template def test_default_answer(tmpdir): cookiecutter( template=\"src/taipy/templates/default\", output_dir=str(tmpdir), no_input=True, ) assert os.listdir(tmpdir) == [\"taipy_application\"] assert sorted(os.listdir(os.path.join(tmpdir, \"taipy_application\"))) == sorted([\"requirements.txt\", \"main.py\", \"images\"]) with open(os.path.join(tmpdir, \"taipy_application\", \"requirements.txt\")) as requirements_file: # Assert post_gen_project hook is successful assert \"taipy==\" in requirements_file.read() oldpwd = os.getcwd() os.chdir(os.path.join(tmpdir, \"taipy_application\")) stdout = _run_template(\"main.py\") os.chdir(oldpwd) # Assert the message when the application is run successfully is in the stdout assert \"[Taipy][INFO] * Server starting on\" in str(stdout, \"utf-8\") def test_main_file_with_and_without_extension(tmpdir): cookiecutter( template=\"src/taipy/templates/default\", output_dir=str(tmpdir), no_input=True, extra_context={ \"Application main Python file\": \"app.py\", }, ) assert sorted(os.listdir(os.path.join(tmpdir, \"taipy_application\"))) == sorted([\"requirements.txt\", \"app.py\", \"images\"]) cookiecutter( template=\"src/taipy/templates/default\", output_dir=str(tmpdir), no_input=True, extra_context={ \"Application root folder name\": \"foo_app\", \"Application main Python file\": \"app\", 
}, ) assert sorted(os.listdir(os.path.join(tmpdir, \"foo_app\"))) == sorted([\"requirements.txt\", \"app.py\", \"images\"]) def test_with_core_service(tmpdir): cookiecutter( template=\"src/taipy/templates/default\", output_dir=str(tmpdir), no_input=True, extra_context={ \"Does the application use scenario management or version management?\": \"y\", \"Does the application use Rest API?\": \"no\", }, ) assert sorted(os.listdir(os.path.join(tmpdir, \"taipy_application\"))) == sorted( [\"requirements.txt\", \"main.py\", \"images\", \"configuration\", \"algorithms\"] ) with open(os.path.join(tmpdir, \"taipy_application\", \"main.py\")) as main_file: assert \"core = Core()\" in main_file.read() oldpwd = os.getcwd() os.chdir(os.path.join(tmpdir, \"taipy_application\")) stdout = _run_template(\"main.py\") os.chdir(oldpwd) # Assert the message when the application is run successfully is in the stdout assert \"[Taipy][INFO] * Server starting on\" in str(stdout, \"utf-8\") assert \"[Taipy][INFO] Development mode: \" in str(stdout, \"utf-8\") def test_with_rest_service(tmpdir): cookiecutter( template=\"src/taipy/templates/default\", output_dir=str(tmpdir), no_input=True, extra_context={ \"Does the application use scenario management or version management?\": \"n\", \"Does the application use Rest API?\": \"yes\", }, ) assert sorted(os.listdir(os.path.join(tmpdir, \"taipy_application\"))) == sorted([\"requirements.txt\", \"main.py\", \"images\"]) with open(os.path.join(tmpdir, \"taipy_application\", \"main.py\")) as main_file: assert \"rest = Rest()\" in main_file.read() oldpwd = os.getcwd() os.chdir(os.path.join(tmpdir, \"taipy_application\")) stdout = _run_template(\"main.py\") os.chdir(oldpwd) # Assert the message when the application is run successfully is in the stdout assert \"[Taipy][INFO] * Server starting on\" in str(stdout, \"utf-8\") assert \"[Taipy][INFO] Development mode: \" in str(stdout, \"utf-8\") def test_with_both_core_rest_services(tmpdir): cookiecutter( template=\"src/taipy/templates/default\", output_dir=str(tmpdir), no_input=True, extra_context={ \"Does the application use scenario management or version management?\": \"y\", \"Does the application use Rest API?\": \"yes\", }, ) assert sorted(os.listdir(os.path.join(tmpdir, \"taipy_application\"))) == sorted( [\"requirements.txt\", \"main.py\", \"images\", \"configuration\", \"algorithms\"] ) with open(os.path.join(tmpdir, \"taipy_application\", \"main.py\")) as main_file: # Read the file once; a second read() on the same handle would return an empty string main_content = main_file.read() assert \"rest = Rest()\" in main_content assert \"core = Core()\" in main_content oldpwd = os.getcwd() os.chdir(os.path.join(tmpdir, \"taipy_application\")) stdout = _run_template(\"main.py\") os.chdir(oldpwd) # Assert the message when the application is run successfully is in the stdout assert \"[Taipy][INFO] * Server starting on\" in str(stdout, \"utf-8\") assert \"[Taipy][INFO] Development mode: \" in str(stdout, \"utf-8\") def test_multipage_gui_template(tmpdir): cookiecutter( template=\"src/taipy/templates/default\", output_dir=str(tmpdir), no_input=True, extra_context={ \"Application root folder name\": \"foo_app\", \"Page names in multi-page application?\": \"name_1 name_2 name_3\", }, ) assert sorted(os.listdir(os.path.join(tmpdir, \"foo_app\"))) == sorted([\"requirements.txt\", \"main.py\", \"pages\", \"images\"]) assert sorted(os.listdir(os.path.join(tmpdir, \"foo_app\", \"pages\"))) == sorted( [\"name_1\", \"name_2\", \"name_3\", \"root.md\", \"root.py\", \"__init__.py\"] ) oldpwd = os.getcwd() os.chdir(os.path.join(tmpdir, \"foo_app\")) 
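# Note: _run_template (tests utils) launches main.py with the current interpreter in a subprocess and returns its captured stdout after a timeout, hence the chdir into the generated project first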
stdout = _run_template(\"main.py\") os.chdir(oldpwd) assert \"[Taipy][INFO] * Server starting on\" in str(stdout, \"utf-8\") def test_multipage_gui_template_with_invalid_page_name(tmpdir, capfd): with pytest.raises(FailedHookException): cookiecutter( template=\"src/taipy/templates/default\", output_dir=str(tmpdir), no_input=True, extra_context={ \"Application root folder name\": \"foo_app\", \"Page names in multi-page application?\": \"valid_var_name 1_invalid_var_name\", }, ) _, stderr = capfd.readouterr() assert 'Page name \"1_invalid_var_name\" is not a valid Python identifier' in stderr assert not os.path.exists(os.path.join(tmpdir, \"foo_app\")) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import subprocess import sys def _run_template(main_path, time_out=30): \"\"\"Run the template in a subprocess and return its stdout after the timeout.\"\"\" with subprocess.Popen([sys.executable, main_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: try: stdout, stderr = proc.communicate(timeout=time_out) except subprocess.TimeoutExpired: proc.kill() stdout, stderr = proc.communicate() # Print the error if there is any (for debugging) if stderr := str(stderr, \"utf-8\"): print(stderr) return stdout "} {"text": "import os from cookiecutter.main import cookiecutter from .utils import _run_template def test_scenario_management_with_toml_config(tmpdir): cookiecutter( template=\"src/taipy/templates/scenario-management\", output_dir=tmpdir, no_input=True, extra_context={ \"Application root folder name\": \"foo_app\", \"Application main Python file\": \"main.py\", \"Application title\": \"bar\", \"Does the application use TOML Config?\": \"yes\", }, ) assert os.listdir(tmpdir) == [\"foo_app\"] assert sorted(os.listdir(os.path.join(tmpdir, \"foo_app\"))) == sorted([\"requirements.txt\", \"main.py\", \"algos\", \"config\", \"pages\"]) # Assert post_gen_project hook is successful with open(os.path.join(tmpdir, \"foo_app\", \"requirements.txt\")) as requirements_file: assert \"taipy==\" in requirements_file.read() assert sorted(os.listdir(os.path.join(tmpdir, \"foo_app\", \"config\"))) == sorted([\"__init__.py\", \"config.py\", \"config.toml\"]) with open(os.path.join(tmpdir, \"foo_app\", \"config\", \"config.py\")) as config_file: assert 'Config.load(\"config/config.toml\")' in config_file.read() oldpwd = os.getcwd() os.chdir(os.path.join(tmpdir, \"foo_app\")) stdout = _run_template(\"main.py\") os.chdir(oldpwd) # Assert the message when the application is run successfully is in the stdout assert \"[Taipy][INFO] Configuration 'config/config.toml' successfully loaded.\" in str(stdout, \"utf-8\") assert \"[Taipy][INFO] * Server starting on\" in str(stdout, \"utf-8\") def test_scenario_management_without_toml_config(tmpdir): cookiecutter( template=\"src/taipy/templates/scenario-management\", output_dir=tmpdir, no_input=True, extra_context={ \"Application root folder name\": \"foo_app\", \"Application main Python file\": \"main.py\", \"Application title\": \"bar\", \"Does the application use 
TOML Config?\": \"no\", }, ) assert os.listdir(tmpdir) == [\"foo_app\"] assert ( os.listdir(os.path.join(tmpdir, \"foo_app\")).sort() == [\"requirements.txt\", \"main.py\", \"algos\", \"config\", \"pages\"].sort() ) # Assert post_gen_project hook is successful with open(os.path.join(tmpdir, \"foo_app\", \"requirements.txt\")) as requirements_file: assert \"taipy==\" in requirements_file.read() assert os.listdir(os.path.join(tmpdir, \"foo_app\", \"config\")).sort() == [\"__init__.py\", \"config.py\"].sort() with open(os.path.join(tmpdir, \"foo_app\", \"config\", \"config.py\")) as config_file: config_content = config_file.read() assert 'Config.load(\"config/config.toml\")' not in config_content assert all([x in config_content for x in [\"Config.configure_csv_data_node\", \"Config.configure_task\"]]) oldpwd = os.getcwd() os.chdir(os.path.join(tmpdir, \"foo_app\")) stdout = _run_template(\"main.py\") os.chdir(oldpwd) # Assert the message when the application is run successfully is in the stdout assert \"[Taipy][INFO] * Server starting on\" in str(stdout, \"utf-8\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from config.config import configure from pages import job_page, scenario_page from pages.root import content, root, selected_data_node, selected_scenario import taipy as tp from taipy import Core, Gui def on_init(state): ... 
def on_change(state, var, val): if var == \"selected_data_node\" and val: state[\"scenario\"].manage_data_node_partial(state) pages = { \"/\": root, \"scenario\": scenario_page, \"jobs\": job_page, } if __name__ == \"__main__\": # Instantiate, configure and run the Core core = Core() default_scenario_cfg = configure() core.run() # ################################################################################################################## # PLACEHOLDER: Initialize your data application here # # # # Example: # if len(tp.get_scenarios()) == 0: tp.create_scenario(default_scenario_cfg, name=\"Default Scenario\") # Comment, remove or replace the previous lines with your own use case # # ################################################################################################################## # Instantiate, configure and run the GUI gui = Gui(pages=pages) data_node_partial = gui.add_partial(\"\") gui.run(title=\"{{cookiecutter.__application_title}}\", margin=\"0em\") "} {"text": "from algos import clean_data from taipy import Config, Frequency, Scope def configure(): # ################################################################################################################## # PLACEHOLDER: Add your scenario configurations here # # # # Example: # initial_dataset_cfg = Config.configure_csv_data_node(\"initial_dataset\", scope=Scope.CYCLE) replacement_type_cfg = Config.configure_data_node(\"replacement_type\", default_data=\"NO VALUE\") cleaned_dataset_cfg = Config.configure_csv_data_node(\"cleaned_dataset\") clean_data_cfg = Config.configure_task( \"clean_data\", function=clean_data, input=[initial_dataset_cfg, replacement_type_cfg], output=cleaned_dataset_cfg, ) scenario_cfg = Config.configure_scenario( \"scenario_configuration\", task_configs=[clean_data_cfg], frequency=Frequency.DAILY ) return scenario_cfg # Comment, remove or replace the previous lines with your own use case # # ################################################################################################################## "} {"text": "from taipy import Config def configure(): Config.load(\"config/config.toml\") return Config.scenarios[\"scenario_configuration\"] "} {"text": ""} {"text": "def clean_data(df, replacement_type): df = df.fillna(replacement_type) return df "} {"text": "from .algos import clean_data "} {"text": "from .scenario_page import scenario_page from .job_page import job_page"} {"text": "from taipy.gui import Markdown selected_scenario = None selected_data_node = None content = \"\" root = Markdown(\"pages/root.md\") "} {"text": "from .job_page import job_page "} {"text": "from taipy.gui import Markdown job_page = Markdown(\"pages/job_page/job_page.md\") "} {"text": "from taipy.gui import Markdown, notify from .data_node_management import manage_partial def notify_on_submission(state, submittable, details): if details['submission_status'] == 'COMPLETED': notify(state, \"success\", \"Submission completed!\") elif details['submission_status'] == 'FAILED': notify(state, \"error\", \"Submission failed!\") else: notify(state, \"info\", \"In progress...\") def manage_data_node_partial(state): manage_partial(state) scenario_page = Markdown(\"pages/scenario_page/scenario_page.md\") "} {"text": "from .scenario_page import scenario_page "} {"text": "# build partial content for a specific data node def build_dn_partial(dn, dn_label): partial_content = \"<|part|render={selected_scenario}|\\n\\n\" # 
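# For a data node labeled \"replacement_type\", the fragment assembled below
# would render roughly as (illustrative only):
#   <|part|render={selected_scenario}|
#   All missing values will be replaced by the data node value.
#   <|{selected_scenario.data_nodes['replacement_type']}|data_node|scenario={selected_scenario}|>
#   |>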
################################################################################################################## # PLACEHOLDER: data node specific content before automatic content # # # # Example: # if dn_label == \"replacement_type\": partial_content += \"All missing values will be replaced by the data node value.\" # Comment, remove or replace the previous lines with your own use case # # ################################################################################################################## # Automatic data node content partial_content += \"<|{selected_scenario.data_nodes['\" + dn.config_id + \"']}|data_node|scenario={selected_scenario}|>\\n\\n\" # ################################################################################################################## # PLACEHOLDER: data node specific content after automatic content # # # # Example: # if dn_label == \"initial_dataset\": partial_content += \"Select your CSV file: <|{selected_data_node.path}|file_selector|extensions=.csv|on_action={lambda s: s.refresh('selected_scenario')}|>\\n\\n\" # Comment, remove or replace the previous lines with your own use case # # ################################################################################################################## partial_content += \"|>\\n\\n\" return partial_content def manage_partial(state): dn = state.selected_data_node dn_label = dn.get_simple_label() partial_content = build_dn_partial(dn, dn_label) state.data_node_partial.update_content(state, partial_content) "} {"text": "import os import taipy # Add taipy version to requirements.txt with open(os.path.join(os.getcwd(), \"requirements.txt\"), \"a\") as requirement_file: requirement_file.write(f\"taipy=={taipy.version._get_version()}\\n\") # Use TOML config file or not use_toml_config = \"{{ cookiecutter.__use_toml_config }}\".upper() if use_toml_config == \"YES\" or use_toml_config == \"Y\": os.remove(os.path.join(os.getcwd(), \"config\", \"config.py\")) os.rename( os.path.join(os.getcwd(), \"config\", \"config_with_toml.py\"), os.path.join(os.getcwd(), \"config\", \"config.py\") ) else: os.remove(os.path.join(os.getcwd(), \"config\", \"config_with_toml.py\")) os.remove(os.path.join(os.getcwd(), \"config\", \"config.toml\")) main_file_name = \"{{cookiecutter.__main_file}}.py\" print( f\"New Taipy application has been created at {os.path.join(os.getcwd())}\" f\"\\n\\nTo start the application, change directory to the newly created folder:\" f\"\\n\\tcd {os.path.join(os.getcwd())}\" f\"\\nand run the application as follows:\" f\"\\n\\ttaipy run {main_file_name}\" ) "} {"text": ""} {"text": "\"\"\" Contain the application's configuration including the scenario configurations. The configuration is run by the Core service. \"\"\" from algorithms import * from taipy import Config # ############################################################################# # PLACEHOLDER: Put your application's configurations here # # # # Example: # # scenario_config = Config.configure_scenario(\"placeholder_scenario\", []) # # Comment, remove or replace the previous lines with your own use case # # ############################################################################# "} {"text": "from .config import * "} {"text": "\"\"\" This file is designed to contain the various Python functions used to configure tasks. The functions will be imported by the __init__.py file in this folder. 
\"\"\" # ################################################################################################################## # PLACEHOLDER: Put your Python functions here # # # # Example: # # def place_holder_algorithm(): # # pass # # Comment, remove or replace the previous lines with your own use case # # ################################################################################################################## "} {"text": "from algorithms import * "} {"text": "from .root import root_page "} {"text": "\"\"\" The root page of the application. Page content is imported from the root.md file. Please refer to https://docs.taipy.io/en/latest/manuals/gui/pages for more details. \"\"\" from taipy.gui import Markdown root_page = Markdown(\"pages/root.md\") "} {"text": "\"\"\" A page of the application. Page content is imported from the page_example.md file. Please refer to https://docs.taipy.io/en/latest/manuals/gui/pages for more details. \"\"\" from taipy.gui import Markdown page_example = Markdown(\"pages/page_example/page_example.md\") "} {"text": "import sys pages = \"{{ cookiecutter.__pages }}\".split(\" \") # Remove empty string from pages list pages = [page for page in pages if page != \"\"] for page in pages: if not page.isidentifier(): sys.exit(f'Page name \"{page}\" is not a valid Python identifier. Please choose another name.') "} {"text": "import os import shutil import taipy def handle_services(use_rest, use_core): if use_core or use_rest: # Write \"import taipy as tp\" at the third line of the import.txt file with open(os.path.join(os.getcwd(), \"sections\", \"import.txt\"), \"r\") as import_file: import_lines = import_file.readlines() import_lines[0] = \"import taipy as tp\\n\" + import_lines[0] + \"\\n\" with open(os.path.join(os.getcwd(), \"sections\", \"import.txt\"), \"w\") as import_file: import_file.writelines(import_lines) # Import the necessary services if use_core and use_rest: with open(os.path.join(os.getcwd(), \"sections\", \"import.txt\"), \"a\") as import_file: import_file.write(\"from taipy import Core, Rest\\n\") elif use_core: with open(os.path.join(os.getcwd(), \"sections\", \"import.txt\"), \"a\") as import_file: import_file.write(\"from taipy import Core\\n\") elif use_rest: with open(os.path.join(os.getcwd(), \"sections\", \"import.txt\"), \"a\") as import_file: import_file.write(\"from taipy import Rest\\n\") # Start the Rest service if use_rest: with open(os.path.join(os.getcwd(), \"sections\", \"main.txt\"), \"a\") as main_file: main_file.write(\" rest = Rest()\\n\") if use_core: # Create and submit the placeholder scenario with open(os.path.join(os.getcwd(), \"sections\", \"main.txt\"), \"a\") as main_file: main_file.write(\" core = Core()\\n\") main_file.write(\" core.run()\\n\") main_file.write(\" # #############################################################################\\n\") main_file.write(\" # PLACEHOLDER: Create and submit your scenario here #\\n\") main_file.write(\" # #\\n\") main_file.write(\" # Example: #\\n\") main_file.write(\" # from configuration import scenario_config #\\n\") main_file.write(\" # scenario = tp.create_scenario(scenario_config) #\\n\") main_file.write(\" # scenario.submit() #\\n\") main_file.write(\" # Comment, remove or replace the previous lines with your own use case #\\n\") main_file.write(\" # #############################################################################\\n\") else: shutil.rmtree(os.path.join(os.getcwd(), \"algorithms\")) shutil.rmtree(os.path.join(os.getcwd(), \"configuration\")) def 
handle_run_service(): with open(os.path.join(os.getcwd(), \"sections\", \"main.txt\"), \"a+\") as main_file: main_file.seek(0) main_content = main_file.read() # Run Rest service along with the GUI service if \"rest = Rest()\" in main_content: main_file.write(' tp.run(gui, rest, title=\"{{cookiecutter.__application_title}}\")\\n') else: main_file.write(' gui.run(title=\"{{cookiecutter.__application_title}}\")\\n') def handle_single_page_app(): shutil.rmtree(os.path.join(os.getcwd(), \"pages\")) with open(os.path.join(os.getcwd(), \"sections\", \"main.txt\"), \"a\") as main_file: main_file.write(\"\\n\") main_file.write(\" gui = Gui(page=page)\\n\") handle_run_service() with open(os.path.join(os.getcwd(), \"sections\", \"page_content.txt\"), \"a\") as page_content_file: page_content_file.write( ''' page = \"\"\"
<|navbar|lov={[(\"home\", \"Homepage\")]}|>
\"\"\" ''' ) def handle_multi_page_app(pages): for page_name in pages: os.mkdir(os.path.join(os.getcwd(), \"pages\", page_name)) with open(os.path.join(os.getcwd(), \"pages\", \"page_example\", \"page_example.md\"), \"r\") as page_md_file: page_md_content = page_md_file.read() page_md_content = page_md_content.replace(\"Page example\", page_name.replace(\"_\", \" \").title()) with open(os.path.join(os.getcwd(), \"pages\", page_name, page_name + \".md\"), \"w\") as page_md_file: page_md_file.write(page_md_content) with open(os.path.join(os.getcwd(), \"pages\", \"page_example\", \"page_example.py\"), \"r\") as page_content_file: page_py_content = page_content_file.read() page_py_content = page_py_content.replace(\"page_example\", page_name) with open(os.path.join(os.getcwd(), \"pages\", page_name, page_name + \".py\"), \"w\") as page_content_file: page_content_file.write(page_py_content) with open(os.path.join(os.getcwd(), \"pages\", \"__init__.py\"), \"a\") as page_init_file: for page_name in pages: page_init_file.write(f\"from .{page_name}.{page_name} import {page_name}\\n\") shutil.rmtree(os.path.join(os.getcwd(), \"pages\", \"page_example\")) newline = \",\\n\\t\" user_page_dict = newline.join(f'\"{page_name}\": {page_name}' for page_name in pages) page_dict = \"\"\" pages = { \"/\": root_page, {pages} } \"\"\" with open(os.path.join(os.getcwd(), \"sections\", \"page_content.txt\"), \"a\") as page_content_file: page_content_file.write(page_dict.replace(\"{pages}\", user_page_dict)) with open(os.path.join(os.getcwd(), \"sections\", \"import.txt\"), \"a\") as import_file: import_file.write(\"from pages import *\\n\") with open(os.path.join(os.getcwd(), \"sections\", \"main.txt\"), \"a\") as main_file: main_file.write(\"\\n\") main_file.write(\" gui = Gui(pages=pages)\\n\") handle_run_service() def generate_main_file(): with open(os.path.join(os.getcwd(), \"sections\", \"import.txt\"), \"r\") as import_file: import_lines = import_file.read() with open(os.path.join(os.getcwd(), \"sections\", \"page_content.txt\"), \"r\") as page_content_file: page_content = page_content_file.read() with open(os.path.join(os.getcwd(), \"sections\", \"main.txt\"), \"r\") as main_file: main_lines = main_file.read() with open(os.path.join(os.getcwd(), \"{{cookiecutter.__main_file}}.py\"), \"a\") as app_main_file: app_main_file.write(import_lines) app_main_file.write(\"\\n\") app_main_file.write(page_content) app_main_file.write(\"\\n\\n\") app_main_file.write(main_lines) with open(os.path.join(os.getcwd(), \"requirements.txt\"), \"a\") as requirement_file: requirement_file.write(f\"taipy=={taipy.version._get_version()}\\n\") use_core = \"{{ cookiecutter.__core }}\".upper() use_rest = \"{{ cookiecutter.__rest }}\".upper() handle_services(use_rest in [\"YES\", \"Y\"], use_core in [\"YES\", \"Y\"]) pages = \"{{ cookiecutter.__pages }}\".split(\" \") # Remove empty string from pages list pages = [page for page in pages if page != \"\"] if len(pages) == 0: handle_single_page_app() else: handle_multi_page_app(pages) generate_main_file() # Remove the sections folder shutil.rmtree(os.path.join(os.getcwd(), \"sections\")) main_file_name = \"{{cookiecutter.__main_file}}.py\" print( f\"New Taipy application has been created at {os.path.join(os.getcwd())}\" f\"\\n\\nTo start the application, change directory to the newly created folder:\" f\"\\n\\tcd {os.path.join(os.getcwd())}\" f\"\\nand run the application as follows:\" f\"\\n\\ttaipy run {main_file_name}\" ) "} {"text": "import os import threading from flask import 
Flask from pyngrok import ngrok from hf_hub_ctranslate2 import GeneratorCT2fromHfHub from flask import request, jsonify model_name = \"taipy5-ct2\" # note: this is a local folder model; the model uploaded to Hugging Face did not respond correctly #model_name = \"michaelfeil/ct2fast-starchat-alpha\" #model_name = \"michaelfeil/ct2fast-starchat-beta\" model = GeneratorCT2fromHfHub( # load in int8 on CUDA model_name_or_path=model_name, device=\"cuda\", compute_type=\"int8_float16\", # tokenizer=AutoTokenizer.from_pretrained(\"{ORG}/{NAME}\") ) def generate_text_batch(prompt_texts, max_length=64): outputs = model.generate(prompt_texts, max_length=max_length, include_prompt_in_result=False) return outputs app = Flask(__name__) port = \"5000\" # Open an ngrok tunnel to the HTTP server public_url = ngrok.connect(port).public_url print(\" * ngrok tunnel \\\"{}\\\" -> \\\"http://127.0.0.1:{}\\\"\".format(public_url, port)) # Update any base URLs to use the public ngrok URL app.config[\"BASE_URL\"] = public_url # ... Update inbound traffic via APIs to use the public-facing ngrok URL # Define Flask routes @app.route(\"/\") def index(): return \"Hello from Colab!\" @app.route(\"/api/generate\", methods=[\"POST\"]) def generate_code(): try: # Get the JSON data from the request body data = request.get_json() # Extract 'inputs' and 'parameters' from the JSON data inputs = data.get('inputs', \"\") parameters = data.get('parameters', {}) # Extract the 'max_new_tokens' parameter max_new_tokens = parameters.get('max_new_tokens', 64) # Call the generate_text_batch function with inputs and max_new_tokens generated_text = generate_text_batch([inputs], max_new_tokens)[0] return jsonify({ \"generated_text\": generated_text, \"status\": 200 }) except Exception as e: return jsonify({\"error\": str(e)}) # Start the Flask server in a new thread threading.Thread(target=app.run, kwargs={\"use_reloader\": False}).start() "} {"text": "import os import json def tokenize_code(code, max_characters=256): \"\"\" Tokenize code into snippets of specified max_characters, breaking at new lines. \"\"\" lines = code.split('\\n') snippets = [] current_snippet = \"\" for line in lines: if len(current_snippet) + len(line) + 1 <= max_characters: current_snippet += line + '\\n' else: snippets.append(current_snippet.strip()) current_snippet = line + '\\n' if current_snippet: snippets.append(current_snippet.strip()) return snippets def process_file(file_path): \"\"\" Read a file, tokenize the code, and create snippets. \"\"\" with open(file_path, 'r', encoding='utf-8') as file: content = file.read() # Tokenize code into snippets of at most 256 characters, breaking at new lines snippets = tokenize_code(content) return snippets def escape_string(s): \"\"\" Do not escape triple quotes, double quotes, single quotes, and new lines. 
\"\"\" return s def main(input_folder, output_file): snippets_list = [] for root, dirs, files in os.walk(input_folder): for file in files: if file.endswith(('.py', '.md')): file_path = os.path.join(root, file) snippets = process_file(file_path) for snippet in snippets: escaped_snippet = escape_string(snippet) snippets_list.append({'text': escaped_snippet}) # Write snippets to a JSONL file with open(output_file, 'w', encoding='utf-8') as jsonl_file: for snippet in snippets_list: jsonl_file.write(json.dumps(snippet) + '\\n') if __name__ == \"__main__\": input_folder = 'taipy' # replace with your folder path output_file = 'snippets.jsonl' # replace with your desired output file path main(input_folder, output_file) "} {"text": "from taipy.gui import Gui from tensorflow.keras import models from PIL import Image import numpy as np model = models.load_model(\"assets/baseline.keras\") class_names = { 0: \"airplane\", 1: \"automobile\", 2: \"bird\", 3: \"cat\", 4: \"deer\", 5: \"dog\", 6: \"frog\", 7: \"horse\", 8: \"ship\", 9: \"truck\", } def predict_image(model, path_to_image): img = Image.open(path_to_image) img = img.convert(\"RGB\").resize((32, 32)) # Normalizing image data = np.asarray(img) print(\"Before: \", data[0][0]) # Printing color of very first pixel data = data / 255 # Comparing stuff to see if we broke something print(\"After: \", data[0][0]) # Printing color of very first pixel # Tricking model into thinking it is looking at an array of sample images and not a single image probability = model.predict(np.array([data])[:1]) probes = probability.max() prediction = class_names[np.argmax(probability)] return (probes, prediction) image_path = \"assets/placeholder_image.png\" prediction, prob, content = \"\", \"\", \"\" image_control_component = \"\"\" <|text-center| <|{\"assets/logo.png\"}|image|width=10vw|height=25vh|> <|{content}|file_selector|extensions=.png|> Select an image! 
<|{prediction}|> <|{image_path}|image|> <|{prob}|indicator|value={prob}|min=0|max=100|width=25vw|> > \"\"\" index = image_control_component def on_change(state, variable_name, variable_value): if variable_name == \"content\": state.image_path = variable_value probes, prediction = predict_image(model, variable_value) state.prob = round(probes * 100) # Converting decimal to percentage state.prediction = f\"This is a : {prediction}\" app = Gui(page=index) if __name__ == \"__main__\": app.run(use_reloader=True) "} {"text": "from taipy.gui import Gui from tensorflow.keras import models from PIL import Image import numpy as np model = models.load_model(\"baseline_mariya.keras\") class_names = { 0: \"airplane\", 1: \"automobile\", 2: \"bird\", 3: \"cat\", 4: \"deer\", 5: \"dog\", 6: \"frog\", 7: \"horse\", 8: \"ship\", 9: \"truck\", } def predict_image(model, path_to_image): img = Image.open(path_to_image) img = img.convert(\"RGB\").resize((32, 32)) # Normalizing image data = np.asarray(img) print(\"Before: \", data[0][0]) # Printing color of very first pixel data = data / 255 # Comparing stuff to see if we broke something print(\"After: \", data[0][0]) # Printing color of very first pixel # Tricking model into thinking it is looking at an array of sample images and not a single image probability = model.predict(np.array([data])[:1]) print(probability) probes = probability.max() prediction = class_names[np.argmax(probability)] return (probes, prediction) image_path = \"placeholder_image.png\" prediction, prob, content = \"\", \"\", \"\" image_control_component = \"\"\" <|text-center| <|{\"logo.png\"}|image|width=25vw|> <|{content}|file_selector|extensions=.png|> Select an image! <|{prediction}|> <|{image_path}|image|> <|{prob}|indicator|value={prob}|min=0|max=100|width=25vw|> > \"\"\" index = image_control_component def on_change(state, variable_name, variable_value): if variable_name == \"content\": state.image_path = variable_value probes, prediction = predict_image(model, variable_value) state.prob = round(probes * 100) # Converting decimal to percentage state.prediction = f\"This is a : {prediction}\" app = Gui(page=index) if __name__ == \"__main__\": app.run(use_reloader=True) "} {"text": "print(\"Hello, World!\") print(\"Hi Taipy!\")"} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
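# The block below assembles the package version from version.json; e.g. a file
# containing {\"major\": 2, \"minor\": 3, \"patch\": 0, \"ext\": \"dev0\"} yields the
# version string \"2.3.0.dev0\".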
import json import os from setuptools import find_namespace_packages, find_packages, setup with open(\"README.md\") as readme_file: readme = readme_file.read() with open(f\"src{os.sep}taipy{os.sep}rest{os.sep}version.json\") as version_file: version = json.load(version_file) version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}' if vext := version.get(\"ext\"): version_string = f\"{version_string}.{vext}\" setup( author=\"Avaiga\", name=\"taipy-rest\", keywords=\"taipy-rest\", python_requires=\">=3.8\", version=version_string, author_email=\"dev@taipy.io\", packages=find_namespace_packages(where=\"src\") + find_packages(include=[\"taipy\", \"taipy.rest\"]), package_dir={\"\": \"src\"}, include_package_data=True, long_description=readme, long_description_content_type=\"text/markdown\", description=\"Library to expose taipy-core REST APIs.\", license=\"Apache License 2.0\", classifiers=[ \"Intended Audience :: Developers\", \"License :: OSI Approved :: Apache Software License\", \"Natural Language :: English\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.8\", \"Programming Language :: Python :: 3.9\", \"Programming Language :: Python :: 3.10\", \"Programming Language :: Python :: 3.11\", ], install_requires=[ \"flask>=3.0.0,<3.1\", \"flask-restful>=0.3.9,<0.4\", \"passlib>=1.7.4,<1.8\", \"marshmallow>=3.20.1,<3.30\", \"apispec[yaml]>=6.3,<7.0\", \"apispec-webframeworks>=0.5.2,<0.6\", \"taipy-core@git+https://git@github.com/Avaiga/taipy-core.git@develop\", ], ) "} {"text": "from unittest import mock import pytest from flask import url_for from src.taipy.rest.api.exceptions.exceptions import ScenarioIdMissingException, SequenceNameMissingException from taipy.core.exceptions.exceptions import NonExistingScenario from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory def test_get_sequence(client, default_sequence): # test 404 user_url = url_for(\"api.sequence_by_id\", sequence_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.sequence._sequence_manager._SequenceManager._get\") as manager_mock: manager_mock.return_value = default_sequence # test get_sequence rep = client.get(url_for(\"api.sequence_by_id\", sequence_id=\"foo\")) assert rep.status_code == 200 def test_delete_sequence(client): # test 404 user_url = url_for(\"api.sequence_by_id\", sequence_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.sequence._sequence_manager._SequenceManager._delete\"), mock.patch( \"taipy.core.sequence._sequence_manager._SequenceManager._get\" ): # test get_sequence rep = client.delete(url_for(\"api.sequence_by_id\", sequence_id=\"foo\")) assert rep.status_code == 200 def test_create_sequence(client, default_scenario): sequences_url = url_for(\"api.sequences\") rep = client.post(sequences_url, json={}) assert rep.status_code == 400 assert rep.json == {\"message\": \"Scenario id is missing.\"} sequences_url = url_for(\"api.sequences\") rep = client.post(sequences_url, json={\"scenario_id\": \"SCENARIO_scenario_id\"}) assert rep.status_code == 400 assert rep.json == {\"message\": \"Sequence name is missing.\"} sequences_url = url_for(\"api.sequences\") rep = client.post(sequences_url, json={\"scenario_id\": \"SCENARIO_scenario_id\", \"sequence_name\": \"sequence\"}) assert rep.status_code == 404 _ScenarioManagerFactory._build_manager()._set(default_scenario) with 
mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._get\") as config_mock: config_mock.return_value = default_scenario sequences_url = url_for(\"api.sequences\") rep = client.post( sequences_url, json={\"scenario_id\": default_scenario.id, \"sequence_name\": \"sequence\", \"tasks\": []} ) assert rep.status_code == 201 def test_get_all_sequences(client, default_scenario_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config\") as config_mock: config_mock.return_value = default_scenario_config_list[ds] scenario_url = url_for(\"api.scenarios\", config_id=config_mock.name) client.post(scenario_url) sequences_url = url_for(\"api.sequences\") rep = client.get(sequences_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 @pytest.mark.xfail() def test_execute_sequence(client, default_sequence): # test 404 user_url = url_for(\"api.sequence_submit\", sequence_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.sequence._sequence_manager._SequenceManager._get\") as manager_mock: manager_mock.return_value = default_sequence # test get_sequence rep = client.post(url_for(\"api.sequence_submit\", sequence_id=\"foo\")) assert rep.status_code == 200 "} {"text": "from unittest import mock from flask import url_for def test_get_job(client, default_job): # test 404 user_url = url_for(\"api.job_by_id\", job_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.job._job_manager._JobManager._get\") as manager_mock: manager_mock.return_value = default_job # test get_job rep = client.get(url_for(\"api.job_by_id\", job_id=\"foo\")) assert rep.status_code == 200 def test_delete_job(client): # test 404 user_url = url_for(\"api.job_by_id\", job_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.job._job_manager._JobManager._delete\"), mock.patch( \"taipy.core.job._job_manager._JobManager._get\" ): # test get_job rep = client.delete(url_for(\"api.job_by_id\", job_id=\"foo\")) assert rep.status_code == 200 def test_create_job(client, default_task_config): # without config param jobs_url = url_for(\"api.jobs\") rep = client.post(jobs_url) assert rep.status_code == 400 with mock.patch(\"src.taipy.rest.api.resources.job.JobList.fetch_config\") as config_mock: config_mock.return_value = default_task_config jobs_url = url_for(\"api.jobs\", task_id=\"foo\") rep = client.post(jobs_url) assert rep.status_code == 201 def test_get_all_jobs(client, create_job_list): jobs_url = url_for(\"api.jobs\") rep = client.get(jobs_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 def test_cancel_job(client, default_job): # test 404 from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory _OrchestratorFactory._build_orchestrator() _OrchestratorFactory._build_dispatcher() user_url = url_for(\"api.job_cancel\", job_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.job._job_manager._JobManager._get\") as manager_mock: manager_mock.return_value = default_job # test get_job rep = client.post(url_for(\"api.job_cancel\", job_id=\"foo\")) assert rep.status_code == 200 "} {"text": "import os import shutil import uuid from datetime import datetime, timedelta import pandas as pd import pytest from dotenv import load_dotenv from src.taipy.rest.app import create_app from taipy.config import Config from 
taipy.config.common.frequency import Frequency from taipy.config.common.scope import Scope from taipy.core import Cycle, DataNodeId, Job, JobId, Scenario, Sequence, Task from taipy.core.cycle._cycle_manager import _CycleManager from taipy.core.data.in_memory import InMemoryDataNode from taipy.core.job._job_manager import _JobManager from taipy.core.task._task_manager import _TaskManager from .setup.shared.algorithms import evaluate, forecast @pytest.fixture def setup_end_to_end(): model_cfg = Config.configure_data_node(\"model\", path=\"setup/my_model.p\", storage_type=\"pickle\") day_cfg = Config.configure_data_node(id=\"day\") forecasts_cfg = Config.configure_data_node(id=\"forecasts\") forecast_task_cfg = Config.configure_task( id=\"forecast_task\", input=[model_cfg, day_cfg], function=forecast, output=forecasts_cfg, ) historical_temperature_cfg = Config.configure_data_node( \"historical_temperature\", storage_type=\"csv\", path=\"setup/historical_temperature.csv\", has_header=True, ) evaluation_cfg = Config.configure_data_node(\"evaluation\") evaluate_task_cfg = Config.configure_task( \"evaluate_task\", input=[historical_temperature_cfg, forecasts_cfg, day_cfg], function=evaluate, output=evaluation_cfg, ) scenario_config = Config.configure_scenario( \"scenario\", [forecast_task_cfg, evaluate_task_cfg], frequency=Frequency.DAILY ) scenario_config.add_sequences({\"sequence\": [forecast_task_cfg, evaluate_task_cfg]}) @pytest.fixture() def app(): load_dotenv(\".testenv\") app = create_app(testing=True) app.config.update( { \"TESTING\": True, } ) with app.app_context(), app.test_request_context(): yield app @pytest.fixture() def client(app): return app.test_client() @pytest.fixture def datanode_data(): return { \"name\": \"foo\", \"storage_type\": \"in_memory\", \"scope\": \"scenario\", \"default_data\": [\"1991-01-01T00:00:00\"], } @pytest.fixture def task_data(): return { \"config_id\": \"foo\", \"input_ids\": [\"DATASOURCE_foo_3b888e17-1974-4a56-a42c-c7c96bc9cd54\"], \"function_name\": \"print\", \"function_module\": \"builtins\", \"output_ids\": [\"DATASOURCE_foo_4d9923b8-eb9f-4f3c-8055-3a1ce8bee309\"], } @pytest.fixture def sequence_data(): return { \"name\": \"foo\", \"task_ids\": [\"TASK_foo_3b888e17-1974-4a56-a42c-c7c96bc9cd54\"], } @pytest.fixture def scenario_data(): return { \"name\": \"foo\", \"sequence_ids\": [\"SEQUENCE_foo_3b888e17-1974-4a56-a42c-c7c96bc9cd54\"], \"properties\": {}, } @pytest.fixture def default_datanode(): return InMemoryDataNode( \"input_ds\", Scope.SCENARIO, DataNodeId(\"f\"), \"my name\", \"owner_id\", properties={\"default_data\": [1, 2, 3, 4, 5, 6]}, ) @pytest.fixture def default_df_datanode(): return InMemoryDataNode( \"input_ds\", Scope.SCENARIO, DataNodeId(\"id_uio2\"), \"my name\", \"owner_id\", properties={\"default_data\": pd.DataFrame([{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}, {\"a\": 5, \"b\": 6}])}, ) @pytest.fixture def default_datanode_config(): return Config.configure_data_node(f\"taipy_{uuid.uuid4().hex}\", \"in_memory\", Scope.SCENARIO) @pytest.fixture def default_datanode_config_list(): configs = [] for i in range(10): configs.append(Config.configure_data_node(id=f\"ds_{i}\", storage_type=\"in_memory\", scope=Scope.SCENARIO)) return configs def __default_task(): input_ds = InMemoryDataNode( \"input_ds\", Scope.SCENARIO, DataNodeId(\"id_uio\"), \"my name\", \"owner_id\", properties={\"default_data\": \"In memory Data Source\"}, ) output_ds = InMemoryDataNode( \"output_ds\", Scope.SCENARIO, DataNodeId(\"id_uio\"), \"my name\", 
\"owner_id\", properties={\"default_data\": \"In memory Data Source\"}, ) return Task( config_id=\"foo\", properties={}, function=print, input=[input_ds], output=[output_ds], id=None, ) @pytest.fixture def default_task(): return __default_task() @pytest.fixture def default_task_config(): return Config.configure_task(\"task1\", print, [], []) @pytest.fixture def default_task_config_list(): configs = [] for i in range(10): configs.append(Config.configure_task(f\"task_{i}\", print, [], [])) return configs def __default_sequence(): return Sequence(properties={\"name\": \"foo\"}, tasks=[__default_task()], sequence_id=\"SEQUENCE_foo_SCENARIO_acb\") def __task_config(): return Config.configure_task(\"task1\", print, [], []) @pytest.fixture def default_sequence(): return __default_sequence() @pytest.fixture def default_scenario_config(): task_config = __task_config() scenario_config = Config.configure_scenario( f\"taipy_{uuid.uuid4().hex}\", [task_config], ) scenario_config.add_sequences({\"sequence\": [task_config]}) return scenario_config @pytest.fixture def default_scenario_config_list(): configs = [] for _ in range(10): task_config = Config.configure_task(f\"taipy_{uuid.uuid4().hex}\", print) scenario_config = Config.configure_scenario( f\"taipy_{uuid.uuid4().hex}\", [task_config], ) scenario_config.add_sequences({\"sequence\": [task_config]}) configs.append(scenario_config) return configs @pytest.fixture def default_scenario(): return Scenario(config_id=\"foo\", properties={}, tasks=[__default_task()], scenario_id=\"SCENARIO_scenario_id\") def __create_cycle(name=\"foo\"): now = datetime.now() return Cycle( name=name, frequency=Frequency.DAILY, properties={}, creation_date=now, start_date=now, end_date=now + timedelta(days=5), ) @pytest.fixture def create_cycle_list(): cycles = [] manager = _CycleManager for i in range(10): c = __create_cycle(f\"cycle_{1}\") return cycles @pytest.fixture def cycle_data(): return { \"name\": \"foo\", \"frequency\": \"daily\", \"properties\": {}, \"creation_date\": \"2022-02-03T22:17:27.317114\", \"start_date\": \"2022-02-03T22:17:27.317114\", \"end_date\": \"2022-02-08T22:17:27.317114\", } @pytest.fixture def default_cycle(): return __create_cycle() def __create_job(): task_manager = _TaskManager task = __default_task() task_manager._set(task) submit_id = f\"SUBMISSION_{str(uuid.uuid4())}\" return Job(id=JobId(f\"JOB_{uuid.uuid4()}\"), task=task, submit_id=submit_id, submit_entity_id=task.id) @pytest.fixture def default_job(): return __create_job() @pytest.fixture def create_job_list(): jobs = [] manager = _JobManager for i in range(10): c = __create_job() return jobs @pytest.fixture(scope=\"function\", autouse=True) def cleanup_files(): Config.unblock_update() if os.path.exists(\".data\"): shutil.rmtree(\".data\") "} {"text": "from unittest import mock from flask import url_for def test_get_task(client, default_task): # test 404 user_url = url_for(\"api.task_by_id\", task_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.task._task_manager._TaskManager._get\") as manager_mock: manager_mock.return_value = default_task # test get_task rep = client.get(url_for(\"api.task_by_id\", task_id=\"foo\")) assert rep.status_code == 200 def test_delete_task(client): # test 404 user_url = url_for(\"api.task_by_id\", task_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.task._task_manager._TaskManager._delete\"), mock.patch( \"taipy.core.task._task_manager._TaskManager._get\" ): 
# test get_task rep = client.delete(url_for(\"api.task_by_id\", task_id=\"foo\")) assert rep.status_code == 200 def test_create_task(client, default_task_config): # without config param tasks_url = url_for(\"api.tasks\") rep = client.post(tasks_url) assert rep.status_code == 400 # config does not exist tasks_url = url_for(\"api.tasks\", config_id=\"foo\") rep = client.post(tasks_url) assert rep.status_code == 404 with mock.patch(\"src.taipy.rest.api.resources.task.TaskList.fetch_config\") as config_mock: config_mock.return_value = default_task_config tasks_url = url_for(\"api.tasks\", config_id=\"bar\") rep = client.post(tasks_url) assert rep.status_code == 201 def test_get_all_tasks(client, task_data, default_task_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.task.TaskList.fetch_config\") as config_mock: config_mock.return_value = default_task_config_list[ds] tasks_url = url_for(\"api.tasks\", config_id=config_mock.name) client.post(tasks_url) rep = client.get(tasks_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 def test_execute_task(client, default_task): # test 404 user_url = url_for(\"api.task_submit\", task_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.task._task_manager._TaskManager._get\") as manager_mock: manager_mock.return_value = default_task # test get_task rep = client.post(url_for(\"api.task_submit\", task_id=\"foo\")) assert rep.status_code == 200 "} {"text": "from functools import wraps from unittest.mock import MagicMock, patch from src.taipy.rest.api.middlewares._middleware import _middleware def mock_enterprise_middleware(f): @wraps(f) def wrapper(*args, **kwargs): return f(*args, **kwargs) return wrapper @patch(\"src.taipy.rest.api.middlewares._middleware._using_enterprise\") @patch(\"src.taipy.rest.api.middlewares._middleware._enterprise_middleware\") def test_enterprise_middleware_applied_when_enterprise_is_installed( enterprise_middleware: MagicMock, using_enterprise: MagicMock ): enterprise_middleware.return_value = mock_enterprise_middleware using_enterprise.return_value = True @_middleware def f(): return \"f\" rv = f() assert rv == \"f\" using_enterprise.assert_called_once() enterprise_middleware.assert_called_once() @patch(\"src.taipy.rest.api.middlewares._middleware._using_enterprise\") @patch(\"src.taipy.rest.api.middlewares._middleware._enterprise_middleware\") def test_enterprise_middleware_not_applied_when_enterprise_is_not_installed( enterprise_middleware: MagicMock, using_enterprise: MagicMock ): enterprise_middleware.return_value = mock_enterprise_middleware using_enterprise.return_value = False @_middleware def f(): return \"f\" rv = f() assert rv == \"f\" using_enterprise.assert_called_once() enterprise_middleware.assert_not_called() "} {"text": "from unittest import mock import pytest from flask import url_for def test_get_datanode(client, default_datanode): # test 404 user_url = url_for(\"api.datanode_by_id\", datanode_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.data._data_manager._DataManager._get\") as manager_mock: manager_mock.return_value = default_datanode # test get_datanode rep = client.get(url_for(\"api.datanode_by_id\", datanode_id=\"foo\")) assert rep.status_code == 200 def test_delete_datanode(client): # test 404 user_url = url_for(\"api.datanode_by_id\", datanode_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with 
mock.patch(\"taipy.core.data._data_manager._DataManager._delete\"), mock.patch( \"taipy.core.data._data_manager._DataManager._get\" ): # test get_datanode rep = client.delete(url_for(\"api.datanode_by_id\", datanode_id=\"foo\")) assert rep.status_code == 200 def test_create_datanode(client, default_datanode_config): # without config param datanodes_url = url_for(\"api.datanodes\") rep = client.post(datanodes_url) assert rep.status_code == 400 # config does not exist datanodes_url = url_for(\"api.datanodes\", config_id=\"foo\") rep = client.post(datanodes_url) assert rep.status_code == 404 with mock.patch(\"src.taipy.rest.api.resources.datanode.DataNodeList.fetch_config\") as config_mock: config_mock.return_value = default_datanode_config datanodes_url = url_for(\"api.datanodes\", config_id=\"bar\") rep = client.post(datanodes_url) assert rep.status_code == 201 def test_get_all_datanodes(client, default_datanode_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.datanode.DataNodeList.fetch_config\") as config_mock: config_mock.return_value = default_datanode_config_list[ds] datanodes_url = url_for(\"api.datanodes\", config_id=config_mock.name) client.post(datanodes_url) rep = client.get(datanodes_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 def test_read_datanode(client, default_df_datanode): with mock.patch(\"taipy.core.data._data_manager._DataManager._get\") as config_mock: config_mock.return_value = default_df_datanode # without operators datanodes_url = url_for(\"api.datanode_reader\", datanode_id=\"foo\") rep = client.get(datanodes_url, json={}) assert rep.status_code == 200 # Without operators and body rep = client.get(datanodes_url) assert rep.status_code == 200 # TODO: Revisit filter test # operators = {\"operators\": [{\"key\": \"a\", \"value\": 5, \"operator\": \"LESS_THAN\"}]} # rep = client.get(datanodes_url, json=operators) # assert rep.status_code == 200 def test_write_datanode(client, default_datanode): with mock.patch(\"taipy.core.data._data_manager._DataManager._get\") as config_mock: config_mock.return_value = default_datanode # Get DataNode datanodes_read_url = url_for(\"api.datanode_reader\", datanode_id=default_datanode.id) rep = client.get(datanodes_read_url, json={}) assert rep.status_code == 200 assert rep.json == {\"data\": [1, 2, 3, 4, 5, 6]} datanodes_write_url = url_for(\"api.datanode_writer\", datanode_id=default_datanode.id) rep = client.put(datanodes_write_url, json=[1, 2, 3]) assert rep.status_code == 200 rep = client.get(datanodes_read_url, json={}) assert rep.status_code == 200 assert rep.json == {\"data\": [1, 2, 3]} "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": "import json from typing import Dict from flask import url_for def create_and_submit_scenario(config_id: str, client) -> Dict: response = client.post(url_for(\"api.scenarios\", config_id=config_id)) assert response.status_code == 201 scenario = response.json.get(\"scenario\") assert (set(scenario) - set(json.load(open(\"tests/json/expected/scenario.json\")))) == set() response = client.post(url_for(\"api.scenario_submit\", scenario_id=scenario.get(\"id\"))) assert response.status_code == 200 return scenario def get(url, name, client) -> Dict: response = client.get(url) returned_data = response.json.get(name) assert (set(returned_data) - set(json.load(open(f\"tests/json/expected/{name}.json\")))) == set() return returned_data def get_assert_status(url, client, status_code) -> None: response = client.get(url) assert response.status_code == status_code def get_all(url, expected_quantity, client): response = client.get(url) assert len(response.json) == expected_quantity def delete(url, client): response = client.delete(url) assert response.status_code == 200 def test_end_to_end(client, setup_end_to_end): # Create Scenario: Should also create all of its dependencies(sequences, tasks, datanodes, etc) scenario = create_and_submit_scenario(\"scenario\", client) # Get other models and verify if they return the necessary fields cycle = get(url_for(\"api.cycle_by_id\", cycle_id=scenario.get(\"cycle\")), \"cycle\", client) sequence = get( url_for(\"api.sequence_by_id\", sequence_id=f\"SEQUENCE_sequence_{scenario['id']}\"), \"sequence\", client, ) task = get(url_for(\"api.task_by_id\", task_id=sequence.get(\"tasks\")[0]), \"task\", client) datanode = get( url_for(\"api.datanode_by_id\", datanode_id=task.get(\"input_ids\")[0]), \"datanode\", client, ) # Get All get_all(url_for(\"api.scenarios\"), 1, client) get_all(url_for(\"api.cycles\"), 1, client) get_all(url_for(\"api.sequences\"), 1, client) get_all(url_for(\"api.tasks\"), 2, client) get_all(url_for(\"api.datanodes\"), 5, client) get_all(url_for(\"api.jobs\"), 2, client) # Delete entities delete(url_for(\"api.cycle_by_id\", cycle_id=cycle.get(\"id\")), client) delete(url_for(\"api.sequence_by_id\", sequence_id=sequence.get(\"id\")), client) delete(url_for(\"api.task_by_id\", task_id=task.get(\"id\")), client) delete(url_for(\"api.datanode_by_id\", datanode_id=datanode.get(\"id\")), client) # Check status code # Non-existing entities should return 404 get_assert_status(url_for(\"api.cycle_by_id\", cycle_id=9999999), client, 404) get_assert_status(url_for(\"api.scenario_by_id\", scenario_id=9999999), client, 404) get_assert_status(url_for(\"api.sequence_by_id\", sequence_id=9999999), client, 404) get_assert_status(url_for(\"api.task_by_id\", task_id=9999999), client, 404) get_assert_status(url_for(\"api.datanode_by_id\", datanode_id=9999999), client, 404) # Check URL with and without trailing slashes url_with_slash = url_for(\"api.scenarios\") url_without_slash = url_for(\"api.scenarios\")[:-1] get_all(url_with_slash, 1, client) get_all(url_without_slash, 1, client) "} {"text": "from unittest import mock from flask import url_for def test_get_cycle(client, default_cycle): # test 404 cycle_url = url_for(\"api.cycle_by_id\", cycle_id=\"foo\") rep = client.get(cycle_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.cycle._cycle_manager._CycleManager._get\") as manager_mock: manager_mock.return_value = default_cycle # test get_cycle rep = client.get(url_for(\"api.cycle_by_id\", cycle_id=\"foo\")) assert rep.status_code == 200 
def test_delete_cycle(client): # test 404 cycle_url = url_for(\"api.cycle_by_id\", cycle_id=\"foo\") rep = client.get(cycle_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.cycle._cycle_manager._CycleManager._delete\"), mock.patch( \"taipy.core.cycle._cycle_manager._CycleManager._get\" ): # test get_cycle rep = client.delete(url_for(\"api.cycle_by_id\", cycle_id=\"foo\")) assert rep.status_code == 200 def test_create_cycle(client, cycle_data): # without config param cycles_url = url_for(\"api.cycles\") data = {\"bad\": \"data\"} rep = client.post(cycles_url, json=data) assert rep.status_code == 400 rep = client.post(cycles_url, json=cycle_data) assert rep.status_code == 201 def test_get_all_cycles(client, create_cycle_list): cycles_url = url_for(\"api.cycles\") rep = client.get(cycles_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 "} {"text": "from unittest import mock import pytest from flask import url_for def test_get_scenario(client, default_scenario): # test 404 user_url = url_for(\"api.scenario_by_id\", scenario_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._get\") as manager_mock: manager_mock.return_value = default_scenario # test get_scenario rep = client.get(url_for(\"api.scenario_by_id\", scenario_id=\"foo\")) assert rep.status_code == 200 def test_delete_scenario(client): # test 404 user_url = url_for(\"api.scenario_by_id\", scenario_id=\"foo\") rep = client.get(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._delete\"), mock.patch( \"taipy.core.scenario._scenario_manager._ScenarioManager._get\" ): # test get_scenario rep = client.delete(url_for(\"api.scenario_by_id\", scenario_id=\"foo\")) assert rep.status_code == 200 def test_create_scenario(client, default_scenario_config): # without config param scenarios_url = url_for(\"api.scenarios\") rep = client.post(scenarios_url) assert rep.status_code == 400 # config does not exist scenarios_url = url_for(\"api.scenarios\", config_id=\"foo\") rep = client.post(scenarios_url) assert rep.status_code == 404 with mock.patch(\"src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config\") as config_mock: config_mock.return_value = default_scenario_config scenarios_url = url_for(\"api.scenarios\", config_id=\"bar\") rep = client.post(scenarios_url) assert rep.status_code == 201 def test_get_all_scenarios(client, default_sequence, default_scenario_config_list): for ds in range(10): with mock.patch(\"src.taipy.rest.api.resources.scenario.ScenarioList.fetch_config\") as config_mock: config_mock.return_value = default_scenario_config_list[ds] scenarios_url = url_for(\"api.scenarios\", config_id=config_mock.name) client.post(scenarios_url) rep = client.get(scenarios_url) assert rep.status_code == 200 results = rep.get_json() assert len(results) == 10 @pytest.mark.xfail() def test_execute_scenario(client, default_scenario): # test 404 user_url = url_for(\"api.scenario_submit\", scenario_id=\"foo\") rep = client.post(user_url) assert rep.status_code == 404 with mock.patch(\"taipy.core.scenario._scenario_manager._ScenarioManager._get\") as manager_mock: manager_mock.return_value = default_scenario # test get_scenario rep = client.post(url_for(\"api.scenario_submit\", scenario_id=\"foo\")) assert rep.status_code == 200 "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file 
except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "import pickle import random from datetime import datetime, timedelta from typing import Any, Dict import pandas as pd n_predictions = 14 def forecast(model, date: datetime): dates = [date + timedelta(days=i) for i in range(n_predictions)] forecasts = [f + random.uniform(0, 2) for f in model.forecast(len(dates))] days = [str(dt.date()) for dt in dates] res = {\"Date\": days, \"Forecast\": forecasts} return pd.DataFrame.from_dict(res) def evaluate(cleaned: pd.DataFrame, forecasts: pd.DataFrame, date: datetime) -> Dict[str, Any]: cleaned = cleaned[cleaned[\"Date\"].isin(forecasts[\"Date\"].tolist())] forecasts_as_series = pd.Series(forecasts[\"Forecast\"].tolist(), name=\"Forecast\") res = pd.concat([cleaned.reset_index(), forecasts_as_series], axis=1) res[\"Delta\"] = abs(res[\"Forecast\"] - res[\"Value\"]) return { \"Date\": date, \"Dataframe\": res, \"Mean_absolute_error\": res[\"Delta\"].mean(), \"Relative_error\": (res[\"Delta\"].mean() * 100) / res[\"Value\"].mean(), } if __name__ == \"__main__\": model = pickle.load(open(\"../my_model.p\", \"rb\")) day = datetime(2020, 1, 25) forecasts = forecast(model, day) historical_temperature = pd.read_csv(\"../historical_temperature.csv\") evaluation = evaluate(historical_temperature, forecasts, day) print(evaluation[\"Dataframe\"]) print() print(f'Mean absolute error : {evaluation[\"Mean_absolute_error\"]}') print(f'Relative error in %: {evaluation[\"Relative_error\"]}') "} {"text": "from taipy.core import Config, Frequency from .algorithms import evaluate, forecast model_cfg = Config.configure_data_node(\"model\", path=\"my_model.p\", storage_type=\"pickle\") day_cfg = Config.configure_data_node(id=\"day\") forecasts_cfg = Config.configure_data_node(id=\"forecasts\") forecast_task_cfg = Config.configure_task( id=\"forecast_task\", input=[model_cfg, day_cfg], function=forecast, output=forecasts_cfg, ) historical_temperature_cfg = Config.configure_data_node( \"historical_temperature\", storage_type=\"csv\", path=\"historical_temperature.csv\", has_header=True, ) evaluation_cfg = Config.configure_data_node(\"evaluation\") evaluate_task_cfg = Config.configure_task( \"evaluate_task\", input=[historical_temperature_cfg, forecasts_cfg, day_cfg], function=evaluate, output=evaluation_cfg, ) scenario_cfg = Config.configure_scenario(\"scenario\", [forecast_task_cfg, evaluate_task_cfg], frequency=Frequency.DAILY) scenario_cfg.add_sequences({\"sequence\": [forecast_task_cfg, evaluate_task_cfg]}) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
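A short sketch of how the scenario configuration above would typically be driven end to end. The module name `config` and the entry-point layout are assumptions; writing to `scenario.day` and reading `scenario.evaluation` uses the attribute-access-by-config-id pattern seen elsewhere in this collection:

```python
# Hedged usage sketch: create a scenario from scenario_cfg, feed the `day`
# data node, submit, and read the `evaluation` output. Assumes my_model.p and
# historical_temperature.csv exist at the configured paths.
from datetime import datetime

import taipy as tp

from config import scenario_cfg  # assumed module name for the file above

if __name__ == "__main__":
    tp.Core().run()
    scenario = tp.create_scenario(scenario_cfg, name="Evaluation 2020-01-25")
    scenario.day.write(datetime(2020, 1, 25))
    tp.submit(scenario)
    evaluation = scenario.evaluation.read()
    print(evaluation["Mean_absolute_error"], evaluation["Relative_error"])
```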
"} {"text": "from importlib.util import find_spec if find_spec(\"taipy\"): if find_spec(\"taipy.config\"): from taipy.config._init import * # type: ignore if find_spec(\"taipy.gui\"): from taipy.gui._init import * # type: ignore if find_spec(\"taipy.core\"): from taipy.core._init import * # type: ignore if find_spec(\"taipy.rest\"): from taipy.rest._init import * # type: ignore if find_spec(\"taipy.gui_core\"): from taipy.gui_core._init import * # type: ignore if find_spec(\"taipy.enterprise\"): from taipy.enterprise._init import * # type: ignore if find_spec(\"taipy._run\"): from taipy._run import _run as run # type: ignore "} {"text": "import json import os def _get_version(): with open(f\"{os.path.dirname(os.path.abspath(__file__))}{os.sep}version.json\") as version_file: version = json.load(version_file) version_string = f'{version.get(\"major\", 0)}.{version.get(\"minor\", 0)}.{version.get(\"patch\", 0)}' if vext := version.get(\"ext\"): version_string = f\"{version_string}.{vext}\" return version_string "} {"text": "from .rest import Rest "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. from taipy.config import Config from .app import create_app as _create_app class Rest: \"\"\" Runnable Rest application serving REST APIs on top of Taipy Core functionalities. \"\"\" def __init__(self): \"\"\" Initialize a REST API server. A Flask application is instantiated and configured using three parameters from the global config. - Config.global_config.testing (bool): Run the application on testing mode. - Config.global_config.env (Optional[str]): The application environment. - Config.global_config.secret_key (Optional[str]): Application server secret key. However, editing these parameters is only recommended for advanced users. Indeed, the default behavior of the REST server without any required configuration satisfies all the standard and basic needs. \"\"\" self._app = _create_app(Config.global_config.testing or False, Config.global_config.env, Config.global_config.secret_key) def run(self, **kwargs): \"\"\" Start a REST API server. This method is blocking. Parameters: **kwargs : Options to provide to the application server. \"\"\" self._app.run(**kwargs) "} {"text": "\"\"\"# Taipy Rest The Taipy Rest package exposes the Runnable `Rest^` service to provide REST APIs on top of Taipy Core. (more details on Taipy Core functionalities in the [user manual](../../../manuals/core/)). Once the `Rest^` service runs, users can call REST APIs to create, read, update, submit and remove Taipy entities (including cycles, scenarios, sequences, tasks, jobs, and data nodes). It is handy when it comes to integrating a Taipy application in a more complex IT ecosystem. 
Please refer to [REST API](../../reference_rest/) page to get the exhaustive list of available APIs.\"\"\" from ._init import * from .version import _get_version __version__ = _get_version() "} {"text": "\"\"\"Extensions registry All extensions here are used as singletons and initialized in application factory \"\"\" from .commons.apispec import APISpecExt apispec = APISpecExt()"} {"text": "import os from flask import Flask from . import api from .commons.encoder import _CustomEncoder from .extensions import apispec def create_app(testing=False, flask_env=None, secret_key=None): \"\"\"Application factory, used to create application\"\"\" app = Flask(__name__) app.config.update( ENV=os.getenv(\"FLASK_ENV\", flask_env), TESTING=os.getenv(\"TESTING\", testing), SECRET_KEY=os.getenv(\"SECRET_KEY\", secret_key), ) app.url_map.strict_slashes = False app.config[\"RESTFUL_JSON\"] = {\"cls\": _CustomEncoder} configure_apispec(app) register_blueprints(app) with app.app_context(): api.views.register_views() return app def configure_apispec(app): \"\"\"Configure APISpec for swagger support\"\"\" apispec.init_app(app) apispec.spec.components.schema( \"PaginatedResult\", { \"properties\": { \"total\": {\"type\": \"integer\"}, \"pages\": {\"type\": \"integer\"}, \"next\": {\"type\": \"string\"}, \"prev\": {\"type\": \"string\"}, } }, ) def register_blueprints(app): \"\"\"Register all blueprints for application\"\"\" app.register_blueprint(api.views.blueprint) "} {"text": "from taipy.core.cycle._cycle_converter import _CycleConverter from taipy.core.data._data_converter import _DataNodeConverter from taipy.core.scenario._scenario_converter import _ScenarioConverter from taipy.core.sequence._sequence_converter import _SequenceConverter from taipy.core.task._task_converter import _TaskConverter entity_to_models = { \"scenario\": _ScenarioConverter._entity_to_model, \"sequence\": _SequenceConverter._entity_to_model, \"task\": _TaskConverter._entity_to_model, \"data\": _DataNodeConverter._entity_to_model, \"cycle\": _CycleConverter._entity_to_model, } def _to_model(repository, entity, **kwargs): return entity_to_models[repository](entity) "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
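Since the `taipy` package re-exports `Rest` (see the `_init` imports above) and `Rest.__init__` builds its Flask app through this same `create_app` factory, launching the REST server is short. A minimal sketch; the port is an arbitrary choice:

```python
# Minimal launch sketch for the REST service defined above.
import taipy as tp

if __name__ == "__main__":
    rest = tp.Rest()
    # Blocking call; serves the /api/v1 resources plus the swagger/redoc
    # routes registered by the APISpec extension. kwargs go to Flask's run().
    rest.run(port=5000)
```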
"} {"text": "from apispec import APISpec from apispec.exceptions import APISpecError from apispec.ext.marshmallow import MarshmallowPlugin from apispec_webframeworks.flask import FlaskPlugin from flask import Blueprint, jsonify, render_template class FlaskRestfulPlugin(FlaskPlugin): \"\"\"Small plugin override to handle flask-restful resources\"\"\" @staticmethod def _rule_for_view(view, app=None): view_funcs = app.view_functions endpoint = None for ept, view_func in view_funcs.items(): if hasattr(view_func, \"view_class\"): view_func = view_func.view_class if view_func == view: endpoint = ept if not endpoint: raise APISpecError(\"Could not find endpoint for view {0}\".format(view)) # WARNING: Assume 1 rule per view function for now rule = app.url_map._rules_by_endpoint[endpoint][0] return rule class APISpecExt: \"\"\"Very simple and small extension to use apispec with this API as a flask extension\"\"\" def __init__(self, app=None, **kwargs): self.spec = None if app is not None: self.init_app(app, **kwargs) def init_app(self, app, **kwargs): app.config.setdefault(\"APISPEC_TITLE\", \"Taipy Rest\") app.config.setdefault(\"APISPEC_VERSION\", \"1.0.0\") app.config.setdefault(\"OPENAPI_VERSION\", \"3.0.2\") app.config.setdefault(\"SWAGGER_JSON_URL\", \"/swagger.json\") app.config.setdefault(\"SWAGGER_UI_URL\", \"/swagger-ui\") app.config.setdefault(\"OPENAPI_YAML_URL\", \"/openapi.yaml\") app.config.setdefault(\"REDOC_UI_URL\", \"/redoc-ui\") app.config.setdefault(\"SWAGGER_URL_PREFIX\", None) self.spec = APISpec( title=app.config[\"APISPEC_TITLE\"], version=app.config[\"APISPEC_VERSION\"], openapi_version=app.config[\"OPENAPI_VERSION\"], plugins=[MarshmallowPlugin(), FlaskRestfulPlugin()], **kwargs ) blueprint = Blueprint( \"swagger\", __name__, template_folder=\"./templates\", url_prefix=app.config[\"SWAGGER_URL_PREFIX\"], ) blueprint.add_url_rule(app.config[\"SWAGGER_JSON_URL\"], \"swagger_json\", self.swagger_json) blueprint.add_url_rule(app.config[\"SWAGGER_UI_URL\"], \"swagger_ui\", self.swagger_ui) blueprint.add_url_rule(app.config[\"OPENAPI_YAML_URL\"], \"openapi_yaml\", self.openapi_yaml) blueprint.add_url_rule(app.config[\"REDOC_UI_URL\"], \"redoc_ui\", self.redoc_ui) app.register_blueprint(blueprint) def swagger_json(self): return jsonify(self.spec.to_dict()) def swagger_ui(self): return render_template(\"swagger.j2\") def openapi_yaml(self): # Manually inject ReDoc's Authentication legend, then remove it self.spec.tag( { \"name\": \"authentication\", \"x-displayName\": \"Authentication\", \"description\": \"\", } ) redoc_spec = self.spec.to_yaml() self.spec._tags.pop(0) return redoc_spec def redoc_ui(self): return render_template(\"redoc.j2\") "} {"text": "import json from typing import Any, Union from datetime import datetime from enum import Enum Json = Union[dict, list, str, int, float, bool, None] class _CustomEncoder(json.JSONEncoder): def default(self, o: Any) -> Json: if isinstance(o, Enum): result = o.value elif isinstance(o, datetime): result = {\"__type__\": \"Datetime\", \"__value__\": o.isoformat()} else: result = json.JSONEncoder.default(self, o) return result "} {"text": "\"\"\"Simple helper to paginate query \"\"\" from flask import request, url_for DEFAULT_PAGE_SIZE = 50 DEFAULT_PAGE_NUMBER = 1 def extract_pagination(page=None, per_page=None, **request_args): page = int(page) if page is not None else DEFAULT_PAGE_NUMBER per_page = int(per_page) if per_page is not None else DEFAULT_PAGE_SIZE return page, per_page, request_args def paginate(query, schema): page, 
per_page, other_request_args = extract_pagination(**request.args) page_obj = query.paginate(page=page, per_page=per_page) next_ = url_for( request.endpoint, page=page_obj.next_num if page_obj.has_next else page_obj.page, per_page=per_page, **other_request_args, **request.view_args ) prev = url_for( request.endpoint, page=page_obj.prev_num if page_obj.has_prev else page_obj.page, per_page=per_page, **other_request_args, **request.view_args ) return { \"total\": page_obj.total, \"pages\": page_obj.pages, \"next\": next_, \"prev\": prev, \"results\": schema.dump(page_obj.items), } "} {"text": "from . import error_handler, views __all__ = [\"views\", \"error_handler\"] "} {"text": "from flask import jsonify from marshmallow import ValidationError from taipy.core.exceptions.exceptions import ( NonExistingCycle, NonExistingDataNode, NonExistingDataNodeConfig, NonExistingJob, NonExistingScenario, NonExistingScenarioConfig, NonExistingSequence, NonExistingSequenceConfig, NonExistingTask, NonExistingTaskConfig, ) from .exceptions.exceptions import ConfigIdMissingException, ScenarioIdMissingException, SequenceNameMissingException from .views import blueprint def _create_404(e): return {\"message\": e.message}, 404 @blueprint.errorhandler(ValidationError) def handle_marshmallow_error(e): \"\"\"Return json error for marshmallow validation errors. This will avoid having to try/catch ValidationErrors in all endpoints, returning correct JSON response with associated HTTP 400 Status (https://tools.ietf.org/html/rfc7231#section-6.5.1) \"\"\" return jsonify(e.messages), 400 @blueprint.errorhandler(ConfigIdMissingException) def handle_config_id_missing_exception(e): return jsonify({\"message\": e.message}), 400 @blueprint.errorhandler(ScenarioIdMissingException) def handle_scenario_id_missing_exception(e): return jsonify({\"message\": e.message}), 400 @blueprint.errorhandler(SequenceNameMissingException) def handle_sequence_name_missing_exception(e): return jsonify({\"message\": e.message}), 400 @blueprint.errorhandler(NonExistingDataNode) def handle_data_node_not_found(e): return _create_404(e) @blueprint.errorhandler(NonExistingDataNodeConfig) def handle_data_node_config_not_found(e): return _create_404(e) @blueprint.errorhandler(NonExistingCycle) def handle_cycle_not_found(e): return _create_404(e) @blueprint.errorhandler(NonExistingJob) def handle_job_not_found(e): return _create_404(e) @blueprint.errorhandler(NonExistingSequence) def handle_sequence_not_found(e): return _create_404(e) @blueprint.errorhandler(NonExistingSequenceConfig) def handle_sequence_config_not_found(e): return _create_404(e) @blueprint.errorhandler(NonExistingScenario) def handle_scenario_not_found(e): return _create_404(e) @blueprint.errorhandler(NonExistingScenarioConfig) def handle_scenario_config_not_found(e): return _create_404(e) @blueprint.errorhandler(NonExistingTask) def handle_task_not_found(e): return _create_404(e) @blueprint.errorhandler(NonExistingTaskConfig) def handle_task_config_not_found(e): return _create_404(e) "} {"text": "from flask import Blueprint, current_app from flask_restful import Api from taipy.core.common._utils import _load_fct from taipy.logger._taipy_logger import _TaipyLogger from ..extensions import apispec from .middlewares._middleware import _using_enterprise from .resources import ( CycleList, CycleResource, DataNodeList, DataNodeReader, DataNodeResource, DataNodeWriter, JobExecutor, JobList, JobResource, ScenarioExecutor, ScenarioList, ScenarioResource, SequenceExecutor, SequenceList, 
SequenceResource, TaskExecutor, TaskList, TaskResource, ) from .schemas import CycleSchema, DataNodeSchema, JobSchema, ScenarioSchema, SequenceSchema, TaskSchema _logger = _TaipyLogger._get_logger() blueprint = Blueprint(\"api\", __name__, url_prefix=\"/api/v1\") api = Api(blueprint) api.add_resource( DataNodeResource, \"/datanodes/<string:datanode_id>/\", endpoint=\"datanode_by_id\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( DataNodeReader, \"/datanodes/<string:datanode_id>/read/\", endpoint=\"datanode_reader\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( DataNodeWriter, \"/datanodes/<string:datanode_id>/write/\", endpoint=\"datanode_writer\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( DataNodeList, \"/datanodes/\", endpoint=\"datanodes\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( TaskResource, \"/tasks/<string:task_id>/\", endpoint=\"task_by_id\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource(TaskList, \"/tasks/\", endpoint=\"tasks\", resource_class_kwargs={\"logger\": _logger}) api.add_resource( TaskExecutor, \"/tasks/submit/<string:task_id>/\", endpoint=\"task_submit\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( SequenceResource, \"/sequences/<string:sequence_id>/\", endpoint=\"sequence_by_id\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( SequenceList, \"/sequences/\", endpoint=\"sequences\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( SequenceExecutor, \"/sequences/submit/<string:sequence_id>/\", endpoint=\"sequence_submit\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( ScenarioResource, \"/scenarios/<string:scenario_id>/\", endpoint=\"scenario_by_id\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( ScenarioList, \"/scenarios/\", endpoint=\"scenarios\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( ScenarioExecutor, \"/scenarios/submit/<string:scenario_id>/\", endpoint=\"scenario_submit\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( CycleResource, \"/cycles/<string:cycle_id>/\", endpoint=\"cycle_by_id\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( CycleList, \"/cycles/\", endpoint=\"cycles\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource( JobResource, \"/jobs/<string:job_id>/\", endpoint=\"job_by_id\", resource_class_kwargs={\"logger\": _logger}, ) api.add_resource(JobList, \"/jobs/\", endpoint=\"jobs\", resource_class_kwargs={\"logger\": _logger}) api.add_resource( JobExecutor, \"/jobs/cancel/<string:job_id>/\", endpoint=\"job_cancel\", resource_class_kwargs={\"logger\": _logger}, ) def load_enterprise_resources(api: Api): \"\"\" Load enterprise resources. 
\"\"\" if not _using_enterprise(): return load_resources = _load_fct(\"taipy.enterprise.rest.api.views\", \"_load_resources\") load_resources(api) load_enterprise_resources(api) def register_views(): apispec.spec.components.schema(\"DataNodeSchema\", schema=DataNodeSchema) apispec.spec.path(view=DataNodeResource, app=current_app) apispec.spec.path(view=DataNodeList, app=current_app) apispec.spec.path(view=DataNodeReader, app=current_app) apispec.spec.path(view=DataNodeWriter, app=current_app) apispec.spec.components.schema(\"TaskSchema\", schema=TaskSchema) apispec.spec.path(view=TaskResource, app=current_app) apispec.spec.path(view=TaskList, app=current_app) apispec.spec.path(view=TaskExecutor, app=current_app) apispec.spec.components.schema(\"SequenceSchema\", schema=SequenceSchema) apispec.spec.path(view=SequenceResource, app=current_app) apispec.spec.path(view=SequenceList, app=current_app) apispec.spec.path(view=SequenceExecutor, app=current_app) apispec.spec.components.schema(\"ScenarioSchema\", schema=ScenarioSchema) apispec.spec.path(view=ScenarioResource, app=current_app) apispec.spec.path(view=ScenarioList, app=current_app) apispec.spec.path(view=ScenarioExecutor, app=current_app) apispec.spec.components.schema(\"CycleSchema\", schema=CycleSchema) apispec.spec.path(view=CycleResource, app=current_app) apispec.spec.path(view=CycleList, app=current_app) apispec.spec.components.schema(\"JobSchema\", schema=JobSchema) apispec.spec.path(view=JobResource, app=current_app) apispec.spec.path(view=JobList, app=current_app) apispec.spec.path(view=JobExecutor, app=current_app) apispec.spec.components.schema( \"Any\", { \"description\": \"Any value\", \"nullable\": True, }, ) if _using_enterprise(): _register_views = _load_fct(\"taipy.enterprise.rest.api.views\", \"_register_views\") _register_views(apispec) "} {"text": "from datetime import datetime from flask import request from flask_restful import Resource from taipy.config.common.frequency import Frequency from taipy.core import Cycle from taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory from taipy.core.exceptions.exceptions import NonExistingCycle from ...commons.to_from_model import _to_model from ..middlewares._middleware import _middleware from ..schemas import CycleResponseSchema, CycleSchema REPOSITORY = \"cycle\" def _get_or_raise(cycle_id: str) -> None: manager = _CycleManagerFactory._build_manager() cycle = manager._get(cycle_id) if not cycle: raise NonExistingCycle(cycle_id) return cycle class CycleResource(Resource): \"\"\"Single object resource --- get: tags: - api description: | Returns a `CycleSchema^` representing the unique `Cycle^` identified by the *cycle_id* given as parameter. If no cycle corresponds to *cycle_id*, a `404` error is returned. !!! Example === \"Curl\" ```shell curl -X GET http://localhost:5000/api/v1/cycles/CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a` is the value of the *cycle_id* parameter. It represents the identifier of the Cycle we want to retrieve. 
In case of success here is an example of the response: ``` JSON {\"cycle\": { \"frequency\": \"Frequency.DAILY\", \"creation_date\": \"2022-08-04T17:13:32.797384\", \"id\": \"CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a\", \"start_date\": \"2022-08-04T00:00:00\", \"end_date\": \"2022-08-04T23:59:59.999999\", \"name\": \"Frequency.DAILY_2022-08-04T17:13:32.797384\" ``` In case of failure here is an example of the response: ``` JSON {\"message\": \"Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a not found.\"} ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get(\"http://localhost:5000/api/v1/cycles/CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a\") print(response) print(response.json()) ``` `CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a` is the value of the *cycle_id* parameter. It represents the identifier of the Cycle we want to retrieve. In case of success here is an output example: ``` {'cycle': { 'frequency': 'Frequency.DAILY', 'creation_date': '2022-08-04T17:13:32.797384', 'id': 'CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a', 'start_date': '2022-08-04T00:00:00', 'end_date': '2022-08-04T23:59:59.999999', 'name': 'Frequency.DAILY_2022-08-04T17:13:32.797384' ``` In case of failure here is an output example: ``` {'message': 'Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. parameters: - in: path name: cycle_id schema: type: string description: The identifier of the cycle to retrieve. responses: 200: content: application/json: schema: type: object properties: cycle: CycleSchema 404: description: No cycle has the *cycle_id* identifier. delete: tags: - api description: | Deletes the `Cycle^` identified by the *cycle_id* given as parameter. If the cycle does not exist, a 404 error is returned. !!! Example === \"Curl\" ```shell curl -X DELETE http://localhost:5000/api/v1/cycles/CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a` is the value of the *cycle_id* parameter. It represents the identifier of the Cycle we want to delete. In case of success here is an example of the response: ``` JSON {\"message\": \"Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a was deleted.\"} ``` In case of failure here is an example of the response: ``` JSON {\"message\": \"Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a not found.\"} ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.delete(\"http://localhost:5000/api/v1/cycles/CYCLE_797384_ef210412-af91-4f41-b6e8-74d1648edcba\") print(response) print(response.json()) ``` `CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a` is the value of the *cycle_id* parameter. It represents the identifier of the Cycle we want to delete. In case of success here is an output example: ``` {\"message\": \"Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a was deleted.\"} ``` In case of failure here is an output example: ``` {'message': 'Cycle CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a not found.'} ``` !!! 
Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: path name: cycle_id schema: type: string description: The id of the cycle to delete. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No cycle has the *cycle_id* identifier. \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def get(self, cycle_id): schema = CycleResponseSchema() cycle = _get_or_raise(cycle_id) return {\"cycle\": schema.dump(_to_model(REPOSITORY, cycle))} @_middleware def delete(self, cycle_id): manager = _CycleManagerFactory._build_manager() _get_or_raise(cycle_id) manager._delete(cycle_id) return {\"message\": f\"Cycle {cycle_id} was deleted.\"} class CycleList(Resource): \"\"\"Creation and get_all --- get: tags: - api description: | Returns a `CycleSchema^` list representing all existing Cycles. !!! Example === \"Curl\" ```shell curl -X GET http://localhost:5000/api/v1/cycles ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. Here is an example of the response: ``` JSON [ { \"frequency\": \"Frequency.DAILY\", \"end_date\": \"2022-08-06T23:59:59.999999\", \"creation_date\": \"2022-08-06T15:45:50.223894\", \"start_date\": \"2022-08-06T00:00:00\", \"id\": \"CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a\", \"name\": \"Frequency.DAILY_2022-08-06T15:45:50.223894\" } ] ``` If there is no cycle, the response is an empty list as follows: ``` JSON [] ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get(\"http://localhost:5000/api/v1/cycles\") print(response) print(response.json()) ``` In case of success here is an output example: ``` [{ \"frequency\": \"Frequency.DAILY\", \"end_date\": \"2022-08-06T23:59:59.999999\", \"creation_date\": \"2022-08-06T15:45:50.223894\", \"start_date\": \"2022-08-06T00:00:00\", \"id\": \"CYCLE_223894_e0fab919-b50b-4b9f-ac09-52f77474fa7a\", \"name\": \"Frequency.DAILY_2022-08-06T15:45:50.223894\" } ] ``` If there is no cycle, the response is an empty list as follows: ``` [] ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/CycleSchema' post: tags: - api description: | Creates a new cycle from the `CycleSchema^` given in the request body. !!! Example === \"Curl\" ```shell curl -X POST -H \"Content-Type: application/json\"\\ -d '{\"frequency\": \"DAILY\", \"properties\": {}, \"creation_date\": \"2020-01-01T00:00:00\",\\ \"start_date\": \"2020-01-01T00:00:00\", \"end_date\": \"2020-01-01T00:00:00\"}'\\ http://localhost:5000/api/v1/cycles ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. 
In the curl command line, a `CycleSchema^` is provided as JSON dictionary parameter with the curl option -d (--data) to specify the various attributes of the `Cycle^` to create: ``` JSON { \"frequency\": \"DAILY\", \"properties\": {}, \"creation_date\": \"2020-01-01T00:00:00\", \"start_date\": \"2020-01-01T00:00:00\", \"end_date\": \"2020-01-01T00:00:00\" } ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests cycle_schema = { \"frequency\": \"DAILY\", \"properties\": {}, \"creation_date\": \"2020-01-01T00:00:00\", \"start_date\": \"2020-01-01T00:00:00\", \"end_date\": \"2020-01-01T00:00:00\" } response = requests.post(\"http://localhost:5000/api/v1/cycles\", json=cycle_schema) print(response) print(response.json()) ``` A `CycleSchema^` is provided as a dictionary to specify the various attributes of the `Cycle^` to create. Here is the output example: ``` { 'message': 'Cycle was created.', 'cycle': { 'frequency': 'Frequency.DAILY', 'end_date': '2020-01-01T00:00:00', 'creation_date': '2020-01-01T00:00:00', 'start_date': '2020-01-01T00:00:00', 'id': 'CYCLE_c9cc527f-a8c8-4238-8f31-42166a9817db', 'name': 'Frequency.DAILY_2020-01-01T00:00:00', 'properties': {}}} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. requestBody: required: true content: application/json: schema: CycleSchema responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. cycle: CycleSchema \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def get(self): schema = CycleResponseSchema(many=True) manager = _CycleManagerFactory._build_manager() cycles = [_to_model(REPOSITORY, cycle) for cycle in manager._get_all()] return schema.dump(cycles) @_middleware def post(self): schema = CycleResponseSchema() manager = _CycleManagerFactory._build_manager() cycle = self.__create_cycle_from_schema(schema.load(request.json)) manager._set(cycle) return { \"message\": \"Cycle was created.\", \"cycle\": schema.dump(_to_model(REPOSITORY, cycle)), }, 201 def __create_cycle_from_schema(self, cycle_schema: CycleSchema): return Cycle( id=cycle_schema.get(\"id\"), frequency=Frequency(getattr(Frequency, cycle_schema.get(\"frequency\", \"\").upper())), properties=cycle_schema.get(\"properties\", {}), creation_date=datetime.fromisoformat(cycle_schema.get(\"creation_date\")), start_date=datetime.fromisoformat(cycle_schema.get(\"start_date\")), end_date=datetime.fromisoformat(cycle_schema.get(\"end_date\")), ) "} {"text": "from flask import request from flask_restful import Resource from taipy.config.config import Config from taipy.core.exceptions.exceptions import NonExistingTask, NonExistingTaskConfig from taipy.core.task._task_manager_factory import _TaskManagerFactory from ...commons.to_from_model import _to_model from ..exceptions.exceptions import ConfigIdMissingException from ..middlewares._middleware import _middleware from ..schemas import TaskSchema def _get_or_raise(task_id: str): manager = _TaskManagerFactory._build_manager() task = manager._get(task_id) if task is None: raise NonExistingTask(task_id) return task REPOSITORY = \"task\" class TaskResource(Resource): \"\"\"Single object resource --- get: tags: - api summary: Get a task. description: | Return a single task by *task_id*. If the task does not exist, a 404 error is returned. !!! 
Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_READER` role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/tasks/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: task_id schema: type: string description: The identifier of the task. responses: 200: content: application/json: schema: type: object properties: task: TaskSchema 404: description: No task has the *task_id* identifier. delete: tags: - api summary: Delete a task. description: | Delete a task. If the task does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_EDITOR` role. Code example: ```shell curl -X DELETE http://localhost:5000/api/v1/tasks/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: task_id schema: type: string description: The identifier of the task. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No task has the *task_id* identifier. \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def get(self, task_id): schema = TaskSchema() task = _get_or_raise(task_id) return {\"task\": schema.dump(_to_model(REPOSITORY, task))} @_middleware def delete(self, task_id): manager = _TaskManagerFactory._build_manager() _get_or_raise(task_id) manager._delete(task_id) return {\"message\": f\"Task {task_id} was deleted.\"} class TaskList(Resource): \"\"\"Creation and get_all --- get: tags: - api summary: Get all tasks. description: | Return an array of all tasks. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_READER` role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/tasks ``` responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/TaskSchema' post: tags: - api summary: Create a task. description: | Create a new task from its *config_id*. If the config does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_EDITOR` role. Code example: ```shell curl -X POST http://localhost:5000/api/v1/tasks?config_id=my_task_config ``` parameters: - in: query name: config_id schema: type: string description: The identifier of the task configuration. responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. 
task: TaskSchema \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") def fetch_config(self, config_id): config = Config.tasks.get(config_id) if not config: raise NonExistingTaskConfig(config_id) return config @_middleware def get(self): schema = TaskSchema(many=True) manager = _TaskManagerFactory._build_manager() tasks = [_to_model(REPOSITORY, task) for task in manager._get_all()] return schema.dump(tasks) @_middleware def post(self): args = request.args config_id = args.get(\"config_id\") schema = TaskSchema() manager = _TaskManagerFactory._build_manager() if not config_id: raise ConfigIdMissingException config = self.fetch_config(config_id) task = manager._bulk_get_or_create([config])[0] return { \"message\": \"Task was created.\", \"task\": schema.dump(_to_model(REPOSITORY, task)), }, 201 class TaskExecutor(Resource): \"\"\"Execute a task --- post: tags: - api summary: Execute a task. description: | Execute a task by *task_id*. If the task does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_EXECUTOR` role. Code example: ```shell curl -X POST http://localhost:5000/api/v1/tasks/submit/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: task_id schema: type: string responses: 204: content: application/json: schema: type: object properties: message: type: string description: Status message. task: TaskSchema 404: description: No task has the *task_id* identifier. \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def post(self, task_id): manager = _TaskManagerFactory._build_manager() task = _get_or_raise(task_id) manager._orchestrator().submit_task(task) return {\"message\": f\"Task {task_id} was submitted.\"} "} {"text": "import uuid from typing import Optional from flask import request from flask_restful import Resource from taipy.config.config import Config from taipy.core import Job, JobId from taipy.core.exceptions.exceptions import NonExistingJob, NonExistingTaskConfig from taipy.core.job._job_manager_factory import _JobManagerFactory from taipy.core.task._task_manager_factory import _TaskManagerFactory from ..exceptions.exceptions import ConfigIdMissingException from ..middlewares._middleware import _middleware from ..schemas import JobSchema def _get_or_raise(job_id: str): manager = _JobManagerFactory._build_manager() job = manager._get(job_id) if job is None: raise NonExistingJob(job_id) return job class JobResource(Resource): \"\"\"Single object resource --- get: tags: - api summary: Get a job. description: | Return a single job by *job_id*. If the job does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint requires `TAIPY_READER` role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/jobs/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: job_id schema: type: string description: The identifier of the job. responses: 200: content: application/json: schema: type: object properties: job: JobSchema 404: description: No job has the *job_id* identifier. delete: tags: - api summary: Delete a job. description: | Delete a job. If the job does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint requires `TAIPY_EDITOR` role. 
Code example: ```shell curl -X DELETE http://localhost:5000/api/v1/jobs/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: job_id schema: type: string description: The identifier of the job. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No job has the *job_id* identifier. \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def get(self, job_id): schema = JobSchema() job = _get_or_raise(job_id) return {\"job\": schema.dump(job)} @_middleware def delete(self, job_id): manager = _JobManagerFactory._build_manager() job = _get_or_raise(job_id) manager._delete(job) return {\"message\": f\"Job {job_id} was deleted.\"} class JobList(Resource): \"\"\"Creation and get_all --- get: tags: - api summary: Get all jobs. description: | Return an array of all jobs. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint requires `TAIPY_READER` role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/jobs ``` responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/JobSchema' post: tags: - api summary: Create a job. description: | Create a job from a task *config_id*. If the config does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint requires `TAIPY_EDITOR` role. Code example: ```shell curl -X POST http://localhost:5000/api/v1/jobs?task_id=TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: query name: task_id schema: type: string description: The identifier of the task configuration. responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. job: JobSchema \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") def fetch_config(self, config_id): config = Config.tasks.get(config_id) if not config: raise NonExistingTaskConfig(config_id) return config @_middleware def get(self): schema = JobSchema(many=True) manager = _JobManagerFactory._build_manager() jobs = manager._get_all() return schema.dump(jobs) @_middleware def post(self): args = request.args task_config_id = args.get(\"task_id\") if not task_config_id: raise ConfigIdMissingException manager = _JobManagerFactory._build_manager() schema = JobSchema() job = self.__create_job_from_schema(task_config_id) manager._set(job) return { \"message\": \"Job was created.\", \"job\": schema.dump(job), }, 201 def __create_job_from_schema(self, task_config_id: str) -> Optional[Job]: task_manager = _TaskManagerFactory._build_manager() task = task_manager._bulk_get_or_create([self.fetch_config(task_config_id)])[0] return Job( id=JobId(f\"JOB_{uuid.uuid4()}\"), task=task, submit_id=f\"SUBMISSION_{uuid.uuid4()}\", submit_entity_id=task.id ) class JobExecutor(Resource): \"\"\"Cancel a job --- post: tags: - api summary: Cancel a job. description: | Cancel a job by *job_id*. If the job does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), the endpoint requires `TAIPY_EXECUTOR` role. 
Code example: ```shell curl -X POST http://localhost:5000/api/v1/jobs/cancel/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: job_id schema: type: string responses: 204: content: application/json: schema: type: object properties: message: type: string description: Status message. job: JobSchema 404: description: No job has the *job_id* identifier. \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def post(self, job_id): manager = _JobManagerFactory._build_manager() job = _get_or_raise(job_id) manager._cancel(job) return {\"message\": f\"Job {job_id} was cancelled.\"} "} {"text": " from flask import request from flask_restful import Resource from taipy.core.exceptions.exceptions import NonExistingScenario, NonExistingSequence from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from taipy.core.sequence._sequence_manager_factory import _SequenceManagerFactory from ...commons.to_from_model import _to_model from ..exceptions.exceptions import ScenarioIdMissingException, SequenceNameMissingException from ..middlewares._middleware import _middleware from ..schemas import SequenceResponseSchema def _get_or_raise(sequence_id: str): manager = _SequenceManagerFactory._build_manager() sequence = manager._get(sequence_id) if sequence is None: raise NonExistingSequence(sequence_id) return sequence REPOSITORY = \"sequence\" class SequenceResource(Resource): \"\"\"Single object resource --- get: tags: - api summary: Get a sequence. description: | Return a single sequence by sequence_id. If the sequence does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires _TAIPY_READER_ role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/sequences/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: sequence_id schema: type: string description: The identifier of the sequence. responses: 200: content: application/json: schema: type: object properties: sequence: SequenceSchema 404: description: No sequence has the *sequence_id* identifier. delete: tags: - api summary: Delete a sequence. description: | Delete a sequence. If the sequence does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires _TAIPY_EDITOR_ role. Code example: ```shell curl -X DELETE http://localhost:5000/api/v1/sequences/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: sequence_id schema: type: string description: The identifier of the sequence. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No sequence has the *sequence_id* identifier. \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def get(self, sequence_id): schema = SequenceResponseSchema() sequence = _get_or_raise(sequence_id) return {\"sequence\": schema.dump(_to_model(REPOSITORY, sequence))} @_middleware def delete(self, sequence_id): manager = _SequenceManagerFactory._build_manager() _get_or_raise(sequence_id) manager._delete(sequence_id) return {\"message\": f\"Sequence {sequence_id} was deleted.\"} class SequenceList(Resource): \"\"\"Creation and get_all --- get: tags: - api summary: Get all sequences. description: | Return an array of all sequences. 
!!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires _TAIPY_READER_ role. Code example: ```shell curl -X GET http://localhost:5000/api/v1/sequences ``` responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/SequenceSchema' post: tags: - api summary: Create a sequence. description: | Create a sequence from scenario_id, sequence_name and task_ids. If the scenario_id does not exist or sequence_name is not provided, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires _TAIPY_EDITOR_ role. Code example: ```shell curl -X POST --data '{\"scenario_id\": \"SCENARIO_scenario_id\", \"sequence_name\": \"sequence\", \"task_ids\": []}' http://localhost:5000/api/v1/sequences ``` parameters: - in: query name: scenario_id schema: type: string description: The Scenario the Sequence belongs to. - in: query name: sequence_name schema: type: string description: The name of the Sequence. - in: query name: task_ids schema: type: array items: type: string description: A list of task ids of the Sequence. responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. sequence: SequenceSchema \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def get(self): schema = SequenceResponseSchema(many=True) manager = _SequenceManagerFactory._build_manager() sequences = [_to_model(REPOSITORY, sequence) for sequence in manager._get_all()] return schema.dump(sequences) @_middleware def post(self): sequence_data = request.json scenario_id = sequence_data.get(\"scenario_id\") sequence_name = sequence_data.get(\"sequence_name\") sequence_task_ids = sequence_data.get(\"task_ids\", []) response_schema = SequenceResponseSchema() if not scenario_id: raise ScenarioIdMissingException if not sequence_name: raise SequenceNameMissingException scenario = _ScenarioManagerFactory._build_manager()._get(scenario_id) if not scenario: raise NonExistingScenario(scenario_id=scenario_id) scenario.add_sequence(sequence_name, sequence_task_ids) sequence = scenario.sequences[sequence_name] return { \"message\": \"Sequence was created.\", \"sequence\": response_schema.dump(_to_model(REPOSITORY, sequence)), }, 201 class SequenceExecutor(Resource): \"\"\"Execute a sequence --- post: tags: - api summary: Execute a sequence. description: | Execute a sequence from sequence_id. If the sequence does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires _TAIPY_EXECUTOR_ role. Code example: ```shell curl -X POST http://localhost:5000/api/v1/sequences/submit/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9 ``` parameters: - in: path name: sequence_id schema: type: string responses: 204: content: application/json: schema: type: object properties: message: type: string description: Status message. sequence: SequenceSchema 404: description: No sequence has the *sequence_id* identifier.
\"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def post(self, sequence_id): _get_or_raise(sequence_id) manager = _SequenceManagerFactory._build_manager() manager._submit(sequence_id) return {\"message\": f\"Sequence {sequence_id} was submitted.\"} "} {"text": "from .cycle import CycleList, CycleResource from .datanode import DataNodeList, DataNodeReader, DataNodeResource, DataNodeWriter from .job import JobExecutor, JobList, JobResource from .scenario import ScenarioExecutor, ScenarioList, ScenarioResource from .sequence import SequenceExecutor, SequenceList, SequenceResource from .task import TaskExecutor, TaskList, TaskResource __all__ = [ \"DataNodeResource\", \"DataNodeList\", \"DataNodeReader\", \"DataNodeWriter\", \"TaskList\", \"TaskResource\", \"TaskExecutor\", \"SequenceList\", \"SequenceResource\", \"SequenceExecutor\", \"ScenarioList\", \"ScenarioResource\", \"ScenarioExecutor\", \"CycleResource\", \"CycleList\", \"JobResource\", \"JobList\", \"JobExecutor\", ] "} {"text": "from typing import List import numpy as np import pandas as pd from flask import request from flask_restful import Resource from taipy.config.config import Config from taipy.core.data._data_manager_factory import _DataManagerFactory from taipy.core.data.operator import Operator from taipy.core.exceptions.exceptions import NonExistingDataNode, NonExistingDataNodeConfig from ...commons.to_from_model import _to_model from ..exceptions.exceptions import ConfigIdMissingException from ..middlewares._middleware import _middleware from ..schemas import ( CSVDataNodeConfigSchema, DataNodeFilterSchema, DataNodeSchema, ExcelDataNodeConfigSchema, GenericDataNodeConfigSchema, InMemoryDataNodeConfigSchema, JSONDataNodeConfigSchema, PickleDataNodeConfigSchema, SQLTableDataNodeConfigSchema, SQLDataNodeConfigSchema, MongoCollectionDataNodeConfigSchema, ) ds_schema_map = { \"csv\": CSVDataNodeConfigSchema, \"pickle\": PickleDataNodeConfigSchema, \"in_memory\": InMemoryDataNodeConfigSchema, \"sql_table\": SQLTableDataNodeConfigSchema, \"sql\": SQLDataNodeConfigSchema, \"mongo_collection\": MongoCollectionDataNodeConfigSchema, \"excel\": ExcelDataNodeConfigSchema, \"generic\": GenericDataNodeConfigSchema, \"json\": JSONDataNodeConfigSchema, } REPOSITORY = \"data\" def _get_or_raise(data_node_id: str) -> None: manager = _DataManagerFactory._build_manager() data_node = manager._get(data_node_id) if not data_node: raise NonExistingDataNode(data_node_id) return data_node class DataNodeResource(Resource): \"\"\"Single object resource --- get: tags: - api description: | Returns a `DataNodeSchema^` representing the unique `DataNode^` identified by the *datanode_id* given as parameter. If no data node corresponds to *datanode_id*, a `404` error is returned. !!! Example === \"Curl\" ```shell curl -X GET http://localhost:5000/api/v1/datanodes/DATANODE_hist_cfg_75750ed8-4e09-4e00-958d -e352ee426cc9 ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `DATANODE_hist_cfg_75750ed8-4e09-4e00-958d-e352ee426cc9` is the value of the *datanode_id* parameter. It represents the identifier of the data node we want to retrieve. 
In case of success here is an example of the response: ``` JSON {\"datanode\": { \"id\": \"DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d\", \"config_id\": \"historical_data_set\", \"scope\": \"\", \"storage_type\": \"csv\", \"name\": \"Name of my historical data node\", \"owner_id\": \"SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38\", \"last_edit_date\": \"2022-08-10T16:03:40.855082\", \"job_ids\": [], \"version\": \"latest\", \"cacheable\": false, \"validity_days\": null, \"validity_seconds\": null, \"edit_in_progress\": false, \"data_node_properties\": { \"path\": \"daily-min-temperatures.csv\", \"has_header\": true} }} ``` In case of failure here is an example of the response: ``` JSON {\"message\":\"DataNode DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found\"} ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get( \"http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d\") print(response) print(response.json()) ``` `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the value of the *datanode_id* parameter. It represents the identifier of the data node we want to retrieve. In case of success here is an output example: ``` {\"datanode\": { \"id\": \"DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d\", \"config_id\": \"historical_data_set\", \"scope\": \"\", \"storage_type\": \"csv\", \"name\": \"Name of my historical data node\", \"owner_id\": \"SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38\", \"last_edit_date\": \"2022-08-10T16:03:40.855082\", \"job_ids\": [], \"version\": \"latest\", \"cacheable\": false, \"validity_days\": null, \"validity_seconds\": null, \"edit_in_progress\": false, \"data_node_properties\": { \"path\": \"daily-min-temperatures.csv\", \"has_header\": true} }} ``` In case of failure here is an output example: ``` {\"message\":\"DataNode DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found\"} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. parameters: - in: path name: datanode_id schema: type: string description: The identifier of the data node to retrieve. responses: 200: content: application/json: schema: type: object properties: datanode: DataNodeSchema 404: description: No data node has the *datanode_id* identifier. delete: tags: - api summary: Delete a data node. description: | Deletes the `DataNode^` identified by the *datanode_id* given as parameter. If the data node does not exist, a 404 error is returned. !!! Example === \"Curl\" ```shell curl -X DELETE \\ http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the value of the *datanode_id* parameter. It represents the identifier of the data node we want to delete.
In case of success here is an example of the response: ``` JSON {\"msg\": \"datanode DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d deleted\"} ``` In case of failure here is an example of the response: ``` JSON {\"message\": \"Data node DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found.\"} ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.delete( \"http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d\") print(response) print(response.json()) ``` `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the value of the *datanode_id* parameter. It represents the identifier of the data node we want to delete. In case of success here is an output example: ``` {\"msg\": \"Data node DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d deleted.\"} ``` In case of failure here is an output example: ``` {'message': 'Data node DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: path name: datanode_id schema: type: string description: The identifier of the data node to delete. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No data node has the *datanode_id* identifier. \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def get(self, datanode_id): schema = DataNodeSchema() datanode = _get_or_raise(datanode_id) return {\"datanode\": schema.dump(_to_model(REPOSITORY, datanode))} @_middleware def delete(self, datanode_id): _get_or_raise(datanode_id) manager = _DataManagerFactory._build_manager() manager._delete(datanode_id) return {\"message\": f\"Data node {datanode_id} was deleted.\"} class DataNodeList(Resource): \"\"\"Creation and get_all --- get: tags: - api description: | Returns a `DataNodeSchema^` list representing all existing data nodes. !!! Example === \"Curl\" ```shell curl -X GET http://localhost:5000/api/v1/datanodes ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. Here is an example of the response: ``` JSON [ {\"datanode\": { \"id\": \"DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d\", \"config_id\": \"historical_data_set\", \"scope\": \"\", \"storage_type\": \"csv\", \"name\": \"Name of my historical data node\", \"owner_id\": \"SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38\", \"last_edit_date\": \"2022-08-10T16:03:40.855082\", \"job_ids\": [], \"version\": \"latest\", \"cacheable\": false, \"validity_days\": null, \"validity_seconds\": null, \"edit_in_progress\": false, \"data_node_properties\": { \"path\": \"daily-min-temperatures.csv\", \"has_header\": true} }} ] ``` If there is no data node, the response is an empty list as follows: ``` JSON [] ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`).
```python import requests response = requests.get(\"http://localhost:5000/api/v1/datanodes\") print(response) print(response.json()) ``` In case of success here is an output example: ``` [ {\"datanode\": { \"id\": \"DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d\", \"config_id\": \"historical_data_set\", \"scope\": \"\", \"storage_type\": \"csv\", \"name\": \"Name of my historical data node\", \"owner_id\": \"SCENARIO_my_awesome_scenario_97f3fd67-8556-4c62-9b3b-ef189a599a38\", \"last_edit_date\": \"2022-08-10T16:03:40.855082\", \"job_ids\": [], \"version\": \"latest\", \"cacheable\": false, \"validity_days\": null, \"validity_seconds\": null, \"edit_in_progress\": false, \"data_node_properties\": { \"path\": \"daily-min-temperatures.csv\", \"has_header\": true} }} ] ``` If there is no data node, the response is an empty list as follows: ``` [] ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/DataNodeSchema' post: tags: - api description: | Creates a new data node from the *config_id* given as parameter. !!! Example === \"Curl\" ```shell curl -X POST http://localhost:5000/api/v1/datanodes?config_id=historical_data_set ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. In this example the *config_id* value (\"historical_data_set\") is given as parameter directly in the url. A corresponding `DataNodeConfig^` must exist and must have been configured before. Here is the output message example: ``` {\"msg\": \"datanode created\", \"datanode\": { \"default_path\": null, \"path\": \"daily-min-temperatures.csv\", \"name\": null, \"storage_type\": \"csv\", \"scope\": 2, \"has_header\": true} } ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.post(\"http://localhost:5000/api/v1/datanodes?config_id=historical_data_set\") print(response) print(response.json()) ``` In this example the *config_id* value (\"historical_data_set\") is given as parameter directly in the url. A corresponding `DataNodeConfig^` must exist and must have been configured before. Here is the output example: ``` {'msg': 'datanode created', 'datanode': { 'name': None, 'scope': 2, 'path': 'daily-min-temperatures.csv', 'storage_type': 'csv', 'default_path': None, 'has_header': True}} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: query name: config_id schema: type: string description: The identifier of the data node configuration. responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. 
datanode: DataNodeSchema \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") def fetch_config(self, config_id): config = Config.data_nodes.get(config_id) if not config: raise NonExistingDataNodeConfig(config_id) return config @_middleware def get(self): schema = DataNodeSchema(many=True) manager = _DataManagerFactory._build_manager() datanodes = [_to_model(REPOSITORY, datanode) for datanode in manager._get_all()] return schema.dump(datanodes) @_middleware def post(self): args = request.args config_id = args.get(\"config_id\") if not config_id: raise ConfigIdMissingException config = self.fetch_config(config_id) schema = ds_schema_map.get(config.storage_type)() manager = _DataManagerFactory._build_manager() manager._bulk_get_or_create({config}) return { \"message\": \"Data node was created.\", \"datanode\": schema.dump(config), }, 201 class DataNodeReader(Resource): \"\"\"Single object resource --- get: tags: - api description: | Returns the data read from the data node identified by *datanode_id*. If the data node does not exist, a 404 error is returned. !!! Example === \"Curl\" ```shell curl -X GET \\ http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d/read ``` `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the *datanode_id* parameter. It represents the identifier of the data node to read. Here is an output example. In this case, the storage type of the data node to read is `csv`, and no exposed type is specified. The data is exposed as a list of dictionaries, each dictionary representing a row of the csv file. ``` {\"data\": [ {\"Date\": \"1981-01-01\", \"Temp\": 20.7}, {\"Date\": \"1981-01-02\", \"Temp\": 17.9}, {\"Date\": \"1981-01-03\", \"Temp\": 18.8}, {\"Date\": \"1981-01-04\", \"Temp\": 14.6}, {\"Date\": \"1981-01-05\", \"Temp\": 15.8}, {\"Date\": \"1981-01-06\", \"Temp\": 15.8}, {\"Date\": \"1981-01-07\", \"Temp\": 15.8} ]} ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get( \"http://localhost:5000/api/v1/datanodes/DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d/read\") print(response) print(response.json()) ``` `DATANODE_historical_data_set_9db1b542-2e45-44e7-8a85-03ef9ead173d` is the *datanode_id* parameter. It represents the identifier of the data node to read. Here is an output example. In this case, the storage type of the data node to read is `csv`, and no exposed type is specified. The data is exposed as a list of dictionaries, each dictionary representing a row of the csv file. ``` {\"data\": [ {\"Date\": \"1981-01-01\", \"Temp\": 20.7}, {\"Date\": \"1981-01-02\", \"Temp\": 17.9}, {\"Date\": \"1981-01-03\", \"Temp\": 18.8}, {\"Date\": \"1981-01-04\", \"Temp\": 14.6}, {\"Date\": \"1981-01-05\", \"Temp\": 15.8}, {\"Date\": \"1981-01-06\", \"Temp\": 15.8}, {\"Date\": \"1981-01-07\", \"Temp\": 15.8} ]} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. parameters: - in: path name: datanode_id schema: type: string description: The id of the data node to read. requestBody: content: application/json: schema: DataNodeFilterSchema responses: 200: content: application/json: schema: type: object properties: data: type: Any description: The data read from the data node. 404: description: No data node has the *datanode_id* identifier.
\"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") def __make_operators(self, schema: DataNodeFilterSchema) -> List: return [ ( x.get(\"key\"), x.get(\"value\"), Operator(getattr(Operator, x.get(\"operator\", \"\").upper())), ) for x in schema.get(\"operators\") ] @_middleware def get(self, datanode_id): schema = DataNodeFilterSchema() data = request.get_json(silent=True) data_node = _get_or_raise(datanode_id) operators = self.__make_operators(schema.load(data)) if data else [] data = data_node.filter(operators) if isinstance(data, pd.DataFrame): data = data.to_dict(orient=\"records\") elif isinstance(data, np.ndarray): data = list(data) return {\"data\": data} class DataNodeWriter(Resource): \"\"\"Single object resource --- put: tags: - api summary: Write into a data node. description: | Write data from request body into a data node by *datanode_id*. If the data node does not exist, a 404 error is returned. !!! Note When the authorization feature is activated (available in the **Enterprise** edition only), this endpoint requires `TAIPY_EDITOR` role. Code example: ```shell curl -X PUT -d '[{\"path\": \"/abc\", \"type\": 1}, {\"path\": \"/def\", \"type\": 2}]' -H 'Content-Type: application/json' http://localhost:5000/api/v1/datanodes/DATANODE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9/write ``` parameters: - in: path name: datanode_id schema: type: string requestBody: content: application/json: schema: Any responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No data node has the *datanode_id* identifier. \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def put(self, datanode_id): data = request.json data_node = _get_or_raise(datanode_id) data_node.write(data) return {\"message\": f\"Data node {datanode_id} was successfully written.\"} "} {"text": "from flask import request from flask_restful import Resource from taipy.config.config import Config from taipy.core.exceptions.exceptions import NonExistingScenario, NonExistingScenarioConfig from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory from ...commons.to_from_model import _to_model from ..exceptions.exceptions import ConfigIdMissingException from ..middlewares._middleware import _middleware from ..schemas import ScenarioResponseSchema def _get_or_raise(scenario_id: str): manager = _ScenarioManagerFactory._build_manager() scenario = manager._get(scenario_id) if scenario is None: raise NonExistingScenario(scenario_id) return scenario REPOSITORY = \"scenario\" class ScenarioResource(Resource): \"\"\"Single object resource --- get: tags: - api description: | Returns a `ScenarioSchema^` representing the unique scenario identified by *scenario_id*. If no scenario corresponds to *scenario_id*, a `404` error is returned. !!! Example === \"Curl\" ```shell curl -X GET http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to retrieve. 
In case of success here is an example of the response: ``` JSON {\"scenario\": { \"cycle\": \"CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d\", \"id\": \"SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c\", \"properties\": {}, \"tags\": [], \"version\": \"latest\", \"sequences\": [ \"SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99\", \"SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4\"], \"subscribers\": [], \"creation_date\": \"2022-08-15T19:21:01.871587\", \"primary_scenario\": true}} ``` In case of failure here is an example of the response: ``` JSON {\"message\": \"SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found.\"} ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get( \"http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c\") print(response) print(response.json()) ``` `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to retrieve. In case of success here is an output example: ``` {\"scenario\": { \"cycle\": \"CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d\", \"id\": \"SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c\", \"properties\": {}, \"tags\": [], \"version\": \"latest\", \"sequences\": [ \"SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99\", \"SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4\"], \"subscribers\": [], \"creation_date\": \"2022-08-15T19:21:01.871587\", \"primary_scenario\": true}} ``` In case of failure here is an output example: ``` {'message': 'Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. parameters: - in: path name: scenario_id schema: type: string description: The identifier of the scenario to retrieve. responses: 200: content: application/json: schema: type: object properties: scenario: ScenarioSchema 404: description: No scenario has the *scenario_id* identifier. delete: tags: - api description: | Deletes the `Scenario^` identified by the *scenario_id* given as parameter. If the scenario does not exist, a 404 error is returned. !!! Example === \"Curl\" ```shell curl -X DELETE http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the scenario we want to delete. In case of success here is an example of the response: ``` JSON {\"msg\": \"Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c deleted.\"} ``` In case of failure here is an example of the response: ``` JSON {\"message\": \"Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found.\"} ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.delete( \"http://localhost:5000/api/v1/scenarios/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c\") print(response) print(response.json()) ``` `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to delete.
In case of success here is an output example: ``` {\"msg\": \"Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c deleted.\"} ``` In case of failure here is an output example: ``` {'message': 'Scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c not found.'} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: path name: scenario_id schema: type: string description: The identifier of the scenario to delete. responses: 200: content: application/json: schema: type: object properties: message: type: string description: Status message. 404: description: No scenario has the *scenario_id* identifier. \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def get(self, scenario_id): schema = ScenarioResponseSchema() scenario = _get_or_raise(scenario_id) return {\"scenario\": schema.dump(_to_model(REPOSITORY, scenario))} @_middleware def delete(self, scenario_id): manager = _ScenarioManagerFactory._build_manager() _get_or_raise(scenario_id) manager._delete(scenario_id) return {\"message\": f\"Scenario {scenario_id} was deleted.\"} class ScenarioList(Resource): \"\"\"Creation and get_all --- get: tags: - api summary: Get all scenarios. description: | Returns a `ScenarioSchema^` list representing all existing Scenarios. !!! Example === \"Curl\" ```shell curl -X GET http://localhost:5000/api/v1/scenarios ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. Here is an example of the response: ``` JSON [{ \"cycle\": \"CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d\", \"id\": \"SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c\", \"properties\": {}, \"tags\": [], \"version\": \"latest\", \"sequences\": [ \"SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99\", \"SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4\"], \"subscribers\": [], \"creation_date\": \"2022-08-15T19:21:01.871587\", \"primary_scenario\": true } ] ``` If there is no scenario, the response is an empty list as follows: ``` JSON [] ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.get(\"http://localhost:5000/api/v1/scenarios\") print(response) print(response.json()) ``` In case of success here is an output example: ``` [{ \"cycle\": \"CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d\", \"id\": \"SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c\", \"properties\": {}, \"tags\": [], \"version\": \"latest\", \"sequences\": [ \"SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99\", \"SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4\"], \"subscribers\": [], \"creation_date\": \"2022-08-15T19:21:01.871587\", \"primary_scenario\": true } ] ``` If there is no scenario, the response is an empty list as follows: ``` [] ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_READER` role. responses: 200: content: application/json: schema: allOf: - type: object properties: results: type: array items: $ref: '#/components/schemas/ScenarioSchema' post: tags: - api description: | Creates a new scenario from the *config_id*. If the config does not exist, a 404 error is returned. !!! 
Example === \"Curl\" ```shell curl -X POST http://localhost:5000/api/v1/scenarios?config_id=my_scenario_config ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. In this example the *config_id* value (\"my_scenario_config\") is given as parameter directly in the url. A corresponding `ScenarioConfig^` must exist and must have been configured before. Here is the output message example: ``` {\"msg\": \"scenario created.\", \"scenario\": { \"cycle\": \"CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d\", \"id\": \"SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c\", \"properties\": {}, \"tags\": [], \"version\": \"latest\", \"sequences\": [ \"SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99\", \"SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4\"], \"subscribers\": [], \"creation_date\": \"2022-08-15T19:21:01.871587\", \"primary_scenario\": true} } ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.post(\"http://localhost:5000/api/v1/scenarios?config_id=my_scenario_config\") print(response) print(response.json()) ``` In this example the *config_id* value (\"my_scenario_config\") is given as parameter directly in the url. A corresponding `ScenarioConfig^` must exist and must have been configured before. Here is the output example: ``` {\"msg\": \"scenario created.\", \"scenario\": { \"cycle\": \"CYCLE_863418_fdd1499a-8925-4540-93fd-9dbfb4f0846d\", \"id\": \"SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c\", \"properties\": {}, \"tags\": [], \"version\": \"latest\", \"sequences\": [ \"SEQUENCE_mean_baseline_5af317c9-34df-48b4-8a8a-bf4007e1de99\", \"SEQUENCE_arima_90aef6b9-8922-4a0c-b625-b2c6f3d19fa4\"], \"subscribers\": [], \"creation_date\": \"2022-08-15T19:21:01.871587\", \"primary_scenario\": true} } ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EDITOR` role. parameters: - in: query name: config_id schema: type: string description: The identifier of the scenario configuration. responses: 201: content: application/json: schema: type: object properties: message: type: string description: Status message. scenario: ScenarioSchema \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") def fetch_config(self, config_id): config = Config.scenarios.get(config_id) if not config: raise NonExistingScenarioConfig(config_id) return config @_middleware def get(self): schema = ScenarioResponseSchema(many=True) manager = _ScenarioManagerFactory._build_manager() scenarios = [_to_model(REPOSITORY, scenario) for scenario in manager._get_all()] return schema.dump(scenarios) @_middleware def post(self): args = request.args config_id = args.get(\"config_id\") response_schema = ScenarioResponseSchema() manager = _ScenarioManagerFactory._build_manager() if not config_id: raise ConfigIdMissingException config = self.fetch_config(config_id) scenario = manager._create(config) return { \"message\": \"Scenario was created.\", \"scenario\": response_schema.dump(_to_model(REPOSITORY, scenario)), }, 201 class ScenarioExecutor(Resource): \"\"\"Execute a scenario --- post: tags: - api description: | Executes a scenario by *scenario_id*. If the scenario does not exist, a 404 error is returned. !!! 
Example === \"Curl\" ```shell curl -X POST http://localhost:5000/api/v1/scenarios/submit/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c ``` In this example the REST API is served on port 5000 on localhost. We are using curl command line client. `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to submit. Here is the output message example: ``` {\"message\": \"Executed scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c.\"} ``` === \"Python\" This Python example requires the 'requests' package to be installed (`pip install requests`). ```python import requests response = requests.post( \"http://localhost:5000/api/v1/scenarios/submit/SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c\") print(response) print(response.json()) ``` `SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c` is the value of the *scenario_id* parameter. It represents the identifier of the Scenario we want to submit. Here is the output example: ``` {\"message\": \"Executed scenario SCENARIO_63cb358d-5834-4d73-84e4-a6343df5e08c.\"} ``` !!! Note When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint requires the `TAIPY_EXECUTOR` role. parameters: - in: path name: scenario_id schema: type: string description: The identifier of the scenario to submit. responses: 202: content: application/json: schema: type: object properties: message: type: string description: Status message. scenario: ScenarioSchema 404: description: No scenario has the *scenario_id* identifier. \"\"\" def __init__(self, **kwargs): self.logger = kwargs.get(\"logger\") @_middleware def post(self, scenario_id): _get_or_raise(scenario_id) manager = _ScenarioManagerFactory._build_manager() manager._submit(scenario_id) return {\"message\": f\"Scenario {scenario_id} was submitted.\"} "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. "} {"text": "from functools import wraps from importlib import util from taipy.core.common._utils import _load_fct def _middleware(f): @wraps(f) def wrapper(*args, **kwargs): if _using_enterprise(): return _enterprise_middleware()(f)(*args, **kwargs) else: return f(*args, **kwargs) return wrapper def _using_enterprise(): return util.find_spec(\"taipy.enterprise\") is not None def _enterprise_middleware(): return _load_fct(\"taipy.enterprise.rest.api.middlewares._middleware\", \"_middleware\") "} {"text": "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. 
"} {"text": " class ConfigIdMissingException(Exception): def __init__(self): self.message = \"Config id is missing.\" class ScenarioIdMissingException(Exception): def __init__(self): self.message = \"Scenario id is missing.\" class SequenceNameMissingException(Exception): def __init__(self): self.message = \"Sequence name is missing.\" "} {"text": "from marshmallow import Schema, fields class CycleSchema(Schema): name = fields.String() frequency = fields.String() properties = fields.Dict() creation_date = fields.String() start_date = fields.String() end_date = fields.String() class CycleResponseSchema(CycleSchema): id = fields.String() "} {"text": "from marshmallow import Schema, fields class TaskSchema(Schema): config_id = fields.String() id = fields.String() owner_id = fields.String() parent_ids = fields.List(fields.String) input_ids = fields.List(fields.String) function_name = fields.String() function_module = fields.String() output_ids = fields.List(fields.String) version = fields.String() "} {"text": "from marshmallow import Schema, fields class CallableSchema(Schema): fct_name = fields.String() fct_module = fields.String() class JobSchema(Schema): id = fields.String() task_id = fields.String() status = fields.String() force = fields.Boolean() creation_date = fields.String() subscribers = fields.Nested(CallableSchema) stacktrace = fields.List(fields.String) "} {"text": "from marshmallow import Schema, fields class SequenceSchema(Schema): owner_id = fields.String() parent_ids = fields.List(fields.String) tasks = fields.List(fields.String) version = fields.String() properties = fields.Dict() class SequenceResponseSchema(SequenceSchema): id = fields.String() subscribers = fields.List(fields.Dict) "} {"text": "from .cycle import CycleResponseSchema, CycleSchema from .datanode import ( CSVDataNodeConfigSchema, DataNodeConfigSchema, DataNodeFilterSchema, DataNodeSchema, ExcelDataNodeConfigSchema, GenericDataNodeConfigSchema, InMemoryDataNodeConfigSchema, JSONDataNodeConfigSchema, MongoCollectionDataNodeConfigSchema, PickleDataNodeConfigSchema, SQLDataNodeConfigSchema, SQLTableDataNodeConfigSchema, ) from .job import JobSchema from .scenario import ScenarioResponseSchema, ScenarioSchema from .sequence import SequenceResponseSchema, SequenceSchema from .task import TaskSchema __all__ = [ \"DataNodeSchema\", \"DataNodeFilterSchema\", \"TaskSchema\", \"SequenceSchema\", \"SequenceResponseSchema\", \"ScenarioSchema\", \"ScenarioResponseSchema\", \"CycleSchema\", \"CycleResponseSchema\", \"JobSchema\", ] "} {"text": "from marshmallow import Schema, fields, pre_dump class DataNodeSchema(Schema): config_id = fields.String() scope = fields.String() id = fields.String() storage_type = fields.String() name = fields.String() owner_id = fields.String() parent_ids = fields.List(fields.String) last_edit_date = fields.String() job_ids = fields.List(fields.String) version = fields.String() cacheable = fields.Boolean() validity_days = fields.Float() validity_seconds = fields.Float() edit_in_progress = fields.Boolean() properties = fields.Dict() class DataNodeConfigSchema(Schema): name = fields.String() storage_type = fields.String() scope = fields.Integer() cacheable = fields.Boolean() @pre_dump def serialize_scope(self, obj, **kwargs): obj.scope = obj.scope.value return obj class CSVDataNodeConfigSchema(DataNodeConfigSchema): path = fields.String() default_path = fields.String() has_header = fields.Boolean() class InMemoryDataNodeConfigSchema(DataNodeConfigSchema): default_data = fields.Inferred() class 
PickleDataNodeConfigSchema(DataNodeConfigSchema): path = fields.String() default_path = fields.String() default_data = fields.Inferred() class SQLTableDataNodeConfigSchema(DataNodeConfigSchema): db_name = fields.String() table_name = fields.String() class SQLDataNodeConfigSchema(DataNodeConfigSchema): db_name = fields.String() read_query = fields.String() write_query = fields.List(fields.String()) class MongoCollectionDataNodeConfigSchema(DataNodeConfigSchema): db_name = fields.String() collection_name = fields.String() class ExcelDataNodeConfigSchema(DataNodeConfigSchema): path = fields.String() default_path = fields.String() has_header = fields.Boolean() sheet_name = fields.String() class GenericDataNodeConfigSchema(DataNodeConfigSchema): pass class JSONDataNodeConfigSchema(DataNodeConfigSchema): path = fields.String() default_path = fields.String() class OperatorSchema(Schema): key = fields.String() value = fields.Inferred() operator = fields.String() class DataNodeFilterSchema(DataNodeConfigSchema): operators = fields.List(fields.Nested(OperatorSchema)) join_operator = fields.String(default=\"AND\") "} {"text": "from marshmallow import Schema, fields class ScenarioSchema(Schema): sequences = fields.Dict() properties = fields.Dict() primary_scenario = fields.Boolean(default=False) tags = fields.List(fields.String) version = fields.String() class ScenarioResponseSchema(ScenarioSchema): id = fields.String() subscribers = fields.List(fields.Dict) cycle = fields.String() creation_date = fields.String() "} {"text": "from importlib import util import inspect import os if util.find_spec(\"taipy\") and util.find_spec(\"taipy.gui\"): from taipy.gui import Gui taipy_path = f\"{os.path.dirname(os.path.dirname(inspect.getfile(Gui)))}\" potential_file_paths = [ f\"{taipy_path}{os.sep}gui{os.sep}viselements.json\", f\"{taipy_path}{os.sep}gui_core{os.sep}viselements.json\", ] if potential_file_paths := [ path for path in potential_file_paths if os.path.exists(path) ]: print(f\"Path: {';;;'.join(potential_file_paths)}\") else: print(\"Visual element descriptor files not found in taipy-gui package\") else: print(\"taipy-gui package is not installed within the selected python environment\") "} {"text": "import taipy as tp import pandas as pd from taipy import Config from taipy.gui import Gui, Markdown, notify Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() input_text_cfg = Config.configure_data_node(id=\"input_text\") text_length_cfg = Config.configure_data_node(id=\"text_length\") count_characters_cfg = Config.configure_task(id=\"count_characters\", function=len, input=input_text_cfg, output=text_length_cfg) scenario_cfg = Config.configure_scenario_from_tasks(id=\"count_characters\", task_configs=[count_characters_cfg]) scenario_list = tp.get_scenarios() input_text = \"\" main_md = Markdown(\"\"\" # Taipy Character Counter Enter Text: <|{input_text}|input|> <|Submit|button|on_action=submit|> ---------- Past Results: <|{create_results_table(scenario_list)}|table|width=fit-content|> \"\"\") def submit(state): scenario = tp.create_scenario(scenario_cfg) scenario.input_text.write(state.input_text) state.input_text = \"\" tp.submit(scenario, wait=True) notify(state, \"S\", \"Submitted!\") state.scenario_list = tp.get_scenarios() def create_results_table(scenario_list): table = [(s.id, s.input_text.read(), s.text_length.read()) for s in scenario_list] df = pd.DataFrame(table, columns=[\"id\", \"input_text\", \"text_length\"]) print(df) return df tp.Core().run() gui = 
Gui(main_md) gui.run(run_browser=False) "} {"text": "import json def add_line(source, line, step): line = line.replace('Getting Started with Taipy', 'Getting Started with Taipy on Notebooks') line = line.replace('(../src/', '(https://docs.taipy.io/en/latest/getting_started/src/') line = line.replace('(dataset.csv)', '(https://docs.taipy.io/en/latest/getting_started/step_01/dataset.csv)') if line.startswith('!['): if step != 'index': line = line.replace('(', '(https://docs.taipy.io/en/latest/getting_started/' + step + '/') else: line = line.replace('(', '(https://docs.taipy.io/en/latest/getting_started/') # conversion of Markdown image to HTML img_src = line.split('](')[1].split(')')[0] width = line.split('](')[1].split(')')[1].split(' ')[1] source.append('
<div align=\"center\">\\n') source.append(f'<img src=\"{img_src}\" {width}/>\\n') source.append('</div>
\\n') elif step == 'step_00' and line.startswith('Gui(page='): source.append('\\n') source.append('Gui(\"# Getting Started with Taipy\").run(dark_mode=False)\\n') elif line.startswith('Gui(page=') and step != 'step_00': search_for_md = line.split(')') name_of_md = search_for_md[0][9:] source.append(f'gui = Gui({name_of_md})\\n') source.append(f'gui.run()\\n') elif step == 'step_00' and line.startswith('from taipy'): source.append(\"from taipy.gui import Gui, Markdown\\n\") elif 'Notebook' in line and 'step' in step: pass else: source.append(line + '\\n') return source def detect_new_cell(notebook, source, cell, line, execution_count, force_creation=False): if line.startswith('```python') or line.startswith('```') and cell == 'code' or force_creation: source = source[:-1] if cell == 'code': notebook['cells'].append({ \"cell_type\": \"code\", \"metadata\": {}, \"outputs\": [], \"execution_count\": execution_count, \"source\": source }) cell = 'markdown' execution_count += 1 else: notebook['cells'].append({ \"cell_type\": \"markdown\", \"metadata\": {}, \"source\": source }) cell = 'code' source = [] return cell, source, notebook, execution_count def create_introduction(notebook, execution_count): with open('index.md', 'r') as f: text = f.read() split_text = text.split('\\n') source = [] for line in split_text: if not line.startswith('``` console'): add_line(source, line, 'index') else: break notebook['cells'].append({ \"cell_type\": \"markdown\", \"metadata\": {}, \"source\": source }) notebook['cells'].append({ \"cell_type\": \"code\", \"metadata\": {}, \"outputs\": [], \"execution_count\": execution_count, \"source\": ['# !pip install taipy\\n', '# !pip install scikit-learn\\n', '# !pip install statsmodels'] }) notebook['cells'].append({ \"cell_type\": \"markdown\", \"metadata\": {}, \"source\": ['## Using Notebooks\\n',] }) execution_count += 1 return notebook, execution_count def create_steps(notebook, execution_count): steps = ['step_0' + str(i) for i in range(0, 10)] + ['step_10', 'step_11', 'step_12'] source = [] for step in steps: if source != []: cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count, force_creation=True) with open(step + '/ReadMe.md', 'r') as f: text = f.read() split_text = text.split('\\n') cell = \"markdown\" for line in split_text: add_line(source, line, step) cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count) return notebook, execution_count if __name__ == '__main__': notebook = { \"cells\": [], \"metadata\": { \"language_info\": { \"codemirror_mode\": { \"name\": \"ipython\", \"version\": 3 }, \"file_extension\": \".py\", \"mimetype\": \"text/x-python\", \"name\": \"python\", \"nbconvert_exporter\": \"python\", \"pygments_lexer\": \"ipython3\" }, \"orig_nbformat\": 4 }, \"nbformat\": 4, \"nbformat_minor\": 2 } execution_count = 0 notebook, execution_count = create_introduction(notebook, execution_count) notebook, execution_count = create_steps(notebook, execution_count) with open('getting_started.ipynb', 'w', encoding='utf-8') as f: json.dump(notebook, f, indent=2) "} {"text": "from step_08 import * # Get all the scenarios already created all_scenarios = tp.get_scenarios() # Delete the scenarios that don't have a name attribute # All the scenarios of the previous steps do not have an associated name so they will be deleted, # this will not be the case for those created by this step [tp.delete(scenario.id) for scenario in all_scenarios if scenario.name is 
None] # Initial variables for the scenario selector # The list of possible values (lov) for the scenario selector is a list of tuples (scenario_id, scenario_name). # Only the scenario name gets displayed; the first element of the selected tuple is used to retrieve the scenario id. scenario_selector = [(scenario.id, scenario.name) for scenario in tp.get_scenarios()] selected_scenario = None scenario_manager_page = page + \"\"\" # Create your scenario **Prediction date**\\n\\n <|{day}|date|not with_time|> **Max capacity**\\n\\n <|{max_capacity}|number|> **Number of predictions**\\n\\n<|{n_predictions}|number|> <|Create new scenario|button|on_action=create_scenario|> ## Scenario <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> ## Display the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> \"\"\" def create_name_for_scenario(state) -> str: name = f\"Scenario ({state.day.strftime('%A, %d %b')}; {state.max_capacity}; {state.n_predictions})\" # Make the name unique if another scenario already uses it if name in [s[1] for s in state.scenario_selector]: name += f\" ({len(state.scenario_selector)})\" return name # Change the create_scenario function in order to change the default parameters # and allow the creation of multiple scenarios def create_scenario(state): print(\"Execution of scenario...\") # Extra information for the scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario scenario = tp.create_scenario(scenario_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Submit the scenario that is currently selected submit_scenario(state) def submit_scenario(state): print(\"Submitting scenario...\") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Convert the date to the right format
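# The date control of the GUI may return a datetime carrying a time component; rebuilding it from year/month/day below keeps only the selected day.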
day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes scenario.day.write(day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) scenario.creation_date = state.day # Execute the scenario tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # Update the chart directly update_chart(state) def update_scenario_selector(state, scenario): print(\"Updating scenario selector...\") # Update the scenario selector state.scenario_selector += [(scenario.id, scenario.name)] def update_chart(state): # Now the selected_scenario comes from the state, so it is interactive scenario = tp.get(state.selected_scenario[0]) pipeline = scenario.pipelines[state.selected_pipeline] update_predictions_dataset(state, pipeline) def on_change(state, var_name: str, var_value): if var_name == \"n_week\": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset[\"Date\"].dt.isocalendar().week == var_value] elif var_name == \"selected_pipeline\" or var_name == \"selected_scenario\": # Update the chart when the scenario or the pipeline is changed # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == \"__main__\": tp.Core().run() Gui(page=scenario_manager_page).run(dark_mode=False) "} {"text": "from step_07 import * # Initial variables ## Initial variables for the scenario day = dt.datetime(2021, 7, 26) n_predictions = 40 max_capacity = 200 page_scenario_manager = page + \"\"\" # Change your scenario **Prediction date**\\n\\n <|{day}|date|not with_time|> **Max capacity**\\n\\n <|{max_capacity}|number|> **Number of predictions**\\n\\n<|{n_predictions}|number|> <|Save changes|button|on_action={submit_scenario}|> Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action={update_chart}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> \"\"\" def create_scenario(): global selected_scenario print(\"Creating scenario...\") scenario = tp.create_scenario(scenario_cfg) selected_scenario = scenario.id tp.submit(scenario) def submit_scenario(state): print(\"Submitting scenario...\") # Get the selected scenario: in this step, a single scenario is created at startup and then modified here.
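# tp.get() accepts an entity id and returns the matching entity; here it retrieves the scenario created at startup.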
scenario = tp.get(selected_scenario) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the datanodes scenario.day.write(state_day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) # Execute the pipelines/code tp.submit(scenario) # Update the chart when we change the scenario update_chart(state) def update_chart(state): # Select the right scenario and pipeline scenario = tp.get(selected_scenario) pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline update_predictions_dataset(state, pipeline) if __name__ == \"__main__\": global selected_scenario tp.Core().run() # Creation of a single scenario create_scenario() Gui(page=page_scenario_manager).run(dark_mode=False) "} {"text": "import datetime as dt import pandas as pd from taipy import Config, Scope from step_01 import path_to_csv # Datanodes (3.1) ## Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id=\"initial_dataset\", storage_type=\"csv\", path=path_to_csv, scope=Scope.GLOBAL) # We assume the current day is the 26th of July 2021. # This day can be changed to simulate multiple executions of scenarios on different days day_cfg = Config.configure_data_node(id=\"day\", default_data=dt.datetime(2021, 7, 26)) n_predictions_cfg = Config.configure_data_node(id=\"n_predictions\", default_data=40) max_capacity_cfg = Config.configure_data_node(id=\"max_capacity\", default_data=200) ## Remaining Data Nodes cleaned_dataset_cfg = Config.configure_data_node(id=\"cleaned_dataset\", validity_period=dt.timedelta(days=1), scope=Scope.GLOBAL) predictions_cfg = Config.configure_data_node(id=\"predictions\", scope=Scope.PIPELINE) # Functions (3.2) def clean_data(initial_dataset: pd.DataFrame): print(\" Cleaning data\") # Convert the date column to datetime initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date']) cleaned_dataset = initial_dataset.copy() return cleaned_dataset def predict_baseline(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int): print(\" Predicting baseline\") # Select the train data train_dataset = cleaned_dataset[cleaned_dataset['Date'] < day] predictions = train_dataset['Value'][-n_predictions:].reset_index(drop=True) predictions = predictions.apply(lambda x: min(x, max_capacity)) return predictions # Tasks (3.3) clean_data_task_cfg = Config.configure_task(id=\"clean_data\", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg, skippable=True) predict_baseline_task_cfg = Config.configure_task(id=\"predict_baseline\", function=predict_baseline, input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg], output=predictions_cfg) "} {"text": "from step_05 import * from step_06 import scenario_cfg from taipy import Config # Set the list of pipelines names # It will be used in a selector of pipelines pipeline_selector = [\"baseline\", \"ml\"] selected_pipeline = pipeline_selector[0] scenario_page = page + \"\"\" Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action=update_chart|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> \"\"\" def create_scenario(): print(\"Creating scenario...\") scenario = tp.create_scenario(scenario_cfg) scenario = submit_scenario(scenario) return scenario def 
submit_scenario(scenario): print(\"Submitting scenario...\") tp.submit(scenario) return scenario def update_chart(state): print(\"'Update chart' button clicked\") # Select the right pipeline pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline # It is the same function as the one created in step_05 update_predictions_dataset(state, pipeline) if __name__ == \"__main__\": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() tp.Core().run() # Creation of our first scenario scenario = create_scenario() Gui(page=scenario_page).run(dark_mode=False) "} {"text": "# For the sake of clarity, we have used an AutoRegressive model rather than a pure ML model such as: # Random Forest, Linear Regression, LSTM, etc. from statsmodels.tsa.ar_model import AutoReg from taipy import Config from step_04 import * from step_03 import cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg, predictions_cfg, pd, dt # This is the function that will be used by the task def predict_ml(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int): print(\" Predicting with ML\") # Select the train data train_dataset = cleaned_dataset[cleaned_dataset[\"Date\"] < day] # Fit the AutoRegressive model model = AutoReg(train_dataset[\"Value\"], lags=7).fit() # Get the n_predictions forecasts predictions = model.forecast(n_predictions).reset_index(drop=True) predictions = predictions.apply(lambda x: min(x, max_capacity)) return predictions # Create the task configuration of the predict_ml function. ## We use the same input and output as the previous predict_baseline task, but we change the function predict_ml_task_cfg = Config.configure_task(id=\"predict_ml\", function=predict_ml, input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg], output=predictions_cfg) # Create the new pipeline that will clean and predict with the ml model ml_pipeline_cfg = Config.configure_pipeline(id=\"ml\", task_configs=[clean_data_task_cfg, predict_ml_task_cfg]) # Configure our scenario, which represents our business problem.
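# A scenario configuration groups pipeline configurations: every scenario created from scenario_cfg will hold both the baseline and the ml pipelines, so a single submission runs the two models on the same inputs.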
scenario_cfg = Config.configure_scenario(id=\"scenario\", pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg]) # The configuration is now complete if __name__ == \"__main__\": tp.Core().run() # Create the scenario scenario = tp.create_scenario(scenario_cfg) # Execute it tp.submit(scenario) # Get the resulting scenario ## Print the predictions of the two pipelines (baseline and ml) print(\"\\nBaseline predictions\\n\", scenario.baseline.predictions.read()) print(\"\\nMachine Learning predictions\\n\", scenario.ml.predictions.read()) "} {"text": "from step_01 import dataset, n_week, Gui # Select the week based on the slider value dataset_week = dataset[dataset[\"Date\"].dt.isocalendar().week == n_week] page = \"\"\" # Getting started with Taipy Select week: *<|{n_week}|>* <|{n_week}|slider|min=1|max=52|> <|{dataset_week}|chart|type=bar|x=Date|y=Value|height=100%|width=100%|> \"\"\" # on_change is the function that is called when any variable is changed def on_change(state, var_name: str, var_value): if var_name == \"n_week\": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset[\"Date\"].dt.isocalendar().week == var_value] if __name__ == \"__main__\": Gui(page=page).run(dark_mode=False) "} {"text": "from step_11 import * from sklearn.metrics import mean_absolute_error, mean_squared_error # Initial dataset for comparison comparison_scenario = pd.DataFrame({\"Scenario Name\": [], \"RMSE baseline\": [], \"MAE baseline\": [], \"RMSE ML\": [], \"MAE ML\": []}) # Indicates if the comparison is done comparison_scenario_done = False # Selector for metrics metric_selector = [\"RMSE\", \"MAE\"] selected_metric = metric_selector[0] def compute_metrics(historical_data, predicted_data): # squared=False makes mean_squared_error return the RMSE rather than the MSE rmse = mean_squared_error(historical_data, predicted_data, squared=False) mae = mean_absolute_error(historical_data, predicted_data) return rmse, mae def compare(state): print(\"Comparing...\") # Initial lists for comparison scenario_names = [] rmses_baseline = [] maes_baseline = [] rmses_ml = [] maes_ml = [] # Go through all the primary scenarios all_scenarios = tp.get_primary_scenarios() all_scenarios_ordered = sorted(all_scenarios, key=lambda x: x.creation_date.timestamp()) for scenario in all_scenarios_ordered: print(f\"Scenario {scenario.name}\") # Go through all the pipelines for pipeline in scenario.pipelines.values(): print(f\" Pipeline {pipeline.config_id}\") # Get the predictions dataset with the historical data only_prediction_dataset = create_predictions_dataset(pipeline)[-pipeline.n_predictions.read():] # Series to compute the metrics (true values and predicted values) historical_values = only_prediction_dataset[\"Historical values\"] predicted_values = only_prediction_dataset[\"Predicted values\"] # Compute the metrics for this pipeline and primary scenario rmse, mae = compute_metrics(historical_values, predicted_values) # Add values to the appropriate lists if \"baseline\" in pipeline.config_id: rmses_baseline.append(rmse) maes_baseline.append(mae) elif \"ml\" in pipeline.config_id: rmses_ml.append(rmse) maes_ml.append(mae) scenario_names.append(scenario.creation_date.strftime(\"%A %d %b\")) # Update comparison_scenario state.comparison_scenario = pd.DataFrame({\"Scenario Name\": scenario_names, \"RMSE baseline\": rmses_baseline, \"MAE baseline\": maes_baseline, \"RMSE ML\": rmses_ml, \"MAE ML\": maes_ml}) # Once comparison_scenario_done is set to True, # the part with the table and the graphs is rendered state.comparison_scenario_done = True # Performance page page_performance = 
\"\"\"
<|part|render={comparison_scenario_done}| <|Table|expanded=False|expandable| <|{comparison_scenario}|table|width=100%|> |> <|{selected_metric}|selector|lov={metric_selector}|dropdown|> <|part|render={selected_metric==\"RMSE\"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=RMSE baseline|y[2]=RMSE ML|height=100%|width=100%|> |> <|part|render={selected_metric==\"MAE\"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=MAE baseline|y[2]=MAE ML|height=100%|width=100%|> |> |>
<|Compare primaries|button|on_action=compare|>
\"\"\" lov_menu = [(\"Data-Visualization\", \"Data Visualization\"), (\"Scenario-Manager\", \"Scenario Manager\"), (\"Performance\", \"Performance\")] # Create a menu with our pages root_md = \"<|menu|label=Menu|lov={lov_menu}|on_action=menu_fct|>\" pages = {\"/\":root_md, \"Data-Visualization\":page_data_visualization, \"Scenario-Manager\":page_scenario_manager, \"Performance\":page_performance} def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page navigate(state, var_value[\"args\"][0]) if __name__ == \"__main__\": tp.Core().run() Gui(pages=pages).run(dark_mode=False) "} {"text": "import numpy as np import pandas as pd from step_04 import tp, baseline_pipeline_cfg, dt from step_02 import * # Initialize the \"predictions\" dataset predictions_dataset = pd.DataFrame( {\"Date\": [dt.datetime(2021, 6, 1)], \"Historical values\": [np.NaN], \"Predicted values\": [np.NaN]}) # Add a button and a chart for our predictions pipeline_page = page + \"\"\" Press <|predict|button|on_action=predict|> to predict with default parameters (30 predictions) and June 1st as day. <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> \"\"\" def predict(state): print(\"'Predict' button clicked\") pipeline = create_and_submit_pipeline() update_predictions_dataset(state, pipeline) def create_and_submit_pipeline(): print(\"Execution of pipeline...\") # Create the pipeline from the pipeline config pipeline = tp.create_pipeline(baseline_pipeline_cfg) # Submit the pipeline (Execution) tp.submit(pipeline) return pipeline def create_predictions_dataset(pipeline): print(\"Creating predictions dataset...\") # Read data from the pipeline predictions = pipeline.predictions.read() day = pipeline.day.read() n_predictions = pipeline.n_predictions.read() cleaned_data = pipeline.cleaned_dataset.read() # Set arbitrarily the time window for the chart as 5 times the number of predictions window = 5 * n_predictions # Create the historical dataset that will be displayed new_length = len(cleaned_data[cleaned_data[\"Date\"] < day]) + n_predictions temp_df = cleaned_data[:new_length] temp_df = temp_df[-window:].reset_index(drop=True) # Create the series that will be used in the concat historical_values = pd.Series(temp_df[\"Value\"], name=\"Historical values\") predicted_values = pd.Series([np.NaN] * len(temp_df), name=\"Predicted values\") predicted_values[-len(predictions):] = predictions # Create the predictions dataset # Columns : [Date, Historical values, Predicted values] return pd.concat([temp_df[\"Date\"], historical_values, predicted_values], axis=1) def update_predictions_dataset(state, pipeline): print(\"Updating predictions dataset...\") state.predictions_dataset = create_predictions_dataset(pipeline) if __name__ == \"__main__\": tp.Core().run() Gui(page=pipeline_page).run(dark_mode=False) "} {"text": "from taipy import Gui import pandas as pd def get_data(path_to_csv: str): # pandas.read_csv() returns a pd.DataFrame dataset = pd.read_csv(path_to_csv) dataset[\"Date\"] = pd.to_datetime(dataset[\"Date\"]) return dataset # Read the dataframe path_to_csv = \"dataset.csv\" dataset = get_data(path_to_csv) # Initial value n_week = 10 # Definition of the page page = \"\"\" # Getting started with Taipy Week number: *<|{n_week}|>* Interact with this slider to change the week number: <|{n_week}|slider|min=1|max=52|> ## Dataset: Display the last three months of data: 
<|{dataset[9000:]}|chart|type=bar|x=Date|y=Value|height=100%|> <|{dataset}|table|height=400px|width=95%|> \"\"\" if __name__ == \"__main__\": # Create a Gui object with our page content Gui(page=page).run(dark_mode=False) "} {"text": "from step_10 import * from step_06 import ml_pipeline_cfg from taipy import Config, Frequency from taipy.gui import notify # Create scenarios each day and compare them scenario_daily_cfg = Config.configure_scenario(id=\"scenario\", pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg], frequency=Frequency.DAILY) if __name__ == \"__main__\": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() # Change the initial scenario selector to see which scenarios are primary scenario_selector = [(scenario.id, (\"*\" if scenario.is_primary else \"\") + scenario.name) for scenario in tp.get_scenarios()] # Redefine update_scenario_selector to add \"*\" in the display name when the scenario is primary def update_scenario_selector(state, scenario): print(\"Updating scenario selector...\") # Create the scenario name for the scenario selector # This name changes depending on whether the scenario is primary or not scenario_name = (\"*\" if scenario.is_primary else \"\") + scenario.name print(scenario_name) # Update the scenario selector state.scenario_selector += [(scenario.id, scenario_name)] selected_scenario_is_primary = None # Change the create_scenario function to create a scenario with the selected frequency def create_scenario(state): print(\"Execution of scenario...\") # Extra information for scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario attached to the daily cycle scenario = tp.create_scenario(scenario_daily_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Change the scenario that is currently selected submit_scenario(state) # This is the same code as in step_9_dynamic_scenario_creation.py def submit_scenario(state): print(\"Submitting scenario...\") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes # if state.day != scenario.day.read(): scenario.day.write(state_day) # if int(state.n_predictions) != scenario.n_predictions.read(): scenario.n_predictions.write(int(state.n_predictions)) # if state.max_capacity != scenario.max_capacity.read(): scenario.max_capacity.write(int(state.max_capacity)) # if state.day != scenario.creation_date: scenario.creation_date = state.day # Execute the pipelines/code tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # Update the chart directly update_chart(state) def make_primary(state): print(\"Making the current scenario primary...\") scenario = tp.get(state.selected_scenario[0]) # Make the current scenario primary tp.set_primary(scenario) # Update the scenario selector accordingly state.scenario_selector = [(scenario.id, (\"*\" if scenario.is_primary else \"\") + scenario.name) for scenario in tp.get_scenarios()] state.selected_scenario_is_primary = True def remove_scenario_from_selector(state, scenario): # Keep all the scenarios in the selector that don't have the scenario.id state.scenario_selector = [(s[0], s[1]) for s in state.scenario_selector if s[0] != scenario.id] 
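# Then fall back to the most recently added entry so the selector no longer points at the
# deleted scenario (note: this assumes at least one scenario remains; with an empty
# selector, the indexing below would raise an IndexError)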
state.selected_scenario = state.scenario_selector[-1] def delete_scenario(state): scenario = tp.get(state.selected_scenario[0]) if scenario.is_primary: # Notify the user that primary scenarios cannot be deleted notify(state, \"info\", \"Cannot delete the primary scenario\") else: # Delete the scenario and the related objects (datanodes, tasks, jobs,...) tp.delete(scenario.id) # Update the scenario selector accordingly remove_scenario_from_selector(state, scenario) # Add \"Delete scenario\" and \"Make primary\" buttons page_scenario_manager = \"\"\" # Create your scenario: <|layout|columns=1 1 1 1| <| **Prediction date**\\n\\n <|{day}|date|not with_time|> |> <| **Max capacity**\\n\\n <|{max_capacity}|number|> |> <| **Number of predictions**\\n\\n<|{n_predictions}|number|> |> <|

<|Create new scenario|button|on_action=create_scenario|> |> |> <|part|render={len(scenario_selector) > 0}| <|layout|columns=1 1| <|layout|columns=1 1| <| ## Scenario \\n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> |>



<|Delete scenario|button|on_action=delete_scenario|active={len(scenario_selector)>0}|> <|Make primary|button|on_action=make_primary|active={not(selected_scenario_is_primary) and len(scenario_selector)>0}|> |> <| ## Display the pipeline \\n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|> |> |> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> |> \"\"\" lov_menu = [(\"Data-Visualization\", \"Data Visualization\"), (\"Scenario-Manager\", \"Scenario Manager\")] # Create a menu with our pages root_md = \"<|menu|label=Menu|lov={lov_menu}|on_action=menu_fct|>\" pages = {\"/\":root_md, \"Data-Visualization\":page_data_visualization, \"Scenario-Manager\":page_scenario_manager} def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page navigate(state, var_value[\"args\"][0]) def on_change(state, var_name: str, var_value): if var_name == \"n_week\": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset[\"Date\"].dt.isocalendar().week == var_value] elif var_name == \"selected_pipeline\" or var_name == \"selected_scenario\": # Update selected_scenario_is_primary indicating if the current scenario is primary or not state.selected_scenario_is_primary = tp.get(state.selected_scenario[0]).is_primary # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == \"__main__\": tp.Core().run() Gui(pages=pages).run(dark_mode=False) "} {"text": "from taipy import Gui # A dark mode is available in Taipy # However, we will use the light mode for the Getting Started Gui(page=\"# Getting started with *Taipy*\").run(dark_mode=False) "} {"text": "from step_09 import * from taipy.gui import navigate # Our first page is the original page # (with the slider and the chart that displays a week of the historical data) page_data_visualization = page # Second page: create scenarios and display results page_scenario_manager = \"\"\" # Create your scenario <|layout|columns=1 1 1 1| <| **Prediction date**\\n\\n <|{day}|date|not with_time|> |> <| **Max capacity**\\n\\n <|{max_capacity}|number|> |> <| **Number of predictions**\\n\\n<|{n_predictions}|number|> |> <|

\\n <|Create new scenario|button|on_action=create_scenario|> |> |> <|part|render={len(scenario_selector) > 0}| <|layout|columns=1 1| <| ## Scenario \\n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> |> <| ## Display the pipeline \\n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|> |> |> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> |> \"\"\" lov_menu = [(\"Data-Visualization\", \"Data Visualization\"), (\"Scenario-Manager\", \"Scenario Manager\")] # Create a menu with our pages root_md = \"<|menu|label=Menu|lov={lov_menu}|on_action=menu_fct|>\" pages = {\"/\":root_md, \"Data-Visualization\":page_data_visualization, \"Scenario-Manager\":page_scenario_manager} def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page navigate(state, var_value[\"args\"][0]) if __name__ == \"__main__\": tp.Core().run() Gui(pages=pages).run(dark_mode=False) "} {"text": "import taipy as tp from step_03 import Config, clean_data_task_cfg, predict_baseline_task_cfg, dt # Create the first pipeline configuration baseline_pipeline_cfg = Config.configure_pipeline(id=\"baseline\", task_configs=[clean_data_task_cfg, predict_baseline_task_cfg]) ## Execute the \"baseline\" pipeline if __name__ == \"__main__\": tp.Core().run() # Create the pipeline baseline_pipeline = tp.create_pipeline(baseline_pipeline_cfg) # Submit the pipeline (Execution) tp.submit(baseline_pipeline) # Read output data from the pipeline baseline_predictions = baseline_pipeline.predictions.read() print(\"Predictions of baseline algorithm\\n\", baseline_predictions) "} {"text": "AWS_ACCESS_KEY = '' AWS_SECRET_KEY = '' AWS_REGION = '' S3_BUCKET_NAME = '' "} {"text": "from flask import Flask, render_template, redirect, url_for, request, flash from werkzeug.utils import secure_filename from uploads.file_handler import is_file_type_allowed, upload_file_to_s3, get_presigned_file_url from localStoragePy import localStoragePy from transformers import AutoTokenizer, pipeline # from tensorflow.keras.preprocessing.sequence import pad_sequences # from tensorflow.keras.models import load_model from taipy.gui import Gui import webbrowser import tensorflow as tf import pandas as pd import numpy as np import pytorch_pretrained_bert as ppb assert 'bert-large-cased' in ppb.modeling.PRETRAINED_MODEL_ARCHIVE_MAP app = Flask(__name__) app.secret_key = '3d6f45a5fc12445dbac2f59c3b6c7cb1' localStorage = localStoragePy('app', 'json') target_arr = [\"df['col1'].nunique()\", \"df.sort_values(by=['col1'],inplace =True)\", \"df.sort_values(by=['col1', 'col2'],inplace =True)\", \"df.sort_values(by=['col1', 'col2', 'col3'],inplace =True)\", \"df.drop(columns = 'col1',inplace = True)\", \"new_df=df.loc[:, ['col1','col2']]\", \"df['col1'].value_counts()\", \"<|{dataset}|chart|type=bar|x=col1|y=col2|height=100%|>\", \"<|{dataset}|chart|type=pie|values=col2|labels=col1|height=100%|>\", \"<|{dataset}|chart|mode=lines|x=col1|y=col2|>\"] portNo = 8888 @app.route(\"/\", methods=['GET']) def home(): return render_template('home.html') @app.route(\"/upload-file\", methods=['POST']) def upload_file(): if 'file' not in request.files: flash('No file uploaded', 'danger') return redirect(url_for('home')) file_to_upload = request.files['file'] if file_to_upload.filename == '': flash('No file uploaded', 'danger') return redirect(url_for('home')) if file_to_upload and 
is_file_type_allowed(file_to_upload.filename): provided_file_name = secure_filename(file_to_upload.filename) stored_file_name = upload_file_to_s3(file_to_upload, provided_file_name) localStorage.setItem(\"stored_file_name\", stored_file_name) localStorage.setItem(\"provided_file_name\", provided_file_name) flash(f'{provided_file_name} was successfully uploaded', 'success') return redirect(url_for('home')) @app.route(\"/query\", methods=['POST']) def query(): try: query = request.form['query'] provided_file_name = localStorage.getItem(\"provided_file_name\") stored_file_name = localStorage.getItem(\"stored_file_name\") csv = get_presigned_file_url(stored_file_name, provided_file_name) df = pd.read_csv(csv) print(\"query: \" + query) prediction_int, cols_requested = getPredictionInt(df, query) if prediction_int < 7: panda_query = target_arr[prediction_int] print(panda_query) for i in range(len(cols_requested)): panda_query = panda_query.replace(\"col\" + str(i+1), cols_requested[i]) exec(panda_query) html_string = '''
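<!-- Result page: a presigned S3 link to download the processed CSV, a link back to the home page, and the DataFrame rendered as an HTML table. -->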

<a href=\"{new_presigned_url}\">Download CSV</a>

<a href=\"/\">Click here to return to home page</a>

{table}
''' filename = \"new.csv\" df.to_csv(filename, index=False) file_to_upload = open(\"new.csv\", 'rb') new_provided_file_name = secure_filename(filename) new_stored_file_name = upload_file_to_s3(file_to_upload, new_provided_file_name) new_presigned_url = get_presigned_file_url(new_stored_file_name, new_provided_file_name) print(\"Presigned url: \" + new_presigned_url) df = df.reset_index(drop=True) html = df.to_html(classes='table table-striped table-bordered w-75 mx-auto') html = html.replace(\"text-align: right;\", \"text-align: left;\") toDisplay = html_string.format(table = html, new_presigned_url = new_presigned_url) return toDisplay else: print(\"taipy\") taipy_query = target_arr[prediction_int] dataset = df for i in range(len(cols_requested)): taipy_query = taipy_query.replace(\"col\" + str(i+1), cols_requested[i]) page = \"\"\"{0}\"\"\" page = page.format(taipy_query) gui = Gui(page) global portNo portNum = portNo portNo += 1 webbrowser.open_new_tab('http://localhost:' + str(portNum)) gui.run(port=portNum) print(\"hello world\") return redirect(url_for('home')) except Exception: print(\"Invalid query\") flash('Invalid query', 'danger') return redirect(url_for('home')) def getPredictionInt(df, query): cols = df.columns sentence = query words = sentence.split() cols_requested = [] for item in cols: for word in words: if(item.upper() == word.upper()): cols_requested.append(item) general_sentence = sentence for i in range(len(cols_requested)): general_sentence = general_sentence.replace(cols_requested[i], \"col\" + str(i+1)) model_id = \"tanishabhagwanani/distilbert-base-uncased-finetuned-emotion\" classifier = pipeline(\"text-classification\", model=model_id) custom_question = query preds = classifier(custom_question, return_all_scores=True) preds_df = pd.DataFrame(preds[0]) prediction_int = np.argmax(preds_df.score) return prediction_int, cols_requested if __name__ == '__main__': app.run(host=\"localhost\", port=8000, debug=True)"} {"text": "import uuid import boto3 from config import AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET_NAME s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_KEY, region_name=AWS_REGION ) ALLOWED_FILE_TYPES = {'csv'} S3_BUCKET_NAME = S3_BUCKET_NAME S3_EXPIRES_IN_SECONDS = 100 def get_file_type(filename): return '.' 
in filename and filename.rsplit('.', 1)[1].lower() def is_file_type_allowed(filename): return get_file_type(filename) in ALLOWED_FILE_TYPES def upload_file_to_s3(file, provided_file_name): stored_file_name = f'{str(uuid.uuid4())}.{get_file_type(provided_file_name)}' s3.upload_fileobj(file, S3_BUCKET_NAME, stored_file_name) return stored_file_name def get_presigned_file_url(stored_file_name, provided_file_name): if not stored_file_name or not provided_file_name: return return s3.generate_presigned_url( 'get_object', Params = { 'Bucket': S3_BUCKET_NAME, 'Key': stored_file_name, 'ResponseContentDisposition': f\"attachment; filename = {provided_file_name}\" }, ExpiresIn = S3_EXPIRES_IN_SECONDS )"} {"text": "import json def add_line(source, line, step): on_change_needed = ['step_02', 'step_09', 'step_11'] line = line.replace('Getting Started with Taipy', 'Getting Started with Taipy on Notebooks') line = line.replace('(../src/', '(https://docs.taipy.io/getting_started/src/') line = line.replace('(dataset.csv)', '(https://docs.taipy.io/getting_started/step_01/dataset.csv)') if line.startswith('!['): if step != 'index': line = line.replace('(', '(https://docs.taipy.io/getting_started/' + step + '/') else: line = line.replace('(', '(https://docs.taipy.io/getting_started/') # conversion of Markdown image to HTML img_src = line.split('](')[1].split(')')[0] width = line.split('](')[1].split(')')[1].split(' ')[1] source.append('<div align=\"center\">\\n') source.append(f'<img src=\"{img_src}\" {width}>\\n') source.append('</div>\\n') elif step == 'step_00' and line.startswith('Gui(page='): source.append('\\n') source.append('# We can use Gui(\"# Getting Started with Taipy\").run() directly\\n') source.append('# However, we need a Markdown and Gui object to modify the content of the page\\n') source.append('# in the Notebook\\n') source.append('\\n') source.append('main_page = Markdown(\"# Getting Started with Taipy\")\\n') source.append('gui = Gui(main_page)\\n') source.append('gui.run(dark_mode=False)\\n') elif line.startswith('Gui(page=') and step != 'step_00': search_for_md = line.split(')') name_of_md = search_for_md[0][9:] source.append('gui.stop()\\n') if step in on_change_needed: source.append('gui.on_change = on_change\\n') source.append(f'main_page.set_content({name_of_md})\\n') source.append('gui.run()\\n') elif step == 'step_00' and line.startswith('from taipy'): source.append(\"from taipy.gui import Gui, Markdown\\n\") elif 'Notebook' in line and 'step' in step: pass else: source.append(line + '\\n') return source def detect_new_cell(notebook, source, cell, line, execution_count, force_creation=False): if line.startswith('```python') or line.startswith('```') and cell == 'code' or force_creation: source = source[:-1] if cell == 'code': notebook['cells'].append({ \"cell_type\": \"code\", \"metadata\": {}, \"outputs\": [], \"execution_count\": execution_count, \"source\": source }) cell = 'markdown' execution_count += 1 else: notebook['cells'].append({ \"cell_type\": \"markdown\", \"metadata\": {}, \"source\": source }) cell = 'code' source = [] return cell, source, notebook, execution_count def create_introduction(notebook, execution_count): with open('index.md', 'r') as f: text = f.read() split_text = text.split('\\n') source = [] for line in split_text: if not line.startswith('``` console'): add_line(source, line, 'index') else: break notebook['cells'].append({ \"cell_type\": \"markdown\", \"metadata\": {}, \"source\": source }) notebook['cells'].append({ \"cell_type\": \"code\", \"metadata\": {}, \"outputs\": [], \"execution_count\": execution_count, \"source\": ['# !pip install taipy\\n', '# !pip install scikit-learn\\n', '# !pip install statsmodels'] }) notebook['cells'].append({ \"cell_type\": \"markdown\", \"metadata\": {}, \"source\": ['## Using Notebooks\\n', 'Some functions used in this Getting Started are primarily meant for Notebooks (`gui.stop()`, `gui.run()`, `gui.on_change`, `set_content()`)\\n', 'For more explanation of these functions, you can find the related documentation [here](https://docs.taipy.io/manuals/gui/notebooks/)\\n', '**Warning**: Do not forget to stop your server when you are finished. 
You can do so by restarting your kernel.\\n'] }) execution_count += 1 return notebook, execution_count def create_steps(notebook, execution_count): steps = ['step_0' + str(i) for i in range(0, 10)] + ['step_10', 'step_11', 'step_12'] source = [] for step in steps: if source != []: cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count, force_creation=True) with open(step + '/ReadMe.md', 'r') as f: text = f.read() split_text = text.split('\\n') cell = \"markdown\" for line in split_text: add_line(source, line, step) cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count) return notebook, execution_count if __name__ == '__main__': notebook = { \"cells\": [], \"metadata\": { \"language_info\": { \"codemirror_mode\": { \"name\": \"ipython\", \"version\": 3 }, \"file_extension\": \".py\", \"mimetype\": \"text/x-python\", \"name\": \"python\", \"nbconvert_exporter\": \"python\", \"pygments_lexer\": \"ipython3\" }, \"orig_nbformat\": 4 }, \"nbformat\": 4, \"nbformat_minor\": 2 } execution_count = 0 notebook, execution_count = create_introduction(notebook, execution_count) notebook, execution_count = create_steps(notebook, execution_count) with open('getting_started.ipynb', 'w', encoding='utf-8') as f: json.dump(notebook, f, indent=2) "} {"text": "from step_08 import * # Get all the scenarios already created all_scenarios = tp.get_scenarios() # Delete the scenarios that don't have a name attribute # All the scenarios of the previous steps do not have an associated name so they will be deleted, # this will not be the case for those created by this step [tp.delete(scenario.id) for scenario in all_scenarios if scenario.name is None] # Initial variable for the scenario selector # The list of possible values (lov) for the scenario selector is a list of tuples (scenario_id, scenario_name), # but the selected_scenario is just used to retrieve the scenario id and what gets displayed is the name of the scenario. 
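# For example, the selector contents end up looking like
# [(\"SCENARIO_scenario_...\", \"Scenario (Monday, 26 Jul; 200; 40)\"), ...]
# (the id shown here is made up; real ids are generated by Taipy at creation time)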
scenario_selector = [(scenario.id, scenario.name) for scenario in tp.get_scenarios()] selected_scenario = None scenario_manager_page = page + \"\"\" # Create your scenario **Prediction date**\\n\\n <|{day}|date|not with_time|> **Max capacity**\\n\\n <|{max_capacity}|number|> **Number of predictions**\\n\\n<|{n_predictions}|number|> <|Create new scenario|button|on_action=create_scenario|> ## Scenario <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> ## Display the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> \"\"\" def create_name_for_scenario(state) -> str: name = f\"Scenario ({state.day.strftime('%A, %d %b')}; {state.max_capacity}; {state.n_predictions})\" # Adjust the name if it is already used by another scenario if name in [s[1] for s in state.scenario_selector]: name += f\" ({len(state.scenario_selector)})\" return name # Change the create_scenario function in order to change the default parameters # and allow the creation of multiple scenarios def create_scenario(state): print(\"Execution of scenario...\") # Extra information for the scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario scenario = tp.create_scenario(scenario_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Submit the scenario that is currently selected submit_scenario(state) def submit_scenario(state): print(\"Submitting scenario...\") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Conversion to the right format day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes scenario.day.write(day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) scenario.creation_date = state.day # Execute the scenario tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # Update the chart directly update_chart(state) def update_scenario_selector(state, scenario): print(\"Updating scenario selector...\") # Update the scenario selector state.scenario_selector += [(scenario.id, scenario.name)] def update_chart(state): # The selected_scenario now comes from the state, so it is interactive scenario = tp.get(state.selected_scenario[0]) pipeline = scenario.pipelines[state.selected_pipeline] update_predictions_dataset(state, pipeline) def on_change(state, var_name: str, var_value): if var_name == \"n_week\": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset[\"Date\"].dt.isocalendar().week == var_value] elif var_name == \"selected_pipeline\" or var_name == \"selected_scenario\": # Update the chart when the scenario or the pipeline is changed # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == \"__main__\": Gui(page=scenario_manager_page).run(dark_mode=False) 
**Number of predictions**\\n\\n<|{n_predictions}|number|> <|Save changes|button|on_action={submit_scenario}|> Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action={update_chart}|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> \"\"\" def create_scenario(): global selected_scenario print(\"Creating scenario...\") scenario = tp.create_scenario(scenario_cfg) selected_scenario = scenario.id tp.submit(scenario) def submit_scenario(state): print(\"Submitting scenario...\") # Get the selected scenario: in this step, a single scenario is created and then modified here scenario = tp.get(selected_scenario) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the datanodes scenario.day.write(state_day) scenario.n_predictions.write(int(state.n_predictions)) scenario.max_capacity.write(int(state.max_capacity)) # Execute the pipelines/code tp.submit(scenario) # Update the chart when we change the scenario update_chart(state) def update_chart(state): # Select the right scenario and pipeline scenario = tp.get(selected_scenario) pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline update_predictions_dataset(state, pipeline) if __name__ == \"__main__\": global selected_scenario # Creation of a single scenario create_scenario() Gui(page=page_scenario_manager).run(dark_mode=False) "} {"text": "import datetime as dt import pandas as pd from taipy import Config, Scope from step_01 import path_to_csv # Datanodes (3.1) ## Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id=\"initial_dataset\", storage_type=\"csv\", path=path_to_csv, scope=Scope.GLOBAL) # We assume the current day is the 26th of July 2021. 
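# Note that default_data only seeds the data node when a scenario is created from
# this configuration; a created scenario can still overwrite the value afterwards,
# e.g. with a (hypothetical) call such as: scenario.day.write(dt.datetime(2021, 7, 28))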
# This day can be changed to simulate multiple executions of scenarios on different days day_cfg = Config.configure_data_node(id=\"day\", default_data=dt.datetime(2021, 7, 26)) n_predictions_cfg = Config.configure_data_node(id=\"n_predictions\", default_data=40) max_capacity_cfg = Config.configure_data_node(id=\"max_capacity\", default_data=200) ## Remaining Data Nodes cleaned_dataset_cfg = Config.configure_data_node(id=\"cleaned_dataset\", cacheable=True, validity_period=dt.timedelta(days=1), scope=Scope.GLOBAL) predictions_cfg = Config.configure_data_node(id=\"predictions\", scope=Scope.PIPELINE) # Functions (3.2) def clean_data(initial_dataset: pd.DataFrame): print(\" Cleaning data\") # Convert the date column to datetime initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date']) cleaned_dataset = initial_dataset.copy() return cleaned_dataset def predict_baseline(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int): print(\" Predicting baseline\") # Select the train data train_dataset = cleaned_dataset[cleaned_dataset['Date'] < day] predictions = train_dataset['Value'][-n_predictions:].reset_index(drop=True) predictions = predictions.apply(lambda x: min(x, max_capacity)) return predictions # Tasks (3.3) clean_data_task_cfg = Config.configure_task(id=\"clean_data\", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg) predict_baseline_task_cfg = Config.configure_task(id=\"predict_baseline\", function=predict_baseline, input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg], output=predictions_cfg) "} {"text": "from step_05 import * from step_06 import scenario_cfg from taipy import Config # Set the list of pipelines names # It will be used in a selector of pipelines pipeline_selector = [\"baseline\", \"ml\"] selected_pipeline = pipeline_selector[0] scenario_page = page + \"\"\" Select the pipeline <|{selected_pipeline}|selector|lov={pipeline_selector}|> <|Update chart|button|on_action=update_chart|> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> \"\"\" def create_scenario(): print(\"Creating scenario...\") scenario = tp.create_scenario(scenario_cfg) scenario = submit_scenario(scenario) return scenario def submit_scenario(scenario): print(\"Submitting scenario...\") tp.submit(scenario) return scenario def update_chart(state): print(\"'Update chart' button clicked\") # Select the right pipeline pipeline = scenario.pipelines[state.selected_pipeline] # Update the chart based on this pipeline # It is the same function as created before in step_5 update_predictions_dataset(state, pipeline) if __name__ == \"__main__\": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() # Creation of our first scenario scenario = create_scenario() Gui(page=scenario_page).run(dark_mode=False) "} {"text": "# For the sake of clarity, we have used an AutoRegressive model rather than a pure ML model such as: # Random Forest, Linear Regression, LSTM, etc from statsmodels.tsa.ar_model import AutoReg from taipy import Config from step_04 import * from step_03 import cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg, predictions_cfg, pd, dt # This is the function that will be used by the task def predict_ml(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int): print(\" Predicting with ML\") # Select the train data train_dataset = 
cleaned_dataset[cleaned_dataset[\"Date\"] < day] # Fit the AutoRegressive model model = AutoReg(train_dataset[\"Value\"], lags=7).fit() # Get the n_predictions forecasts predictions = model.forecast(n_predictions).reset_index(drop=True) predictions = predictions.apply(lambda x: min(x, max_capacity)) return predictions # Create the task configuration of the predict_ml function. ## We use the same input and ouput as the previous predict_baseline task but we change the funtion predict_ml_task_cfg = Config.configure_task(id=\"predict_ml\", function=predict_ml, input=[cleaned_dataset_cfg, n_predictions_cfg, day_cfg, max_capacity_cfg], output=predictions_cfg) # Create the new pipeline that will clean and predict with the ml model ml_pipeline_cfg = Config.configure_pipeline(id=\"ml\", task_configs=[clean_data_task_cfg, predict_ml_task_cfg]) # Configure our scenario which is our business problem. scenario_cfg = Config.configure_scenario(id=\"scenario\", pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg]) # The configuration is now complete if __name__ == \"__main__\": # Create the scenario scenario = tp.create_scenario(scenario_cfg) # Execute it tp.submit(scenario) # Get the resulting scenario ## Print the predictions of the two pipelines (baseline and ml) print(\"\\nBaseline predictions\\n\", scenario.baseline.predictions.read()) print(\"\\nMachine Learning predictions\\n\", scenario.ml.predictions.read()) "} {"text": "from step_01 import dataset, n_week, Gui # Select the week based on the slider value dataset_week = dataset[dataset[\"Date\"].dt.isocalendar().week == n_week] page = \"\"\" # Getting started with Taipy Select week: *<|{n_week}|>* <|{n_week}|slider|min=1|max=52|> <|{dataset_week}|chart|type=bar|x=Date|y=Value|height=100%|width=100%|> \"\"\" # on_change is the function that is called when any variable is changed def on_change(state, var_name: str, var_value): if var_name == \"n_week\": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset[\"Date\"].dt.isocalendar().week == var_value] if __name__ == \"__main__\": Gui(page=page).run(dark_mode=False) "} {"text": "from step_11 import * from sklearn.metrics import mean_absolute_error, mean_squared_error # Initial dataset for comparison comparison_scenario = pd.DataFrame({\"Scenario Name\": [], \"RMSE baseline\": [], \"MAE baseline\": [], \"RMSE ML\": [], \"MAE ML\": []}) # Indicates if the comparison is done comparison_scenario_done = False # Selector for metrics metric_selector = [\"RMSE\", \"MAE\"] selected_metric = metric_selector[0] def compute_metrics(historical_data, predicted_data): rmse = mean_squared_error(historical_data, predicted_data) mae = mean_absolute_error(historical_data, predicted_data) return rmse, mae def compare(state): print(\"Comparing...\") # Initial lists for comparison scenario_names = [] rmses_baseline = [] maes_baseline = [] rmses_ml = [] maes_ml = [] # Go through all the primary scenarios all_scenarios = tp.get_primary_scenarios() all_scenarios_ordered = sorted(all_scenarios, key=lambda x: x.creation_date.timestamp()) for scenario in all_scenarios_ordered: print(f\"Scenario {scenario.name}\") # Go through all the pipelines for pipeline in scenario.pipelines.values(): print(f\" Pipeline {pipeline.config_id}\") # Get the predictions dataset with the historical data only_prediction_dataset = create_predictions_dataset(pipeline)[-pipeline.n_predictions.read():] # Series to compute the metrics (true values and predicted values) historical_values = 
only_prediction_dataset[\"Historical values\"] predicted_values = only_prediction_dataset[\"Predicted values\"] # Compute the metrics for this pipeline and primary scenario rmse, mae = compute_metrics(historical_values, predicted_values) # Add values to the appropriate lists if \"baseline\" in pipeline.config_id: rmses_baseline.append(rmse) maes_baseline.append(mae) elif \"ml\" in pipeline.config_id: rmses_ml.append(rmse) maes_ml.append(mae) scenario_names.append(scenario.creation_date.strftime(\"%A %d %b\")) # Update comparison_scenario state.comparison_scenario = pd.DataFrame({\"Scenario Name\": scenario_names, \"RMSE baseline\": rmses_baseline, \"MAE baseline\": maes_baseline, \"RMSE ML\": rmses_ml, \"MAE ML\": maes_ml}) # When comparison_scenario_done will be set to True, # the part with the graphs will be finally rendered state.comparison_scenario_done = True # Performance page page_performance = \"\"\"
<|part|render={comparison_scenario_done}| <|Table|expanded=False|expandable| <|{comparison_scenario}|table|width=100%|> |> <|{selected_metric}|selector|lov={metric_selector}|dropdown|> <|part|render={selected_metric==\"RMSE\"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=RMSE baseline|y[2]=RMSE ML|height=100%|width=100%|> |> <|part|render={selected_metric==\"MAE\"}| <|{comparison_scenario}|chart|type=bar|x=Scenario Name|y[1]=MAE baseline|y[2]=MAE ML|height=100%|width=100%|> |> |>
<|Compare primaries|button|on_action=compare|>
\"\"\" # Add the page_performance section to the menu multi_pages = \"\"\" <|menu|label=Menu|lov={[\"Data Visualization\", \"Scenario Manager\", \"Performance\"]}|on_action=menu_fct|> <|part|render={page==\"Data Visualization\"}|\"\"\" + page_data_visualization + \"\"\"|> <|part|render={page==\"Scenario Manager\"}|\"\"\" + page_scenario_manager + \"\"\"|> <|part|render={page==\"Performance\"}|\"\"\" + page_performance + \"\"\"|> \"\"\" if __name__ == \"__main__\": Gui(page=multi_pages).run(dark_mode=False) "} {"text": "import numpy as np import pandas as pd from step_04 import tp, baseline_pipeline_cfg, dt from step_02 import * # Initialize the \"predictions\" dataset predictions_dataset = pd.DataFrame( {\"Date\": [dt.datetime(2021, 6, 1)], \"Historical values\": [np.NaN], \"Predicted values\": [np.NaN]}) # Add a button and a chart for our predictions pipeline_page = page + \"\"\" Press <|predict|button|on_action=predict|> to predict with default parameters (30 predictions) and June 1st as day. <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> \"\"\" def predict(state): print(\"'Predict' button clicked\") pipeline = create_and_submit_pipeline() update_predictions_dataset(state, pipeline) def create_and_submit_pipeline(): print(\"Execution of pipeline...\") # Create the pipeline from the pipeline config pipeline = tp.create_pipeline(baseline_pipeline_cfg) # Submit the pipeline (Execution) tp.submit(pipeline) return pipeline def create_predictions_dataset(pipeline): print(\"Creating predictions dataset...\") # Read data from the pipeline predictions = pipeline.predictions.read() day = pipeline.day.read() n_predictions = pipeline.n_predictions.read() cleaned_data = pipeline.cleaned_dataset.read() # Set arbitrarily the time window for the chart as 5 times the number of predictions window = 5 * n_predictions # Create the historical dataset that will be displayed new_length = len(cleaned_data[cleaned_data[\"Date\"] < day]) + n_predictions temp_df = cleaned_data[:new_length] temp_df = temp_df[-window:].reset_index(drop=True) # Create the series that will be used in the concat historical_values = pd.Series(temp_df[\"Value\"], name=\"Historical values\") predicted_values = pd.Series([np.NaN] * len(temp_df), name=\"Predicted values\") predicted_values[-len(predictions):] = predictions # Create the predictions dataset # Columns : [Date, Historical values, Predicted values] return pd.concat([temp_df[\"Date\"], historical_values, predicted_values], axis=1) def update_predictions_dataset(state, pipeline): print(\"Updating predictions dataset...\") state.predictions_dataset = create_predictions_dataset(pipeline) if __name__ == \"__main__\": Gui(page=pipeline_page).run(dark_mode=False) "} {"text": "from taipy import Gui import pandas as pd def get_data(path_to_csv: str): # pandas.read_csv() returns a pd.DataFrame dataset = pd.read_csv(path_to_csv) dataset[\"Date\"] = pd.to_datetime(dataset[\"Date\"]) return dataset # Read the dataframe path_to_csv = \"dataset.csv\" dataset = get_data(path_to_csv) # Initial value n_week = 10 # Definition of the page page = \"\"\" # Getting started with Taipy Week number: *<|{n_week}|>* Interact with this slider to change the week number: <|{n_week}|slider|min=1|max=52|> ## Dataset: Display the last three months of data: <|{dataset[9000:]}|chart|type=bar|x=Date|y=Value|height=100%|> <|{dataset}|table|height=400px|width=95%|> \"\"\" if __name__ == \"__main__\": # Create a Gui object with our 
page content Gui(page=page).run(dark_mode=False) "} {"text": "from step_10 import * from step_06 import ml_pipeline_cfg from taipy import Config, Frequency from taipy.gui import notify # Create scenarios each day and compare them scenario_daily_cfg = Config.configure_scenario(id=\"scenario\", pipeline_configs=[baseline_pipeline_cfg, ml_pipeline_cfg], frequency=Frequency.DAILY) if __name__ == \"__main__\": # Delete all entities Config.configure_global_app(clean_entities_enabled=True) tp.clean_all_entities() # Change the initial scenario selector to see which scenarios are primary scenario_selector = [(scenario.id, (\"*\" if scenario.is_primary else \"\") + scenario.name) for scenario in tp.get_scenarios()] # Redefine update_scenario_selector to add \"*\" in the display name when the scenario is primary def update_scenario_selector(state, scenario): print(\"Updating scenario selector...\") # Create the scenario name for the scenario selector # This name changes depending on whether the scenario is primary or not scenario_name = (\"*\" if scenario.is_primary else \"\") + scenario.name print(scenario_name) # Update the scenario selector state.scenario_selector += [(scenario.id, scenario_name)] selected_scenario_is_primary = None # Change the create_scenario function to create a scenario with the selected frequency def create_scenario(state): print(\"Execution of scenario...\") # Extra information for scenario creation_date = state.day name = create_name_for_scenario(state) # Create a scenario attached to the daily cycle scenario = tp.create_scenario(scenario_daily_cfg, creation_date=creation_date, name=name) state.selected_scenario = (scenario.id, name) # Change the scenario that is currently selected submit_scenario(state) # This is the same code as in step_9_dynamic_scenario_creation.py def submit_scenario(state): print(\"Submitting scenario...\") # Get the currently selected scenario scenario = tp.get(state.selected_scenario[0]) # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes # if state.day != scenario.day.read(): scenario.day.write(state_day) # if int(state.n_predictions) != scenario.n_predictions.read(): scenario.n_predictions.write(int(state.n_predictions)) # if state.max_capacity != scenario.max_capacity.read(): scenario.max_capacity.write(int(state.max_capacity)) # if state.day != scenario.creation_date: scenario.creation_date = state.day # Execute the pipelines/code tp.submit(scenario) # Update the scenario selector and the scenario that is currently selected update_scenario_selector(state, scenario) # Update the chart directly update_chart(state) def make_primary(state): print(\"Making the current scenario primary...\") scenario = tp.get(state.selected_scenario[0]) # Make the current scenario primary tp.set_primary(scenario) # Update the scenario selector accordingly state.scenario_selector = [(scenario.id, (\"*\" if scenario.is_primary else \"\") + scenario.name) for scenario in tp.get_scenarios()] state.selected_scenario_is_primary = True def remove_scenario_from_selector(state, scenario): # Keep all the scenarios in the selector that don't have the scenario.id state.scenario_selector = [(s[0], s[1]) for s in state.scenario_selector if s[0] != scenario.id] state.selected_scenario = state.scenario_selector[-1] def delete_scenario(state): scenario = tp.get(state.selected_scenario[0]) if scenario.is_primary: # Notify the user that primary 
scenarios cannot be deleted notify(state, \"info\", \"Cannot delete the primary scenario\") else: # Delete the scenario and the related objects (datanodes, tasks, jobs,...) tp.delete(scenario.id) # Update the scenario selector accordingly remove_scenario_from_selector(state, scenario) # Add \"Delete scenario\" and \"Make primary\" buttons page_scenario_manager = \"\"\" # Create your scenario: <|layout|columns=1 1 1 1| <| **Prediction date**\\n\\n <|{day}|date|not with_time|> |> <| **Max capacity**\\n\\n <|{max_capacity}|number|> |> <| **Number of predictions**\\n\\n<|{n_predictions}|number|> |> <|

<|Create new scenario|button|on_action=create_scenario|> |> |> <|part|render={len(scenario_selector) > 0}| <|layout|columns=1 1| <|layout|columns=1 1| <| ## Scenario \\n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> |>



<|Delete scenario|button|on_action=delete_scenario|active={len(scenario_selector)>0}|> <|Make primary|button|on_action=make_primary|active={not(selected_scenario_is_primary) and len(scenario_selector)>0}|> |> <| ## Display the pipeline \\n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|> |> |> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> |> \"\"\" # Redefine the multi_pages multi_pages = \"\"\" <|menu|label=Menu|lov={[\"Data Visualization\", \"Scenario Manager\"]}|on_action=menu_fct|> <|part|render={page==\"Data Visualization\"}|\"\"\" + page_data_visualization + \"\"\"|> <|part|render={page==\"Scenario Manager\"}|\"\"\" + page_scenario_manager + \"\"\"|> \"\"\" def on_change(state, var_name: str, var_value): if var_name == \"n_week\": # Update the dataset when the slider is moved state.dataset_week = dataset[dataset[\"Date\"].dt.isocalendar().week == var_value] elif var_name == \"selected_pipeline\" or var_name == \"selected_scenario\": # Update selected_scenario_is_primary indicating if the current scenario is primary or not state.selected_scenario_is_primary = tp.get(state.selected_scenario[0]).is_primary # Check if we can read the data node to update the chart if tp.get(state.selected_scenario[0]).predictions.read() is not None: update_chart(state) if __name__ == \"__main__\": Gui(page=multi_pages).run(dark_mode=False) "} {"text": "from taipy import Gui # A dark mode is available in Taipy # However, we will use the light mode for the Getting Started Gui(page=\"# Getting started with *Taipy*\").run(dark_mode=False) "} {"text": "from step_09 import * # Our first page is the original page # (with the slider and the chart that displays a week of the historical data) page_data_visualization = page # Second page: create scenarios and display results page_scenario_manager = \"\"\" # Create your scenario <|layout|columns=1 1 1 1| <| **Prediction date**\\n\\n <|{day}|date|not with_time|> |> <| **Max capacity**\\n\\n <|{max_capacity}|number|> |> <| **Number of predictions**\\n\\n<|{n_predictions}|number|> |> <|

\\n <|Create new scenario|button|on_action=create_scenario|> |> |> <|part|render={len(scenario_selector) > 0}| <|layout|columns=1 1| <| ## Scenario \\n <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|> |> <| ## Display the pipeline \\n <|{selected_pipeline}|selector|lov={pipeline_selector}|dropdown|> |> |> <|{predictions_dataset}|chart|x=Date|y[1]=Historical values|type[1]=bar|y[2]=Predicted values|type[2]=scatter|height=80%|width=100%|> |> \"\"\" # Create a menu with our pages multi_pages = \"\"\" <|menu|label=Menu|lov={[\"Data Visualization\", \"Scenario Manager\"]}|on_action=menu_fct|> <|part|render={page==\"Data Visualization\"}|\"\"\" + page_data_visualization + \"\"\"|> <|part|render={page==\"Scenario Manager\"}|\"\"\" + page_scenario_manager + \"\"\"|> \"\"\" # The initial page is the \"Data Visualization\" page page = \"Data Visualization\" def menu_fct(state, var_name: str, fct: str, var_value: list): # Change the value of the state.page variable in order to render the correct page state.page = var_value[\"args\"][0] if __name__ == \"__main__\": Gui(page=multi_pages).run(dark_mode=False) "} {"text": "import taipy as tp from step_03 import Config, clean_data_task_cfg, predict_baseline_task_cfg, dt # Create the first pipeline configuration baseline_pipeline_cfg = Config.configure_pipeline(id=\"baseline\", task_configs=[clean_data_task_cfg, predict_baseline_task_cfg]) ## Execute the \"baseline\" pipeline if __name__ == \"__main__\": # Create the pipeline baseline_pipeline = tp.create_pipeline(baseline_pipeline_cfg) # Submit the pipeline (Execution) tp.submit(baseline_pipeline) # Read output data from the pipeline baseline_predictions = baseline_pipeline.predictions.read() print(\"Predictions of baseline algorithm\\n\", baseline_predictions) "} {"text": "from taipy.gui import Gui from keras.models import load_model from PIL import Image import numpy as np class_names = { 0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck', } model = load_model(\"Neural Network Notebook/Cifar10Model.keras\") def predict_image(model, path_to_img): img = Image.open(path_to_img) img = img.convert(\"RGB\") img = img.resize((32, 32)) data = np.asarray(img) data = data / 255 probs = model.predict(np.array([data])[:1]) top_prob = probs.max() top_pred = class_names[np.argmax(probs)] return top_prob, top_pred content = \"\" img_path = \"placeholder_image.png\" prob = 0 pred = \"\" index = \"\"\" <|text-center| <|{\"logo.png\"}|image|width=16vw|> <|{content}|file_selector|extensions=.png|> select an image from your file system <|{pred}|> <|{img_path}|image|> <|{prob}|indicator|value={prob}|min=0|max=100|width=25vw|> > \"\"\" def on_change(state, var_name, var_val): if var_name == \"content\": top_prob, top_pred = predict_image(model, var_val) state.prob = round(top_prob * 100) state.pred = \"this is a \" + top_pred state.img_path = var_val #print(var_name, var_val) app = Gui(page=index) if __name__ == \"__main__\": app.run(use_reloader=True)"} {"text": "import pandas as pd from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import mean_squared_error import numpy as np # Import numpy for RMSE calculation from prophet import Prophet def build_message(name: str): return f\"Hello {name}!\" def clean_data(initial_dataset: 
pd.DataFrame): return initial_dataset def retrained_model(cleaned_dataset: pd.DataFrame): # Split the dataset into features (X) and target (y) X = cleaned_dataset.drop('Claim_Amount', axis=1) y = cleaned_dataset['Claim_Amount'] # Split the data into training and testing sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # Define the categorical columns for one-hot encoding categorical_cols = ['Procedure_Code', 'Diagnosis_Code', 'Provider_Specialty', 'Insurance_Plan'] # Create a column transformer preprocessor = ColumnTransformer( transformers=[ ('cat', OneHotEncoder(drop='first'), categorical_cols) ], remainder='passthrough' ) # Create a pipeline with preprocessing and the Random Forest Regressor model = Pipeline([ ('preprocessor', preprocessor), ('regressor', RandomForestRegressor(n_estimators=100, random_state=42)) ]) # Fit the model on the training data model.fit(X_train, y_train) # Make predictions on the test set predictions = model.predict(X_test) # Calculate Mean Squared Error (MSE) mse = mean_squared_error(y_test, predictions) # Calculate Root Mean Squared Error (RMSE) rmse = np.sqrt(mse) # Print the RMSE print(f\"Mean Squared Error: {mse}\") print(f\"Root Mean Squared Error (RMSE): {rmse}\") return model def predict(model): # Example: Make a prediction for a new patient new_patient_data = pd.DataFrame({ 'Procedure_Code': ['CPT456'], 'Diagnosis_Code': ['ICD-10-B'], 'Provider_Specialty': ['Orthopedics'], 'Patient_Age': [35], 'Insurance_Plan': ['PPO'], 'Deductible': [200], 'Copayment': [30], 'Coinsurance': [20], }, index=[0]) # Predict the claim amount for the new patient new_patient_claim = model.predict(new_patient_data) print(f\"Predicted Claim Amount for New Patient: ${new_patient_claim[0]:.2f}\") # Return the prediction so the task's output data node receives a value return new_patient_claim"} {"text": "import taipy as tp from taipy.core.config import Config Config.load('my_config.toml') scenario_cfg = Config.scenarios['scenario'] if __name__ == '__main__': tp.Core().run() scenario_1 = tp.create_scenario(scenario_cfg) print(\"submitting\") scenario_1.submit() print(\"submit finished\") "} {"text": "from taipy.gui import Html html_page = Html(\"\"\"
\"\"\")"} {"text": "from geopy.geocoders import Nominatim import folium user_agent = \"geoapiExercises/1.0 AIzaSyBIeklfsRu1yz97lY2gJzWHJcmrd7lx2zU\" # Initialize the geocoder with the user agent geolocator = Nominatim(user_agent=user_agent, timeout=10) # List of locations to geocode locations = [\"Denver, CO, United States\", \"New York, NY, United States\", \"Los Angeles, CA, United States\"] # Create an empty map map_location = folium.Map(location=[0, 0], zoom_start=5) # Iterate through the list of locations for location in locations: # Perform geocoding location_info = geolocator.geocode(location) if location_info: # Extract latitude and longitude latitude = location_info.latitude longitude = location_info.longitude # Add a marker for the geocoded location folium.Marker([latitude, longitude], popup=location).add_to(map_location) else: print(f\"Geocoding was not successful for the location: {location}\") # Save or display the map (as an HTML file) map_location.save(\"geocoded_locations_map.html\") print(\"Map created and saved as 'geocoded_locations_map.html'\") "} {"text": "from taipy.gui import Gui, notify import pandas as pd import yfinance as yf from taipy.config import Config import taipy as tp import datetime as dt from taipy import Core from show_hospitals_map import html_page from flask import Flask, request, session, jsonify, redirect, render_template from flask_restful import Api, Resource import requests Config.load(\"config_model_train.toml\") scenario_cfg = Config.scenarios['stock'] tickers = yf.Tickers(\"msft aapl goog\") root_md = \"<|navbar|>\" property_chart = { \"type\": \"lines\", \"x\": \"Date\", \"y[1]\": \"Open\", \"y[2]\": \"Close\", \"y[3]\": \"High\", \"y[4]\": \"Low\", \"color[1]\": \"green\", \"color[2]\": \"grey\", \"color[3]\": \"red\", \"color[4]\": \"yellow\", } df = pd.DataFrame([], columns=[\"Date\", \"High\", \"Low\", \"Open\", \"Close\"]) df_pred = pd.DataFrame([], columns = ['Date','Close_Prediction']) stock = \"\" stock_text = \"No Stock to Show\" chart_text = \"No Chart to Show\" stocks = [] page = \"\"\" # Stock Portfolio ### Choose the stock to show <|toggle|theme|> <|layout|columns=1 1| <| <|{stock_text}|> <|{stock}|selector|lov=MSFT;AAPL;GOOG;Reset|dropdown|> <|Press for Stock|button|on_action=on_button_action|> <|Get the future predictions|button|on_action=get_predictions|> |> <|{stock} <|{chart_text}|> <|{df}|chart|properties={property_chart}|> |> |> \"\"\" pages = { \"/\" : root_md, \"home\" : page, \"claim\": \"empty page\" } def on_button_action(state): if state.stock == \"Reset\": state.stock_text = \"No Stock to Show\" state.chart_text = \"No Chart to Show\" state.df = pd.DataFrame([], columns=[\"Date\", \"High\", \"Low\", \"Open\", \"Close\"]) state.df_pred = pd.DataFrame([], columns = ['Date','Close_Prediction']) state.pred_text = \"No Prediction to Show\" else: state.stock_text = f\"The stock is {state.stock}\" state.chart_text = f\"Monthly history of stock {state.stock}\" state.df = tickers.tickers[state.stock].history().reset_index() state.df.to_csv(f\"{stock}.csv\", index=False) def get_predictions(state): scenario_stock = tp.create_scenario(scenario_cfg) scenario_stock.initial_dataset.path = f\"{stock}\".csv notify(state, 'success', 'camehere') scenario_stock.write(state.df) tp.submit(scenario_stock) state.df_pred = scenario_stock.predictions.read() state.df_pred.to_csv(\"pred.csv\", index=False) tp.Core().run() # Gui(pages=pages).run(use_reloader=True) app = Flask(__name__) # app = Flask(__name__) app.secret_key = 
\"your_secret_key\" # Set a secret key for session management api = Api(app) class SignupResource(Resource): def get(self): return redirect(\"/signup.html\") def post(self): SIGNUP_API_URL = \"https://health-insurance-rest-apis.onrender.com/api/signup\" signup_data = { 'username': request.form['username'], 'password': request.form['password'], 'email': request.form['email'] } headers = { 'Content-Type': 'application/json' } print(signup_data) response = requests.post(SIGNUP_API_URL, headers=headers, json=signup_data) print(\"response\", response) if response.status_code == 200: return redirect(\"/login.html\") else: return 'Signup Failed' # Login Resource class LoginResource(Resource): def get(self): \"\"\" Return a simple login page HTML \"\"\" return redirect(\"/login.html\") def post(self): email = request.form['email'] password = request.form['password'] auth_data = { 'username': email, 'password': password } AUTH_API_URL = \"https://health-insurance-rest-apis.onrender.com/api/login\" response = requests.post(AUTH_API_URL, json=auth_data) if response.status_code == 200: auth_data = response.json() access_token = auth_data.get('access_token') refresh_token = auth_data.get('refresh_token') # Store tokens in the session session['access_token'] = access_token session['refresh_token'] = refresh_token return redirect(\"/home\") else: return 'Login failed', 401 # Protected Resource class ProtectedResource(Resource): def get(self): # Check if the JWT token is present in the session if 'jwt_token' in session: jwt_token = session['jwt_token'] # You can add logic here to verify the JWT token if needed # For simplicity, we assume the token is valid return {'message': 'Access granted for protected route', 'jwt_token': jwt_token}, 200 else: return {'message': 'Access denied'}, 401 print(\"registered the apis\") # Add resources to the API api.add_resource(LoginResource, '/login') api.add_resource(ProtectedResource, '/protected') api.add_resource(SignupResource, '/signup') @app.before_request def check_access_token(): # print ('access_token' in session, \"checkIt\") if request.endpoint != 'login' and 'access_token' not in session: # # Redirect to the login page if not on the login route and no access_token is in the session # print(request.endpoint, \"endpoint\") return redirect(\"/login\") gui = Gui(pages=pages, flask=app).run(debug=False) "} {"text": "from taipy import Config, Scope import pandas as pd from prophet import Prophet from functions import * # Input Data Nodes initial_dataset_cfg = Config.configure_data_node(id=\"initial_dataset\", storage_type=\"csv\", default_path='df.csv') cleaned_dataset_cfg = Config.configure_data_node(id=\"cleaned_dataset\") clean_data_task_cfg = Config.configure_task(id=\"clean_data_task\", function=clean_data, input=initial_dataset_cfg, output=cleaned_dataset_cfg, skippable=True) model_training_cfg = Config.configure_data_node(id=\"model_output\") predictions_cfg = Config.configure_data_node(id=\"predictions\") model_training_task_cfg = Config.configure_task(id=\"model_retraining_task\", function=retrained_model, input=cleaned_dataset_cfg, output=model_training_cfg, skippable=True) predict_task_cfg = Config.configure_task(id=\"predict_task\", function=predict, input=model_training_cfg, output=predictions_cfg, skippable=True) # Create the first pipeline configuration # retraining_model_pipeline_cfg = Config.configure_pipeline( # id=\"model_retraining_pipeline\", # task_configs=[clean_data_task_cfg, model_training_task_cfg], # ) # Run the Taipy Core service # 
{"text": "from taipy import Config from functions import build_message name_data_node_cfg = Config.configure_data_node(id=\"name\") message_data_node_cfg = Config.configure_data_node(id=\"message\") build_msg_task_cfg = Config.configure_task(\"build_msg\", build_message, name_data_node_cfg, message_data_node_cfg) scenario_cfg = Config.configure_scenario_from_tasks(\"scenario\", task_configs=[build_msg_task_cfg]) Config.export('my_config.toml')"} {"text": "from functools import wraps import jwt from flask import request, abort from flask import current_app def token_required(f): @wraps(f) def decorated(*args, **kwargs): token = None if \"Authorization\" in request.headers: token = request.headers[\"Authorization\"].split(\" \")[1] if not token: return { \"message\": \"Authentication Token is missing!\", \"data\": None, \"error\": \"Unauthorized\" }, 401 try: # data = jwt.decode(token, current_app.config[\"SECRET_KEY\"], algorithms=[\"RS256\"]) # current_user = models.User().get_by_id(data[\"user_id\"]) current_user = {\"user_id\": 12, \"active\": True} # Stand-in until the lookup above is wired up if current_user is None: return { \"message\": \"Invalid Authentication token!\", \"data\": None, \"error\": \"Unauthorized\" }, 401 if not current_user[\"active\"]: abort(403) except Exception as e: return { \"message\": \"Something went wrong\", \"data\": None, \"error\": str(e) }, 500 return f(current_user, *args, **kwargs) return decorated"} {"text": "from flask import Flask, request, session, jsonify from flask_restful import Api, Resource app = Flask(__name__) app.secret_key = \"your_secret_key\" # Set a secret key for session management api = Api(app) # Dummy user data for demonstration users = { 'maneesh': {'password': 'securepassword'} } # Login Resource class LoginResource(Resource): def post(self): data = request.get_json() username = data.get('username') password = data.get('password') # Check if user exists and password is correct if username in users and users[username]['password'] == password: # Simulate receiving a JWT token from a third-party API jwt_token = \"your_received_jwt_token\" # Store the JWT token in the session session['jwt_token'] = jwt_token return {'message': 'Login successful'}, 200 else: return {'message': 'Invalid credentials'}, 401 # Protected Resource class ProtectedResource(Resource): def get(self): # Check if the JWT token is present in the session if 'jwt_token' in session: jwt_token = session['jwt_token'] # You can add logic here to verify the JWT token if needed # For simplicity, we assume the token is valid return {'message': 'Access granted for protected route', 'jwt_token': jwt_token}, 200 else: return {'message': 'Access denied'}, 401 # Add resources to the API api.add_resource(LoginResource, '/login') api.add_resource(ProtectedResource, '/protected') if __name__ == '__main__': app.run(debug=True) "}
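{"text": "
# Companion sketch (not part of the original scripts): the token_required
# decorator above leaves jwt.decode commented out. This is the minimal PyJWT
# round trip it would rely on, using a shared-secret HS256 setup (the commented
# line mentions RS256, which would instead require an RSA key pair); the secret
# value and claim names here are placeholders.
import datetime as dt
import jwt

SECRET_KEY = 'change-me'  # placeholder; in Flask this would come from current_app.config['SECRET_KEY']

def issue_token(user_id: int) -> str:
    # Encode a short-lived token carrying the user id and an expiry claim.
    payload = {
        'user_id': user_id,
        'exp': dt.datetime.now(dt.timezone.utc) + dt.timedelta(hours=1),
    }
    return jwt.encode(payload, SECRET_KEY, algorithm='HS256')

def verify_token(token: str) -> dict:
    # Raises jwt.ExpiredSignatureError / jwt.InvalidTokenError on bad tokens;
    # the 'exp' claim is checked automatically by PyJWT.
    return jwt.decode(token, SECRET_KEY, algorithms=['HS256'])

if __name__ == '__main__':
    token = issue_token(12)
    print(verify_token(token)['user_id'])  # -> 12
"}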