"""Push the Ahneman 2018 ORD CSV datasets to the Hugging Face Hub.

Each CSV file is loaded separately and pushed to the same Hub repo
(``cmmauro/ORD_Ahneman_2018``) under a different ``data_dir``.
"""

import datasets

# Target repository on the Hugging Face Hub.
REPO_ID = "cmmauro/ORD_Ahneman_2018"

# (local CSV path, data_dir inside the Hub repo) — one entry per dataset.
DATASETS = [
    ("data/Sanitized_Ahneman_ORD_Data.csv", "Sanitized Dataset"),
    ("data/Ahneman_ORD_Data.csv", "Original Dataset"),
    ("data/Prepared_Data.csv", "Prepared Dataset for ML"),
]


def main() -> None:
    """Load each CSV and push it to the Hub under its own data_dir."""
    for csv_path, data_dir in DATASETS:
        # keep_in_memory avoids writing an Arrow cache for these small files.
        dataset = datasets.load_dataset(
            "csv",
            data_files=csv_path,
            keep_in_memory=True,
            sep=",",
        )
        print(f"Pushing {csv_path!r} to {REPO_ID} ({data_dir})")
        dataset.push_to_hub(repo_id=REPO_ID, data_dir=data_dir)


if __name__ == "__main__":
    main()