# ORD_Ahneman_2018/src/05.run_autoML_updated.py
#Split prepared data into training, validation, and test sets, then train an H2O AutoML model and analyze it (SHAP feature attribution + test-set performance).
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection, metrics
import pyarrow as pa  # used indirectly by h2o's multi-threaded frame conversion (version-dependent)
import shap
import h2o
from h2o.automl import H2OAutoML
#Read data from one-hot encoded data .csv file
file_path = 'data/Prepared_Data.csv' #Sanitized data
ohe_df = pd.read_csv(file_path)
print("OHE DF Shape: ", ohe_df.shape)
# Create numpy arrays for inputs and outputs.
X = ohe_df.drop(columns=["yield"]).values
y = ohe_df["yield"].values
# Verify array shapes
print("Shapes of input and output arrays:")
print("X size: ", X.shape, ", Y size: ", y.shape)
#Split data twice: first 70% train+valid / 30% test, then carve 10% of the
#full data (0.1 / 0.7 of the training portion) out as a validation set,
#giving a 60/10/30 train/validation/test split overall.
#random_state fixes the first shuffle for reproducibility; the second split
#keeps row order (shuffle=False), so the whole split is deterministic.
_X_train, X_test, _y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, random_state=0)
X_train, X_valid, y_train, y_valid = model_selection.train_test_split(
    _X_train, _y_train, test_size=(0.1 / 0.7), shuffle=False
)
# Check lengths
print("X_train size: ", X_train.shape, ", y_train size: ", y_train.shape)
print("X_valid size: ", X_valid.shape, ", y_valid size: ", y_valid.shape)
print("X_test size: ", X_test.shape, ", y_test size: ", y_test.shape)
print("Is length of data frame equal to sum of split data set lengths?",
len(ohe_df) == X_train.shape[0] + X_valid.shape[0] + X_test.shape[0])
print("X_train_Dataset", X_train[0:20])
print("Y_train_Dataset", y_train[0:20])
# Start the H2O cluster (locally)
h2o.init(nthreads=-1)
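# nthreads=-1 lets H2O use all available cores. Memory can also be capped if
# needed, e.g. h2o.init(nthreads=-1, max_mem_size="4G"); the "4G" value here
# is illustrative, not a recommendation.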
# Rebuild a pandas DataFrame from the numpy training arrays, restoring the
# original feature names so the model columns and SHAP plots stay interpretable
feature_names = ohe_df.drop(columns=["yield"]).columns
data_train_df = pd.DataFrame(X_train, columns=feature_names)
data_train_df['yield'] = y_train
print("Length Data Train", data_train_df.shape)
#Shorten data frames for faster training/validation
data_train_df = data_train_df[0:250]
#Read pandas dataframe into H2O Frame
h2o_data_train = h2o.H2OFrame(data_train_df)
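# H2OFrame infers column types on ingestion; the one-hot features are 0/1
# integers, so H2O should parse them as numeric rather than categorical.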
#Specify target columns & features
target = "yield" #Main objective is to predict yields based on reaction conditions. Yield is therefore the target variable.
features = [col for col in h2o_data_train.columns if col != target]
#Initialize AutoML and train models
aml = H2OAutoML(max_models=8, exclude_algos=['StackedEnsemble']) #Exclude stacked ensemble models
aml.train(x= features, y= target, training_frame= h2o_data_train)
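# Note: the hold-out validation split made earlier is not passed in here.
# H2OAutoML.train accepts a validation_frame argument, but it is only used
# when internal cross-validation is disabled (nfolds=0); by default AutoML
# cross-validates on the training frame.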
#Display leaderboard for all models
lb = aml.leaderboard
print(lb.head(rows=lb.nrows))
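# For regression runs the leaderboard is sorted by mean residual deviance by
# default, so aml.leader below is the top-ranked model under that metric.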
#Store model leaderboard/performance data for SHAP analysis
best_model = aml.leader # Retrieve best-performing model
# Create a background frame from the training data
background_frame = h2o_data_train[0:100] # Use the first 100 rows of training data as background frame
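# The background frame provides the reference rows the contributions are
# computed against; each row's SHAP values are (roughly) averaged over this
# baseline set.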
# Use a smaller sample for SHAP analysis to reduce computation time
#sample_data_train = h2o_data_train[:500]
sample_data_train = h2o_data_train
# Compute SHAP values using best model found by H2O AutoML
shap_values = best_model.predict_contributions(sample_data_train, background_frame=background_frame)
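# predict_contributions is supported for H2O's tree-based models (GBM, DRF,
# XGBoost); per row, the contributions plus the BiasTerm sum to the model's
# raw prediction.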
# Convert SHAP values to Pandas DataFrame for plotting using multi-threading
shap_df = shap_values.as_data_frame(use_pandas=True, use_multi_thread=True)
print("shap_df", shap_df[0:3])
# Drop the BiasTerm column (the model's baseline prediction) if present
if 'BiasTerm' in shap_df.columns:
    shap_df = shap_df.drop('BiasTerm', axis=1)
print("Original SHAP DataFrame columns:", shap_df.columns)
# Function to clean and consolidate column names in shap_df
def consolidate_shap_columns(shap_df):
    # Strip the per-level suffixes ('.True', '.False', '.missing(NA)') that
    # H2O appends to contribution columns for categorical features
    shap_df.columns = shap_df.columns.str.replace(r'\.(True|False|missing\(NA\))$', '', regex=True)
    # Drop columns duplicated by the consolidation, keeping the first occurrence
    shap_df = shap_df.loc[:, ~shap_df.columns.duplicated()]
    return shap_df
# Convert SHAP values dataframe columns to get rid of extensions
shap_df = consolidate_shap_columns(shap_df)
print("Cleaned SHAP Columns", shap_df.columns)
# Convert H2OFrame with original data to Pandas DataFrame
df_train_pandas = h2o_data_train.as_data_frame(use_pandas=True, use_multi_thread=True)
# List of feature names from training data
feature_columns = [col for col in df_train_pandas.columns if col != 'yield']
print("Feature columns", feature_columns)
# Ensure alignment between SHAP DataFrame and original training features
shap_df = shap_df[feature_columns]
print("Original data columns:")
print(df_train_pandas.columns)
print("SHAP data columns:")
print(shap_df.columns)
#Remove "yield" column from data_train_pandas df to ensure consistency with shap_df columns.
df_train_pandas = df_train_pandas.drop(columns=["yield"])
# Verifying column alignment between training data columns and shap columns
assert list(shap_df.columns) == list(df_train_pandas.columns), "Feature columns do not match between SHAP values and data"
# -----Visualize using SHAP summary plot-----
# Global feature importance as a bar chart
shap.summary_plot(shap_df.values, df_train_pandas, plot_type="bar")
# Detailed per-sample SHAP plot; suppress the automatic display so the figure
# can be saved before plt.show() clears it
shap.summary_plot(shap_df.values, df_train_pandas, show=False)
plt.tight_layout()
plt.savefig("SHAP_Analysis_Summary.png")
plt.show()
#------------Analyze Model Performance------------
# Convert the test set to an H2OFrame with the same feature names as the training frame
h2o_test = h2o.H2OFrame(pd.DataFrame(X_test, columns=feature_names))
#Set up loss curves: find the first leaderboard model that exposes a scoring history
model_with_history = None
for model_id in aml.leaderboard.as_data_frame()['model_id']:
    model = h2o.get_model(model_id)
    if hasattr(model, 'scoring_history'):
        model_with_history = model
        break
# Check if the selected model has a scoring history available
if model_with_history and hasattr(model_with_history, 'scoring_history'):
    scoring_history = model_with_history.scoring_history()
else:
    print("No suitable model with scoring history found.")
    scoring_history = pd.DataFrame()  # Empty frame avoids errors downstream
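# scoring_history columns vary by algorithm (e.g. GBM/XGBoost report per-tree
# training_rmse/validation_rmse, DeepLearning reports per-epoch metrics),
# which is why the plotting code below checks for each column before using it.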
# Generate test-set predictions with the best model, reusing h2o_test so the
# feature names match the frame the model was trained on
preds_h2o = aml.leader.predict(h2o_test).as_data_frame().values.flatten()
# Calculate RMSE and R^2
r2 = metrics.r2_score(y_test, preds_h2o)
rmse = np.sqrt(metrics.mean_squared_error(y_test, preds_h2o))
print(f"Test RMSE: {rmse}")
print(fr"Test $R^2$: {r2}")
# Plot model performance
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 12))
fig.suptitle("Buchwald-Hartwig AutoML Model Performance")
if not scoring_history.empty:
    if 'training_rmse' in scoring_history.columns:
        ax1.plot(scoring_history['training_rmse'], 'b', label='Training RMSE')
    if 'validation_rmse' in scoring_history.columns:
        ax1.plot(scoring_history['validation_rmse'], 'g', label='Validation RMSE')
else:
    ax1.text(0.5, 0.5, 'Scoring history unavailable', horizontalalignment='center', verticalalignment='center')
ax1.legend()
ax1.set_ylabel("RMSE")
ax1.set_xlabel("Epoch/Tree Index")
ax1.set_title(f"Loss Curves for {model_with_history.model_id if model_with_history else 'AutoML model'}")
# Plot predictions vs. ground truth
ax2.scatter(y_test, preds_h2o, c='b', marker='o', label='Predictions')
ax2.plot([min(y_test), max(y_test)], [min(y_test), max(y_test)], "r-", lw=2) # Line y=x
ax2.set_ylabel("Predicted Yield")
ax2.set_xlabel("Ground Truth Yield")
ax2.set_title("Predictions vs Ground Truth")
ax2.text(0.15, 0.9 * max(y_test), fr"Test RMSE: {round(rmse, 3)}", fontsize=12)
ax2.text(0.15, 0.8 * max(y_test), fr"Test $R^2$: {round(r2, 3)}", fontsize=12)
# Finalize layout and save before plt.show(), which clears the active figure
plt.tight_layout()
plt.savefig("B-H AutoML Model Performance.png")
plt.show()