GSA

Reputation: 793

Iterate GridSearchCV Over Multiple Datasets and Classifiers (Python)

I have multiple datasets for which I want to estimate parameters using different classifiers (logistic regression and random forest).

I want to run each dataset through both classifiers using GridSearchCV and then get the best parameters for each classifier per dataset. I am just a bit stumped on how to go about that. My code is below.

# modules
import pandas as pd
import numpy as np

from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.base import BaseEstimator, TransformerMixin

# import preprocessing and pipeline modules
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline

# grid search module
from sklearn.model_selection import GridSearchCV

from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier

# features
X = {'df1': np.random.normal(0, 1, (200, 5)), 
    'df2': np.random.normal(0, 1, (200, 5))}

# labels
y = {'df1': np.random.choice([0, 1], 200), 
    'df2': np.random.choice([0, 1], 200)}

# numeric feature columns (positional indices, since X holds numpy arrays)
num_columns = list(range(X['df1'].shape[1]))
num_transformer = Pipeline([('imputer', IterativeImputer()),
                            ('scaler', StandardScaler())])

# column transformer
ct = ColumnTransformer([('numeric_pipeline', num_transformer, num_columns)])

# the classifiers
clf1 = LogisticRegression(solver='liblinear', random_state=None)
clf2 = RandomForestClassifier(random_state=None)

# pipeline
pipe = Pipeline([('ct', ct), ('classifier', clf1)])

params1 = {'classifier__penalty': ['l1', 'l2'],
           'classifier__C': [0.1, 1, 10],
           'classifier': [clf1]}

params2 = {'classifier__n_estimators': [100, 150, 200],
           'classifier__min_samples_leaf': [1, 2],
           'classifier': [clf2]}

params = [params1, params2]

gs = GridSearchCV(pipe, params)

# this is where I am stuck: X and y are dictionaries of datasets,
# so I don't know how to fit each one separately
gs.fit(X, y)

gs.best_params_

Upvotes: 0

Views: 365

Answers (2)

GSA

Reputation: 793

Using different classifiers/estimators, I was able to do what I asked about in the question. I am sure the code can be optimized.

Some of the ideas I used came from this Stack Overflow link.

Below is my attempt at answering the question I asked, using anomaly detection estimators instead of logistic regression and random forest.

# modules
import pandas as pd
import numpy as np

from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.base import BaseEstimator, TransformerMixin

# import preprocessing and pipeline modules
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline

# grid search module
from sklearn.model_selection import GridSearchCV

# the anomaly detection estimators
from sklearn.ensemble import IsolationForest
from inne import IsolationNNE

from scorers import scorer_decision  # user defined scoring

# define numeric columns (taken from one of the dataframes in the dictionary X)
num_columns = list(X['df1'].columns)

class GSP:
    def __init__(self):
        pass

    def mods(self, x):
        # num_columns (defined above) lists the numeric columns in the dataframes
        num_transformer = Pipeline([('imputer', IterativeImputer()),
                                    ('scaler', StandardScaler())])

        # column transformer
        ct = ColumnTransformer([('numeric_pipeline', num_transformer, num_columns)])

        # classifiers
        clf1 = IsolationForest(n_jobs=-1, random_state=None, bootstrap=False)
        clf2 = IsolationNNE(random_state=None)

        # pipeline
        pipe = Pipeline([('ct', ct), ('classifier', clf1)])

        # grid search parameters
        num_estimators = list(np.linspace(100, 200, num=5, endpoint=True).astype(int))
        max_samples = list(np.linspace(0.70, 1.00, num=5))
        contamination = list(np.linspace(0.05, 0.10, num=5, endpoint=True))
        max_features = [0.25, 0.50, 0.75, 0.80, 0.90, 1.00]
        
        params1 = {# set isolation forest grid parameters
           'classifier__n_estimators': num_estimators,
           'classifier__max_samples': max_samples,
           'classifier__contamination': contamination,
           'classifier__max_features': max_features,
           'classifier': [clf1]}
        
        params2 = {# set inne grid parameters
                   'classifier__n_estimators': num_estimators,
                   'classifier__max_samples': max_samples,
                   'classifier__contamination': contamination,
                   'classifier': [clf2]}

        params = [params1, params2]

        gsresults = pd.DataFrame()
        for key in x.keys():
            print('running key:', key)
            gs = GridSearchCV(estimator=pipe,
                              param_grid=params,
                              cv=2,
                              n_jobs=4,
                              verbose=1, 
                              scoring=scorer_decision,
                              error_score='raise',
                              refit=True)

            # fit the model
            gs.fit(x[key])
            dftemp = pd.DataFrame(gs.cv_results_)
            dftemp['dataset'] = key
    
            gsresults = pd.concat([gsresults, dftemp], ignore_index=True)
            gsresults = (gsresults.set_index(gsresults['params']
                    .apply(lambda x: ' '.join(str(val) for val in x.values()))).rename_axis('kernel'))
            selected_columns = ['dataset', 'mean_test_score', 'rank_test_score',
                                'param_classifier', 'param_classifier__contamination',
                                'param_classifier__max_features', 'param_classifier__max_samples',
                                'param_classifier__n_estimators']
            gsresults2 = (gsresults.loc[:, selected_columns]
                            .rename(columns={'mean_test_score': 'mean_score',
                                             'rank_test_score': 'rank_score',
                                             'param_classifier': 'classifier', 
                                             'param_classifier__contamination': 'contamination',
                                             'param_classifier__max_features': 'max_features', 
                                             'param_classifier__max_samples': 'max_samples',
                                             'param_classifier__n_estimators': 'n_estimators'}))
            gsresults3 = (gsresults2.sort_values(['rank_score', 'mean_score'], ascending=True)
                                              .groupby(['dataset']))
            
            # check output by dataframes    
            dfs = {}
            for key, df in gsresults3:
                dfs[key] = df

        return dfs

# calling the mods method returns a dictionary of dataframes
best_params = GSP().mods(X)

Note: the models are fitted on a dictionary of dataframes, X.
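
If you only need the single best parameter combination per dataset, you can read it off the returned dictionary. A minimal sketch, assuming best_params is the dictionary of dataframes returned by mods() above and that each dataframe still carries the rank_score, classifier and mean_score columns created there:

# pick the top-ranked row for each dataset
for dataset, df in best_params.items():
    top = df.loc[df['rank_score'] == df['rank_score'].min()].iloc[0]
    print(dataset, '->', top['classifier'], 'mean score:', top['mean_score'])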

Upvotes: 0

ASH

Reputation: 20302

How about this? It uses LightGBM and randomly samples hyperparameters from a grid rather than exhaustively searching it with GridSearchCV, but the idea of defining a parameter grid per model carries over.

# Pandas and numpy for data manipulation
import pandas as pd
import numpy as np

# Modeling
import lightgbm as lgb

# Evaluation of the model
from sklearn.model_selection import KFold

MAX_EVALS = 500
N_FOLDS = 10

# Read in data and separate into training and testing sets
data = pd.read_csv('C:\\caravan-insurance-challenge.csv')
train = data[data['ORIGIN'] == 'train']
test = data[data['ORIGIN'] == 'test']

# Extract the labels and format properly
train_labels = np.array(train['CARAVAN'].astype(np.int32)).reshape((-1,))
test_labels = np.array(test['CARAVAN'].astype(np.int32)).reshape((-1,))

# Drop the unneeded columns
train = train.drop(columns = ['ORIGIN', 'CARAVAN'])
test = test.drop(columns = ['ORIGIN', 'CARAVAN'])

# Convert to numpy array for splitting in cross validation
features = np.array(train)
test_features = np.array(test)
labels = train_labels[:]

print('Train shape: ', train.shape)
print('Test shape: ', test.shape)
train.head()

import matplotlib.pyplot as plt
import seaborn as sns

plt.hist(labels, edgecolor = 'k'); 
plt.xlabel('Label'); plt.ylabel('Count'); plt.title('Counts of Labels')


# Model with default hyperparameters
model = lgb.LGBMClassifier()
model



from sklearn.metrics import roc_auc_score
from timeit import default_timer as timer

start = timer()
model.fit(features, labels)
train_time = timer() - start

predictions = model.predict_proba(test_features)[:, 1]
auc = roc_auc_score(test_labels, predictions)

print('The baseline score on the test set is {:.4f}.'.format(auc))
print('The baseline training time is {:.4f} seconds'.format(train_time))


import random


# Hyperparameter grid
param_grid = {
    'class_weight': [None, 'balanced'],
    'boosting_type': ['gbdt', 'goss', 'dart'],
    'num_leaves': list(range(30, 150)),
    'learning_rate': list(np.logspace(np.log(0.005), np.log(0.2), base = np.exp(1), num = 1000)),
    'subsample_for_bin': list(range(20000, 300000, 20000)),
    'min_child_samples': list(range(20, 500, 5)),
    'reg_alpha': list(np.linspace(0, 1)),
    'reg_lambda': list(np.linspace(0, 1)),
    'colsample_bytree': list(np.linspace(0.6, 1, 10))
}

# Subsampling (only applicable with 'goss')
subsample_dist = list(np.linspace(0.5, 1, 100))


plt.hist(param_grid['learning_rate'], color = 'r', edgecolor = 'k');
plt.xlabel('Learning Rate', size = 14); plt.ylabel('Count', size = 14); plt.title('Learning Rate Distribution', size = 18)

plt.hist(param_grid['num_leaves'], color = 'm', edgecolor = 'k')
plt.xlabel('Learning Number of Leaves', size = 14); plt.ylabel('Count', size = 14); plt.title('Number of Leaves Distribution', size = 18)


# Randomly sample parameters for gbm
params = {key: random.sample(value, 1)[0] for key, value in param_grid.items()}
params


params['subsample'] = random.sample(subsample_dist, 1)[0] if params['boosting_type'] != 'goss' else 1.0
params

Result:

{'class_weight': 'balanced',
 'boosting_type': 'goss',
 'num_leaves': 58,
 'learning_rate': 0.010197109660117238,
 'subsample_for_bin': 40000,
 'min_child_samples': 230,
 'reg_alpha': 0.7755102040816326,
 'reg_lambda': 0.7755102040816326,
 'colsample_bytree': 0.8666666666666667,
 'subsample': 1.0}
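
The dictionary above is just one random draw; nothing has been evaluated yet. A minimal sketch of scoring a sampled parameter set with cross-validation, assuming features, labels and N_FOLDS are as defined earlier (the n_estimators value of 100 is an arbitrary choice here, not part of the sampled grid):

from sklearn.model_selection import cross_val_score

# score the randomly sampled hyperparameters with k-fold cross-validation
model = lgb.LGBMClassifier(n_estimators=100, **params)
cv_scores = cross_val_score(model, features, labels, cv=N_FOLDS, scoring='roc_auc')
print('Cross-validated AUC: {:.4f} +/- {:.4f}'.format(cv_scores.mean(), cv_scores.std()))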

Data:

https://www.kaggle.com/datasets/uciml/caravan-insurance-challenge

Source Code:

https://github.com/WillKoehrsen/hyperparameter-optimization/blob/master/Bayesian%20Hyperparameter%20Optimization%20of%20Gradient%20Boosting%20Machine.ipynb

Upvotes: 1
