diff --git a/h2o-algos/src/test/java/hex/ensemble/StackedEnsembleTest.java b/h2o-algos/src/test/java/hex/ensemble/StackedEnsembleTest.java
index 8277a4d135af..8b851ad6cae8 100644
--- a/h2o-algos/src/test/java/hex/ensemble/StackedEnsembleTest.java
+++ b/h2o-algos/src/test/java/hex/ensemble/StackedEnsembleTest.java
@@ -1,9 +1,6 @@
 package hex.ensemble;
 
-import hex.GLMHelper;
-import hex.Model;
-import hex.ModelMetrics;
-import hex.SplitFrame;
+import hex.*;
 import hex.ensemble.Metalearner.Algorithm;
 import hex.ensemble.StackedEnsembleModel.StackedEnsembleParameters;
 import hex.genmodel.utils.DistributionFamily;
@@ -754,6 +751,7 @@ public StackedEnsembleModel.StackedEnsembleOutput basicEnsemble(String training_
       stackedEnsembleParameters._base_models = new Key[] {gbm._key, drf._key};
       stackedEnsembleParameters._seed = seed;
       stackedEnsembleParameters._score_training_samples = 0; // don't subsample dataset for training metrics so we don't randomly fail the test
+      stackedEnsembleParameters._auc_type = MultinomialAucType.MACRO_OVO;
       // Invoke Stacked Ensemble and block till end
       StackedEnsemble stackedEnsembleJob = new StackedEnsemble(stackedEnsembleParameters);
       // Get the stacked ensemble
diff --git a/h2o-algos/src/test/java/hex/glm/GLMBasicTestMultinomial.java b/h2o-algos/src/test/java/hex/glm/GLMBasicTestMultinomial.java
index 3c23fe5b8280..c5f0baccfce8 100644
--- a/h2o-algos/src/test/java/hex/glm/GLMBasicTestMultinomial.java
+++ b/h2o-algos/src/test/java/hex/glm/GLMBasicTestMultinomial.java
@@ -1,10 +1,7 @@
 package hex.glm;
 
-import hex.CreateFrame;
-import hex.DataInfo;
-import hex.FrameSplitter;
+import hex.*;
 import hex.ModelMetricsBinomialGLM.ModelMetricsMultinomialGLM;
-import hex.SplitFrame;
 import hex.glm.GLMModel.GLMParameters;
 import hex.glm.GLMModel.GLMParameters.Family;
 import hex.glm.GLMModel.GLMParameters.Solver;
diff --git a/h2o-automl/src/main/java/ai/h2o/automl/AutoMLBuildSpec.java b/h2o-automl/src/main/java/ai/h2o/automl/AutoMLBuildSpec.java
index 6c742337daf3..8ab392fd8bb8 100644
--- a/h2o-automl/src/main/java/ai/h2o/automl/AutoMLBuildSpec.java
+++ b/h2o-automl/src/main/java/ai/h2o/automl/AutoMLBuildSpec.java
@@ -192,6 +192,7 @@ public static final class AutoMLCustomParameters extends Iced {
     // let's limit the list of allowed custom parameters by default for now: we can always decide to open this later.
     private static final String[] ALLOWED_PARAMETERS = {
             "monotone_constraints",
+            "auc_type"
 //            "ntrees",
     };
     private static final String ROOT_PARAM = "algo_parameters";
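
Editorial note, not part of the patch: the `MultinomialAucType` values wired through above (and whitelisted for AutoML in `ALLOWED_PARAMETERS`) encode two choices, how classes are paired (one-vs-rest `_OVR` vs. one-vs-one `_OVO`) and how the resulting per-class AUCs are averaged (plain `MACRO_` vs. class-frequency `WEIGHTED_`). A minimal Python sketch of the two averaging schemes, using made-up numbers:

```python
# Hypothetical per-class one-vs-rest AUCs and class frequencies (made-up values,
# for illustration only).
per_class_auc = {"low": 0.91, "mid": 0.78, "high": 0.85}
class_weight = {"low": 0.50, "mid": 0.30, "high": 0.20}   # relative frequencies, sum to 1

# MACRO_OVR-style aggregation: unweighted mean of the per-class AUCs.
macro_ovr = sum(per_class_auc.values()) / len(per_class_auc)

# WEIGHTED_OVR-style aggregation: mean weighted by class frequency.
weighted_ovr = sum(per_class_auc[c] * class_weight[c] for c in per_class_auc)

print(macro_ovr, weighted_ovr)   # ~0.847 vs. 0.859
```

The `_OVO` variants apply the same two averaging schemes to pairwise class AUCs instead of one-vs-rest ones.
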
diff --git a/h2o-py/h2o/automl/_estimator.py b/h2o-py/h2o/automl/_estimator.py
index 6009379be498..28f286d79799 100644
--- a/h2o-py/h2o/automl/_estimator.py
+++ b/h2o-py/h2o/automl/_estimator.py
@@ -156,6 +156,7 @@ def __init__(self,
                  custom_metric_func=None,
                  export_checkpoints_dir=None,
                  verbosity="warn",
+                 auc_type="AUTO",
                  **kwargs):
         """
         Create a new H2OAutoML instance.
@@ -296,6 +297,8 @@ def __init__(self,
         :param verbosity: Verbosity of the backend messages printed during training. Available options are ``None``
             (live log disabled), ``"debug"``, ``"info"``, ``"warn"`` or ``"error"``. Defaults to ``"warn"``.
+        :param auc_type: The type of multinomial AUC aggregation forwarded to every model trained by AutoML. Must be one of ``"AUTO"``, ``"NONE"``, ``"MACRO_OVR"``, ``"WEIGHTED_OVR"``, ``"MACRO_OVO"``, ``"WEIGHTED_OVO"``. Defaults to ``"AUTO"``.
+        :type auc_type: str, optional
         """
         # early validate kwargs, extracting hidden parameters:
@@ -359,6 +362,8 @@ def __init__(self,
         self.preprocessing = preprocessing
         if monotone_constraints is not None:
             algo_parameters['monotone_constraints'] = monotone_constraints
+        if auc_type is not None:
+            algo_parameters['auc_type'] = auc_type
         self._algo_parameters = algo_parameters
         self.sort_metric = sort_metric
@@ -438,6 +443,13 @@ def __validate_monotone_constraints(self, monotone_constraints):
         else:
             self._algo_parameters['monotone_constraints'] = monotone_constraints
         return self.__validate_algo_parameters(self._algo_parameters)
+
+    def validate_auc_type(self, auc_type):
+        if auc_type is None:
+            auc_type = "NONE"
+        auc_type = auc_type.upper()
+        auc_types = ['MACRO_OVO', 'WEIGHTED_OVO', 'MACRO_OVR', 'WEIGHTED_OVR', 'AUTO', 'NONE']
+        assert auc_type in auc_types, "The auc_type must be one of %s." % auc_types
 
     def __validate_algo_parameters(self, algo_parameters):
         if algo_parameters is None:
@@ -448,6 +460,8 @@ def __validate_algo_parameters(self, algo_parameters):
             if len(name) == 0:
                 name, scope = scope, 'any'
             value = [dict(key=k, value=v) for k, v in v.items()] if isinstance(v, dict) else v  # we can't use stringify_dict here as this will be converted into a JSON string
+            if k == "auc_type":
+                self.validate_auc_type(v)
             algo_parameters_json.append(dict(scope=scope, name=name, value=value))
         return algo_parameters_json
@@ -521,6 +535,7 @@ def __validate_distribution(self, distribution):
                                       validate_fn=__validate_preprocessing)
     monotone_constraints = _aml_property('build_models.algo_parameters', name='monotone_constraints', types=(None, dict), freezable=True,
                                          validate_fn=__validate_monotone_constraints)
+    auc_type = _aml_property('build_models.algo_parameters', name='auc_type', types=(None, str), freezable=True)
     _algo_parameters = _aml_property('build_models.algo_parameters', types=(None, dict), freezable=True,
                                      validate_fn=__validate_algo_parameters)
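
Illustrative usage sketch of the new Python argument (not part of the patch; the dataset path and the `label` column are hypothetical, and Stacked Ensembles are excluded for the same reason as in the test below):

```python
import h2o
from h2o.automl import H2OAutoML

h2o.init()
train = h2o.import_file("path/to/multiclass_train.csv")   # hypothetical dataset
train["label"] = train["label"].asfactor()                # hypothetical multiclass response

# auc_type is forwarded to every model AutoML builds; Stacked Ensembles are excluded
# because they do not use auc_type yet (see the issue referenced in the test below).
aml = H2OAutoML(max_models=2, auc_type="WEIGHTED_OVR", exclude_algos=["StackedEnsemble"])
aml.train(y="label", training_frame=train)

# With auc_type set, the aggregated multinomial AUC table is available on the leader.
print(aml.leader.model_performance().multinomial_auc_table())
```
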
diff --git a/h2o-py/tests/testdir_algos/automl/pyunit_automl_multiclass.py b/h2o-py/tests/testdir_algos/automl/pyunit_automl_multiclass.py
index 3c80491d7c7b..44049f069bba 100644
--- a/h2o-py/tests/testdir_algos/automl/pyunit_automl_multiclass.py
+++ b/h2o-py/tests/testdir_algos/automl/pyunit_automl_multiclass.py
@@ -12,10 +12,37 @@ def test_default_automl_with_multiclass_task():
     aml = H2OAutoML(max_models=2, project_name='aml_multiclass')
-    aml.train(y=ds.target, training_frame=ds.train, validation_frame=ds.valid, leaderboard_frame=ds.test)
+    model = aml.train(y=ds.target, training_frame=ds.train, validation_frame=ds.valid, leaderboard_frame=ds.test)
     print(aml.leader)
     print(aml.leaderboard)
     assert aml.leaderboard.columns == ["model_id", "mean_per_class_error", "logloss", "rmse", "mse"]
+    auc_table = model.model_performance().multinomial_auc_table()
+    print(auc_table)
+    assert "AUC table was not computed" in str(auc_table), "The multinomial AUC table should not be computed."
+
+    # test setting auc_type
+    auc_type = "WEIGHTED_OVR"
+    aml2 = H2OAutoML(max_models=2,
+                     project_name='aml_multiclass_auc_type',
+                     # auc_type is not implemented in the StackedEnsemble model yet
+                     # (see https://github.com/h2oai/h2o-3/issues/16373)
+                     exclude_algos=["StackedEnsemble"],
+                     auc_type=auc_type)
+    model = aml2.train(y=ds.target, training_frame=ds.train, validation_frame=ds.valid, leaderboard_frame=ds.test)
+    print(model)
+    print(model.params["auc_type"])
+    assert auc_type == model.params["auc_type"]["input"], "The auc_type parameter should be the same."
+    auc_table = model.model_performance().multinomial_auc_table()
+    print(auc_table)
+    assert "AUC table was not computed" not in str(auc_table), "The multinomial AUC table should be calculated."
+
+    # wrong auc_type
+    try:
+        H2OAutoML(max_models=2,
+                  project_name='aml_multiclass_auc_type',
+                  auc_type="ABC")
+        assert False, "The H2OAutoML constructor should fail for an invalid auc_type."
+    except AssertionError as e:
+        assert "The auc_type must be one of ['MACRO_OVO', 'WEIGHTED_OVO', 'MACRO_OVR', 'WEIGHTED_OVR', 'AUTO', 'NONE']" in str(e), "Model build should fail."
 
 
 pu.run_tests([
diff --git a/h2o-r/h2o-package/R/automl.R b/h2o-r/h2o-package/R/automl.R
index 4030a0eb8a2f..88d77fef1eee 100644
--- a/h2o-r/h2o-package/R/automl.R
+++ b/h2o-r/h2o-package/R/automl.R
@@ -69,6 +69,7 @@
 #' @param export_checkpoints_dir (Optional) Path to a directory where every model will be stored in binary form.
 #' @param verbosity Verbosity of the backend messages printed during training; Optional.
 #'        Must be one of NULL (live log disabled), "debug", "info", "warn", "error". Defaults to "warn".
+#' @param auc_type (Optional) The type of multinomial AUC aggregation forwarded to every trained model. Must be one of "AUTO", "NONE", "MACRO_OVR", "WEIGHTED_OVR", "MACRO_OVO", "WEIGHTED_OVO". Defaults to "NONE".
 #' @param ... Additional (experimental) arguments to be passed through; Optional.
 #' @return An \linkS4class{H2OAutoML} object.
 #' @details AutoML trains several models, cross-validated by default, by using the following available algorithms:
@@ -138,6 +139,7 @@ h2o.automl <- function(x, y, training_frame,
                        sort_metric = c("AUTO", "deviance", "logloss", "MSE", "RMSE", "MAE", "RMSLE", "AUC", "AUCPR", "mean_per_class_error"),
                        export_checkpoints_dir = NULL,
                        verbosity = "warn",
+                       auc_type = "NONE",
                        ...)
 {
   dots <- list(...)
@@ -339,6 +341,12 @@ h2o.automl <- function(x, y, training_frame,
     if(is.null(algo_parameters)) algo_parameters <- list()
     algo_parameters$monotone_constraints <- monotone_constraints
   }
+  if (!is.null(auc_type)) {
+    if(!(toupper(auc_type) %in% list("MACRO_OVO", "WEIGHTED_OVO", "MACRO_OVR", "WEIGHTED_OVR", "NONE", "AUTO")))
+      stop("The auc_type must be MACRO_OVO, WEIGHTED_OVO, MACRO_OVR, WEIGHTED_OVR, NONE or AUTO.")
+    if(is.null(algo_parameters)) algo_parameters <- list()
+    algo_parameters$auc_type <- auc_type
+  }
   if (!is.null(algo_parameters)) {
     keys <- names(algo_parameters)
     algo_parameters_json <- lapply(keys, function(k) {
diff --git a/h2o-r/tests/testdir_algos/automl/runit_automl_multinomial.R b/h2o-r/tests/testdir_algos/automl/runit_automl_multinomial.R
index 0ec1a9042dbc..a07ee079db62 100644
--- a/h2o-r/tests/testdir_algos/automl/runit_automl_multinomial.R
+++ b/h2o-r/tests/testdir_algos/automl/runit_automl_multinomial.R
@@ -21,6 +21,24 @@ automl.multinomial.test <- function() {
 
   # Check that there's a StackedEnsemble model in the leaderboard
   expect_true(sum(grepl("StackedEnsemble", as.vector(aml@leaderboard$model_id))) > 0)
+
+  # Check auc_type setting
+  aml2 <- h2o.automl(x = 1:4,
+                     y = 5,
+                     training_frame = train,
+                     project_name = "automl.multinomial.test.auc",
+                     seed = 1,
+                     max_models = 3,
+                     exclude_algos = list("StackedEnsemble"),
+                     auc_type = "MACRO_OVO")
+  model <- aml2@leader
+  tr_mm <- model@model$training_metrics
+  print(tr_mm)
+  tr_auc <- tr_mm@metrics$AUC
+  perf <- h2o.performance(model = model, newdata = train, auc_type = "MACRO_OVO")
+  perf_auc <- h2o.auc(perf)
+  print(paste(tr_auc, "=", perf_auc))
+  expect_equal(tr_auc, perf_auc)
 }
 
 doTest("AutoML Multinomial Test", automl.multinomial.test)
diff --git a/h2o-r/tests/testdir_algos/gbm/runit_GBM_iris_multinomial_auc.R b/h2o-r/tests/testdir_algos/gbm/runit_GBM_iris_multinomial_auc.R
index 5d0a87855a5f..a99f73b9cc1b 100644
--- a/h2o-r/tests/testdir_algos/gbm/runit_GBM_iris_multinomial_auc.R
+++ b/h2o-r/tests/testdir_algos/gbm/runit_GBM_iris_multinomial_auc.R
@@ -3,31 +3,34 @@ source("../../../scripts/h2o-r-test-setup.R")
 
 test.GBM.iris.multinomial.auc <- function() {
   prostate <- h2o.importFile(path = "http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv")
 
   # Split dataset giving the training dataset 75% of the data
   prostate_split <- h2o.splitFrame(data = prostate, ratios = 0.75)
 
-  response_col = "GLEASON"
+  response_col <- "GLEASON"
 
   # Create a training set from the 1st dataset in the split
   train.hex <- prostate_split[[1]]
-  train.hex[, response_col] = as.factor(train.hex[, response_col])
+  train.hex[, response_col] <- as.factor(train.hex[, response_col])
 
   # Create a testing set from the 2nd dataset in the split
   test.hex <- prostate_split[[2]]
-  test.hex[, response_col] = as.factor(test.hex[, response_col])
+  test.hex[, response_col] <- as.factor(test.hex[, response_col])
 
-  predictors = c("RACE", "AGE", "PSA", "DPROS", "CAPSULE", "VOL", "DCAPS")
+  predictors <- c("RACE", "AGE", "PSA", "DPROS", "CAPSULE", "VOL", "DCAPS")
 
   # Build GBM model
   iris.gbm <- h2o.gbm(y=response_col, x=predictors, distribution="multinomial", training_frame=train.hex, ntrees=1, max_depth=2, min_rows=20)
 
   # Score test data with different default auc_type (previous was "NONE", so no AUC calculation)
-  perf <- h2o.performance(iris.gbm, test.hex, auc_type="WEIGHTED_OVO")
+  auc_type <- "WEIGHTED_OVO"
+  perf <- h2o.performance(iris.gbm, test.hex, auc_type=auc_type)
 
   # Check default AUC is set correctly
-  auc_table = h2o.multinomial_auc_table(perf)
+  auc_table <- h2o.multinomial_auc_table(perf)
   default_auc <- h2o.auc(perf)
   weighted_ovo_auc <- auc_table[32, 4] # weighted ovo AUC is the last number in the table
@@ -37,11 +40,25 @@ test.GBM.iris.multinomial.auc <- function() {
   print(auc_table)
 
   #Test auc_type is set and newdata is NULL
-  perf2 <- h2o.performance(iris.gbm, train=TRUE, auc_type="WEIGHTED_OVO")
+  perf2 <- h2o.performance(iris.gbm, train=TRUE, auc_type=auc_type)
   auc <- h2o.auc(perf2)
   print(auc)
   expect_true(auc == "NaN")
 
+  # Build GBM model with auc_type
+  iris.gbm <- h2o.gbm(y=response_col, x=predictors, distribution="multinomial", training_frame=train.hex, ntrees=1, max_depth=2, min_rows=20, auc_type=auc_type)
+  mm <- iris.gbm@model$training_metrics
+  print("AUC auc_type set")
+  auc_table <- h2o.multinomial_auc_table(mm)
+  default_auc <- h2o.auc(mm)
+  weighted_ovo_auc <- auc_table[32, 4] # weighted ovo AUC is the last number in the table
+
+  expect_equal(default_auc, weighted_ovo_auc)
+  print(paste(weighted_ovo_auc, "=", default_auc))
+  print(mm)
+  print(auc_table)
+
+
   # Build GBM model with cv
   iris.gbm <- h2o.gbm(y=response_col, x=predictors, distribution="multinomial", training_frame=train.hex, validation_frame=test.hex, ntrees=5, max_depth=2, min_rows=20, nfold=3)
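
A rough Python counterpart of the R GBM test above (not part of the patch; it assumes `H2OGradientBoostingEstimator` and `model_performance()` accept the same `auc_type` values as the R `h2o.gbm` and `h2o.performance` calls in this patch):

```python
import h2o
from h2o.estimators import H2OGradientBoostingEstimator

h2o.init()
prostate = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv")
prostate["GLEASON"] = prostate["GLEASON"].asfactor()

# Build the model with auc_type so the training metrics already contain the AUC table
# (assumed Python analogue of the h2o.gbm(..., auc_type=...) call above).
gbm = H2OGradientBoostingEstimator(ntrees=1, max_depth=2, min_rows=20,
                                   distribution="multinomial", auc_type="WEIGHTED_OVO")
gbm.train(x=["RACE", "AGE", "PSA", "DPROS", "CAPSULE", "VOL", "DCAPS"],
          y="GLEASON", training_frame=prostate)
print(gbm.model_performance(train=True).multinomial_auc_table())

# Scoring new data with an explicit auc_type is assumed to mirror R's h2o.performance().
perf = gbm.model_performance(prostate, auc_type="WEIGHTED_OVO")
print(perf.multinomial_auc_table())
```
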