| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GLMV3.java |
package hex.schemas;
import hex.glm.GLM;
import hex.glm.GLMModel.GLMParameters;
import hex.glm.GLMModel.GLMParameters.Solver;
import water.api.API;
import water.api.API.Direction;
import water.api.API.Level;
import water.api.schemas3.KeyV3.FrameKeyV3;
import water.api.schemas3.ModelParametersSchemaV3;
import water.api.schemas3.StringPairV3;
/**
* Created by tomasnykodym on 8/29/14.
*/
public class GLMV3 extends ModelBuilderSchema<GLM,GLMV3,GLMV3.GLMParametersV3> {
public static final class GLMParametersV3 extends ModelParametersSchemaV3<GLMParameters, GLMParametersV3> {
public static final String[] fields = new String[]{
"model_id",
"training_frame",
"validation_frame",
"nfolds",
"checkpoint",
"export_checkpoints_dir",
"seed",
"keep_cross_validation_models",
"keep_cross_validation_predictions",
"keep_cross_validation_fold_assignment",
"fold_assignment",
"fold_column",
"response_column",
"ignored_columns",
"ignore_const_cols",
"score_each_iteration",
"score_iteration_interval",
"offset_column",
"weights_column",
"family",
"tweedie_variance_power",
"tweedie_link_power",
"theta", // equals to 1/r and should be > 0 and <=1, used by negative binomial
"solver",
"alpha",
"lambda",
"lambda_search",
"early_stopping",
"nlambdas",
"standardize",
"missing_values_handling",
"plug_values",
"compute_p_values",
"dispersion_parameter_method",
"init_dispersion_parameter",
"remove_collinear_columns",
"intercept",
"non_negative",
"max_iterations",
"objective_epsilon",
"beta_epsilon",
"gradient_epsilon",
"link",
"startval", // initial starting values for coefficients, double array
"calc_like", // true will return likelihood function value
"prior",
"cold_start", // if true, will start GLM model from initial values and conditions
"lambda_min_ratio",
"beta_constraints",
"max_active_predictors",
"interactions",
"interaction_pairs",
"obj_reg",
"stopping_rounds",
"stopping_metric",
"stopping_tolerance",
// dead unused args forced here by backwards compatibility, remove in V4
"balance_classes",
"class_sampling_factors",
"max_after_balance_size",
"max_confusion_matrix_size",
"max_runtime_secs",
"custom_metric_func",
"generate_scoring_history",
"auc_type",
"dispersion_epsilon",
"tweedie_epsilon",
"max_iterations_dispersion",
"build_null_model",
"fix_dispersion_parameter",
"generate_variable_inflation_factors",
"fix_tweedie_variance_power",
"dispersion_learning_rate",
"influence",
"gainslift_bins",
"linear_constraints",
"init_optimal_glm", // default to true
"separate_linear_beta", // default to false
"constraint_eta0", // default to 0.1258925
"constraint_tau", // default to 10
"constraint_alpha", // default to 0.1
"constraint_beta", // default to 0.9
"constraint_c0", // default to 10
};
@API(help = "Seed for pseudo random number generator (if applicable).", gridable = true)
public long seed;
// Input fields
@API(help = "Family. Use binomial for classification with logistic regression, others are for regression problems.",
values = {"AUTO", "gaussian", "binomial", "fractionalbinomial", "quasibinomial", "ordinal", "multinomial",
"poisson", "gamma", "tweedie", "negativebinomial"}, level = Level.critical)
// took tweedie out since it's not reliable
public GLMParameters.Family family;
@API(help = "Tweedie variance power", level = Level.critical, gridable = true)
public double tweedie_variance_power;
@API(help = "Dispersion learning rate is only valid for tweedie family dispersion parameter estimation using ml. " +
"It must be > 0. This controls how much the dispersion parameter estimate is to be changed when the" +
" calculated loglikelihood actually decreases with the new dispersion. In this case, instead of setting" +
" new dispersion = dispersion + change, we set new dispersion = dispersion + dispersion_learning_rate * change. " +
"Defaults to 0.5.", level = Level.expert, gridable = true)
public double dispersion_learning_rate;
@API(help = "Tweedie link power.", level = Level.critical, gridable = true)
public double tweedie_link_power;
@API(help = "Theta", level = Level.critical, gridable = true)
public double theta; // used by negative binomial distribution family
@API(help = "AUTO will set the solver based on given data and the other parameters. IRLSM is fast on on problems" +
" with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for datasets" +
" with many columns.", values = {"AUTO", "IRLSM", "L_BFGS","COORDINATE_DESCENT_NAIVE",
"COORDINATE_DESCENT", "GRADIENT_DESCENT_LH", "GRADIENT_DESCENT_SQERR"}, level = Level.critical)
public Solver solver;
@API(help = "Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for " +
"alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between " +
"specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS'; 0.5" +
" otherwise.", level = Level.critical, gridable = true)
public double[] alpha;
@API(help = "Regularization strength", required = false, level = Level.critical, gridable = true)
public double[] lambda;
@API(help = "Use lambda search starting at lambda max, given lambda is then interpreted as lambda min.",
level = Level.critical)
public boolean lambda_search;
@API(help="Stop early when there is no more relative improvement on train or validation (if provided).")
public boolean early_stopping;
@API(help = "Number of lambdas to be used in a search." +
" Default indicates: If alpha is zero, with lambda search" +
" set to True, the value of nlamdas is set to 30 (fewer lambdas" +
" are needed for ridge regression) otherwise it is set to 100.", level = Level.critical)
public int nlambdas;
@API(help = "Perform scoring for every score_iteration_interval iterations.", level = Level.secondary)
public int score_iteration_interval;
@API(help = "Standardize numeric columns to have zero mean and unit variance.", level = Level.critical,
gridable = true)
public boolean standardize;
@API(help = "Only applicable to multiple alpha/lambda values. If false, build the next model for next set of " +
"alpha/lambda values starting from the values provided by current model. If true will start GLM model " +
"from scratch.", level = Level.critical)
public boolean cold_start;
@API(help = "Handling of missing values. Either MeanImputation, Skip or PlugValues.",
values = { "MeanImputation", "Skip", "PlugValues" }, level = API.Level.expert,
direction=API.Direction.INOUT, gridable = true)
public GLMParameters.MissingValuesHandling missing_values_handling;
@API(help = "If set to dfbetas will calculate the difference in beta when a datarow is included and excluded in " +
"the dataset.", values = { "dfbetas" }, level = API.Level.expert, gridable = false)
public GLMParameters.Influence influence;
@API(help = "Plug Values (a single row frame containing values that will be used to impute missing values of the" +
" training/validation frame, use with conjunction missing_values_handling = PlugValues).",
direction = API.Direction.INPUT)
public FrameKeyV3 plug_values;
@API(help = "Restrict coefficients (not intercept) to be non-negative.")
public boolean non_negative;
@API(help = "Maximum number of iterations. Value should >=1. A value of 0 is only set when only the model " +
"coefficient names and model coefficient dimensions are needed.", level = Level.secondary)
public int max_iterations;
@API(help = "Converge if beta changes less (using L-infinity norm) than beta esilon. ONLY applies to IRLSM solver."
, level = Level.expert)
public double beta_epsilon;
@API(help = "Converge if objective value changes less than this."+ " Default (of -1.0) indicates: If lambda_search"+
" is set to True the value of objective_epsilon is set to .0001. If the lambda_search is set to False" +
" and lambda is equal to zero, the value of objective_epsilon is set to .000001, for any other value" +
" of lambda the default value of objective_epsilon is set to .0001.", level = API.Level.expert)
public double objective_epsilon;
@API(help = "Converge if objective changes less (using L-infinity norm) than this, ONLY applies to L-BFGS" +
" solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda is equal to zero, the" +
" default value of gradient_epsilon is equal to .000001, otherwise the default value is .0001. If " +
"lambda_search is set to True, the conditional values above are 1E-8 and 1E-6 respectively.",
level = API.Level.expert)
public double gradient_epsilon;
@API(help="Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs.")
public double obj_reg;
@API(help = "Link function.", level = Level.secondary, values = {"family_default", "identity", "logit", "log",
"inverse", "tweedie", "ologit"}) //"oprobit", "ologlog": will be supported.
public GLMParameters.Link link;
@API(help="Method used to estimate the dispersion parameter for Tweedie, Gamma and Negative Binomial only.",
level = Level.secondary, values={"deviance", "pearson", "ml"})
public GLMParameters.DispersionMethod dispersion_parameter_method;
@API(help = "double array to initialize coefficients for GLM. If standardize is true, the standardized " +
"coefficients should be used. Otherwise, use the regular coefficients.", gridable=true)
public double[] startval;
@API(help = "if true, will return likelihood function value.") // not gridable
public boolean calc_like;
@API(help="if true, will generate variable inflation factors for numerical predictors. Default to false.",
level = Level.expert)
public boolean generate_variable_inflation_factors;
@API(help="Include constant term in the model", level = Level.expert)
public boolean intercept;
@API(help="If set, will build a model with only the intercept. Default to false.", level = Level.expert)
public boolean build_null_model;
@API(help="Only used for Tweedie, Gamma and Negative Binomial GLM. If set, will use the dispsersion parameter" +
" in init_dispersion_parameter as the standard error and use it to calculate the p-values. Default to" +
" false.", level=Level.expert)
public boolean fix_dispersion_parameter;
@API(help="Only used for Tweedie, Gamma and Negative Binomial GLM. Store the initial value of dispersion " +
"parameter. If fix_dispersion_parameter is set, this value will be used in the calculation of p-values.",
level=Level.expert, gridable=true)
public double init_dispersion_parameter;
@API(help = "Prior probability for y==1. To be used only for logistic regression iff the data has been sampled and" +
" the mean of response does not reflect reality.", level = Level.expert)
public double prior;
@API(help = "Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest lambda that " +
"drives all coefficients to zero). Default indicates: if the number of observations is greater than the" +
" number of variables, then lambda_min_ratio is set to 0.0001; if the number of observations is less than" +
" the number of variables, then lambda_min_ratio is set to 0.01.", level = Level.expert)
public double lambda_min_ratio;
@API(help = "Beta constraints", direction = API.Direction.INPUT /* Not required, to allow initial params validation: , required=true */)
public FrameKeyV3 beta_constraints;
@API(help = "Linear constraints: used to specify linear constraints involving more than one coefficients in " +
"standard form. It is only supported for solver IRLSM. It contains four columns: names (strings for " +
"coefficient names or constant), values, types ( strings of 'Equal' or 'LessThanEqual'), constraint_numbers" +
" (0 for first linear constraint, 1 for second linear constraint, ...).",
direction = API.Direction.INPUT /* Not required, to allow initial params validation: , required=true */)
public FrameKeyV3 linear_constraints;
@API(help="Maximum number of active predictors during computation. Use as a stopping criterion" +
" to prevent expensive model building with many predictors." + " Default indicates: If the IRLSM solver is used," +
" the value of max_active_predictors is set to 5000 otherwise it is set to 100000000.", direction = Direction.INPUT,
level = Level.expert)
public int max_active_predictors = -1;
@API(help="A list of predictor column indices to interact. All pairwise combinations will be computed for the " +
"list.", direction=Direction.INPUT, level=Level.expert)
public String[] interactions;
@API(help="A list of pairwise (first order) column interactions.", direction=Direction.INPUT, level=Level.expert)
public StringPairV3[] interaction_pairs;
// dead unused args, formerly inherited from supervised model schema
/**
* For imbalanced data, balance training data class counts via
* over/under-sampling. This can result in improved predictive accuracy.
*/
@API(help = "Balance training data class counts via over/under-sampling (for imbalanced data).",
level = API.Level.secondary, direction = API.Direction.INOUT)
public boolean balance_classes;
/**
* Desired over/under-sampling ratios per class (lexicographic order).
* Only when balance_classes is enabled.
* If not specified, they will be automatically computed to obtain class balance during training.
*/
@API(help = "Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling" +
" factors will be automatically computed to obtain class balance during training. Requires balance_classes.",
level = API.Level.expert, direction = API.Direction.INOUT)
public float[] class_sampling_factors;
/**
* When classes are balanced, limit the resulting dataset size to the
* specified multiple of the original dataset size.
*/
@API(help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0)." +
" Requires balance_classes.", /* dmin=1e-3, */ level = API.Level.expert, direction = API.Direction.INOUT)
public float max_after_balance_size;
/** For classification models, the maximum size (in terms of classes) of
* the confusion matrix for it to be printed. This option is meant to
* avoid printing extremely large confusion matrices. */
@API(help = "[Deprecated] Maximum size (# classes) for confusion matrices to be printed in the Logs.",
level = API.Level.secondary, direction = API.Direction.INOUT)
public int max_confusion_matrix_size;
@API(help="Request p-values computation, p-values work only with IRLSM solver.", level = Level.secondary)
public boolean compute_p_values;
@API(help="If true, will fix tweedie variance power value to the value set in tweedie_variance_power.",
level=Level.secondary, direction=Direction.INPUT)
public boolean fix_tweedie_variance_power;
@API(help="In case of linearly dependent columns, remove the dependent columns.", level = Level.secondary)
public boolean remove_collinear_columns; // _remove_collinear_columns
@API(help = "If changes in dispersion parameter estimation or loglikelihood value is smaller than " +
"dispersion_epsilon, will break out of the dispersion parameter estimation loop using maximum " +
"likelihood.", level = API.Level.secondary, direction = API.Direction.INOUT)
public double dispersion_epsilon;
@API(help = "In estimating tweedie dispersion parameter using maximum likelihood, this is used to choose the lower" +
" and upper indices in the approximating of the infinite series summation.",
level = API.Level.secondary, direction = API.Direction.INOUT)
public double tweedie_epsilon;
@API(help = "Control the maximum number of iterations in the dispersion parameter estimation loop using maximum" +
" likelihood.", level = API.Level.secondary, direction = API.Direction.INOUT)
public int max_iterations_dispersion;
@API(help="If set to true, will generate scoring history for GLM. This may significantly slow down the algo.",
level = Level.secondary, direction = Direction.INPUT)
public boolean generate_scoring_history; // if enabled, will generate scoring history for iterations specified in
// score_iteration_interval and score_each_iteration
@API(help="If true, will initialize coefficients with values derived from GLM runs without linear constraints. " +
"Only available for linear constraints.", level = API.Level.secondary,
direction = API.Direction.INOUT, gridable = true)
public boolean init_optimal_glm;
@API(help="If true, will keep the beta constraints and linear constraints separate. After new coefficients are " +
"found, first beta constraints will be applied followed by the application of linear constraints. Note " +
"that the beta constraints in this case will not be part of the objective function. If false, will" +
" combine the beta and linear constraints.", level = API.Level.secondary,
direction = API.Direction.INOUT, gridable = true)
public boolean separate_linear_beta;
@API(help="For constrained GLM only. It affects the setting of eta_k+1=eta_0/power(ck+1, alpha).",
level = API.Level.expert, direction = API.Direction.INOUT, gridable = true)
public double constraint_eta0;
@API(help="For constrained GLM only. It affects the setting of c_k+1=tau*c_k.",
level = API.Level.expert, direction = API.Direction.INOUT, gridable = true)
public double constraint_tau;
@API(help="For constrained GLM only. It affects the setting of eta_k = eta_0/pow(c_0, alpha).",
level = API.Level.expert, direction = API.Direction.INOUT, gridable = true)
public double constraint_alpha;
@API(help="For constrained GLM only. It affects the setting of eta_k+1 = eta_k/pow(c_k, beta).",
level = API.Level.expert, direction = API.Direction.INOUT, gridable = true)
public double constraint_beta;
@API(help="For constrained GLM only. It affects the initial setting of epsilon_k = 1/c_0.",
level = API.Level.expert, direction = API.Direction.INOUT, gridable = true)
public double constraint_c0;
/////////////////////
}
}
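A note on the data-dependent defaults quoted in several help strings above (alpha depends on the solver, nlambdas on alpha and lambda_search, objective_epsilon and gradient_epsilon on lambda_search and lambda). The sketch below restates those documented rules as plain Java; it is an illustration only, not H2O's builder code, and the method names and -1 sentinels are assumptions.

```java
// Sketch: the default-resolution rules described in the GLMParametersV3 help strings above.
// Not H2O's actual implementation; helper names are hypothetical.
public class GlmDefaultsSketch {
    static double defaultAlpha(String solver) {
        // "Default value of alpha is 0 when SOLVER = 'L-BFGS'; 0.5 otherwise."
        return "L_BFGS".equals(solver) ? 0.0 : 0.5;
    }
    static int defaultNlambdas(boolean lambdaSearch, double alpha) {
        // "If alpha is zero, with lambda search set to True, the value of nlambdas is set to 30
        //  (fewer lambdas are needed for ridge regression) otherwise it is set to 100."
        return (lambdaSearch && alpha == 0.0) ? 30 : 100;
    }
    static double defaultObjectiveEpsilon(boolean lambdaSearch, double lambda) {
        // "If lambda_search is set to True the value of objective_epsilon is set to .0001.
        //  If lambda_search is False and lambda is zero, it is set to .000001; otherwise .0001."
        if (lambdaSearch) return 1e-4;
        return lambda == 0.0 ? 1e-6 : 1e-4;
    }
    static double defaultGradientEpsilon(boolean lambdaSearch, double lambda) {
        // "If lambda_search is False and lambda is zero, the default is .000001, otherwise .0001.
        //  If lambda_search is True, the conditional values above are 1E-8 and 1E-6 respectively."
        if (!lambdaSearch) return lambda == 0.0 ? 1e-6 : 1e-4;
        return lambda == 0.0 ? 1e-8 : 1e-6;
    }
    public static void main(String[] args) {
        System.out.println(defaultAlpha("IRLSM"));              // 0.5
        System.out.println(defaultNlambdas(true, 0.0));         // 30
        System.out.println(defaultObjectiveEpsilon(false, 0));  // 1.0E-6
    }
}
```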
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GLRMModelV3.java |
package hex.schemas;
import hex.glrm.GLRMModel;
import water.api.*;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
import water.api.schemas3.TwoDimTableV3;
public class GLRMModelV3 extends ModelSchemaV3<GLRMModel, GLRMModelV3, GLRMModel.GLRMParameters, GLRMV3.GLRMParametersV3, GLRMModel.GLRMOutput, GLRMModelV3.GLRMModelOutputV3> {
public static final class GLRMModelOutputV3 extends ModelOutputSchemaV3<GLRMModel.GLRMOutput, GLRMModelOutputV3> {
// Output fields; input fields are in the parameters list
@API(help = "Number of iterations executed")
public int iterations;
@API(help = "Number of updates executed")
public int updates;
@API(help = "Current value of the objective function")
public double objective;
@API(help = "Average change in objective value on final iteration")
public double avg_change_obj;
@API(help = "Final step size")
public double step_size;
@API(help = "Mapping from lower dimensional k-space to training features (Y)")
public TwoDimTableV3 archetypes;
@API(help = "Singular values of XY matrix")
public double[] singular_vals;
@API(help = "Eigenvectors of XY matrix")
public TwoDimTableV3 eigenvectors;
@API(help = "Frame key name for X matrix")
public String representation_name;
@API(help = "Standard deviation and importance of each principal component")
public TwoDimTableV3 importance;
}
// TODO: I think we can implement the following two in ModelSchemaV3, using reflection on the type parameters.
public GLRMV3.GLRMParametersV3 createParametersSchema() { return new GLRMV3.GLRMParametersV3(); }
public GLRMModelOutputV3 createOutputSchema() { return new GLRMModelOutputV3(); }
// Version&Schema-specific filling into the impl
@Override public GLRMModel createImpl() {
GLRMModel.GLRMParameters parms = parameters.createImpl();
return new GLRMModel( model_id.key(), parms, null );
}
}
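For context on the output fields above: under the usual GLRM formulation the (transformed) training matrix A is approximated by a rank-k product X·Y, where Y is reported as `archetypes` and X lives in the frame named by `representation_name`. A minimal plain-array sketch of that reconstruction, with made-up shapes and values:

```java
// Sketch: rank-k reconstruction A_hat = X * Y from GLRM output.
// X is n-by-k (representation frame), Y is k-by-p (archetypes). Illustration only.
public class GlrmReconstructSketch {
    static double[][] reconstruct(double[][] x, double[][] y) {
        int n = x.length, k = y.length, p = y[0].length;
        double[][] aHat = new double[n][p];
        for (int i = 0; i < n; i++)
            for (int j = 0; j < p; j++)
                for (int f = 0; f < k; f++)
                    aHat[i][j] += x[i][f] * y[f][j];
        return aHat;
    }
    public static void main(String[] args) {
        double[][] x = {{1.0, 0.5}, {0.2, 0.8}};           // n=2, k=2
        double[][] y = {{0.3, 0.7, 0.1}, {0.9, 0.2, 0.4}}; // k=2, p=3
        System.out.println(java.util.Arrays.deepToString(reconstruct(x, y)));
    }
}
```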
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GLRMV3.java |
package hex.schemas;
import hex.DataInfo;
import hex.genmodel.algos.glrm.GlrmRegularizer;
import hex.glrm.GLRM;
import hex.glrm.GLRMModel.GLRMParameters;
import hex.genmodel.algos.glrm.GlrmLoss;
import hex.genmodel.algos.glrm.GlrmInitialization;
import hex.svd.SVDModel.SVDParameters;
import water.api.API;
import water.api.schemas3.ModelParametersSchemaV3;
import water.api.schemas3.KeyV3;
public class GLRMV3 extends ModelBuilderSchema<GLRM, GLRMV3, GLRMV3.GLRMParametersV3> {
public static final class GLRMParametersV3 extends ModelParametersSchemaV3<GLRMParameters, GLRMParametersV3> {
public static String[] fields = {
"model_id",
"training_frame",
"validation_frame",
"ignored_columns",
"ignore_const_cols",
"score_each_iteration",
"representation_name",
"loading_name",
"transform",
"k",
"loss",
"loss_by_col",
"loss_by_col_idx",
"multi_loss",
"period",
"regularization_x",
"regularization_y",
"gamma_x",
"gamma_y",
"max_iterations",
"max_updates",
"init_step_size",
"min_step_size",
"seed",
"init",
"svd_method",
"user_y",
"user_x",
"expand_user_y",
"impute_original",
"recover_svd",
"max_runtime_secs",
"export_checkpoints_dir"
};
@API(help = "Transformation of training data", values = { "NONE", "STANDARDIZE", "NORMALIZE", "DEMEAN", "DESCALE" }, gridable = true) // TODO: pull out of categorical class
public DataInfo.TransformType transform;
@API(help = "Rank of matrix approximation", required = true, gridable = true)
public int k;
@API(help = "Numeric loss function", values = { "Quadratic", "Absolute", "Huber", "Poisson", "Hinge", "Logistic", "Periodic" }, gridable = true) // TODO: pull out of enum class
public GlrmLoss loss;
@API(help = "Categorical loss function", values = { "Categorical", "Ordinal" }, gridable = true) // TODO: pull out of categorical class
public GlrmLoss multi_loss;
@API(help = "Loss function by column (override)", values = { "Quadratic", "Absolute", "Huber", "Poisson", "Hinge", "Logistic", "Periodic", "Categorical", "Ordinal" }, gridable = true)
public GlrmLoss[] loss_by_col;
@API(help = "Loss function by column index (override)")
public int[] loss_by_col_idx;
@API(help = "Length of period (only used with periodic loss function)", gridable = true)
public int period;
@API(help = "Regularization function for X matrix", values = { "None", "Quadratic", "L2", "L1", "NonNegative", "OneSparse", "UnitOneSparse", "Simplex" }, gridable = true) // TODO: pull out of categorical class
public GlrmRegularizer regularization_x;
@API(help = "Regularization function for Y matrix", values = { "None", "Quadratic", "L2", "L1", "NonNegative", "OneSparse", "UnitOneSparse", "Simplex" }, gridable = true) // TODO: pull out of categorical class
public GlrmRegularizer regularization_y;
@API(help = "Regularization weight on X matrix", gridable = true)
public double gamma_x;
@API(help = "Regularization weight on Y matrix", gridable = true)
public double gamma_y;
@API(help = "Maximum number of iterations", gridable = true)
public int max_iterations;
@API(help = "Maximum number of updates, defaults to 2*max_iterations", gridable = true)
public int max_updates;
@API(help = "Initial step size", gridable = true)
public double init_step_size;
@API(help = "Minimum step size", gridable = true)
public double min_step_size;
@API(help = "RNG seed for initialization", gridable = true)
public long seed;
@API(help = "Initialization mode", values = { "Random", "SVD", "PlusPlus", "User" }, gridable = true) // TODO: pull out of categorical class
public GlrmInitialization init;
@API(help = "Method for computing SVD during initialization (Caution: Randomized is currently experimental and unstable)", values = { "GramSVD", "Power", "Randomized" }, gridable = true) // TODO: pull out of enum class
public SVDParameters.Method svd_method;
@API(help = "User-specified initial Y")
public KeyV3.FrameKeyV3 user_y;
@API(help = "User-specified initial X")
public KeyV3.FrameKeyV3 user_x;
@API(help = "[Deprecated] Use representation_name instead. Frame key to save resulting X.")
public String loading_name;
@API(help = "Frame key to save resulting X")
public String representation_name;
@API(help = "Expand categorical columns in user-specified initial Y")
public boolean expand_user_y;
@API(help = "Reconstruct original training data by reversing transform")
public boolean impute_original;
@API(help = "Recover singular values and eigenvectors of XY")
public boolean recover_svd;
}
}
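The `loss` parameter above lists the per-entry numeric loss functions by name only. For reference, a hedged sketch of the textbook forms of three of them evaluated at a residual r = a - u; the exact scaling H2O uses internally may differ:

```java
// Sketch: textbook forms of three numeric GLRM losses listed above (Quadratic, Absolute, Huber),
// evaluated on a residual r = observed - reconstructed. Scaling may differ from H2O's code.
public class GlrmLossSketch {
    static double quadratic(double r) { return r * r; }
    static double absolute(double r)  { return Math.abs(r); }
    static double huber(double r) {
        // Quadratic near zero, linear in the tails (delta = 1 assumed here).
        return Math.abs(r) <= 1.0 ? 0.5 * r * r : Math.abs(r) - 0.5;
    }
    public static void main(String[] args) {
        double r = 2.5;
        System.out.printf("quadratic=%.2f absolute=%.2f huber=%.2f%n",
                quadratic(r), absolute(r), huber(r));
    }
}
```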
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GenericModelV3.java |
package hex.schemas;
import hex.generic.GenericModel;
import hex.generic.GenericModelOutput;
import hex.generic.GenericModelParameters;
import water.api.API;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
import water.api.schemas3.TwoDimTableV3;
public class GenericModelV3 extends ModelSchemaV3<GenericModel, GenericModelV3, GenericModelParameters, GenericV3.GenericParametersV3, GenericModelOutput, GenericModelV3.GenericModelOutputV3> {
@Override
public GenericV3.GenericParametersV3 createParametersSchema() {
return new GenericV3.GenericParametersV3();
}
@Override
public GenericModelOutputV3 createOutputSchema() {
return new GenericModelOutputV3();
}
public static final class GenericModelOutputV3 extends ModelOutputSchemaV3<GenericModelOutput, GenericModelOutputV3>{
@API(help="Variable Importances", direction=API.Direction.OUTPUT, level = API.Level.secondary)
TwoDimTableV3 variable_importances;
@API(help = "Short identifier of the original algorithm name", direction = API.Direction.OUTPUT,
level = API.Level.secondary)
String original_model_identifier;
@API(help = "Full, potentially long name of the original agorithm", direction = API.Direction.OUTPUT,
level = API.Level.secondary)
String original_model_full_name;
}
}
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GenericV3.java |
package hex.schemas;
import hex.generic.Generic;
import hex.generic.GenericModelParameters;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelParameterSchemaV3;
import water.api.schemas3.ModelParametersSchemaV3;
public class GenericV3 extends ModelBuilderSchema<Generic, GenericV3, GenericV3.GenericParametersV3> {
public static final class GenericParametersV3 extends ModelParametersSchemaV3<GenericModelParameters, GenericParametersV3> {
public static final String[] fields = new String[]{
"model_id",
"model_key",
"path"
};
@API(required = false, level = API.Level.critical, help = "Path to file with self-contained model archive.")
public String path;
@API(required = false, direction = API.Direction.INOUT, level = API.Level.critical, help = "Key to the self-contained model archive already uploaded to H2O.")
public KeyV3.FrameKeyV3 model_key;
public transient ModelParameterSchemaV3[] additionalParameters;
@Override
protected ModelParameterSchemaV3[] getAdditionalParameters() {
return additionalParameters;
}
@Override
protected GenericParametersV3 fillFromImpl(GenericModelParameters impl, String[] fieldsToSkip) {
final GenericParametersV3 genericParametersV3 = super.fillFromImpl(impl, fieldsToSkip);
genericParametersV3.additionalParameters = impl._modelParameters;
return genericParametersV3;
}
@Override
public GenericParametersV3 fillFromImpl(GenericModelParameters impl) {
final GenericParametersV3 genericParametersV3 = super.fillFromImpl(impl);
genericParametersV3.additionalParameters = impl._modelParameters;
return genericParametersV3;
}
}
}
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GramV3.java |
package hex.schemas;
import water.Iced;
import water.api.API;
import water.api.Schema;
import water.api.schemas3.FrameV3;
import water.api.schemas3.KeyV3;
/**
* Created by tomas on 10/26/16.
*/
public class GramV3 extends Schema<Iced,GramV3>{
@API(help="source data", required = true, direction = API.Direction.INPUT)
public KeyV3.FrameKeyV3 X;
@API(help="weight vector", required = false, direction = API.Direction.INPUT)
public FrameV3.ColSpecifierV3 W;
@API(help="use all factor levels when doing 1-hot encoding", required=false,direction=API.Direction.INPUT)
public boolean use_all_factor_levels;
@API(help="standardize data",required=false,direction = API.Direction.INPUT)
public boolean standardize;
@API(help="skip missing values",required=false,direction = API.Direction.INPUT)
public boolean skip_missing;
@API(help="Destination key for the resulting matrix.", direction = API.Direction.OUTPUT)
public KeyV3.FrameKeyV3 destination_frame;
}
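GramV3 describes an endpoint that computes a Gram matrix of the source frame X, optionally weighted by W, standardized, and with missing rows skipped. A minimal dense sketch of the quantity being computed, assuming the weighted Gram is Xᵀ·diag(w)·X; this illustrates the math only, not the distributed H2O task:

```java
// Sketch: weighted Gram matrix G = X^T * diag(w) * X for a small dense matrix.
// skip_missing is approximated by dropping any row containing NaN. Illustration only.
public class GramSketch {
    static double[][] gram(double[][] x, double[] w, boolean skipMissing) {
        int p = x[0].length;
        double[][] g = new double[p][p];
        for (int r = 0; r < x.length; r++) {
            boolean hasNaN = false;
            for (double v : x[r]) hasNaN |= Double.isNaN(v);
            if (skipMissing && hasNaN) continue;
            for (int i = 0; i < p; i++)
                for (int j = 0; j < p; j++)
                    g[i][j] += w[r] * x[r][i] * x[r][j];
        }
        return g;
    }
    public static void main(String[] args) {
        double[][] x = {{1, 2}, {3, 4}, {Double.NaN, 1}};
        double[] w = {1.0, 0.5, 1.0};
        System.out.println(java.util.Arrays.deepToString(gram(x, w, true)));
    }
}
```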
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GrepModelV3.java |
package hex.schemas;
import hex.grep.GrepModel;
import water.H2O;
import water.api.*;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
import water.util.PojoUtils;
public class GrepModelV3 extends ModelSchemaV3<GrepModel, GrepModelV3, GrepModel.GrepParameters, GrepV3.GrepParametersV3, GrepModel.GrepOutput, GrepModelV3.GrepModelOutputV3> {
public static final class GrepModelOutputV3 extends ModelOutputSchemaV3<GrepModel.GrepOutput, GrepModelOutputV3> {
// Output fields
// Assume small-data results: string matches only
@API(help="Matching strings") public String[] matches;
@API(help="Byte offsets of matches") public long[] offsets;
@Override public GrepModel.GrepOutput createImpl() {
GrepModel.GrepOutput impl = new GrepModel.GrepOutput(null);
PojoUtils.copyProperties(impl, this, PojoUtils.FieldNaming.DEST_HAS_UNDERSCORES);
return impl;
}
@Override public GrepModelOutputV3 fillFromImpl( GrepModel.GrepOutput impl) {
PojoUtils.copyProperties(this, impl, PojoUtils.FieldNaming.ORIGIN_HAS_UNDERSCORES);
return this;
}
} // GrepModelOutputV2
//==========================
// Custom adapters go here
// TODO: I think we can implement the following two in ModelSchemaV3, using reflection on the type parameters.
public GrepV3.GrepParametersV3 createParametersSchema() { return new GrepV3.GrepParametersV3(); }
public GrepModelOutputV3 createOutputSchema() { return new GrepModelOutputV3(); }
// Version&Schema-specific filling into the impl
@Override public GrepModel createImpl() {
throw H2O.unimpl();
}
// Version&Schema-specific filling from the impl
@Override public GrepModelV3 fillFromImpl( GrepModel m ) {
// TODO: if( !(h instanceof InspectHandler) ) throw H2O.unimpl();
// TODO: InspectHandler ih = (InspectHandler)h;
// TODO: GrepModel kmm = ih._val.get();
// TODO: iters = kmm._iters;
return super.fillFromImpl(m);
}
}
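The createImpl/fillFromImpl methods above lean on PojoUtils.copyProperties with the DEST_HAS_UNDERSCORES / ORIGIN_HAS_UNDERSCORES naming conventions: a schema field such as `matches` is paired with an implementation field such as `_matches` by adding or dropping a leading underscore. A simplified reflection sketch of that convention (not the actual water.util.PojoUtils code):

```java
import java.lang.reflect.Field;

// Sketch of the underscore naming convention used by PojoUtils.copyProperties above.
// Simplified illustration only; field names here are hypothetical stand-ins.
public class UnderscoreCopySketch {
    static void copyOriginHasUnderscores(Object dest, Object origin) throws Exception {
        for (Field destField : dest.getClass().getDeclaredFields()) {
            Field originField = origin.getClass().getDeclaredField("_" + destField.getName());
            destField.setAccessible(true);
            originField.setAccessible(true);
            destField.set(dest, originField.get(origin));   // copy _matches -> matches, etc.
        }
    }
    static class Impl   { String[] _matches = {"a", "b"}; long[] _offsets = {1L, 2L}; }
    static class Schema { String[] matches; long[] offsets; }
    public static void main(String[] args) throws Exception {
        Schema s = new Schema();
        copyOriginHasUnderscores(s, new Impl());
        System.out.println(java.util.Arrays.toString(s.matches)); // [a, b]
    }
}
```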
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/GrepV3.java |
package hex.schemas;
import hex.grep.Grep;
import hex.grep.GrepModel;
import water.api.API;
import water.api.schemas3.ModelParametersSchemaV3;
public class GrepV3 extends ModelBuilderSchema<Grep,GrepV3,GrepV3.GrepParametersV3> {
public static final class GrepParametersV3 extends ModelParametersSchemaV3<GrepModel.GrepParameters, GrepParametersV3> {
static public String[] own_fields = new String[] { "regex" };
// Input fields
@API(help="regex") public String regex;
} // GrepParametersV2
}
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/HGLMModelV3.java |
package hex.schemas;
import hex.hglm.HGLMModel;
import water.api.API;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
import water.api.schemas3.TwoDimTableV3;
import water.util.TwoDimTable;
import java.util.Arrays;
import static water.util.ArrayUtils.flattenArray;
public class HGLMModelV3 extends ModelSchemaV3<HGLMModel,
HGLMModelV3,
HGLMModel.HGLMParameters,
HGLMV3.HGLMParametersV3,
HGLMModel.HGLMModelOutput,
HGLMModelV3.HGLMModelOutputV3> {
public static final class HGLMModelOutputV3 extends ModelOutputSchemaV3<HGLMModel.HGLMModelOutput, HGLMModelOutputV3> {
// the doc == document describing our HGLM implementation, attached to issue: https://github.com/h2oai/h2o-3/issues/8487
@API(help="Table of Fixed Coefficients")
TwoDimTableV3 coefficients_table;
@API(help="Table of Random Coefficients")
TwoDimTableV3 random_coefficients_table;
@API(help="Table of Scoring History for Validation Dataset")
TwoDimTableV3 scoring_history_valid;
@API(help="Fixed Effects Coefficient Names")
public String[] coefficient_names; // include intercept only if _parms._intercept is true
@API(help="Random Effects Coefficient Names")
public String[] random_coefficient_names; // include intercept only if _parms._random_intercept = true
@API(help="Level 2 Indice Names")
public String[] group_column_names;
@API(help="Fixed Effects Coefficients")
public double[] beta; // fixed coefficients
@API(help="Random Effects Coefficients")
public double[][] ubeta; // random coefficients
@API(help="Covariance Matrix for Random Effects (= Tj in section II.I of the doc")
public double[][] tmat;
@API(help="Ratio of each random effect variance and (sum of all random effect variances plus the residual noise" +
" variance).")
double[] icc;
@API(help="Residual noise variance")
double residual_variance;
@API(help="Mean residual error with fixed effect coefficients only")
double mean_residual_fixed;
@API(help="Mean residual error with fixed effect coefficients only")
double mean_residual_fixed_valid;
@Override
public HGLMModelOutputV3 fillFromImpl(HGLMModel.HGLMModelOutput impl) {
super.fillFromImpl(impl);
coefficient_names = impl._fixed_coefficient_names;
random_coefficient_names = impl._random_coefficient_names;
group_column_names = impl._group_column_names;
beta = impl._beta;
ubeta = impl._ubeta;
coefficients_table = new TwoDimTableV3();
coefficients_table.fillFromImpl(generateCoeffTable("fixed effect coefficients",
"HGLM fixed effect coefficients", beta, coefficient_names));
random_coefficients_table = new TwoDimTableV3();
random_coefficients_table.fillFromImpl(generate2DCoeffTable("random effect coefficients",
"HGLM random effect coefficients", ubeta, random_coefficient_names, impl._group_column_names));
icc = impl._icc;
residual_variance = impl._tau_e_var;
mean_residual_fixed = impl._yMinusFixPredSquare /impl._nobs;
if (impl._nobs_valid > 0)
mean_residual_fixed_valid = impl._yMinusFixPredSquareValid /impl._nobs_valid;
return this;
}
}
public static TwoDimTable generateCoeffTable(String title1, String title2, double[] coeffs, String[] coeffNames) {
String[] colnames = new String[] {"coefficients"};
String[] colFormats = new String[] {"%.5f"};
String[] colTypes = new String[] {"double"};
TwoDimTable tdt = new TwoDimTable(title1, title2, coeffNames, colnames, colTypes, colFormats, "names");
int tableLen = coeffs.length;
for (int index=0; index<tableLen; index++) {
tdt.set(index, 0, coeffs[index]);
}
return tdt;
}
public static TwoDimTable generate2DCoeffTable(String title1, String title2, double[][] coeffs, String[] coeffNames,
String[] level2Domain) {
int numLevel2Index = level2Domain.length;
String[] coeffNamesUsed;
double[][] coeffsUsed;
coeffNamesUsed = coeffNames;
coeffsUsed = coeffs;
double[] fCoeffValues = flattenArray(coeffsUsed);
String[] fCoeffNames = extendCoeffNames(coeffNamesUsed, numLevel2Index);
String[] fLevel2Vals = extendLevel2Ind(level2Domain, coeffsUsed[0].length);
String[] colnames = new String[]{"coefficient names", "coefficients"};
String[] colFormats = new String[]{"%s", "%.5f"};
String[] colTypes = new String[]{"string", "double"};
TwoDimTable tdt = new TwoDimTable(title1, title2, fLevel2Vals, colnames, colTypes, colFormats, "names");
int tableLen = fCoeffNames.length;
for (int index = 0; index < tableLen; index++) {
tdt.set(index, 0, fCoeffNames[index]);
tdt.set(index, 1, fCoeffValues[index]);
}
return tdt;
}
public static String[] extendLevel2Ind(String[] level2Domain, int numCoeff) {
int levelIndNum = level2Domain.length;
String[][] extendedDomain = new String[levelIndNum][numCoeff];
int extendLen = extendedDomain.length;
for (int index=0; index<extendLen; index++) {
Arrays.fill(extendedDomain[index], level2Domain[index]);
}
return flattenArray(extendedDomain);
}
public static String[] extendCoeffNames(String[] coeffNames, int numLevel2Ind) {
int numCoeff = coeffNames.length;
String[] extendedCoeffNames = new String[numCoeff*numLevel2Ind];
int indexStart;
for (int index=0; index<numLevel2Ind; index++) {
indexStart = index*numCoeff;
System.arraycopy(coeffNames, 0, extendedCoeffNames, indexStart, numCoeff);
}
return extendedCoeffNames;
}
public HGLMV3.HGLMParametersV3 createParametersSchema() { return new HGLMV3.HGLMParametersV3(); }
public HGLMModelOutputV3 createOutputSchema() { return new HGLMModelOutputV3(); }
@Override
public HGLMModel createImpl() {
HGLMModel.HGLMParameters parms = parameters.createImpl();
return new HGLMModel(model_id.key(), parms, null);
}
}
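To make the table-building helpers above concrete: generate2DCoeffTable flattens the per-group random-coefficient matrix ubeta, repeats the coefficient names once per level-2 group, and uses the group names (repeated once per coefficient) as row headers. A hypothetical call with made-up values; it assumes h2o-algos is on the classpath:

```java
// Hypothetical usage of the public helper HGLMModelV3.generate2DCoeffTable defined above.
// Values, coefficient names and group names are made up for illustration.
import hex.schemas.HGLMModelV3;
import water.util.TwoDimTable;

public class HglmCoeffTableExample {
    public static void main(String[] args) {
        double[][] ubeta = {
                {0.12, -0.30},   // random coefficients for level-2 unit "clinic_A"
                {-0.05, 0.21}    // random coefficients for level-2 unit "clinic_B"
        };
        String[] coeffNames = {"Intercept", "age"};
        String[] groups = {"clinic_A", "clinic_B"};
        TwoDimTable t = HGLMModelV3.generate2DCoeffTable(
                "random effect coefficients", "HGLM random effect coefficients",
                ubeta, coeffNames, groups);
        // Rows come out as (group, coefficient name, value):
        //   clinic_A | Intercept |  0.12
        //   clinic_A | age       | -0.30
        //   clinic_B | Intercept | -0.05
        //   clinic_B | age       |  0.21
        System.out.println(t);
    }
}
```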
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/HGLMV3.java |
package hex.schemas;
import hex.glm.GLMModel.GLMParameters;
import hex.hglm.HGLM;
import hex.hglm.HGLMModel;
import water.api.API;
import water.api.API.Direction;
import water.api.API.Level;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelParametersSchemaV3;
public class HGLMV3 extends ModelBuilderSchema<HGLM, HGLMV3, HGLMV3.HGLMParametersV3> {
public static final class HGLMParametersV3 extends ModelParametersSchemaV3<HGLMModel.HGLMParameters, HGLMParametersV3> {
public static final String[] fields = new String[] {
"model_id",
"training_frame",
"validation_frame",
"response_column",
"ignored_columns",
"ignore_const_cols",
"offset_column",
"weights_column",
"max_runtime_secs",
"custom_metric_func",
"score_each_iteration",
"score_iteration_interval",
"seed",
"missing_values_handling",
"plug_values",
"family",
"rand_family",
"max_iterations",
"initial_fixed_effects",
"initial_random_effects",
"initial_t_matrix",
"tau_u_var_init",
"tau_e_var_init",
"random_columns",
"method",
"em_epsilon",
"random_intercept",
"group_column",
"gen_syn_data"
};
@API(help = "Perform scoring for every score_iteration_interval iterations.", level = Level.secondary)
public int score_iteration_interval;
@API(help = "Seed for pseudo random number generator (if applicable).", gridable = true)
public long seed;
@API(help = "Handling of missing values. Either MeanImputation, Skip or PlugValues.",
values = { "MeanImputation", "Skip", "PlugValues"}, level = API.Level.expert,
direction=API.Direction.INOUT, gridable = true)
public GLMParameters.MissingValuesHandling missing_values_handling;
@API(help = "Plug Values (a single row frame containing values that will be used to impute missing values of the" +
" training/validation frame, use with conjunction missing_values_handling = PlugValues).",
direction = API.Direction.INPUT)
public KeyV3.FrameKeyV3 plug_values;
// Input fields
@API(help = "Family. Only gaussian is supported now.",
values = {"gaussian"}, level = Level.critical)
public GLMParameters.Family family;
@API(help = "Set distribution of random effects. Only Gaussian is implemented now.",
values = {"gaussian"}, level = Level.critical)
public GLMParameters.Family rand_family;
@API(help = "Maximum number of iterations. Value should >=1. A value of 0 is only set when only the model " +
"coefficient names and model coefficient dimensions are needed.", level = Level.secondary)
public int max_iterations;
@API(level = API.Level.expert, direction = API.Direction.INOUT, gridable=true,
help = "An array that contains initial values of the fixed effects coefficient.")
public double[] initial_fixed_effects;
@API(level = API.Level.expert, direction = API.Direction.INOUT, gridable=true,
help = "A H2OFrame id that contains initial values of the random effects coefficient. The row names should" +
"be the random coefficient names. If you are not sure what the random coefficient names are," +
" build HGLM model with max_iterations = 0 and checkout the model output field " +
"random_coefficient_names. The number of rows of this frame should be the number of level 2" +
" units. Again, to figure this out, build HGLM model with max_iterations=0 and check out " +
"the model output field group_column_names. The number of rows should equal the length of the" +
"group_column_names.")
public KeyV3.FrameKeyV3 initial_random_effects;
@API(level = API.Level.expert, direction = API.Direction.INOUT, gridable=true,
help = "A H2OFrame id that contains initial values of the T matrix. It should be a positive symmetric matrix.")
public KeyV3.FrameKeyV3 initial_t_matrix;
@API(help = "Initial variance of random coefficient effects. If set, should provide a value > 0.0. If not set, " +
"will be randomly set in the model building process."
, level = Level.expert, gridable = true)
public double tau_u_var_init;
@API(help = "Initial variance of random noise. If set, should provide a value > 0.0. If not set, will be randomly" +
" set in the model building process."
, level = Level.expert, gridable = true)
public double tau_e_var_init;
@API(help = "Random columns indices for HGLM.", gridable=true)
public String[] random_columns;
@API(help = "We only implemented EM as a method to obtain the fixed, random coefficients and the various variances.",
values = {"EM"}, level = Level.critical)
public HGLMModel.HGLMParameters.Method method;
@API(help = "Converge if beta/ubeta/tmat/tauEVar changes less (using L-infinity norm) than em esilon. ONLY applies to EM method."
, level = Level.expert)
public double em_epsilon;
@API(help="If true, will allow random component to the GLM coefficients.", direction=Direction.INPUT, gridable=true)
public boolean random_intercept;
@API(help="Group column is the column that is categorical and used to generate the groups in HGLM", gridable=true)
public String group_column;
@API(help="If true, add gaussian noise with variance specified in parms._tau_e_var_init.",
direction=Direction.INPUT, gridable=true)
public boolean gen_syn_data;
}
}
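The parameters above (fixed effects, per-group random effects with variance tau_u_var_init, residual variance tau_e_var_init, a categorical group_column defining the level-2 units, and gen_syn_data) describe a Gaussian linear mixed model. A hedged sketch of generating data under that model; the exact form H2O uses for gen_syn_data may differ:

```java
import java.util.Random;

// Sketch of the Gaussian HGLM data model suggested by the parameters above:
// y_ij = x_ij' * beta + u_j + e_ij, with u_j ~ N(0, tau_u_var) per group j
// and e_ij ~ N(0, tau_e_var). Illustration only; not H2O's gen_syn_data code.
public class HglmSyntheticSketch {
    public static void main(String[] args) {
        Random rng = new Random(42);
        double[] beta = {1.5, -0.7};          // fixed effects (intercept, slope)
        double tauUVar = 0.5, tauEVar = 1.0;  // random-effect and residual variances
        int groups = 3;
        for (int j = 0; j < groups; j++) {
            double uj = Math.sqrt(tauUVar) * rng.nextGaussian(); // random intercept for group j
            for (int i = 0; i < 2; i++) {
                double x = rng.nextDouble();
                double y = beta[0] + beta[1] * x + uj
                         + Math.sqrt(tauEVar) * rng.nextGaussian();
                System.out.printf("group=%d x=%.3f y=%.3f%n", j, x, y);
            }
        }
    }
}
```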
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/IsolationForestModelV3.java |
package hex.schemas;
import hex.tree.isofor.IsolationForestModel;
import water.api.API;
import water.api.schemas3.TwoDimTableV3;
public class IsolationForestModelV3 extends SharedTreeModelV3<IsolationForestModel,
IsolationForestModelV3,
IsolationForestModel.IsolationForestParameters,
IsolationForestV3.IsolationForestParametersV3,
IsolationForestModel.IsolationForestOutput,
IsolationForestModelV3.IsolationForestModelOutputV3> {
public static final class IsolationForestModelOutputV3 extends SharedTreeModelV3.SharedTreeModelOutputV3<IsolationForestModel.IsolationForestOutput, IsolationForestModelOutputV3> {
@API(help="Variable Splits", direction=API.Direction.OUTPUT, level = API.Level.secondary)
public TwoDimTableV3 variable_splits;
}
public IsolationForestV3.IsolationForestParametersV3 createParametersSchema() { return new IsolationForestV3.IsolationForestParametersV3(); }
public IsolationForestModelOutputV3 createOutputSchema() { return new IsolationForestModelOutputV3(); }
//==========================
// Custom adapters go here
// Version&Schema-specific filling into the impl
@Override public IsolationForestModel createImpl() {
IsolationForestV3.IsolationForestParametersV3 p = this.parameters;
IsolationForestModel.IsolationForestParameters parms = p.createImpl();
return new IsolationForestModel( model_id.key(), parms, new IsolationForestModel.IsolationForestOutput(null) );
}
}
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/IsolationForestV3.java |
package hex.schemas;
import hex.tree.isofor.IsolationForest;
import hex.tree.isofor.IsolationForestModel;
import water.api.API;
import water.api.schemas3.FrameV3;
public class IsolationForestV3 extends SharedTreeV3<IsolationForest, IsolationForestV3, IsolationForestV3.IsolationForestParametersV3> {
public static final class IsolationForestParametersV3 extends SharedTreeV3.SharedTreeParametersV3<IsolationForestModel.IsolationForestParameters, IsolationForestParametersV3> {
static public String[] fields = new String[]{
"model_id",
"training_frame",
"score_each_iteration",
"score_tree_interval",
"ignored_columns",
"ignore_const_cols",
"ntrees",
"max_depth",
"min_rows",
"max_runtime_secs",
"seed",
"build_tree_one_node",
"mtries",
"sample_size",
"sample_rate",
"col_sample_rate_change_per_level",
"col_sample_rate_per_tree",
"categorical_encoding",
"stopping_rounds",
"stopping_metric",
"stopping_tolerance",
"export_checkpoints_dir",
"contamination",
"validation_frame",
"validation_response_column"
};
// Input fields
@API(help = "Number of randomly sampled observations used to train each Isolation Forest tree. Only one of parameters sample_size and sample_rate should be defined. If sample_rate is defined, sample_size will be ignored.", gridable = true)
public long sample_size;
@API(help = "Rate of randomly sampled observations used to train each Isolation Forest tree. Needs to be in range from 0.0 to 1.0. If set to -1, sample_rate is disabled and sample_size will be used instead.", gridable = true)
public double sample_rate;
@API(help = "Number of variables randomly sampled as candidates at each split. If set to -1, defaults (number of predictors)/3.", gridable = true)
public int mtries;
@API(help = "Contamination ratio - the proportion of anomalies in the input dataset. If undefined (-1) the predict function will not mark observations as anomalies and only anomaly score will be returned. Defaults to -1 (undefined).")
public double contamination;
@API(level = API.Level.secondary, direction = API.Direction.INOUT,
is_member_of_frames = {"validation_frame"},
is_mutually_exclusive_with = {"ignored_columns"},
help = "(experimental) Name of the response column in the validation frame. " +
"Response column should be binary and indicate not anomaly/anomaly.")
public FrameV3.ColSpecifierV3 validation_response_column;
@Override
public IsolationForestParametersV3 fillFromImpl(IsolationForestModel.IsolationForestParameters impl) {
IsolationForestParametersV3 pv3 = super.fillFromImpl(impl);
if (impl._response_column != null) {
pv3.validation_response_column = new FrameV3.ColSpecifierV3(impl._response_column);
}
return pv3;
}
@Override
public IsolationForestModel.IsolationForestParameters fillImpl(IsolationForestModel.IsolationForestParameters impl) {
IsolationForestModel.IsolationForestParameters p = super.fillImpl(impl);
if (validation_response_column != null) {
p._response_column = validation_response_column.column_name;
}
return p;
}
}
}
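Two rules from the help strings above are easy to misread: sample_rate overrides sample_size when it is defined, and contamination (when not -1) is meant to turn anomaly scores into labels. A small sketch of both as described; thresholding at the contamination quantile is the usual interpretation and is an assumption here, not H2O's exact code:

```java
import java.util.Arrays;

// Sketch of two rules described in IsolationForestParametersV3 above. Illustration only.
public class IsoForestRulesSketch {
    // "If sample_rate is defined, sample_size will be ignored."
    static long effectiveSampleSize(long nRows, long sampleSize, double sampleRate) {
        return sampleRate >= 0 ? (long) Math.ceil(sampleRate * nRows) : sampleSize;
    }
    // Label roughly the top `contamination` fraction of scores as anomalies (assumed interpretation).
    static boolean[] labelByContamination(double[] scores, double contamination) {
        boolean[] isAnomaly = new boolean[scores.length];
        if (contamination < 0) return isAnomaly;   // -1: scores only, no labels
        double[] sorted = scores.clone();
        Arrays.sort(sorted);
        int idx = Math.min(sorted.length - 1,
                (int) Math.floor((1.0 - contamination) * sorted.length));
        double threshold = sorted[idx];
        for (int i = 0; i < scores.length; i++) isAnomaly[i] = scores[i] >= threshold;
        return isAnomaly;
    }
    public static void main(String[] args) {
        System.out.println(effectiveSampleSize(10_000, 256, 0.1));   // 1000
        System.out.println(Arrays.toString(
                labelByContamination(new double[]{0.2, 0.9, 0.4, 0.8}, 0.25))); // only 0.9 flagged
    }
}
```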
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/IsotonicRegressionModelV3.java |
package hex.schemas;
import hex.isotonic.IsotonicRegressionModel;
import water.api.API;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
public class IsotonicRegressionModelV3 extends ModelSchemaV3<
IsotonicRegressionModel, IsotonicRegressionModelV3,
IsotonicRegressionModel.IsotonicRegressionParameters, IsotonicRegressionV3.IsotonicRegressionParametersV3,
IsotonicRegressionModel.IsotonicRegressionOutput, IsotonicRegressionModelV3.IsotonicRegressionModelOutputV3
> {
public static final class IsotonicRegressionModelOutputV3 extends ModelOutputSchemaV3<IsotonicRegressionModel.IsotonicRegressionOutput, IsotonicRegressionModelOutputV3> {
@API(help = "thresholds y")
public double[] thresholds_y;
@API(help = "thresholds X")
public double[] thresholds_x;
@API(help = "min X")
public double min_x;
@API(help = "max X")
public double max_x;
@Override
public IsotonicRegressionModelOutputV3 fillFromImpl(IsotonicRegressionModel.IsotonicRegressionOutput impl) {
super.fillFromImpl(impl);
return this;
}
} // IsotonicRegressionOutputV3
public IsotonicRegressionV3.IsotonicRegressionParametersV3 createParametersSchema() { return new IsotonicRegressionV3.IsotonicRegressionParametersV3(); }
public IsotonicRegressionModelOutputV3 createOutputSchema() { return new IsotonicRegressionModelOutputV3(); }
}
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/IsotonicRegressionV3.java |
package hex.schemas;
import hex.isotonic.IsotonicRegression;
import hex.isotonic.IsotonicRegressionModel;
import water.api.API;
import water.api.schemas3.ModelParametersSchemaV3;
public class IsotonicRegressionV3 extends ModelBuilderSchema<IsotonicRegression, IsotonicRegressionV3, IsotonicRegressionV3.IsotonicRegressionParametersV3> {
public static final class IsotonicRegressionParametersV3
extends ModelParametersSchemaV3<IsotonicRegressionModel.IsotonicRegressionParameters, IsotonicRegressionV3.IsotonicRegressionParametersV3> {
public static String[] fields = new String[]{
"model_id",
"training_frame",
"validation_frame",
"response_column",
"ignored_columns",
"weights_column",
"out_of_bounds",
"custom_metric_func",
"nfolds",
"keep_cross_validation_models",
"keep_cross_validation_predictions",
"keep_cross_validation_fold_assignment",
"fold_assignment",
"fold_column"
};
@API(help="Method of handling values of X predictor that are outside of the bounds seen in training.", values = {"NA", "clip"}, direction = API.Direction.INOUT)
public IsotonicRegressionModel.OutOfBoundsHandling out_of_bounds;
}
}
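The model output fields (thresholds_x, thresholds_y, min_x, max_x) together with out_of_bounds describe a piecewise-linear monotone prediction function. A hedged scoring sketch, assuming linear interpolation between threshold pairs and the two documented out-of-bounds behaviours (NA vs. clip); this is not H2O's scorer:

```java
// Sketch: scoring with the IsotonicRegression output fields above.
// thresholds_x is assumed sorted ascending with min_x = thresholds_x[0], max_x = last entry.
public class IsotonicScoreSketch {
    static double predict(double x, double[] tx, double[] ty,
                          double minX, double maxX, boolean clip) {
        if (x < minX || x > maxX) {
            if (!clip) return Double.NaN;                  // out_of_bounds = "NA"
            x = Math.min(maxX, Math.max(minX, x));         // out_of_bounds = "clip"
        }
        int i = java.util.Arrays.binarySearch(tx, x);
        if (i >= 0) return ty[i];                          // exact threshold hit
        int hi = -i - 1, lo = hi - 1;                      // surrounding thresholds
        double w = (x - tx[lo]) / (tx[hi] - tx[lo]);
        return ty[lo] + w * (ty[hi] - ty[lo]);             // linear interpolation
    }
    public static void main(String[] args) {
        double[] tx = {0.0, 1.0, 3.0};
        double[] ty = {0.1, 0.4, 0.9};
        System.out.println(predict(2.0, tx, ty, 0.0, 3.0, true));   // 0.65
        System.out.println(predict(5.0, tx, ty, 0.0, 3.0, false));  // NaN
    }
}
```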
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/KMeansModelV3.java |
package hex.schemas;
import hex.kmeans.KMeans;
import hex.kmeans.KMeansModel;
import hex.util.ClusteringUtils;
import water.api.API;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
import water.api.schemas3.TwoDimTableV3;
public class KMeansModelV3 extends ModelSchemaV3<KMeansModel, KMeansModelV3, KMeansModel.KMeansParameters, KMeansV3.KMeansParametersV3, KMeansModel.KMeansOutput, KMeansModelV3.KMeansModelOutputV3> {
public static final class KMeansModelOutputV3 extends ModelOutputSchemaV3<KMeansModel.KMeansOutput, KMeansModelOutputV3> {
// Output fields; input fields are in the parameters list
@API(help="Cluster Centers[k][features]")
public TwoDimTableV3 centers;
@API(help="Cluster Centers[k][features] on Standardized Data")
public TwoDimTableV3 centers_std;
@Override public KMeansModelOutputV3 fillFromImpl(KMeansModel.KMeansOutput impl) {
KMeansModelOutputV3 kmv3 = super.fillFromImpl(impl);
kmv3.centers = new TwoDimTableV3().fillFromImpl(ClusteringUtils.createCenterTable(impl, false));
if (impl._centers_std_raw != null)
kmv3.centers_std = new TwoDimTableV3().fillFromImpl(ClusteringUtils.createCenterTable(impl, true));
return kmv3;
}
} // KMeansModelOutputV2
// TODO: I think we can implement the following two in ModelSchemaV3, using reflection on the type parameters.
public KMeansV3.KMeansParametersV3 createParametersSchema() { return new KMeansV3.KMeansParametersV3(); }
public KMeansModelOutputV3 createOutputSchema() { return new KMeansModelOutputV3(); }
//==========================
// Custom adapters go here
// Version&Schema-specific filling into the impl
@Override public KMeansModel createImpl() {
KMeansModel.KMeansParameters parms = parameters.createImpl();
return new KMeansModel( model_id.key(), parms, null );
}
}
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/KMeansV3.java |
package hex.schemas;
import hex.kmeans.KMeans;
import hex.kmeans.KMeansModel.KMeansParameters;
import water.api.API;
import water.api.schemas3.ClusteringModelParametersSchemaV3;
import water.api.schemas3.KeyV3;
public class KMeansV3 extends ClusteringModelBuilderSchema<KMeans,KMeansV3,KMeansV3.KMeansParametersV3> {
public static final class KMeansParametersV3 extends ClusteringModelParametersSchemaV3<KMeansParameters, KMeansParametersV3> {
static public String[] fields = new String[] {
"model_id",
"training_frame",
"validation_frame",
"nfolds",
"keep_cross_validation_models",
"keep_cross_validation_predictions",
"keep_cross_validation_fold_assignment",
"fold_assignment",
"fold_column",
"ignored_columns",
"ignore_const_cols",
"score_each_iteration",
"k",
"estimate_k",
"user_points",
"max_iterations",
"standardize",
"seed",
"init",
"max_runtime_secs",
"categorical_encoding",
"export_checkpoints_dir",
"cluster_size_constraints"
};
// Input fields
@API(help = "This option allows you to specify a dataframe, where each row represents an initial cluster center. " +
"The user-specified points must have the same number of columns as the training observations. " +
"The number of rows must equal the number of clusters", required = false, level = API.Level.expert)
public KeyV3.FrameKeyV3 user_points;
@API(help="Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds iteration)", gridable = true)
public int max_iterations; // Max iterations
@API(help = "Standardize columns before computing distances", level = API.Level.critical, gridable = true)
public boolean standardize = true;
@API(help = "RNG Seed", level = API.Level.secondary /* tested, works: , dependsOn = {"k", "max_iterations"} */, gridable = true)
public long seed;
@API(help = "Initialization mode", values = { "Random", "PlusPlus", "Furthest", "User" }, gridable = true) // TODO: pull out of categorical class. . .
public KMeans.Initialization init;
@API(help = "Whether to estimate the number of clusters (<=k) iteratively and deterministically.", level = API.Level.critical, gridable = true)
public boolean estimate_k = false;
@API(help = "An array specifying the minimum number of points that should be in each cluster. The length of the constraints array has to be the same as the number of clusters.", level = API.Level.expert)
public int[] cluster_size_constraints = null;
}
}
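Two of the help strings above impose shape constraints: user_points must have k rows and the same number of columns as the training data, and cluster_size_constraints must have length k. A tiny sketch of just those two checks; the helper is hypothetical and H2O performs its own validation inside the builder:

```java
// Sketch: the shape checks described in KMeansParametersV3 above. Hypothetical helper.
public class KMeansParamChecksSketch {
    static void validate(int k, int nTrainCols,
                         double[][] userPoints, int[] clusterSizeConstraints) {
        if (userPoints != null) {
            if (userPoints.length != k)
                throw new IllegalArgumentException("user_points must have k=" + k + " rows");
            if (userPoints[0].length != nTrainCols)
                throw new IllegalArgumentException(
                        "user_points must have the same number of columns as the training frame");
        }
        if (clusterSizeConstraints != null && clusterSizeConstraints.length != k)
            throw new IllegalArgumentException("cluster_size_constraints must have length k=" + k);
    }
    public static void main(String[] args) {
        validate(2, 3, new double[][]{{0, 0, 0}, {1, 1, 1}}, new int[]{5, 5});
        System.out.println("checks passed");
    }
}
```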
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/MakeGLMModelV3.java |
package hex.schemas;
import water.Iced;
import water.api.API;
import water.api.API.Direction;
import water.api.schemas3.KeyV3;
import water.api.schemas3.SchemaV3;
/**
* End point to update a model. Creates a modified copy of the original model. Can only change coefficient values.
*/
public class MakeGLMModelV3 extends SchemaV3<Iced,MakeGLMModelV3> {
@API(help="source model", required = true, direction = Direction.INPUT)
public KeyV3.ModelKeyV3 model;
@API(help="destination key", required = false, direction = Direction.INPUT)
public KeyV3.ModelKeyV3 dest;//new KeyV3.ModelKeyV3(Key.make());
@API(help="coefficient names", required = true, direction = Direction.INPUT)
public String [] names;
@API(help = "new glm coefficients", required = true, direction = Direction.INPUT)
public double [] beta;
@API(help="decision threshold for label-generation", required = false, direction = Direction.INPUT)
public float threshold = .5f;
}
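MakeGLMModelV3 swaps a new coefficient vector into a copy of an existing GLM model (names aligned with beta) and, for classification, carries a decision threshold. A hedged sketch of how such coefficients and the threshold would be applied at scoring time; logistic scoring and the "Intercept" name are assumptions, not taken from the endpoint's code:

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Sketch: applying a names/beta pair and the threshold from MakeGLMModelV3 above.
// Assumes a binomial GLM with a logit link; names and values here are made up.
public class GlmScoreSketch {
    static double predictProb(Map<String, Double> coefs, Map<String, Double> row) {
        double eta = coefs.getOrDefault("Intercept", 0.0);
        for (Map.Entry<String, Double> e : row.entrySet())
            eta += coefs.getOrDefault(e.getKey(), 0.0) * e.getValue();
        return 1.0 / (1.0 + Math.exp(-eta));    // inverse logit link
    }
    public static void main(String[] args) {
        Map<String, Double> coefs = new LinkedHashMap<>();
        coefs.put("Intercept", -1.0);
        coefs.put("x1", 2.0);
        Map<String, Double> row = new LinkedHashMap<>();
        row.put("x1", 1.2);
        double p = predictProb(coefs, row);
        float threshold = 0.5f;                 // decision threshold for label generation
        System.out.println(p + " -> label " + (p >= threshold ? 1 : 0));
    }
}
```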
| 0 | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex | java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/ModelSelectionModelV3.java |
package hex.schemas;
import hex.modelselection.ModelSelectionModel;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
public class ModelSelectionModelV3 extends ModelSchemaV3<ModelSelectionModel, ModelSelectionModelV3, ModelSelectionModel.ModelSelectionParameters,
ModelSelectionV3.ModelSelectionParametersV3, ModelSelectionModel.ModelSelectionModelOutput, ModelSelectionModelV3.ModelSelectionModelOutputV3> {
public static final class ModelSelectionModelOutputV3 extends ModelOutputSchemaV3<ModelSelectionModel.ModelSelectionModelOutput,
ModelSelectionModelOutputV3> {
@API(help="Names of predictors in the best predictor subset")
String[][] best_predictors_subset;
@API(help="R2 values of all possible predictor subsets. Only for mode='allsubsets' or 'maxr'.")
double[] best_r2_values; // store the best R2 values of the best models with fix number of predictors
@API(help="at each predictor subset size, the predictor added is collected in this array. Not for mode = " +
"'backward'.")
String[][] predictors_added_per_step;
@API(help="at each predictor subset size, the predictor removed is collected in this array.")
String[][] predictors_removed_per_step;
@API(help="p-values of chosen predictor subsets at each subset size. Only for model='backward'.")
double[][] coef_p_values;
@API(help="z-values of chosen predictor subsets at each subset size. Only for model='backward'.")
double[][] z_values;
@API(help="Key of models containing best 1-predictor model, best 2-predictors model, ....")
KeyV3.ModelKeyV3[] best_model_ids;
@API(help="arrays of string arrays containing coefficient names of best 1-predictor model, best 2-predictors model, ....")
String[][] coefficient_names;
@API(help="store coefficient values for each predictor subset. Only for maxrsweep when build_glm_model is false.")
double[][] coefficient_values;
@API(help="store standardized coefficient values for each predictor subset. Only for maxrsweep when build_glm_model is false.")
double[][] coefficient_values_normalized;
@Override
public ModelSelectionModelOutputV3 fillFromImpl(ModelSelectionModel.ModelSelectionModelOutput impl) {
super.fillFromImpl(impl); // fill in the best_model_predictors_r2 table here when done
return this;
}
}
public ModelSelectionV3.ModelSelectionParametersV3 createParametersSchema() { return new ModelSelectionV3.ModelSelectionParametersV3(); }
public ModelSelectionModelOutputV3 createOutputSchema() { return new ModelSelectionModelOutputV3();}
@Override
public ModelSelectionModel createImpl() {
ModelSelectionModel.ModelSelectionParameters parms = parameters.createImpl();
return new ModelSelectionModel(model_id.key(), parms, null);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/ModelSelectionV3.java
|
package hex.schemas;
import hex.glm.GLMModel;
import hex.modelselection.ModelSelection;
import hex.modelselection.ModelSelectionModel;
import water.api.API;
import water.api.EnumValuesProvider;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelParametersSchemaV3;
public class ModelSelectionV3 extends ModelBuilderSchema<ModelSelection, ModelSelectionV3, ModelSelectionV3.ModelSelectionParametersV3> {
public static final class ModelSelectionParametersV3 extends ModelParametersSchemaV3<ModelSelectionModel.ModelSelectionParameters,
ModelSelectionParametersV3> {
public static final String[] fields = new String[]{
"model_id",
"training_frame",
"validation_frame",
"nfolds",
"seed",
"fold_assignment",
"fold_column",
"response_column",
"ignored_columns",
"ignore_const_cols",
"score_each_iteration",
"score_iteration_interval",
"offset_column",
"weights_column",
"family",
"link",
"tweedie_variance_power",
"tweedie_link_power",
"theta", // equals to 1/r and should be > 0 and <=1, used by negative binomial
"solver",
"alpha",
"lambda",
"lambda_search",
"early_stopping",
"nlambdas",
"standardize",
"missing_values_handling",
"plug_values",
"compute_p_values",
"remove_collinear_columns",
"intercept",
"non_negative",
"max_iterations",
"objective_epsilon",
"beta_epsilon",
"gradient_epsilon",
"startval", // initial starting values for coefficients, double array
"prior",
"cold_start", // if true, will start GLM model from initial values and conditions
"lambda_min_ratio",
"beta_constraints",
"max_active_predictors",
"obj_reg",
"stopping_rounds",
"stopping_metric",
"stopping_tolerance",
// dead unused args forced here by backwards compatibility, remove in V4
"balance_classes",
"class_sampling_factors",
"max_after_balance_size",
"max_confusion_matrix_size",
"max_runtime_secs",
"nparallelism",
"max_predictor_number", // denote maximum number of predictors to build models for
"min_predictor_number",
"mode", // naive, maxr, maxrsweep, backward
"build_glm_model",
"p_values_threshold",
"influence",
"multinode_mode"
};
@API(help = "Seed for pseudo random number generator (if applicable)", gridable = true)
public long seed;
// Input fields
@API(help = "Family. For maxr/maxrsweep, only gaussian. For backward, ordinal and multinomial families are not supported",
values = {"AUTO", "gaussian", "binomial", "fractionalbinomial", "quasibinomial", "poisson",
"gamma", "tweedie", "negativebinomial"}, level = API.Level.critical)
// took tweedie out since it's not reliable
public GLMModel.GLMParameters.Family family;
@API(help = "Tweedie variance power", level = API.Level.critical, gridable = true)
public double tweedie_variance_power;
@API(help = "Tweedie link power", level = API.Level.critical, gridable = true)
public double tweedie_link_power;
@API(help = "Theta", level = API.Level.critical, gridable = true)
public double theta; // used by the negative binomial distribution family
@API(help = "AUTO will set the solver based on given data and the other parameters. IRLSM is fast on on " +
"problems with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales " +
"better for datasets with many columns.", values = {"AUTO", "IRLSM", "L_BFGS",
"COORDINATE_DESCENT_NAIVE", "COORDINATE_DESCENT", "GRADIENT_DESCENT_LH", "GRADIENT_DESCENT_SQERR"},
level = API.Level.critical)
public GLMModel.GLMParameters.Solver solver;
@API(help = "Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for" +
" alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between" +
" specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';" +
" 0.5 otherwise.", level = API.Level.critical, gridable = true)
public double[] alpha;
@API(help = "Regularization strength", required = false, level = API.Level.critical, gridable = true)
public double[] lambda;
@API(help = "Use lambda search starting at lambda max, given lambda is then interpreted as lambda min",
level = API.Level.critical)
public boolean lambda_search;
@API(help = "For maxrsweep only. If enabled, will attempt to perform sweeping action using multiple nodes in " +
"the cluster. Defaults to false.",
level = API.Level.critical)
public boolean multinode_mode;
@API(help = "For maxrsweep mode only. If true, will return full blown GLM models with the desired predictor" +
"subsets. If false, only the predictor subsets, predictor coefficients are returned. This is for" +
"speeding up the model selection process. The users can choose to build the GLM models themselves" +
"by using the predictor subsets themselves. Defaults to false.",
level = API.Level.critical)
public boolean build_glm_model;
@API(help="Stop early when there is no more relative improvement on train or validation (if provided)")
public boolean early_stopping;
@API(help = "Number of lambdas to be used in a search." +
" Default indicates: If alpha is zero, with lambda search" +
" set to True, the value of nlamdas is set to 30 (fewer lambdas" +
" are needed for ridge regression) otherwise it is set to 100.", level = API.Level.critical)
public int nlambdas;
@API(help = "Perform scoring for every score_iteration_interval iterations", level = API.Level.secondary)
public int score_iteration_interval;
@API(help = "Standardize numeric columns to have zero mean and unit variance", level = API.Level.critical)
public boolean standardize;
@API(help = "Only applicable to multiple alpha/lambda values. If false, build the next model for next set" +
" of alpha/lambda values starting from the values provided by current model. If true will start GLM" +
" model from scratch.", level = API.Level.critical)
public boolean cold_start;
@API(help = "Handling of missing values. Either MeanImputation, Skip or PlugValues.",
values = { "MeanImputation", "Skip", "PlugValues" }, level = API.Level.expert,
direction=API.Direction.INOUT, gridable = true)
public GLMModel.GLMParameters.MissingValuesHandling missing_values_handling;
@API(help = "Plug Values (a single row frame containing values that will be used to impute missing values of" +
" the training/validation frame, use with conjunction missing_values_handling = PlugValues)",
direction = API.Direction.INPUT)
public KeyV3.FrameKeyV3 plug_values;
@API(help = "Restrict coefficients (not intercept) to be non-negative")
public boolean non_negative;
@API(help = "Maximum number of iterations", level = API.Level.secondary)
public int max_iterations;
@API(help = "Converge if beta changes less (using L-infinity norm) than beta esilon, ONLY applies to IRLSM" +
" solver ", level = API.Level.expert)
public double beta_epsilon;
@API(help = "Converge if objective value changes less than this."+ " Default (of -1.0) indicates: If lambda_search"+
" is set to True the value of objective_epsilon is set to .0001. If the lambda_search is set to False" +
" and lambda is equal to zero, the value of objective_epsilon is set to .000001, for any other value" +
" of lambda the default value of objective_epsilon is set to .0001.", level = API.Level.expert)
public double objective_epsilon;
@API(help = "Converge if objective changes less (using L-infinity norm) than this, ONLY applies to L-BFGS" +
" solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda is equal to zero, the" +
" default value of gradient_epsilon is equal to .000001, otherwise the default value is .0001. If " +
"lambda_search is set to True, the conditional values above are 1E-8 and 1E-6 respectively.",
level = API.Level.expert)
public double gradient_epsilon;
@API(help="Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs")
public double obj_reg;
@API(help = "Link function.", level = API.Level.secondary, values = {"family_default", "identity", "logit", "log",
"inverse", "tweedie", "ologit"}) //"oprobit", "ologlog": will be supported.
public GLMModel.GLMParameters.Link link;
@API(help = "Double array to initialize coefficients for GLM.",
gridable=true)
public double[] startval;
@API(help = "If true, will return likelihood function value for GLM.") // not gridable
public boolean calc_like;
@API(level = API.Level.critical, direction = API.Direction.INOUT,
valuesProvider = ModelSelectionModeProvider.class,
help = "Mode: Used to choose model selection algorithms to use. Options include "
+ "'allsubsets' for all subsets, "
+ "'maxr' that uses sequential replacement and GLM to build all models, slow but works with cross-validation, validation frames for more robust results, "
+ "'maxrsweep' that uses sequential replacement and sweeping action, much faster than 'maxr', "
+ "'backward' for backward selection."
)
public ModelSelectionModel.ModelSelectionParameters.Mode mode;
@API(help="Include constant term in the model", level = API.Level.expert)
public boolean intercept;
@API(help = "Prior probability for y==1. To be used only for logistic regression iff the data has been " +
"sampled and the mean of response does not reflect reality.", level = API.Level.expert)
public double prior;
@API(help = "Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest lambda" +
" that drives all coefficients to zero). Default indicates: if the number of observations is greater" +
" than the number of variables, then lambda_min_ratio is set to 0.0001; if the number of observations" +
" is less than the number of variables, then lambda_min_ratio is set to 0.01.",
level = API.Level.expert)
public double lambda_min_ratio;
@API(help = "Beta constraints", direction = API.Direction.INPUT /* Not required, to allow initial params validation: , required=true */)
public KeyV3.FrameKeyV3 beta_constraints;
@API(help="Maximum number of active predictors during computation. Use as a stopping criterion to prevent" +
" expensive model building with many predictors." + " Default indicates: If the IRLSM solver is used," +
" the value of max_active_predictors is set to 5000 otherwise it is set to 100000000.",
direction = API.Direction.INPUT, level = API.Level.expert)
public int max_active_predictors = -1;
// dead unused args, formerly inherited from supervised model schema
/**
* For imbalanced data, balance training data class counts via
* over/under-sampling. This can result in improved predictive accuracy.
*/
@API(help = "Balance training data class counts via over/under-sampling (for imbalanced data).",
level = API.Level.secondary, direction = API.Direction.INOUT)
public boolean balance_classes;
/**
* Desired over/under-sampling ratios per class (lexicographic order).
* Only when balance_classes is enabled.
* If not specified, they will be automatically computed to obtain class balance during training.
*/
@API(help = "Desired over/under-sampling ratios per class (in lexicographic order). If not specified, " +
"sampling factors will be automatically computed to obtain class balance during training. Requires" +
" balance_classes.", level = API.Level.expert, direction = API.Direction.INOUT)
public float[] class_sampling_factors;
/**
* When classes are balanced, limit the resulting dataset size to the
* specified multiple of the original dataset size.
*/
@API(help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0)." +
" Requires balance_classes.", /* dmin=1e-3, */ level = API.Level.expert,
direction = API.Direction.INOUT)
public float max_after_balance_size;
/** For classification models, the maximum size (in terms of classes) of
* the confusion matrix for it to be printed. This option is meant to
* avoid printing extremely large confusion matrices. */
@API(help = "[Deprecated] Maximum size (# classes) for confusion matrices to be printed in the Logs",
level = API.Level.secondary, direction = API.Direction.INOUT)
public int max_confusion_matrix_size;
@API(help="Request p-values computation, p-values work only with IRLSM solver and no regularization",
level = API.Level.secondary, direction = API.Direction.INPUT)
public boolean compute_p_values;
@API(help="In case of linearly dependent columns, remove some of the dependent columns",
level = API.Level.secondary, direction = API.Direction.INPUT)
public boolean remove_collinear_columns; // _remove_collinear_columns
@API(help = "Maximum number of predictors to be considered when building GLM models. Defaults to 1.",
level = API.Level.secondary, direction = API.Direction.INPUT)
public int max_predictor_number;
@API(help = "For mode = 'backward' only. Minimum number of predictors to be considered when building GLM " +
"models starting with all predictors to be included. Defaults to 1.",
level = API.Level.secondary, direction = API.Direction.INPUT)
public int min_predictor_number;
@API(help = "number of models to build in parallel. Defaults to 0.0 which is adaptive to the system capability",
level = API.Level.secondary, gridable = true)
public int nparallelism;
@API(help = "For mode='backward' only. If specified, will stop the model building process when all coefficients" +
"p-values drop below this threshold ", level = API.Level.expert)
public double p_values_threshold;
@API(help = "If set to dfbetas will calculate the difference in beta when a datarow is included and excluded in " +
"the dataset.", values = { "dfbetas" }, level = API.Level.expert, gridable = false)
public GLMModel.GLMParameters.Influence influence;
}
public static final class ModelSelectionModeProvider extends EnumValuesProvider<ModelSelectionModel.ModelSelectionParameters.Mode> {
public ModelSelectionModeProvider() { super(ModelSelectionModel.ModelSelectionParameters.Mode.class); }
}
}
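// Minimal usage sketch: filling the parameter schema above for a 'maxr' run. All values are
// illustrative; the Mode constant is resolved by name from the documented mode string, which is
// assumed to match the enum constant.
class ModelSelectionParametersV3Example {
static ModelSelectionV3.ModelSelectionParametersV3 example() {
ModelSelectionV3.ModelSelectionParametersV3 p = new ModelSelectionV3.ModelSelectionParametersV3();
p.mode = ModelSelectionModel.ModelSelectionParameters.Mode.valueOf("maxr"); // sequential replacement via GLM (name assumed from the help text)
p.family = GLMModel.GLMParameters.Family.gaussian; // maxr/maxrsweep support gaussian only
p.max_predictor_number = 3; // consider predictor subsets of size up to 3
p.nparallelism = 0; // 0 = adaptive to system capability
return p;
}
}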
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/NaiveBayesModelV3.java
|
package hex.schemas;
import hex.naivebayes.NaiveBayesModel;
import water.api.API;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
import water.api.schemas3.TwoDimTableV3;
public class NaiveBayesModelV3 extends ModelSchemaV3<NaiveBayesModel, NaiveBayesModelV3, NaiveBayesModel.NaiveBayesParameters, NaiveBayesV3.NaiveBayesParametersV3, NaiveBayesModel.NaiveBayesOutput, NaiveBayesModelV3.NaiveBayesModelOutputV3> {
public static final class NaiveBayesModelOutputV3 extends ModelOutputSchemaV3<NaiveBayesModel.NaiveBayesOutput, NaiveBayesModelOutputV3> {
// Output fields; input fields are in the parameters list
@API(help = "Categorical levels of the response")
public String[] levels;
@API(help = "A-priori probabilities of the response")
public TwoDimTableV3 apriori;
@API(help = "Conditional probabilities of the predictors")
public TwoDimTableV3[] pcond;
}
// TODO: I think we can implement the following two in ModelSchemaV3, using reflection on the type parameters.
public NaiveBayesV3.NaiveBayesParametersV3 createParametersSchema() { return new NaiveBayesV3.NaiveBayesParametersV3(); }
public NaiveBayesModelOutputV3 createOutputSchema() { return new NaiveBayesModelOutputV3(); }
// Version&Schema-specific filling into the impl
@Override public NaiveBayesModel createImpl() {
NaiveBayesModel.NaiveBayesParameters parms = parameters.createImpl();
return new NaiveBayesModel( model_id.key(), parms, null );
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/NaiveBayesV3.java
|
package hex.schemas;
import hex.naivebayes.NaiveBayes;
import hex.naivebayes.NaiveBayesModel.NaiveBayesParameters;
import water.api.API;
import water.api.schemas3.ModelParametersSchemaV3;
public class NaiveBayesV3 extends ModelBuilderSchema<NaiveBayes,NaiveBayesV3,NaiveBayesV3.NaiveBayesParametersV3> {
public static final class NaiveBayesParametersV3 extends ModelParametersSchemaV3<NaiveBayesParameters, NaiveBayesParametersV3> {
static public String[] fields = new String[]{
"model_id",
"nfolds",
"seed",
"fold_assignment",
"fold_column",
"keep_cross_validation_models",
"keep_cross_validation_predictions",
"keep_cross_validation_fold_assignment",
"training_frame",
"validation_frame",
"response_column",
"ignored_columns",
"ignore_const_cols",
"score_each_iteration",
"balance_classes",
"class_sampling_factors",
"max_after_balance_size",
"max_confusion_matrix_size",
"laplace",
"min_sdev",
"eps_sdev",
"min_prob",
"eps_prob",
"compute_metrics",
"max_runtime_secs",
"export_checkpoints_dir",
"gainslift_bins",
"auc_type"
};
/*Imbalanced Classes*/
/**
* For imbalanced data, balance training data class counts via
* over/under-sampling. This can result in improved predictive accuracy.
*/
@API(help = "Balance training data class counts via over/under-sampling (for imbalanced data).", level = API.Level.secondary, direction = API.Direction.INOUT)
public boolean balance_classes;
/**
* Desired over/under-sampling ratios per class (lexicographic order).
* Only when balance_classes is enabled.
* If not specified, they will be automatically computed to obtain class balance during training.
*/
@API(help = "Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires balance_classes.", level = API.Level.expert, direction = API.Direction.INOUT)
public float[] class_sampling_factors;
/**
* When classes are balanced, limit the resulting dataset size to the
* specified multiple of the original dataset size.
*/
@API(help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0). Requires balance_classes.", /* dmin=1e-3, */ level = API.Level.expert, direction = API.Direction.INOUT)
public float max_after_balance_size;
/** For classification models, the maximum size (in terms of classes) of
* the confusion matrix for it to be printed. This option is meant to
* avoid printing extremely large confusion matrices. */
@API(help = "[Deprecated] Maximum size (# classes) for confusion matrices to be printed in the Logs", level = API.Level.secondary, direction = API.Direction.INOUT)
public int max_confusion_matrix_size;
//
@API(help = "Laplace smoothing parameter", gridable = true)
public double laplace;
@API(help = "Min. standard deviation to use for observations with not enough data", gridable = true)
public double min_sdev;
@API(help = "Cutoff below which standard deviation is replaced with min_sdev", gridable = true)
public double eps_sdev;
@API(help = "Min. probability to use for observations with not enough data", gridable = true)
public double min_prob;
@API(help = "Cutoff below which probability is replaced with min_prob", gridable = true)
public double eps_prob;
@API(help = "Compute metrics on training data", gridable = true)
public boolean compute_metrics;
@API(help = "Seed for pseudo random number generator (only used for cross-validation and fold_assignment=\"Random\" or \"AUTO\")", level = API.Level.expert, direction=API.Direction.INOUT, gridable = true)
public long seed;
}
}
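// Minimal usage sketch: how the smoothing and stability parameters above fit together. Values are
// illustrative only, not recommendations.
class NaiveBayesParametersV3Example {
static NaiveBayesV3.NaiveBayesParametersV3 example() {
NaiveBayesV3.NaiveBayesParametersV3 p = new NaiveBayesV3.NaiveBayesParametersV3();
p.laplace = 1.0; // Laplace smoothing applied to categorical counts
p.min_sdev = 1e-3; // floor used for per-feature standard deviation
p.eps_sdev = 0; // cutoff below which the standard deviation is replaced with min_sdev
p.min_prob = 1e-3; // floor used for observation probabilities
p.compute_metrics = true; // also score the training data
return p;
}
}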
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/PCAModelV3.java
|
package hex.schemas;
import hex.pca.PCAModel;
import water.api.*;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
import water.api.schemas3.TwoDimTableV3;
public class PCAModelV3 extends ModelSchemaV3<PCAModel, PCAModelV3, PCAModel.PCAParameters, PCAV3.PCAParametersV3, PCAModel.PCAOutput, PCAModelV3.PCAModelOutputV3> {
public static final class PCAModelOutputV3 extends ModelOutputSchemaV3<PCAModel.PCAOutput, PCAModelOutputV3> {
// Output fields; input fields are in the parameters list
@API(help = "Standard deviation and importance of each principal component")
public TwoDimTableV3 importance;
@API(help = "Principal components matrix")
public TwoDimTableV3 eigenvectors;
@API(help = "Final value of GLRM squared loss function")
public double objective;
}
// TODO: I think we can implement the following two in ModelSchemaV3, using reflection on the type parameters.
public PCAV3.PCAParametersV3 createParametersSchema() { return new PCAV3.PCAParametersV3(); }
public PCAModelOutputV3 createOutputSchema() { return new PCAModelOutputV3(); }
// Version&Schema-specific filling into the impl
@Override public PCAModel createImpl() {
PCAModel.PCAParameters parms = parameters.createImpl();
return new PCAModel( model_id.key(), parms, null );
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/PCAV3.java
|
package hex.schemas;
import hex.DataInfo;
import hex.pca.PCA;
import hex.pca.PCAModel.PCAParameters;
import hex.pca.PCAImplementation;
import water.api.API;
import water.api.schemas3.ModelParametersSchemaV3;
public class PCAV3 extends ModelBuilderSchema<PCA,PCAV3,PCAV3.PCAParametersV3> {
public static final class PCAParametersV3 extends ModelParametersSchemaV3<PCAParameters, PCAParametersV3> {
static public String[] fields = new String[]{
"model_id",
"training_frame",
"validation_frame",
"ignored_columns",
"ignore_const_cols",
"score_each_iteration",
"transform",
"pca_method",
"pca_impl",
"k",
"max_iterations",
"use_all_factor_levels",
"compute_metrics",
"impute_missing",
"seed",
"max_runtime_secs",
"export_checkpoints_dir"
};
@API(help = "Transformation of training data", values = { "NONE", "STANDARDIZE", "NORMALIZE", "DEMEAN", "DESCALE" }, gridable = true) // TODO: pull out of categorical class
public DataInfo.TransformType transform;
@API(
help = "Specify the algorithm to use for computing the principal components: " +
"GramSVD - uses a distributed computation of the Gram matrix, followed by a local SVD; " +
"Power - computes the SVD using the power iteration method (experimental); " +
"Randomized - uses randomized subspace iteration method; " +
"GLRM - fits a generalized low-rank model with L2 loss function and no regularization and solves for the SVD using local matrix algebra (experimental)",
values = { "GramSVD", "Power", "Randomized", "GLRM" }) // TODO: pull out of categorical class
public PCAParameters.Method pca_method;
@API(
help = "Specify the implementation to use for computing PCA (via SVD or EVD): " +
"MTJ_EVD_DENSEMATRIX - eigenvalue decompositions for dense matrix using MTJ; " +
"MTJ_EVD_SYMMMATRIX - eigenvalue decompositions for symmetric matrix using MTJ; " +
"MTJ_SVD_DENSEMATRIX - singular-value decompositions for dense matrix using MTJ; " +
"JAMA - eigenvalue decompositions for dense matrix using JAMA. " +
"References: " +
"JAMA - http://math.nist.gov/javanumerics/jama/; " +
"MTJ - https://github.com/fommil/matrix-toolkits-java/",
values = { "MTJ_EVD_DENSEMATRIX", "MTJ_EVD_SYMMMATRIX", "MTJ_SVD_DENSEMATRIX", "JAMA" })
public PCAImplementation pca_impl;
@API(help = "Rank of matrix approximation", required = true, direction = API.Direction.INOUT, gridable = true)
public int k;
@API(help = "Maximum training iterations", direction = API.Direction.INOUT, gridable = true)
public int max_iterations;
@API(help = "RNG seed for initialization", direction = API.Direction.INOUT)
public long seed;
@API(help = "Whether first factor level is included in each categorical expansion", direction = API.Direction.INOUT)
public boolean use_all_factor_levels;
@API(help = "Whether to compute metrics on the training data", direction = API.Direction.INOUT)
public boolean compute_metrics;
@API(help = "Whether to impute missing entries with the column mean", direction = API.Direction.INOUT)
public boolean impute_missing;
}
}
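// Minimal usage sketch: a typical fill of the PCA parameter schema above. The enum constant is
// taken from the documented values list; all numbers are illustrative.
class PCAParametersV3Example {
static PCAV3.PCAParametersV3 example() {
PCAV3.PCAParametersV3 p = new PCAV3.PCAParametersV3();
p.k = 3; // rank of the matrix approximation (required)
p.transform = DataInfo.TransformType.STANDARDIZE; // standardize columns before decomposition
p.max_iterations = 1000; // only relevant for the iterative methods
p.impute_missing = true; // impute missing entries with the column mean
return p;
}
}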
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/PSVMModelV3.java
|
package hex.schemas;
import hex.psvm.PSVMModel;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
public class PSVMModelV3 extends ModelSchemaV3<PSVMModel, PSVMModelV3, PSVMModel.PSVMParameters, PSVMV3.PSVMParametersV3, PSVMModel.PSVMModelOutput, PSVMModelV3.PSVMModelOutputV3> {
public static final class PSVMModelOutputV3 extends ModelOutputSchemaV3<PSVMModel.PSVMModelOutput, PSVMModelOutputV3> {
@API(help = "Total number of support vectors")
public long svs_count;
@API(help = "Number of bounded support vectors")
public long bsv_count;
@API(help = "rho")
public double rho;
@API(help = "Weights of support vectors")
public KeyV3.FrameKeyV3 alpha_key;
} // PSVMModelOutputV3
//==========================
// Custom adapters go here
public PSVMV3.PSVMParametersV3 createParametersSchema() { return new PSVMV3.PSVMParametersV3(); }
public PSVMModelOutputV3 createOutputSchema() { return new PSVMModelOutputV3(); }
// Version&Schema-specific filling into the impl
@Override public PSVMModel createImpl() {
PSVMModel.PSVMParameters parms = parameters.createImpl();
return new PSVMModel( model_id.key(), parms, null);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/PSVMV3.java
|
package hex.schemas;
import hex.genmodel.algos.psvm.KernelType;
import hex.psvm.PSVM;
import hex.psvm.PSVMModel;
import water.api.API;
import water.api.schemas3.ModelParametersSchemaV3;
public class PSVMV3 extends ModelBuilderSchema<PSVM, PSVMV3, PSVMV3.PSVMParametersV3> {
public static final class PSVMParametersV3 extends ModelParametersSchemaV3<PSVMModel.PSVMParameters, PSVMParametersV3> {
public static final String[] fields = new String[]{
"model_id",
"training_frame",
"validation_frame",
"response_column",
"ignored_columns",
"ignore_const_cols",
"hyper_param",
"kernel_type",
"gamma",
"rank_ratio",
"positive_weight",
"negative_weight",
"disable_training_metrics",
"sv_threshold",
"fact_threshold",
"feasible_threshold",
"surrogate_gap_threshold",
"mu_factor",
"max_iterations",
"seed",
};
@API(help = "Penalty parameter C of the error term", gridable = true)
public double hyper_param;
@API(help = "Type of used kernel", values = {"gaussian"})
public KernelType kernel_type;
@API(help = "Coefficient of the kernel (currently RBF gamma for gaussian kernel, -1 means 1/#features)", gridable = true)
public double gamma;
@API(help = "Desired rank of the ICF matrix expressed as an ration of number of input rows (-1 means use sqrt(#rows)).", gridable = true)
public double rank_ratio;
@API(help = "Weight of positive (+1) class of observations")
public double positive_weight;
@API(help = "Weight of positive (-1) class of observations")
public double negative_weight;
@API(help = "Disable calculating training metrics (expensive on large datasets)")
public boolean disable_training_metrics;
@API(help = "Threshold for accepting a candidate observation into the set of support vectors", level = API.Level.secondary)
public double sv_threshold;
@API(help = "Maximum number of iteration of the algorithm", level = API.Level.secondary)
public int max_iterations;
@API(help = "Convergence threshold of the Incomplete Cholesky Factorization (ICF)", level = API.Level.expert)
public double fact_threshold;
@API(help = "Convergence threshold for primal-dual residuals in the IPM iteration", level = API.Level.expert)
public double feasible_threshold;
@API(help = "Feasibility criterion of the surrogate duality gap (eta)", level = API.Level.expert)
public double surrogate_gap_threshold;
@API(help = "Increasing factor mu", level = API.Level.expert)
public double mu_factor;
@API(help = "Seed for pseudo random number generator (if applicable)", gridable = true)
public long seed;
}
}
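// Minimal usage sketch: the core numeric knobs of the parameter schema above. The -1 sentinels
// follow the help text; every value here is illustrative only.
class PSVMParametersV3Example {
static PSVMV3.PSVMParametersV3 example() {
PSVMV3.PSVMParametersV3 p = new PSVMV3.PSVMParametersV3();
p.hyper_param = 1.0; // penalty parameter C of the error term
p.gamma = -1; // -1 => 1/#features, per the help text above
p.rank_ratio = -1; // -1 => sqrt(#rows), per the help text above
p.max_iterations = 200;
p.disable_training_metrics = true; // skip expensive training metrics on large data
return p;
}
}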
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/RuleFitModelV3.java
|
package hex.schemas;
import hex.rulefit.RuleFitModel;
import water.api.API;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
import water.api.schemas3.TwoDimTableV3;
public class RuleFitModelV3 extends ModelSchemaV3<RuleFitModel, RuleFitModelV3, RuleFitModel.RuleFitParameters, RuleFitV3.RuleFitParametersV3,
RuleFitModel.RuleFitOutput, RuleFitModelV3.RuleFitModelOutputV3> {
public static final class RuleFitModelOutputV3 extends ModelOutputSchemaV3<RuleFitModel.RuleFitOutput, RuleFitModelOutputV3> {
// Output
@API(help = "The estimated coefficients without language representations for each of the significant baselearners.")
public TwoDimTableV3 rule_importance;
@API(help = "Intercept.")
public double[] intercept;
}
public RuleFitV3.RuleFitParametersV3 createParametersSchema() { return new RuleFitV3.RuleFitParametersV3();}
public RuleFitModelOutputV3 createOutputSchema() { return new RuleFitModelOutputV3();}
@Override
public RuleFitModel createImpl() {
RuleFitModel.RuleFitParameters parms = parameters.createImpl();
return new RuleFitModel(model_id.key(), parms, null, null, null);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/RuleFitV3.java
|
package hex.schemas;
import hex.rulefit.RuleFit;
import hex.rulefit.RuleFitModel;
import water.api.API;
import water.api.schemas3.ModelParametersSchemaV3;
public class RuleFitV3 extends ModelBuilderSchema<RuleFit, RuleFitV3, RuleFitV3.RuleFitParametersV3> {
public static final class RuleFitParametersV3 extends ModelParametersSchemaV3<RuleFitModel.RuleFitParameters, RuleFitParametersV3> {
public static final String[] fields = new String[] {
"model_id",
"training_frame",
"validation_frame",
"seed",
"response_column",
"ignored_columns",
"algorithm",
"min_rule_length",
"max_rule_length",
"max_num_rules",
"model_type",
"weights_column",
"distribution",
"rule_generation_ntrees",
"auc_type",
"remove_duplicates",
"lambda",
"max_categorical_levels",
};
@API(help = "Seed for pseudo random number generator (if applicable).", gridable = true)
public long seed;
// Input fields
@API(help = "The algorithm to use to generate rules.",
values = {"AUTO", "DRF", "GBM"})
public RuleFitModel.Algorithm algorithm;
@API(help = "Minimum length of rules. Defaults to 3.")
public int min_rule_length;
@API(help = "Maximum length of rules. Defaults to 3.")
public int max_rule_length;
@API(help = "The maximum number of rules to return. defaults to -1 which means the number of rules is selected \n" +
"by diminishing returns in model deviance.")
public int max_num_rules;
@API(help = "Specifies type of base learners in the ensemble.", values = {"RULES_AND_LINEAR", "RULES", "LINEAR"})
public RuleFitModel.ModelType model_type;
@API(help = "Specifies the number of trees to build in the tree model. Defaults to 50.")
public int rule_generation_ntrees;
@API(help = "Whether to remove rules which are identical to an earlier rule. Defaults to true." )
public boolean remove_duplicates;
@API(help = "Lambda for LASSO regressor.")
public double[] lambda;
}
}
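// Minimal usage sketch: the rule-generation knobs of the parameter schema above. Values mirror the
// documented defaults and are illustrative only.
class RuleFitParametersV3Example {
static RuleFitV3.RuleFitParametersV3 example() {
RuleFitV3.RuleFitParametersV3 p = new RuleFitV3.RuleFitParametersV3();
p.min_rule_length = 3; // shortest rules (tree depth) to generate
p.max_rule_length = 3; // longest rules (tree depth) to generate
p.max_num_rules = -1; // -1 => number of rules chosen by diminishing returns in deviance
p.rule_generation_ntrees = 50; // trees used by the rule-generating tree model
p.remove_duplicates = true; // drop rules identical to an earlier rule
return p;
}
}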
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/SVDModelV99.java
|
package hex.schemas;
import hex.svd.SVDModel;
import water.api.*;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
public class SVDModelV99 extends ModelSchemaV3<SVDModel, SVDModelV99, SVDModel.SVDParameters, SVDV99.SVDParametersV99, SVDModel.SVDOutput, SVDModelV99.SVDModelOutputV99> {
public static final class SVDModelOutputV99 extends ModelOutputSchemaV3<SVDModel.SVDOutput, SVDModelOutputV99> {
// Output fields; input fields are in the parameters list
@API(help = "Frame key of right singular vectors")
public KeyV3.FrameKeyV3 v_key;
@API(help = "Singular values")
public double[] d;
@API(help = "Frame key of left singular vectors")
public KeyV3.FrameKeyV3 u_key;
}
// TODO: I think we can implement the following two in ModelSchemaV3, using reflection on the type parameters.
public SVDV99.SVDParametersV99 createParametersSchema() { return new SVDV99.SVDParametersV99(); }
public SVDModelOutputV99 createOutputSchema() { return new SVDModelOutputV99(); }
// Version&Schema-specific filling into the impl
@Override public SVDModel createImpl() {
SVDModel.SVDParameters parms = parameters.createImpl();
return new SVDModel( model_id.key(), parms, null );
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/SVDV99.java
|
package hex.schemas;
import hex.DataInfo;
import hex.svd.SVD;
import hex.svd.SVDModel.SVDParameters;
import water.api.API;
import water.api.schemas3.ModelParametersSchemaV3;
public class SVDV99 extends ModelBuilderSchema<SVD,SVDV99,SVDV99.SVDParametersV99> {
public static final class SVDParametersV99 extends ModelParametersSchemaV3<SVDParameters, SVDParametersV99> {
static public String[] fields = new String[] {
"model_id",
"training_frame",
"validation_frame",
"ignored_columns",
"ignore_const_cols",
"score_each_iteration",
"transform",
"svd_method",
"nv",
"max_iterations",
"seed",
"keep_u",
"u_name",
"use_all_factor_levels",
"max_runtime_secs",
"export_checkpoints_dir"
};
@API(help = "Transformation of training data", values = { "NONE", "STANDARDIZE", "NORMALIZE", "DEMEAN", "DESCALE" }) // TODO: pull out of categorical class
public DataInfo.TransformType transform;
@API(help = "Method for computing SVD (Caution: Randomized is currently experimental and unstable)", values = { "GramSVD", "Power", "Randomized" }) // TODO: pull out of enum class
public SVDParameters.Method svd_method;
@API(help = "Number of right singular vectors")
public int nv;
@API(help = "Maximum iterations")
public int max_iterations;
@API(help = "RNG seed for k-means++ initialization")
public long seed;
@API(help = "Save left singular vectors?")
public boolean keep_u;
@API(help = "Frame key to save left singular vectors")
public String u_name;
@API(help = "Whether first factor level is included in each categorical expansion", direction = API.Direction.INOUT)
public boolean use_all_factor_levels;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/SharedTreeModelV3.java
|
package hex.schemas;
import hex.tree.SharedTreeModel;
import water.api.API;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
import water.api.schemas3.TwoDimTableV3;
public class SharedTreeModelV3<M extends SharedTreeModel<M, P, O>,
S extends SharedTreeModelV3<M, S, P, PS, O, OS>,
P extends SharedTreeModel.SharedTreeParameters,
PS extends SharedTreeV3.SharedTreeParametersV3<P, PS>,
O extends SharedTreeModel.SharedTreeOutput,
OS extends SharedTreeModelV3.SharedTreeModelOutputV3<O,OS>>
extends ModelSchemaV3<M, S, P, PS, O, OS> {
public static class SharedTreeModelOutputV3<O extends SharedTreeModel.SharedTreeOutput, SO extends SharedTreeModelOutputV3<O,SO>> extends ModelOutputSchemaV3<O, SO> {
@API(help="Variable Importances", direction=API.Direction.OUTPUT, level = API.Level.secondary)
TwoDimTableV3 variable_importances;
@API(help="The Intercept term, the initial model function value to which trees make adjustments", direction=API.Direction.OUTPUT)
double init_f;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/SharedTreeV3.java
|
package hex.schemas;
import hex.tree.CalibrationHelper;
import hex.tree.SharedTree;
import hex.tree.SharedTreeModel.SharedTreeParameters;
import water.api.API;
import water.api.schemas3.KeyV3.FrameKeyV3;
import water.api.schemas3.ModelParametersSchemaV3;
public class SharedTreeV3<B extends SharedTree, S extends SharedTreeV3<B,S,P>, P extends SharedTreeV3.SharedTreeParametersV3> extends ModelBuilderSchema<B,S,P> {
public static class SharedTreeParametersV3<P extends SharedTreeParameters, S extends SharedTreeParametersV3<P, S>> extends ModelParametersSchemaV3<P, S> {
/*Imbalanced Classes*/
/**
* For imbalanced data, balance training data class counts via
* over/under-sampling. This can result in improved predictive accuracy.
*/
@API(help = "Balance training data class counts via over/under-sampling (for imbalanced data).", level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true)
public boolean balance_classes;
/**
* Desired over/under-sampling ratios per class (lexicographic order).
* Only when balance_classes is enabled.
* If not specified, they will be automatically computed to obtain class balance during training.
*/
@API(help = "Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires balance_classes.", level = API.Level.expert, direction = API.Direction.INOUT, gridable = true)
public float[] class_sampling_factors;
/**
* When classes are balanced, limit the resulting dataset size to the
* specified multiple of the original dataset size.
*/
@API(help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0). Requires balance_classes.", /* dmin=1e-3, */ level = API.Level.expert, direction = API.Direction.INOUT, gridable = true)
public float max_after_balance_size;
/** For classification models, the maximum size (in terms of classes) of
* the confusion matrix for it to be printed. This option is meant to
* avoid printing extremely large confusion matrices. */
@API(help = "[Deprecated] Maximum size (# classes) for confusion matrices to be printed in the Logs", level = API.Level.secondary, direction = API.Direction.INOUT)
public int max_confusion_matrix_size;
@API(help="Number of trees.", gridable = true)
public int ntrees;
@API(help="Maximum tree depth (0 for unlimited).", gridable = true)
public int max_depth;
@API(help="Fewest allowed (weighted) observations in a leaf.", gridable = true)
public double min_rows;
@API(help="For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the best point", gridable = true)
public int nbins;
@API(help = "For numerical columns (real/int), build a histogram of (at most) this many bins at the root level, then decrease by factor of two per level", level = API.Level.secondary, gridable = true)
public int nbins_top_level;
@API(help="For categorical columns (factors), build a histogram of this many bins, then split at the best point. Higher values can lead to more overfitting.", level = API.Level.secondary, gridable = true)
public int nbins_cats;
@API(help="r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds, stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making trees when the R^2 metric equals or exceeds this", level = API.Level.secondary, gridable = true)
public double r2_stopping;
@API(help = "Seed for pseudo random number generator (if applicable)", gridable = true)
public long seed;
@API(help="Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.", level = API.Level.expert, gridable = false)
public boolean build_tree_one_node;
@API(help = "A list of row sample rates per class (relative fraction for each class, from 0.0 to 1.0), for each tree", level = API.Level.expert, gridable = true)
public double[] sample_rate_per_class;
@API(help = "Column sample rate per tree (from 0.0 to 1.0)", level = API.Level.secondary, gridable = true)
public double col_sample_rate_per_tree;
@API(help = "Relative change of the column sampling rate for every level (must be > 0.0 and <= 2.0)", level = API.Level.expert, gridable = true)
public double col_sample_rate_change_per_level;
@API(help="Score the model after every so many trees. Disabled if set to 0.", level = API.Level.secondary, gridable = false)
public int score_tree_interval;
@API(help="Minimum relative improvement in squared error reduction for a split to happen", level = API.Level.secondary, gridable = true)
public double min_split_improvement;
@API(help="What type of histogram to use for finding optimal split points", values = { "AUTO", "UniformAdaptive", "Random", "QuantilesGlobal", "RoundRobin", "UniformRobust"}, level = API.Level.secondary, gridable = true)
public SharedTreeParameters.HistogramType histogram_type;
@API(help="Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class probabilities. Calibration can provide more accurate estimates of class probabilities.", level = API.Level.expert)
public boolean calibrate_model;
@API(help="Data for model calibration", level = API.Level.expert, direction = API.Direction.INOUT)
public FrameKeyV3 calibration_frame;
@API(help="Calibration method to use", values = {"AUTO", "PlattScaling", "IsotonicRegression"}, level = API.Level.expert, direction = API.Direction.INOUT)
public CalibrationHelper.CalibrationMethod calibration_method;
@API(help="Check if response column is constant. If enabled, then an exception is thrown if the response column is a constant value." +
"If disabled, then model will train regardless of the response column being a constant value or not.", level = API.Level.expert, direction = API.Direction.INOUT)
public boolean check_constant_response;
@API(help="Create checkpoints into defined directory while training process is still running. In case of cluster shutdown, this checkpoint can be used to restart training.", level = API.Level.expert, gridable = false)
public String in_training_checkpoints_dir;
@API(help="Checkpoint the model after every so many trees. Parameter is used only when in_training_checkpoints_dir is defined", level = API.Level.expert, gridable = false)
public int in_training_checkpoints_tree_interval;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/StackedEnsembleModelV99.java
|
package hex.schemas;
import hex.ensemble.StackedEnsembleModel;
import water.api.API;
import water.api.EnumValuesProvider;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
public class StackedEnsembleModelV99 extends ModelSchemaV3<StackedEnsembleModel, StackedEnsembleModelV99, StackedEnsembleModel.StackedEnsembleParameters, StackedEnsembleV99.StackedEnsembleParametersV99, StackedEnsembleModel.StackedEnsembleOutput, StackedEnsembleModelV99.StackedEnsembleModelOutputV99> {
public static final class StackedEnsembleModelOutputV99 extends ModelOutputSchemaV3<StackedEnsembleModel.StackedEnsembleOutput, StackedEnsembleModelOutputV99> {
@API(help="Model which combines the base_models into a stacked ensemble.", direction = API.Direction.OUTPUT)
KeyV3.ModelKeyV3 metalearner;
@API(help="Level one frame used for metalearner training.", direction = API.Direction.OUTPUT)
KeyV3.FrameKeyV3 levelone_frame_id;
@API(help="The stacking strategy used for training.", valuesProvider = StackingStrategyProvider.class, direction = API.Direction.OUTPUT)
StackedEnsembleModel.StackingStrategy stacking_strategy;
}
public static class StackingStrategyProvider extends EnumValuesProvider<StackedEnsembleModel.StackingStrategy> {
public StackingStrategyProvider() {
super(StackedEnsembleModel.StackingStrategy.class);
}
}
public StackedEnsembleV99.StackedEnsembleParametersV99 createParametersSchema() { return new StackedEnsembleV99.StackedEnsembleParametersV99(); }
public StackedEnsembleModelOutputV99 createOutputSchema() { return new StackedEnsembleModelOutputV99(); }
@Override public StackedEnsembleModel createImpl() {
StackedEnsembleV99.StackedEnsembleParametersV99 p = this.parameters;
StackedEnsembleModel.StackedEnsembleParameters parms = p.createImpl();
return new StackedEnsembleModel(model_id.key(), parms, new StackedEnsembleModel.StackedEnsembleOutput());
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/StackedEnsembleV99.java
|
package hex.schemas;
import com.google.gson.reflect.TypeToken;
import hex.ensemble.Metalearner.Algorithm;
import hex.ensemble.Metalearners;
import hex.ensemble.StackedEnsemble;
import hex.ensemble.StackedEnsembleModel;
import hex.naivebayes.NaiveBayesModel;
import hex.tree.gbm.GBMModel;
import hex.tree.drf.DRFModel;
import hex.deeplearning.DeepLearningModel;
import hex.glm.GLMModel;
import hex.Model;
import water.DKV;
import water.Key;
import water.Value;
import water.api.API;
import water.api.EnumValuesProvider;
import water.api.Schema;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelParametersSchemaV3;
import water.api.schemas3.FrameV3;
import com.google.gson.Gson;
import water.fvec.Frame;
import java.lang.reflect.InvocationTargetException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
public class StackedEnsembleV99 extends ModelBuilderSchema<StackedEnsemble,StackedEnsembleV99,StackedEnsembleV99.StackedEnsembleParametersV99> {
public static final class StackedEnsembleParametersV99 extends ModelParametersSchemaV3<StackedEnsembleModel.StackedEnsembleParameters, StackedEnsembleParametersV99> {
static public String[] fields = new String[] {
"model_id",
"training_frame",
"response_column",
"validation_frame",
"blending_frame",
"base_models",
"metalearner_algorithm",
"metalearner_nfolds",
"metalearner_fold_assignment",
"metalearner_fold_column",
"metalearner_params",
"metalearner_transform",
"max_runtime_secs",
"weights_column",
"offset_column",
"custom_metric_func",
"seed",
"score_training_samples",
"keep_levelone_frame",
"export_checkpoints_dir",
"auc_type",
"gainslift_bins",
};
public static class AlgorithmValuesProvider extends EnumValuesProvider<Algorithm> {
public AlgorithmValuesProvider() {
super(Algorithm.class);
}
}
// Base models
@API(level = API.Level.critical, direction = API.Direction.INOUT,
help = "List of models or grids (or their ids) to ensemble/stack together. Grids are expanded to individual models. "
+ "If not using blending frame, then models must have been cross-validated using nfolds > 1, and folds must be identical across models.", required = true)
public KeyV3 base_models[];
// Metalearner algorithm
@API(level = API.Level.critical, direction = API.Direction.INOUT,
valuesProvider = AlgorithmValuesProvider.class,
help = "Type of algorithm to use as the metalearner. Options include "
+ "'AUTO' (GLM with non negative weights; if validation_frame is present, a lambda search is performed), "
+ "'deeplearning' (Deep Learning with default parameters), "
+ "'drf' (Random Forest with default parameters), "
+ "'gbm' (GBM with default parameters), "
+ "'glm' (GLM with default parameters), "
+ "'naivebayes' (NaiveBayes with default parameters), "
+ "or 'xgboost' (if available, XGBoost with default parameters)."
)
public Algorithm metalearner_algorithm;
// For ensemble metalearner cross-validation
@API(level = API.Level.critical, direction = API.Direction.INOUT,
help = "Number of folds for K-fold cross-validation of the metalearner algorithm (0 to disable or >= 2).")
public int metalearner_nfolds;
// For ensemble metalearner cross-validation
@API(level = API.Level.secondary, direction = API.Direction.INOUT,
values = {"AUTO", "Random", "Modulo", "Stratified"},
help = "Cross-validation fold assignment scheme for metalearner cross-validation. Defaults to AUTO (which is currently set to Random)." +
" The 'Stratified' option will stratify the folds based on the response variable, for classification problems.")
public Model.Parameters.FoldAssignmentScheme metalearner_fold_assignment;
// For ensemble metalearner cross-validation
@API(level = API.Level.secondary, direction = API.Direction.INOUT,
is_member_of_frames = {"training_frame"},
//is_mutually_exclusive_with = {"ignored_columns", "response_column", "weights_column", "offset_column"},
is_mutually_exclusive_with = {"ignored_columns", "response_column"},
help = "Column with cross-validation fold index assignment per observation for cross-validation of the metalearner.")
public FrameV3.ColSpecifierV3 metalearner_fold_column;
@API(level = API.Level.critical, direction = API.Direction.INOUT,
help = "Transformation used for the level one frame.",
values = {"NONE", "Logit"}
)
public StackedEnsembleModel.StackedEnsembleParameters.MetalearnerTransform metalearner_transform;
@API(level = API.Level.secondary,
help = "Keep level one frame used for metalearner training.")
public boolean keep_levelone_frame;
@API(help = "Parameters for metalearner algorithm", direction = API.Direction.INOUT)
public String metalearner_params;
@API(help="Frame used to compute the predictions that serve as the training frame for the metalearner (triggers blending mode if provided)", direction = API.Direction.INOUT)
public KeyV3.FrameKeyV3 blending_frame;
@API(help = "Seed for random numbers; passed through to the metalearner algorithm. Defaults to -1 (time-based random number)", gridable = true)
public long seed;
@API(help = "Specify the number of training set samples for scoring. The value must be >= 0. To use all training samples, enter 0.",
level = API.Level.secondary,
direction = API.Direction.INOUT)
public long score_training_samples;
@Override
public StackedEnsembleParametersV99 fillFromImpl(StackedEnsembleModel.StackedEnsembleParameters impl) {
super.fillFromImpl(impl);
if (impl._blending != null) {
Value v = DKV.get(impl._blending);
if (v != null) {
blending_frame = new KeyV3.FrameKeyV3(((Frame) v.get())._key);
}
}
return this;
}
public StackedEnsembleModel.StackedEnsembleParameters fillImpl(StackedEnsembleModel.StackedEnsembleParameters impl) {
super.fillImpl(impl);
impl._blending = (this.blending_frame == null) ? null : Key.<Frame>make(this.blending_frame.name);
if (metalearner_params != null && !metalearner_params.isEmpty()) {
Properties p = new Properties();
HashMap<String, String[]> map = new Gson().fromJson(metalearner_params, new TypeToken<HashMap<String, String[]>>() {
}.getType());
for (Map.Entry<String, String[]> param : map.entrySet()) {
String[] paramVal = param.getValue();
if (paramVal.length == 1) {
p.setProperty(param.getKey(), paramVal[0]);
} else {
p.setProperty(param.getKey(), Arrays.toString(paramVal));
}
}
Schema paramsSchema = Metalearners.createParametersSchema(metalearner_algorithm.name());
Model.Parameters params = Metalearners.createParameters(metalearner_algorithm.name());
paramsSchema.init_meta();
impl._metalearner_parameters = (Model.Parameters) paramsSchema
.fillFromImpl(params)
.fillFromParms(p, true)
.createAndFillImpl();
super.fillImpl(impl);
}
return impl;
}
}
}
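// Note on metalearner_params: fillImpl above deserializes the string with Gson into a
// HashMap<String, String[]>, so each hyper-parameter value is expected to be a JSON array of
// strings; single-element arrays become plain properties, longer arrays are written via
// Arrays.toString. A minimal sketch of a conforming payload (parameter names are illustrative and
// must match the chosen metalearner's schema):
class StackedEnsembleMetalearnerParamsExample {
static final String EXAMPLE_METALEARNER_PARAMS = "{\"ntrees\":[\"100\"],\"max_depth\":[\"3\"]}";
}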
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/TreeStatsV3.java
|
package hex.schemas;
import hex.tree.TreeStats;
import water.api.API;
import water.api.schemas3.SchemaV3;
public class TreeStatsV3 extends SchemaV3<TreeStats, TreeStatsV3> {
// TODO: no CamelCase
@API(help="minDepth")
public int min_depth;
@API(help="maxDepth")
public int max_depth;
@API(help="meanDepth")
public float mean_depth;
@API(help="minLeaves")
public int min_leaves;
@API(help="maxLeaves")
public int max_leaves;
@API(help="meanLeaves")
public float mean_leaves;
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/TreeV3.java
|
package hex.schemas;
import hex.tree.TreeHandler;
import water.Iced;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas3.SchemaV3;
public class TreeV3 extends SchemaV3<Iced, TreeV3> {
@API(required = true, direction = API.Direction.INPUT, help = "Key of the model the desired tree belongs to",level = API.Level.critical)
public KeyV3.ModelKeyV3 model;
@API(required = true, direction = API.Direction.INPUT, help = "Index of the tree in the model.", level = API.Level.critical)
public int tree_number;
@API(direction = API.Direction.INOUT, help = "Name of the class of the tree. Ignored for regression and binomial.", level = API.Level.critical)
public String tree_class;
@API(direction = API.Direction.INPUT, help = "Whether to generate plain language rules.", level = API.Level.critical, values = {"AUTO", "TRUE", "FALSE"})
public TreeHandler.PlainLanguageRules plain_language_rules;
@API(direction = API.Direction.OUTPUT, help = "Left child nodes in the tree")
public int[] left_children;
@API(direction = API.Direction.OUTPUT, help = "Right child nodes in the tree")
public int[] right_children;
@API(direction = API.Direction.OUTPUT, help = "Number of the root node")
public int root_node_id;
@API(direction = API.Direction.OUTPUT, help = "Split thresholds (numeric and possibly categorical columns)")
public float[] thresholds;
@API(direction = API.Direction.OUTPUT, help = "Names of the column of the split")
public String[] features;
@API(direction = API.Direction.OUTPUT, help = "Which way NA Splits (LEFT, RIGHT, NA)")
public String[] nas;
@API(direction = API.Direction.OUTPUT, help = "Description of the tree's nodes")
public String[] descriptions;
@API(direction = API.Direction.OUTPUT, help = "Categorical levels on the edge from the parent node")
public int[][] levels;
@API(direction = API.Direction.OUTPUT, help = "Prediction values on terminal nodes")
public float[] predictions;
@API(direction = API.Direction.OUTPUT, help = "Plain language rules representation of a trained decision tree")
public String tree_decision_path;
@API(direction = API.Direction.OUTPUT, help = "Plain language rules that were used in a particular prediction")
public String[] decision_paths;
}
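// Minimal traversal sketch: the parallel arrays above describe the tree node by node; a child
// index of -1 is assumed here to mark a missing child, so a node with neither child is a leaf.
class TreeV3TraversalExample {
static int countLeaves(TreeV3 tree) {
int leaves = 0;
for (int i = 0; i < tree.left_children.length; i++) {
if (tree.left_children[i] == -1 && tree.right_children[i] == -1) leaves++; // terminal node
}
return leaves;
}
}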
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/UpliftDRFModelV3.java
|
package hex.schemas;
import hex.tree.uplift.UpliftDRFModel;
import water.api.API;
public class UpliftDRFModelV3 extends SharedTreeModelV3<UpliftDRFModel,
UpliftDRFModelV3,
UpliftDRFModel.UpliftDRFParameters,
UpliftDRFV3.UpliftDRFParametersV3,
UpliftDRFModel.UpliftDRFOutput,
UpliftDRFModelV3.UpliftDRFModelOutputV3> {
public static final class UpliftDRFModelOutputV3 extends SharedTreeModelV3.SharedTreeModelOutputV3<UpliftDRFModel.UpliftDRFOutput, UpliftDRFModelOutputV3> {
@API(help="Default thresholds to calculate AUUC metric. If validation is enabled, thresholds from validation metrics is saved here. Otherwise thresholds are from training metrics.")
public double[] default_auuc_thresholds;
@Override public UpliftDRFModelV3.UpliftDRFModelOutputV3 fillFromImpl(UpliftDRFModel.UpliftDRFOutput impl) {
UpliftDRFModelV3.UpliftDRFModelOutputV3 uov3 = super.fillFromImpl(impl);
uov3.default_auuc_thresholds = impl._defaultAuucThresholds;
return uov3;
}
}
public UpliftDRFV3.UpliftDRFParametersV3 createParametersSchema() { return new UpliftDRFV3.UpliftDRFParametersV3(); }
public UpliftDRFModelOutputV3 createOutputSchema() { return new UpliftDRFModelOutputV3(); }
//==========================
// Custom adapters go here
// Version&Schema-specific filling into the impl
@Override public UpliftDRFModel createImpl() {
UpliftDRFV3.UpliftDRFParametersV3 p = this.parameters;
UpliftDRFModel.UpliftDRFParameters parms = p.createImpl();
return new UpliftDRFModel( model_id.key(), parms, new UpliftDRFModel.UpliftDRFOutput(null) );
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/UpliftDRFV3.java
|
package hex.schemas;
import hex.AUUC;
import hex.tree.uplift.UpliftDRF;
import hex.tree.uplift.UpliftDRFModel.UpliftDRFParameters;
import water.api.API;
public class UpliftDRFV3 extends SharedTreeV3<UpliftDRF, UpliftDRFV3, UpliftDRFV3.UpliftDRFParametersV3> {
public static final class UpliftDRFParametersV3 extends SharedTreeV3.SharedTreeParametersV3<UpliftDRFParameters, UpliftDRFParametersV3> {
static public String[] fields = new String[]{
"model_id",
"training_frame",
"validation_frame",
"score_each_iteration",
"score_tree_interval",
"response_column",
"ignored_columns",
"ignore_const_cols",
"ntrees",
"max_depth",
"min_rows",
"nbins",
"nbins_top_level",
"nbins_cats",
"max_runtime_secs",
"seed",
"mtries",
"sample_rate",
"sample_rate_per_class",
"col_sample_rate_change_per_level",
"col_sample_rate_per_tree",
"histogram_type",
"categorical_encoding",
"distribution",
"check_constant_response",
"custom_metric_func",
"treatment_column",
"uplift_metric",
"auuc_type",
"auuc_nbins",
"stopping_rounds",
"stopping_metric",
"stopping_tolerance"
};
// Input fields
@API(help = "Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p} for classification and p/3 for regression (where p is the # of predictors", gridable = true)
public int mtries;
@API(help = "Row sample rate per tree (from 0.0 to 1.0)", gridable = true)
public double sample_rate;
@API(help = "Define the column which will be used for computing uplift gain to select best split for a tree. The column has to divide the dataset into treatment (value 1) and control (value 0) groups.", gridable = false, level = API.Level.secondary, required = true,
is_member_of_frames = {"training_frame", "validation_frame"},
is_mutually_exclusive_with = {"ignored_columns","response_column", "weights_column"})
public String treatment_column;
@API(help = "Divergence metric used to find best split when building an uplift tree.", level = API.Level.secondary, values = { "AUTO", "KL", "Euclidean", "ChiSquared"}, gridable = true)
public UpliftDRFParameters.UpliftMetricType uplift_metric;
@API(help = "Metric used to calculate Area Under Uplift Curve.", level = API.Level.secondary, values = { "AUTO", "qini", "lift", "gain"})
public AUUC.AUUCType auuc_type;
@API(help = "Number of bins to calculate Area Under Uplift Curve.", level = API.Level.secondary)
public int auuc_nbins;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/Word2VecModelV3.java
|
package hex.schemas;
import hex.word2vec.Word2VecModel;
import water.api.schemas3.ModelOutputSchemaV3;
import water.api.schemas3.ModelSchemaV3;
import water.api.*;
public class Word2VecModelV3 extends ModelSchemaV3<Word2VecModel, Word2VecModelV3, Word2VecModel.Word2VecParameters, Word2VecV3.Word2VecParametersV3, Word2VecModel.Word2VecOutput, Word2VecModelV3.Word2VecModelOutputV3> {
public static final class Word2VecModelOutputV3 extends ModelOutputSchemaV3<Word2VecModel.Word2VecOutput, Word2VecModelOutputV3> {
@API(help = "Number of epochs executed")
public int epochs;
}
public Word2VecV3.Word2VecParametersV3 createParametersSchema() { return new Word2VecV3.Word2VecParametersV3(); }
public Word2VecModelOutputV3 createOutputSchema() { return new Word2VecModelOutputV3(); }
// Version&Schema-specific filling into the impl
@Override public Word2VecModel createImpl() {
Word2VecModel.Word2VecParameters parms = parameters.createImpl();
return new Word2VecModel( model_id.key(), parms, null);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/Word2VecSynonymsV3.java
|
package hex.schemas;
import water.Iced;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas3.SchemaV3;
public class Word2VecSynonymsV3 extends SchemaV3<Iced, Word2VecSynonymsV3> {
@API(help="Source word2vec Model", required = true, direction = API.Direction.INPUT)
public KeyV3.ModelKeyV3 model;
@API(help="Target word to find synonyms for", required = true, direction = API.Direction.INPUT)
public String word;
@API(help="Number of synonyms", required = true, direction = API.Direction.INPUT)
public int count;
@API(help="Synonymous words")
public String[] synonyms;
@API(help="Similarity scores")
public double[] scores;
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/Word2VecTransformV3.java
|
package hex.schemas;
import hex.word2vec.Word2VecModel;
import water.Iced;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas3.SchemaV3;
public class Word2VecTransformV3 extends SchemaV3<Iced, Word2VecTransformV3> {
@API(help="Source word2vec Model", required = true, direction = API.Direction.INPUT)
public KeyV3.ModelKeyV3 model;
@API(help = "Words Frame", required = true, direction = API.Direction.INPUT)
public KeyV3.FrameKeyV3 words_frame;
@API(help="Method of aggregating word-vector sequences into a single vector", values = {"NONE", "AVERAGE"},
direction = API.Direction.INPUT)
public Word2VecModel.AggregateMethod aggregate_method;
@API(help = "Word Vectors Frame", direction = API.Direction.OUTPUT)
public KeyV3.FrameKeyV3 vectors_frame;
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/schemas/Word2VecV3.java
|
package hex.schemas;
import hex.word2vec.Word2Vec;
import hex.word2vec.Word2VecModel.Word2VecParameters;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ModelParametersSchemaV3;
public class Word2VecV3 extends ModelBuilderSchema<Word2Vec,Word2VecV3,Word2VecV3.Word2VecParametersV3> {
public static final class Word2VecParametersV3 extends ModelParametersSchemaV3<Word2VecParameters, Word2VecParametersV3> {
public static String[] fields = new String[] {
"model_id",
"training_frame",
"min_word_freq",
"word_model",
"norm_model",
"vec_size",
"window_size",
"sent_sample_rate",
"init_learning_rate",
"epochs",
"pre_trained",
"max_runtime_secs",
"export_checkpoints_dir"
};
@API(help="Set size of word vectors")
public int vec_size;
@API(help="Set max skip length between words")
public int window_size;
@API(help="Set threshold for occurrence of words. Those that appear with higher frequency in the training data\n" +
"\t\twill be randomly down-sampled; useful range is (0, 1e-5)")
public float sent_sample_rate;
@API(help="Use Hierarchical Softmax", values = {"HSM"})
public Word2Vec.NormModel norm_model;
@API(help="Number of training iterations to run")
public int epochs;
@API(help="This will discard words that appear less than <int> times")
public int min_word_freq;
@API(help="Set the starting learning rate")
public float init_learning_rate;
@API(help="The word model to use (SkipGram or CBOW)", values = {"SkipGram", "CBOW"})
public Word2Vec.WordModel word_model;
@API(help="Id of a data frame that contains a pre-trained (external) word2vec model")
public KeyV3.FrameKeyV3 pre_trained;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/splitframe/ShuffleSplitFrame.java
|
package hex.splitframe;
import java.util.Random;
import water.*;
import water.fvec.*;
/** Frame splitter function to divide given frame into multiple partitions
* based on given ratios.
*
 * <p>The task creates one output frame per ratio (<code>ratios.length</code> frames in total),
 * each containing the requested fraction of rows from the source dataset.</p>
*
* Rows are selected at random for each split, but remain ordered.
*/
public class ShuffleSplitFrame {
public static Frame[] shuffleSplitFrame( Frame fr, Key<Frame>[] keys, final double ratios[], final long seed ) {
// Sanity check the ratios
assert keys.length == ratios.length;
double sum = ratios[0];
for( int i = 1; i<ratios.length; i++ ) {
sum += ratios[i];
ratios[i] = sum;
}
assert water.util.MathUtils.equalsWithinOneSmallUlp(sum,1.0);
byte[] types = fr.types();
final int ncols = fr.numCols();
byte[] alltypes = new byte[ncols*ratios.length];
for( int i = 0; i<ratios.length; i++ )
System.arraycopy(types,0,alltypes,i*ncols,ncols);
// Do the split, into ratios.length groupings of NewChunks
MRTask mr = new MRTask() {
@Override public void map( Chunk cs[], NewChunk ncs[] ) {
Random rng = new Random(seed*cs[0].cidx());
int nrows = cs[0]._len;
for( int i=0; i<nrows; i++ ) {
double r = rng.nextDouble();
int x=0; // Pick the NewChunk split
for( ; x<ratios.length-1; x++ ) if( r<ratios[x] ) break;
x *= ncols;
// Copy row to correct set of NewChunks
for( int j=0; j<ncols; j++ ) {
byte colType = cs[j].vec().get_type();
switch (colType) {
case Vec.T_BAD : break; /* NOP */
case Vec.T_STR : ncs[x + j].addStr(cs[j], i); break;
case Vec.T_UUID: ncs[x + j].addUUID(cs[j], i); break;
case Vec.T_NUM : /* fallthrough */
case Vec.T_CAT :
case Vec.T_TIME:
ncs[x + j].addNum(cs[j].atd(i));
break;
default:
throw new IllegalArgumentException("Unsupported vector type: " + colType);
}
}
}
}
}.doAll(alltypes,fr);
// Build output frames
Frame frames[] = new Frame[ratios.length];
Vec[] vecs = fr.vecs();
String[] names = fr.names();
Futures fs = new Futures();
for( int i=0; i<ratios.length; i++ ) {
Vec[] nvecs = new Vec[ncols];
final int rowLayout = mr.appendables()[i*ncols].compute_rowLayout();
for( int c=0; c<ncols; c++ ) {
AppendableVec av = mr.appendables()[i*ncols + c];
av.setDomain(vecs[c].domain());
nvecs[c] = av.close(rowLayout,fs);
}
frames[i] = new Frame(keys[i],fr.names(),nvecs);
DKV.put(frames[i],fs);
}
fs.blockForPending();
return frames;
}
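// Editor's note: illustrative usage sketch, not part of the original H2O source. It shows how
// shuffleSplitFrame is typically called; the method and key names here are hypothetical and the
// sketch assumes an H2O cloud is running and `fr` is a Frame already present in the DKV.
@SuppressWarnings("unchecked")
public static Frame[] exampleTrainTestSplit(Frame fr, long seed) {
  Key<Frame>[] keys = new Key[]{ Key.make(fr._key + "_train"), Key.make(fr._key + "_test") };
  // 80/20 split; the ratios must sum to 1 and match the number of keys
  return shuffleSplitFrame(fr, keys, new double[]{0.8, 0.2}, seed);
}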
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/svd/SVD.java
|
package hex.svd;
import Jama.Matrix;
import Jama.QRDecomposition;
import Jama.SingularValueDecomposition;
import hex.DataInfo;
import hex.DataInfo.Row;
import hex.FrameTask;
import hex.ModelBuilder;
import hex.ModelCategory;
import hex.glrm.GLRMModel;
import hex.gram.Gram;
import hex.gram.Gram.GramTask;
import hex.svd.SVDModel.SVDParameters;
import hex.util.LinearAlgebraUtils;
import hex.util.LinearAlgebraUtils.BMulInPlaceTask;
import hex.util.LinearAlgebraUtils.BMulTask;
import hex.util.LinearAlgebraUtils.SMulTask;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.rapids.Rapids;
import water.util.ArrayUtils;
import water.util.PrettyPrint;
import water.util.TwoDimTable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import static hex.util.DimensionReductionUtils.createScoringHistoryTableDR;
import static hex.util.DimensionReductionUtils.getTransformedEigenvectors;
import static java.lang.StrictMath.sqrt;
import static water.util.ArrayUtils.*;
/**
* Singular Value Decomposition
* <a href = "http://www.cs.yale.edu/homes/el327/datamining2013aFiles/07_singular_value_decomposition.pdf">SVD via Power Method Algorithm</a>
* <a href = "https://www.cs.cmu.edu/~venkatg/teaching/CStheory-infoage/book-chapter-4.pdf">Proof of Convergence for Power Method</a>
* <a href = "http://arxiv.org/pdf/0909.4061.pdf">Randomized Algorithms for Matrix Approximation</a>
* @author anqi_fu
*/
public class SVD extends ModelBuilder<SVDModel,SVDModel.SVDParameters,SVDModel.SVDOutput> {
// Convergence tolerance
private final double TOLERANCE = 1e-16; // Cutoff for estimation error of right singular vector
private final double EPS = 1e-16; // cutoff if vector norm is too small
// Maximum number of columns when categoricals expanded
private final int MAX_COLS_EXPANDED = 5000;
private boolean _callFromGLRM; // when SVD is used as an init method for GLRM, need to initialize properly
private GLRMModel _glrmModel;
// Number of columns in training set (p)
private transient int _ncolExp; // With categoricals expanded into 0/1 indicator cols
boolean _wideDataset = false; // defaults to false; set to true to use the wide-dataset (outer r x r Gram) code path.
private double[] _estimatedSingularValues; // store estimated singular values for power method
private boolean _matrixRankReached = false; // stop if eigenvector norm becomes too small. Reach rank of matrix
private boolean _failedConvergence = false; // warn if power failed to converge for some eigenvector calculation
@Override protected SVDDriver trainModelImpl() { return new SVDDriver(); }
@Override public ModelCategory[] can_build() { return new ModelCategory[]{ ModelCategory.DimReduction }; }
@Override public BuilderVisibility builderVisibility() { return BuilderVisibility.Experimental; }
@Override public boolean isSupervised() { return false; }
@Override public boolean havePojo() { return true; }
@Override public boolean haveMojo() { return false; }
// Called from an http request
public SVD(SVDModel.SVDParameters parms ) { super(parms ); init(false); _glrmModel=null; _callFromGLRM=false;}
public SVD(SVDModel.SVDParameters parms, Job job) { super(parms,job); init(false); _glrmModel=null; _callFromGLRM=false;}
public SVD(SVDModel.SVDParameters parms, Job job, boolean callFromGlrm, GLRMModel gmodel) {
super(parms,job);
init(false);
_callFromGLRM = callFromGlrm;
if (gmodel == null)
error("_train SVD for GLRM", "Your GLRM model parameter is null.");
_glrmModel = gmodel;
}
public SVD(boolean startup_once) { super(new SVDParameters(),startup_once); }
@Override
protected void checkMemoryFootPrint_impl() {
HeartBeat hb = H2O.SELF._heartbeat;
double p = LinearAlgebraUtils.numColsExp(_train, true);
double r = _train.numRows();
boolean useGramSVD = _parms._svd_method == SVDParameters.Method.GramSVD;
boolean usePower = _parms._svd_method == SVDParameters.Method.Power;
boolean useRandomized = _parms._svd_method == SVDParameters.Method.Randomized;
double gramSize = _train.lastVec().nChunks()==1 ? 1 :
Math.log((double) _train.lastVec().nChunks()) / Math.log(2.); // gets to zero if nChunks=1
long mem_usage = (useGramSVD || usePower || useRandomized) ? (long) (hb._cpus_allowed * p * p * 8/*doubles*/
* gramSize) : 1; //one gram per core
long mem_usage_w = (useGramSVD || usePower || useRandomized) ? (long) (hb._cpus_allowed * r * r * 8/*doubles*/
* gramSize) : 1; //one gram per core
long max_mem = hb.get_free_mem();
if ((mem_usage > max_mem) && (mem_usage_w > max_mem)) {
String msg = "Gram matrices (one per thread) won't fit in the driver node's memory ("
+ PrettyPrint.bytes(mem_usage) + " > " + PrettyPrint.bytes(max_mem)
+ ") - try reducing the number of columns and/or the number of categorical factors.";
error("_train", msg);
}
// _wideDataset is true if the p x p Gram does not fit in memory (while the r x r outer Gram does).
if (mem_usage > max_mem) {
_wideDataset = true; // have to set _wideDataset in this case
} else { // both ways fit into memory. Want to choose wideDataset if p is too big.
if ((p > 5000) && ( r < 5000)) {
_wideDataset = true;
}
}
}
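// Editor's note: illustrative sketch, not part of the original H2O source. approxGramBytes is a
// hypothetical helper that spells out the rough per-node memory estimate used above: one
// dim x dim Gram of doubles per core, scaled by log2(nChunks) for the reduce tree (scale 1 when
// there is a single chunk).
private static long approxGramBytes(int cores, double dim, int nChunks) {
  double chunkScale = nChunks == 1 ? 1 : Math.log((double) nChunks) / Math.log(2.);
  return (long) (cores * dim * dim * 8 /*bytes per double*/ * chunkScale);
}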
/*
Set value of wideDataset. Note that this routine is used for test purposes only and is not intended
for users.
*/
public void setWideDataset(boolean isWide) {
_wideDataset = isWide;
}
@Override public void init(boolean expensive) {
super.init(expensive);
if (_parms._max_iterations < 1)
error("_max_iterations", "max_iterations must be at least 1");
if(_train == null) return;
if (_callFromGLRM) // when used to initialize GLRM, need to treat binary numeric columns with binary loss as numeric columns
_ncolExp = _glrmModel._output._catOffsets[_glrmModel._output._catOffsets.length-1]+_glrmModel._output._nnums;
else
_ncolExp = LinearAlgebraUtils.numColsExp(_train,_parms._use_all_factor_levels);
if (_ncolExp > MAX_COLS_EXPANDED) {
warn("_train", "_train has " + _ncolExp + " columns when categoricals are expanded. " +
"Algorithm may be slow.");
}
if(_parms._nv < 1 || _parms._nv > _ncolExp)
error("_nv", "Number of right singular values must be between 1 and " + _ncolExp);
if (expensive && error_count() == 0) {
if (!(_train.hasNAs()) || _parms._impute_missing) {
checkMemoryFootPrint(); // perform memory check here if dataset contains no NAs or if impute_missing enabled
}
}
}
// Compute ivv_sum - vec * vec' for symmetric array ivv_sum
public static double[][] updateIVVSum(double[][] ivv_sum, double[] vec) {
double diff;
for(int i = 0; i < vec.length; i++) {
for(int j = 0; j < i; j++) {
diff = ivv_sum[i][j] - vec[i] * vec[j];
ivv_sum[i][j] = ivv_sum[j][i] = diff;
}
ivv_sum[i][i] -= vec[i] * vec[i];
}
return ivv_sum;
}
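// Editor's note: illustrative sketch, not part of the original H2O source. exampleDeflationProjector
// is a hypothetical helper showing the typical use of updateIVVSum: start from the identity and
// subtract v v' for every eigenvector found so far, producing the deflation projector
// I - sum_i v_i v_i' that is later applied to the Gram matrix.
static double[][] exampleDeflationProjector(double[][] eigenvectors, int dim) {
  double[][] ivvSum = new double[dim][dim];
  for (int i = 0; i < dim; i++) ivvSum[i][i] = 1; // start from the identity matrix
  for (double[] v : eigenvectors) updateIVVSum(ivvSum, v); // subtract each v v'
  return ivvSum;
}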
class SVDDriver extends Driver {
SVDModel _model;
private double[] powerLoop(Gram gram, long seed, SVDModel model, double[] randomInitialV, double[] finalV, int k)
{
// Arrays.fill(randomInitialV,0);
randomInitialV = ArrayUtils.gaussianVector(seed+k, randomInitialV); // random vector for each iteration!
div(randomInitialV, l2norm(randomInitialV)); // normalize initial vector
return powerLoop(gram, randomInitialV, model, finalV, k);
}
/**
* The problem with the previous powerLoop stopping rule was that err became very small very
* quickly, so the loop rarely got to iterate much. The stopping condition is therefore changed
* to the one used for symmetric matrices:
*
* Let X = A^m * X0 and lambda1 = (AX . X) / (X . X).
* Stop when err = sqrt((AX . AX) / (X . X) - lambda1^2) < TOLERANCE or max_iterations is reached.
*
* @param gram Gram matrix A'A of the training frame
* @param v current (normalized) estimate of the right singular vector
* @param model model whose scoring history is updated during the iterations
* @param vnew work buffer that receives the updated estimate
* @param k index of the singular vector currently being computed
* @return the converged (or last) estimate of right singular vector k
*/
private double[] powerLoop(Gram gram, double[] v, SVDModel model, double[] vnew, int k) {
// TODO: What happens if Gram matrix is essentially zero? Numerical inaccuracies in PUBDEV-1161.
assert v.length == gram.fullN();
// Set initial value v_0 to standard normal distribution
int iters = 0;
double err = 2 * TOLERANCE;
double lambda1_calc = 0; // this is the actual singular value that we are looking for as well
double lambda_est = 0;
int eigIndex = model._output._iterations+1; // we start counting at 1 and not zero.
// Update v_i <- (A'Av_{i-1})/||A'Av_{i-1}|| where A'A = Gram matrix of training frame
while(iters < _parms._max_iterations && err > TOLERANCE) {
if (stop_requested()) break;
// Compute x_i <- A'Av_{i-1} and ||x_i||
gram.mul(v, vnew);
lambda1_calc = innerProduct(vnew, v);
lambda_est = innerProduct(vnew, vnew);
double norm = l2norm(vnew);
double invnorm = 0;
err = 0;
if (norm > EPS) { // norm is not too small
invnorm = 1 / norm;
for (int i = 0; i < v.length; i++) {
vnew[i] *= invnorm; // Compute singular vector v_i = x_i/||x_i||
v[i] = vnew[i]; // Update v_i for next iteration
}
err = Math.sqrt(lambda_est - lambda1_calc * lambda1_calc);
iters++; // TODO: Should output vector of final iterations for each k
// store variables for scoring history
model._output._training_time_ms.add(System.currentTimeMillis());
model._output._history_err.add(err);
model._output._history_eigenVectorIndex.add((double) eigIndex);
} else {
_job.warn("_train SVD: Dataset is rank deficient. User specified "+_parms._nv);
_matrixRankReached = true;
break;
}
}
if (err > TOLERANCE) {
_failedConvergence=true;
_job.warn("_train: PCA Power method failed to converge within TOLERANCE. Increase max_iterations or reduce " +
"TOLERANCE to mitigate this problem.");
}
_estimatedSingularValues[k] = lambda1_calc;
return v;
}
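// Editor's note: illustrative sketch, not part of the original H2O source. stoppingError is a
// hypothetical helper that computes the stopping quantities described in the javadoc above for a
// plain symmetric matrix a and vector x: lambda1 = (Ax . x)/(x . x) and
// err = sqrt((Ax . Ax)/(x . x) - lambda1^2).
private double stoppingError(double[][] a, double[] x) {
  double[] ax = new double[x.length];
  for (int i = 0; i < a.length; i++)
    for (int j = 0; j < x.length; j++)
      ax[i] += a[i][j] * x[j]; // Ax
  double axDotX = 0, axDotAx = 0, xDotX = 0;
  for (int i = 0; i < x.length; i++) {
    axDotX += ax[i] * x[i];
    axDotAx += ax[i] * ax[i];
    xDotX += x[i] * x[i];
  }
  double lambda1 = axDotX / xDotX; // Rayleigh quotient estimate of the eigenvalue
  return Math.sqrt(axDotAx / xDotX - lambda1 * lambda1);
}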
private double computeSigmaU(DataInfo dinfo, SVDModel model, int k, double[][] ivv_sum, Vec[] uvecs, double[] vresult) {
double[] ivv_vk = ArrayUtils.multArrVec(ivv_sum, model._output._v[k], vresult);
CalcSigmaU ctsk = new CalcSigmaU(_job._key, dinfo, ivv_vk).doAll(Vec.T_NUM, dinfo._adaptedFrame);
model._output._d[k] = ctsk._sval;
assert ctsk._nobs == model._output._nobs : "Processed " + ctsk._nobs + " rows but expected " + model._output._nobs; // Check same number of skipped rows as Gram
Frame tmp = ctsk.outputFrame();
uvecs[k] = tmp.vec(0); // Save output column of U
tmp.unlock(_job);
return model._output._d[k];
}
/*
// Algorithm 4.4: Randomized subspace iteration from Halko et al (http://arxiv.org/pdf/0909.4061.pdf)
private Frame randSubIterInPlace(DataInfo dinfo, SVDModel model) {
DataInfo yinfo = null;
Frame yqfrm = null;
try {
// 1) Initialize Y = AG where G ~ N(0,1) and compute Y = QR factorization
_job.update(1, "Initializing random subspace of training data Y");
double[][] gt = ArrayUtils.gaussianArray(_parms._nv, _ncolExp, _parms._seed);
RandSubInit rtsk = new RandSubInit(_job._key, dinfo, gt);
rtsk.doAll(_parms._nv, Vec.T_NUM, dinfo._adaptedFrame);
yqfrm = rtsk.outputFrame(Key.<Frame>make(), null, null); // Alternates between Y and Q from Y = QR
// Make input frame [A,Q] where A = read-only training data, Y = A \tilde{Q}, Q from Y = QR factorization
// Note: If A is n by p (p = num cols with categoricals expanded), then \tilde{Q} is p by k and Q is n by k
// Q frame is used to save both intermediate Y calculation and final orthonormal Q matrix
Frame aqfrm = new Frame(dinfo._adaptedFrame);
aqfrm.add(yqfrm);
// Calculate Cholesky of Y Gram to get R' = L matrix
_job.update(1, "Computing QR factorization of Y");
yinfo = new DataInfo(yqfrm, null, true, DataInfo.TransformType.NONE, true, false, false);
DKV.put(yinfo._key, yinfo);
LinearAlgebraUtils.computeQInPlace(_job._key, yinfo);
model._output._iterations = 0;
while (model._output._iterations < _parms._max_iterations) {
if(stop_requested()) break;
_job.update(1, "Iteration " + String.valueOf(model._output._iterations+1) + " of randomized subspace iteration");
// 2) Form \tilde{Y}_j = A'Q_{j-1} and compute \tilde{Y}_j = \tilde{Q}_j \tilde{R}_j factorization
SMulTask stsk = new SMulTask(dinfo, _parms._nv);
stsk.doAll(aqfrm);
Matrix ysmall = new Matrix(stsk._atq);
QRDecomposition ysmall_qr = new QRDecomposition(ysmall);
double[][] qtilde = ysmall_qr.getQ().getArray();
// 3) [A,Q_{j-1}] -> [A,Y_j]: Form Y_j = A\tilde{Q}_j and compute Y_j = Q_jR_j factorization
BMulInPlaceTask tsk = new BMulInPlaceTask(dinfo, ArrayUtils.transpose(qtilde));
tsk.doAll(aqfrm);
LinearAlgebraUtils.computeQInPlace(_job._key, yinfo);
model._output._iterations++;
model.update(_job);
}
} finally {
if( yinfo != null ) yinfo.remove();
}
return yqfrm;
}
*/
// Algorithm 4.4: Randomized subspace iteration from Halko et al (http://arxiv.org/pdf/0909.4061.pdf)
// This function keeps track of change in Q each iteration ||Q_j - Q_{j-1}||_2 to check convergence
private Frame randSubIter(DataInfo dinfo, SVDModel model) {
DataInfo yinfo = null;
Frame ybig = null, qfrm = null, ysmallF = null, ysmallqfrm = null;
final int ncolA = dinfo._adaptedFrame.numCols();
double[][] xx = null;
double[][] ysmall_q = null;
DataInfo ysmallInfo = null;
try {
// 1) Initialize Y = AG where G ~ N(0,1) and compute Y = QR factorization
_job.update(1, "Initializing random subspace of training data Y");
double[][] gt = ArrayUtils.gaussianArray(_parms._nv, _ncolExp, _parms._seed);
RandSubInit rtsk = new RandSubInit(_job._key, dinfo, gt);
rtsk.doAll(_parms._nv, Vec.T_NUM, dinfo._adaptedFrame);
ybig = rtsk.outputFrame(Key.<Frame>make(), null, null);
Frame yqfrm = new Frame(ybig);
for (int i = 0; i < _parms._nv; i++)
yqfrm.add("qcol_" + i, yqfrm.anyVec().makeZero());
// Calculate Cholesky of Gram to get R' = L matrix
_job.update(1, "Computing QR factorization of Y");
yinfo = new DataInfo(ybig, null, true, DataInfo.TransformType.NONE, true, false, false);
DKV.put(yinfo._key, yinfo);
LinearAlgebraUtils.computeQ(_job._key, yinfo, yqfrm, xx);
if (yqfrm.hasInfs()) { // dataset is rank deficient, reduce _nv to fit the true rank better
_matrixRankReached=true; // infinities or NaNs in Q indicate a rank-deficiency problem
String warnMessage = "_train SVD: Dataset is rank deficient. _parms._nv was "+_parms._nv;
for (int colIndex = ybig.numCols(); colIndex < yqfrm.numCols(); colIndex++) {
if (yqfrm.vec(colIndex).pinfs() > 0) {
_parms._nv = colIndex-ybig.numCols();
break;
}
}
_job.warn(warnMessage+" and is now set to "+_parms._nv);
// redo with correct _nv number
gt = ArrayUtils.gaussianArray(_parms._nv, _ncolExp, _parms._seed);
rtsk = new RandSubInit(_job._key, dinfo, gt);
rtsk.doAll(_parms._nv, Vec.T_NUM, dinfo._adaptedFrame);
ybig.remove();
yinfo.remove();
ybig = rtsk.outputFrame(Key.<Frame>make(), null, null);
yinfo = new DataInfo(ybig, null, true, DataInfo.TransformType.NONE, true, false, false);
DKV.put(yinfo._key, yinfo);
}
// Make input frame [A,Q,Y] where A = read-only training data, Y = A \tilde{Q}, Q from Y = QR factorization
// Note: If A is n by p (p = num cols with categoricals expanded), then \tilde{Q} is p by k and Q is n by k
Frame ayqfrm = new Frame(dinfo._adaptedFrame);
ayqfrm.add(ybig);
for (int i = 0; i < _parms._nv; i++)
ayqfrm.add("qcol_" + i, ayqfrm.anyVec().makeZero());
Frame ayfrm = ayqfrm.subframe(0, ncolA + _parms._nv); // [A,Y]
Frame aqfrm = ayqfrm.subframe(0, ncolA);
aqfrm.add(ayqfrm.subframe(ncolA + _parms._nv, ayqfrm.numCols())); // [A,Q]
yqfrm = ayqfrm.subframe(ncolA, ayqfrm.numCols()); // [Y,Q]
xx = MemoryManager.malloc8d(_parms._nv, _parms._nv);
LinearAlgebraUtils.computeQ(_job._key, yinfo, yqfrm, xx);
model._output._iterations = 0;
long qobs = dinfo._adaptedFrame.numRows() * _parms._nv; // Number of observations in Q
double qerr = 2 * TOLERANCE * qobs; // Stop when average SSE between Q_j and Q_{j-2} below tolerance
double average_SEE = qerr / qobs;
int wEndCol = 2*_parms._nv-1;
int wEndColR = _parms._nv-1;
while ((model._output._iterations < 10 || average_SEE > TOLERANCE) && model._output._iterations < _parms._max_iterations) { // Run at least 10 iterations before tolerance cutoff
if(stop_requested()) {
if (timeout())
_job.warn("_train SVD: max_runtime_secs is reached. Not all iterations are computed.");
break;
}
_job.update(1, "Iteration " + String.valueOf(model._output._iterations+1) + " of randomized subspace iteration");
// 2) Form \tilde{Y}_j = A'Q_{j-1} and compute \tilde{Y}_j = \tilde{Q}_j \tilde{R}_j factorization
SMulTask stsk = new SMulTask(dinfo, _parms._nv, _ncolExp);
stsk.doAll(aqfrm); // Pass in [A,Q]
if (_wideDataset) {
if (model._output._iterations==0) {
ysmallF = new water.util.ArrayUtils().frame(stsk._atq);
ysmallInfo = new DataInfo(ysmallF, null, true, DataInfo.TransformType.NONE,
true, false, false);
DKV.put(ysmallInfo._key, ysmallInfo);
ysmall_q = MemoryManager.malloc8d(_ncolExp, _parms._nv);
ysmallqfrm = new Frame(ysmallF);
for (int i = 0; i < _parms._nv; i++) // pray that _nv is small
ysmallqfrm.add("qcol_" + i, ysmallqfrm.anyVec().makeZero());
} else { // replace content of ysmallqfrm with new contents in _atq,
new CopyArrayToFrame(0, wEndColR, _ncolExp, stsk._atq).doAll(ysmallqfrm);
}
LinearAlgebraUtils.computeQ(_job._key, ysmallInfo, ysmallqfrm, xx);
ysmall_q = new FrameToArray(_parms._nv, wEndCol, _ncolExp, ysmall_q).doAll(ysmallqfrm).getArray();
} else { // let ysmall as 2-D double array
Matrix ysmall = new Matrix(stsk._atq); // small only for n_exp << m. Not for wide dataset.
QRDecomposition ysmall_qr = new QRDecomposition(ysmall);
ysmall_q = ysmall_qr.getQ().getArray(); // memory allocation here too.
}
// 3) Form Y_j = A\tilde{Q}_j and compute Y_j = Q_jR_j factorization (ybig)
BMulInPlaceTask tsk = new BMulInPlaceTask(dinfo, ArrayUtils.transpose(ysmall_q), _ncolExp);
tsk.doAll(ayfrm);
qerr = LinearAlgebraUtils.computeQ(_job._key, yinfo, yqfrm, xx);
average_SEE = qerr/qobs;
model._output._iterations++;
// store variables for scoring history
model._output._training_time_ms.add(System.currentTimeMillis());
model._output._history_average_SEE.add(average_SEE);
model.update(_job);
}
model._output._nobs = ybig.numRows(); // update nobs parameter
model.update(_job);
// 4) Extract and save final Q_j from [A,Q] frame
qfrm = ayqfrm.extractFrame(ncolA + _parms._nv, ayqfrm.numCols());
qfrm = new Frame(Key.<Frame>make(), qfrm.names(), qfrm.vecs());
DKV.put(qfrm);
} finally {
if( yinfo != null ) yinfo.remove();
if( ybig != null ) ybig.delete();
if (ysmallInfo != null) ysmallInfo.remove();
if (ysmallF != null) ysmallF.delete();
if (ysmallqfrm != null) ysmallqfrm.delete();
}
return qfrm;
}
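// Editor's note: illustrative sketch, not part of the original H2O source. subspaceIterationStep
// is a hypothetical helper showing one iteration of the same randomized subspace scheme on small,
// in-memory Jama matrices; the distributed code above performs the equivalent of steps 2) and 3)
// with MRTasks over the training frame.
private Matrix subspaceIterationStep(Matrix a, Matrix q) {
  Matrix ytilde = a.transpose().times(q);             // \tilde{Y} = A'Q
  Matrix qtilde = new QRDecomposition(ytilde).getQ(); // \tilde{Q} from \tilde{Y} = \tilde{Q}\tilde{R}
  Matrix y = a.times(qtilde);                         // Y = A\tilde{Q}
  return new QRDecomposition(y).getQ();               // next Q from Y = QR
}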
// Algorithm 5.1: Direct SVD from Halko et al (http://arxiv.org/pdf/0909.4061.pdf)
private Frame directSVD(DataInfo dinfo, Frame qfrm, SVDModel model) {
String u_name = (_parms._u_name == null || _parms._u_name.length() == 0) ? "SVDUMatrix_" + Key.rand() : _parms._u_name;
return directSVD(dinfo, qfrm, model, u_name);
}
private Frame directSVD(DataInfo dinfo, Frame qfrm, SVDModel model, String u_name) {
DataInfo qinfo = null;
Frame u = null;
final int ncolA = dinfo._adaptedFrame.numCols();
try {
Vec[] vecs = new Vec[ncolA + _parms._nv];
for (int i = 0; i < ncolA; i++) vecs[i] = dinfo._adaptedFrame.vec(i);
for (int i = 0; i < _parms._nv; i++) vecs[ncolA + i] = qfrm.vec(i);
Frame aqfrm = new Frame(vecs);
// 1) Form the matrix B' = A'Q = (Q'A)'
_job.update(1, "Forming small matrix B = Q'A for direct SVD");
SMulTask stsk = new SMulTask(dinfo, _parms._nv, _ncolExp);
stsk.doAll(aqfrm); // _atq size is _ncolExp by _nv
if (_wideDataset) { // for wide dataset, calculate gram of B*T(B), get the SVD and proceed from there.
/* double[][] xgram = ArrayUtils.formGram(stsk._atq, false);
Matrix gramJ2 = new Matrix(xgram); // form outer gram*/
Frame tB = new water.util.ArrayUtils().frame(stsk._atq);
DataInfo tbInfo = new DataInfo(tB, null, true, DataInfo.TransformType.NONE,
false, false, false);
GramTask gtsk = new GramTask(_job._key, tbInfo).doAll(tB);
Matrix gramJ = new Matrix(gtsk._gram.getXX()); // form outer gram
SingularValueDecomposition svdJ = gramJ.svd();
// 3) Form orthonormal matrix U = QV
_job.update(1, "Forming distributed orthonormal matrix U");
u=makeUVec(model, u_name, u, qfrm, new Matrix(stsk._atq), svdJ);
model._output._d = ArrayUtils.mult((Arrays.copyOfRange(ArrayUtils.sqrtArr(svdJ.getSingularValues()),
0, _parms._nv)), sqrt(tB.numRows()));
// to get v, we need to do T(A)*U*D^-1
// stuff A and U into a frame
Vec[] tvecs = new Vec[ncolA];
for (int i = 0; i < ncolA; i++) tvecs[i] = dinfo._adaptedFrame.vec(i);
Frame avfrm = new Frame(tvecs);
Frame fromSVD = null;
avfrm.add(u);
model._output._v = (new SMulTask(dinfo, _parms._nv, _ncolExp).doAll(avfrm))._atq;
// Compute T(A)*U; the result (V) lands in _atq and still needs to be scaled by the singular values.
model._output._v = ArrayUtils.mult(ArrayUtils.transpose(ArrayUtils.div(ArrayUtils.transpose(model._output._v),
model._output._d)), 1);
if (fromSVD != null) fromSVD.delete();
if (tB != null) tB.delete();
} else {
// 2) Compute SVD of small matrix: If B' = WDV', then B = VDW'
_job.update(1, "Calculating SVD of small matrix locally");
Matrix atqJ = new Matrix(stsk._atq);
SingularValueDecomposition svdJ = atqJ.svd();
// 3) Form orthonormal matrix U = QV
_job.update(1, "Forming distributed orthonormal matrix U");
if (_parms._keep_u) {
u=makeUVec(model, u_name, u, qfrm, atqJ, svdJ);
}
model._output._d = Arrays.copyOfRange(svdJ.getSingularValues(), 0, _parms._nv);
model._output._v = svdJ.getU().getMatrix(0, atqJ.getRowDimension() - 1, 0, _parms._nv - 1).getArray();
}
} finally {
if( qinfo != null ) qinfo.remove();
}
return u;
}
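// Editor's note: illustrative sketch, not part of the original H2O source. directSvdSketch is a
// hypothetical helper showing the same direct SVD on small, in-memory Jama matrices. Given
// A (n x p) and an orthonormal Q (n x k), B' = A'Q = W D V' implies A ~ Q V D W', so W
// approximates V of A and Q*V approximates U, mirroring the distributed steps above.
private Matrix[] directSvdSketch(Matrix a, Matrix q) {
  Matrix bt = a.transpose().times(q);        // B' = A'Q, p x k (tall, so Jama's SVD applies)
  SingularValueDecomposition svd = bt.svd(); // B' = W D V'
  Matrix u = q.times(svd.getV());            // U = Q V (approximate left singular vectors of A)
  return new Matrix[]{ u, svd.getS(), svd.getU() }; // {U, D, V} approximation for A
}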
/*
Form orthonormal matrix U = QV
*/
public Frame makeUVec(SVDModel model, String u_name, Frame u, Frame qfrm, Matrix atqJ, SingularValueDecomposition svdJ ) {
model._output._u_key = Key.make(u_name);
double[][] svdJ_u = svdJ.getV().getMatrix(0, atqJ.getColumnDimension() - 1, 0,
_parms._nv - 1).getArray();
DataInfo qinfo = new DataInfo(qfrm, null, true, DataInfo.TransformType.NONE,
false, false, false);
DKV.put(qinfo._key, qinfo);
BMulTask btsk = new BMulTask(_job._key, qinfo, ArrayUtils.transpose(svdJ_u));
btsk.doAll(_parms._nv, Vec.T_NUM, qinfo._adaptedFrame);
qinfo.remove();
return btsk.outputFrame(model._output._u_key, null, null);
// DKV.remove(qinfo._key);
}
@Override
public void computeImpl() {
SVDModel model = null;
DataInfo dinfo = null, tinfo = null;
Frame u = null, qfrm = null;
Vec[] uvecs = null;
try {
init(true); // Initialize parameters
if (error_count() > 0) throw new IllegalArgumentException("Found validation errors: " + validationErrors());
// The model to be built
model = new SVDModel(dest(), _parms, new SVDModel.SVDOutput(SVD.this));
model.delete_and_lock(_job);
// store (possibly) rebalanced input train to pass it to nested SVD job
Frame tranRebalanced = new Frame(_train);
boolean frameHasNas = tranRebalanced.hasNAs();
// 0) Transform training data and save standardization vectors for use in scoring later
if ((!_parms._impute_missing) && frameHasNas) { // remove NAs rows
tinfo = new DataInfo(_train, _valid, 0, _parms._use_all_factor_levels, _parms._transform,
DataInfo.TransformType.NONE, /* skipMissing */ !_parms._impute_missing, /* imputeMissing */
_parms._impute_missing, /* missingBucket */ false, /* weights */ false,
/* offset */ false, /* fold */ false, /* intercept */ false);
DKV.put(tinfo._key, tinfo);
DKV.put(tranRebalanced._key, tranRebalanced);
_train = Rapids.exec(String.format("(na.omit %s)", tranRebalanced._key)).getFrame(); // remove NA rows
DKV.remove(tranRebalanced._key);
checkMemoryFootPrint();
}
dinfo = new DataInfo(_train, _valid, 0, _parms._use_all_factor_levels, _parms._transform,
DataInfo.TransformType.NONE, /* skipMissing */ !_parms._impute_missing, /* imputeMissing */
_parms._impute_missing, /* missingBucket */ false, /* weights */ false,
/* offset */ false, /* fold */ false, /* intercept */ false);
DKV.put(dinfo._key, dinfo);
if (!_parms._impute_missing && frameHasNas) {
// fixed the std and mean of dinfo to that of the frame before removing NA rows
dinfo._normMul = tinfo._normMul;
dinfo._numMeans = tinfo._numMeans;
dinfo._numNAFill = dinfo._numMeans; // NAs will be imputed with means
dinfo._normSub = tinfo._normSub;
}
// Save adapted frame info for scoring later
setSVDModel(model, dinfo);
String u_name = (_parms._u_name == null || _parms._u_name.length() == 0) ? "SVDUMatrix_" + Key.rand() : _parms._u_name;
String v_name = (_parms._v_name == null || _parms._v_name.length() == 0) ? "SVDVMatrix_" + Key.rand() : _parms._v_name;
if(_parms._svd_method == SVDParameters.Method.GramSVD) {
// Calculate and save Gram matrix of training data
// NOTE: Gram computes A'A/n where n = nrow(A) = number of rows in training set (excluding rows with NAs)
_job.update(1, "Begin distributed calculation of Gram matrix");
GramTask gtsk = new GramTask(_job._key, dinfo).doAll(dinfo._adaptedFrame);
Gram gram = gtsk._gram; // TODO: This ends up with all NaNs if training data has too many missing values
assert gram.fullN() == _ncolExp;
model._output._nobs = gtsk._nobs;
model._output._total_variance = gram.diagSum() * gtsk._nobs / (gtsk._nobs-1); // Since gram = X'X/nobs, but variance requires nobs-1 in denominator
model.update(_job);
// Cannot calculate SVD if all rows contain missing value(s) and hence were skipped
if(gtsk._nobs == 0)
error("_train", "Every row in _train contains at least one missing value. Consider setting impute_missing = TRUE.");
if (error_count() > 0) throw new IllegalArgumentException("Found validation errors: " + validationErrors());
// Calculate SVD of G = A'A/n and back out SVD of A. If SVD of A = UDV' then A'A/n = V(D^2/n)V'
_job.update(1, "Calculating SVD of Gram matrix locally");
Matrix gramJ = new Matrix(gtsk._gram.getXX());
SingularValueDecomposition svdJ = gramJ.svd();
// Output diagonal of D
_job.update(1, "Computing stats from SVD");
double[] sval = svdJ.getSingularValues();
model._output._d = MemoryManager.malloc8d(_parms._nv);
// model._output._d = new double[_parms._nv]; // Only want rank = nv diagonal values
for(int k = 0; k < _parms._nv; k++)
model._output._d[k] = Math.sqrt(sval[k] * model._output._nobs);
// Output right singular vectors V
double[][] v = svdJ.getV().getArray();
assert v.length == _ncolExp && LinearAlgebraUtils.numColsExp(dinfo._adaptedFrame,_parms._use_all_factor_levels) == _ncolExp;
model._output._v = MemoryManager.malloc8d(_ncolExp, _parms._nv);
// model._output._v = new double[_ncolExp][_parms._nv]; // Only want rank = nv decomposition
for(int i = 0; i < v.length; i++)
System.arraycopy(v[i], 0, model._output._v[i], 0, _parms._nv);
// Calculate left singular vectors U = AVD^(-1) if requested
if(_parms._keep_u) {
model._output._u_key = Key.make(u_name);
double[][] vt = ArrayUtils.transpose(model._output._v);
for (int k = 0; k < _parms._nv; k++)
ArrayUtils.div(vt[k], model._output._d[k]);
BMulTask tsk = new BMulTask(_job._key, dinfo, vt).doAll(_parms._nv, Vec.T_NUM, dinfo._adaptedFrame);
u = tsk.outputFrame(model._output._u_key, null, null);
}
} else if(_parms._svd_method == SVDParameters.Method.Power) {
// Calculate and save Gram matrix of training data
// NOTE: Gram computes A'A/n where n = nrow(A) = number of rows in training set (excluding rows with NAs)
// NOTE: the Gram also will apply the specified Transforms on the data before performing the operation.
// NOTE: valid transforms are NONE, DEMEAN, STANDARDIZE...
_job.update(1, "Begin distributed calculation of Gram matrix");
GramTask gtsk = null;
Gram.OuterGramTask ogtsk = null;
Gram gram = null, gram_update=null;
double[] randomInitialV = null; // store random initial eigenvectors, actually referring to V'
double[] finalV = null; // store eigenvectors obtained from powerLoop
int eigVecLen = _ncolExp; // size of one eigenvector
GramUpdate guptsk = null;
double[][] gramArrays = null; // store outergram as a double array
double[][] gramUpdatesW = null; // store the result of (I-sum vi*T(vi))*A*T(A)*(I-sum vi*T(vi))
//_estimatedSingularValues = new double[_parms._nv]; // allocate memory once
_estimatedSingularValues = MemoryManager.malloc8d(_parms._nv);
if (_wideDataset) {
ogtsk = new Gram.OuterGramTask(_job._key, dinfo).doAll(dinfo._adaptedFrame);
gram = ogtsk._gram;
model._output._nobs = ogtsk._nobs;
eigVecLen = (int) gram.fullN();
} else {
gtsk = new GramTask(_job._key, dinfo).doAll(dinfo._adaptedFrame);
gram = gtsk._gram; // TODO: This ends up with all NaNs if training data has too many missing values
assert gram.fullN() == _ncolExp;
model._output._nobs = gtsk._nobs;
}
model._output._total_variance = gram.diagSum() * model._output._nobs / (model._output._nobs-1); // Since gram = X'X/nobs, but variance requires nobs-1 in denominator
model.update(_job);
// 1) Run one iteration of power method
_job.update(1, "Iteration 1 of power method"); // One unit of work
// 1a) Initialize right singular vector v_1
model._output._v = MemoryManager.malloc8d(_parms._nv, eigVecLen);
// model._output._v = new double[_parms._nv][eigVecLen]; // Store V' for ease of use and transpose back at end
randomInitialV = MemoryManager.malloc8d(eigVecLen);
// randomInitialV = new double[eigVecLen]; // allocate memory for randomInitialV and finalV once, save time
finalV = MemoryManager.malloc8d(eigVecLen);
//finalV = new double[eigVecLen];
model._output._v[0] = Arrays.copyOf(powerLoop(gram, _parms._seed, model, randomInitialV, finalV, 0),
eigVecLen);
// Keep track of I - \sum_i v_iv_i' where v_i = eigenvector i
double[][] ivv_sum = new double[eigVecLen][eigVecLen];
for (int i = 0; i < eigVecLen; i++) ivv_sum[i][i] = 1; //generate matrix I
// 1b) Initialize singular value \sigma_1 and update u_1 <- Av_1
if (!_parms._only_v) {
model._output._d = new double[_parms._nv]; // allocate memory once
if (!_wideDataset) {
model._output._u_key = Key.make(u_name);
uvecs = new Vec[_parms._nv];
computeSigmaU(dinfo, model, 0, ivv_sum, uvecs, finalV); // Compute first singular value \sigma_1
}
}
model._output._iterations = 1;
model.update(_job); // Update model in K/V store
// 1c) Update Gram matrix A_1'A_1 = (I - v_1v_1')A'A(I - v_1v_1')
updateIVVSum(ivv_sum, model._output._v[0]);
// double[][] gram_update = ArrayUtils.multArrArr(ArrayUtils.multArrArr(ivv_sum, gram), ivv_sum);
if (_wideDataset) {
gramArrays = new double[eigVecLen][eigVecLen]; // memory allocation is done once here
gramUpdatesW = new double[eigVecLen][eigVecLen];
gram_update = new Gram(eigVecLen, 0, dinfo.numNums(), dinfo._cats,false);
updateGram(ivv_sum, gramArrays, gramUpdatesW, gram, gram_update);
} else {
guptsk = new GramUpdate(_job._key, dinfo, ivv_sum).doAll(dinfo._adaptedFrame);
gram_update = guptsk._gram;
}
for (int k = 1; k < _parms._nv; k++) { // loop through for each eigenvalue/eigenvector...
if (_matrixRankReached || stop_requested()) { // number of eigenvalues found is less than _nv
if (timeout()) {
_job.warn("_train SVD: max_runtime_secs is reached. Not all eigenvalues/eigenvectors are computed.");
}
int newk = k;
_job.warn("_train SVD: Dataset is rank deficient. _parms._nv was "+_parms._nv+" and is now set to "+newk);
_parms._nv = newk; // change number of eigenvector parameters to be the actual number of eigenvectors found
break;
}
_job.update(1, "Iteration " + String.valueOf(k+1) + " of power method"); // One unit of work
// 2) Iterate x_i <- (A_k'A_k/n)x_{i-1} until convergence and set v_k = x_i/||x_i||
model._output._v[k] = Arrays.copyOf(powerLoop(gram_update, _parms._seed, model, randomInitialV, finalV,
k),
eigVecLen);
// 3) Residual data A_k = A - \sum_{i=1}^k \sigma_i u_iv_i' = A - \sum_{i=1}^k Av_iv_i' = A(I - \sum_{i=1}^k v_iv_i')
// 3a) Compute \sigma_k = ||A_{k-1}v_k|| and u_k = A_{k-1}v_k/\sigma_k
if (!_parms._only_v && !_wideDataset)
computeSigmaU(dinfo, model, k, ivv_sum, uvecs, finalV);
// 3b) Compute Gram of residual A_k'A_k = (I - \sum_{i=1}^k v_jv_j')A'A(I - \sum_{i=1}^k v_jv_j')
updateIVVSum(ivv_sum, model._output._v[k]); // Update I - \sum_{i=1}^k v_iv_i' with sum up to current singular value
// gram_update = ArrayUtils.multArrArr(ivv_sum, ArrayUtils.multArrArr(gram, ivv_sum)); // Too slow on wide arrays
if (_wideDataset) {
updateGram(ivv_sum, gramArrays, gramUpdatesW, gram, gram_update);
} else {
guptsk = new GramUpdate(_job._key, dinfo, ivv_sum).doAll(dinfo._adaptedFrame);
gram_update = guptsk._gram;
}
model._output._iterations++;
model.update(_job); // Update model in K/V store
} // end iteration to find eigenvectors
if (!_parms._only_v && !_parms._keep_u && _wideDataset) { // dealing with wide dataset per request from PCA, won't want U
for (int vecIndex = 0; vecIndex < _parms._nv; vecIndex++) {
model._output._d[vecIndex] = Math.sqrt(model._output._nobs*_estimatedSingularValues[vecIndex]);
}
model._output._v = getTransformedEigenvectors(dinfo, transpose(model._output._v));
}
if (!_wideDataset) {
// 4) Normalize output frame columns by singular values to get left singular vectors
model._output._v = ArrayUtils.transpose(model._output._v); // Transpose to get V (since vectors were stored as rows)
if (!_parms._only_v && !_parms._keep_u) { // Delete U vecs if computed, but user does not want it returned
for (int index=0; index < _parms._nv; index++){
uvecs[index].remove();
}
model._output._u_key = null;
} else if (!_parms._only_v && _parms._keep_u) { // Divide U cols by singular values and save to DKV
u = new Frame(model._output._u_key, null, uvecs);
DKV.put(u._key, u);
DivideU utsk = new DivideU(model._output._d);
utsk.doAll(u);
}
}
if (_failedConvergence) {
_job.warn("_train: PCA Power method failed to converge within TOLERANCE. Increase max_iterations or " +
"reduce TOLERANCE to mitigate this problem.");
}
LinkedHashMap<String, ArrayList> scoreTable = new LinkedHashMap<String, ArrayList>();
scoreTable.put("Timestamp", model._output._training_time_ms);
scoreTable.put("err", model._output._history_err);
scoreTable.put("Principal Component #", model._output._history_eigenVectorIndex);
model._output._scoring_history = createScoringHistoryTableDR(scoreTable,
"Scoring History from Power SVD", _job.start_time());
} else if(_parms._svd_method == SVDParameters.Method.Randomized) {
qfrm = randSubIter(dinfo, model);
u = directSVD(dinfo, qfrm, model, u_name);
model._output._training_time_ms.add(System.currentTimeMillis());
if (stop_requested() && model._output._history_average_SEE.size()==0) {
model._output._history_average_SEE.add(Double.POSITIVE_INFINITY);
}
model._output._history_average_SEE.add(model._output._history_average_SEE.get(model._output._history_average_SEE.size()-1)); // add last err back to it
LinkedHashMap<String, ArrayList> scoreTable = new LinkedHashMap<String, ArrayList>();
scoreTable.put("Timestamp", model._output._training_time_ms);
scoreTable.put("average SEE", model._output._history_average_SEE);
model._output._scoring_history = createScoringHistoryTableDR(scoreTable,
"Scoring History from Randomized SVD", _job.start_time());
} else
error("_svd_method", "Unrecognized SVD method " + _parms._svd_method);
if (_parms._save_v_frame) {
model._output._v_key = Key.make(v_name);
ArrayUtils.frame(model._output._v_key, null, model._output._v);
}
if ((model._output._d != null) && _parms._nv < model._output._d.length) { // truncate d and v to the (possibly reduced) number of eigenvectors actually found
model._output._d = Arrays.copyOf(model._output._d, _parms._nv);
for (int index=0; index < model._output._v.length; index++) {
model._output._v[index] = Arrays.copyOf(model._output._v[index], _parms._nv);
}
}
model._output._model_summary = createModelSummaryTable(model._output);
model.update(_job);
} finally {
if( model != null ) model.unlock(_job);
if( dinfo != null ) dinfo.remove();
if (tinfo != null) tinfo.remove();
if( u != null & !_parms._keep_u ) u.delete();
if( qfrm != null ) qfrm.delete();
List<Key<Vec>> keep = new ArrayList<>();
if (model._output!=null) {
if (model._output._u_key != null) {
Frame uFrm = DKV.getGet(model._output._u_key);
if (uFrm != null) for (Vec vec : uFrm.vecs()) keep.add(vec._key);
}
Frame vFrm = DKV.getGet(model._output._v_key);
if (vFrm != null) for (Vec vec : vFrm.vecs()) keep.add(vec._key);
}
Scope.untrack(keep);
}
}
}
/*
This method calculates (I-v1*T(v1))*A*T(A)*(I-v1*T(v1)). Note that the A*T(A) part is already
available as a Gram matrix, and ivv_sum provides the (I-v1*T(v1)) projector. All that remains is
to form the product and store it in the pre-allocated output Gram matrix (gramUpdate).
*/
private void updateGram(double[][] ivv_sum, double[][] gramToArray, double[][] resultGram, Gram gram, Gram gramUpdate)
{
int numRows = gram.fullN();
// grab gram matrix (A*T(A)) and expand into full matrix represented as 2D double array.
for (int row_index=0; row_index < numRows; row_index++) {
for (int col_index=0; col_index < numRows; col_index++) {
if (col_index <= row_index) {
gramToArray[row_index][col_index] = gram._xx[row_index][col_index];
} else {
gramToArray[row_index][col_index] = gram._xx[col_index][row_index];
}
}
}
resultGram = multArrArr(ivv_sum, gramToArray); // resultGram = (I-v1*T(v1))*A*T(A)
gramToArray = multArrArr(resultGram, ivv_sum); // overwrite gramToArray with final result resultGram*(I-v1*T(v1))
// copy over results from matrix multiplication output to resultGram
for (int row_index = 0; row_index < numRows; row_index++) {
for (int col_index = 0; col_index <= row_index; col_index++) {
gramUpdate._xx[row_index][col_index] = gramToArray[row_index][col_index];
}
}
}
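// Editor's note: illustrative sketch, not part of the original H2O source. projectedGramDense is a
// hypothetical helper computing the same projected Gram, (I - sum v_i v_i') * (A'A) * (I - sum v_i v_i'),
// on plain dense arrays via the statically imported multArrArr; updateGram above does the same while
// reading from and writing back into the packed lower-triangular Gram storage.
private static double[][] projectedGramDense(double[][] ivv_sum, double[][] fullGram) {
  return multArrArr(multArrArr(ivv_sum, fullGram), ivv_sum);
}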
/*
This method may make changes to the dinfo parameters if SVD is called by GLRM as an init method.
*/
private void setSVDModel(SVDModel model, DataInfo dinfo) {
if (_callFromGLRM) {
dinfo._normSub = Arrays.copyOf(_glrmModel._output._normSub, _glrmModel._output._normSub.length);
dinfo._normMul = Arrays.copyOf(_glrmModel._output._normMul, _glrmModel._output._normMul.length);
dinfo._permutation = Arrays.copyOf(_glrmModel._output._permutation, _glrmModel._output._permutation.length);
dinfo._numMeans = Arrays.copyOf(dinfo._normSub, dinfo._normSub.length);
dinfo._numNAFill = dinfo._numMeans; // NAs will be imputed with means
dinfo._nums = _glrmModel._output._nnums;
dinfo._cats = _glrmModel._output._ncats;
dinfo._catOffsets = Arrays.copyOf(_glrmModel._output._catOffsets, _glrmModel._output._catOffsets.length);
model._output._names_expanded = Arrays.copyOf(_glrmModel._output._names_expanded,
_glrmModel._output._names_expanded.length);
} else
model._output._names_expanded = dinfo.coefNames();
model._output._normSub = dinfo._normSub == null ? new double[dinfo._nums] : dinfo._normSub;
if (dinfo._normMul == null) {
model._output._normMul = new double[dinfo._nums];
Arrays.fill(model._output._normMul, 1.0);
} else
model._output._normMul = dinfo._normMul;
model._output._permutation = dinfo._permutation;
model._output._nnums = dinfo._nums;
model._output._ncats = dinfo._cats;
model._output._catOffsets = dinfo._catOffsets;
}
private TwoDimTable createModelSummaryTable(SVDModel.SVDOutput output) {
if(null == output._d) return null;
String[] colTypes = new String[_parms._nv];
String[] colFormats = new String[_parms._nv];
String[] colHeaders = new String[_parms._nv];
Arrays.fill(colTypes, "double");
Arrays.fill(colFormats, "%5f");
for(int i = 0; i < colHeaders.length; i++) colHeaders[i] = "sval" + String.valueOf(i + 1);
return new TwoDimTable("Singular values", null, new String[1],
colHeaders, colTypes, colFormats, "", new String[1][],
new double[][]{output._d});
}
private static class CalcSigmaU extends FrameTask<CalcSigmaU> {
final double[] _svec;
public double _sval;
public long _nobs;
public CalcSigmaU(Key<Job> jobKey, DataInfo dinfo, double[] svec) {
super(jobKey, dinfo);
_svec = svec;
_sval = 0;
}
@Override protected void processRow(long gid, DataInfo.Row r, NewChunk[] outputs) {
double num = r.innerProduct(_svec);
outputs[0].addNum(num);
_sval += num * num;
++_nobs;
}
@Override public void reduce(CalcSigmaU other) {
_nobs += other._nobs;
_sval += other._sval;
}
@Override protected void postGlobal() {
_sval = Math.sqrt(_sval);
}
}
private static class GramUpdate extends FrameTask<GramUpdate> {
final double[][] _ivv;
public Gram _gram;
public long _nobs;
public GramUpdate(Key<Job> jobKey, DataInfo dinfo, double[][] ivv) {
super(jobKey, dinfo);
assert null != ivv && ivv.length == ivv[0].length;
_ivv = ivv;
}
@Override protected boolean chunkInit(){
// To avoid memory allocation during every iteration.
_gram = new Gram(_dinfo.fullN(), 0, _ivv.length, 0, false);
_numRow = _dinfo.newDenseRow(MemoryManager.malloc8d(_ivv.length),0);
return true;
}
private transient Row _numRow;
@Override protected void processRow(long gid, DataInfo.Row r) {
double w = 1; // TODO: add weights to dinfo?
double[] nums = _numRow.numVals;
for(int row = 0; row < _ivv.length; row++)
nums[row] = r.innerProduct(_ivv[row]);
_gram.addRow(_numRow, w);
++_nobs;
}
@Override protected void chunkDone(long n){
double r = 1.0/_nobs;
_gram.mul(r);
}
@Override public void reduce(GramUpdate gt){
double r1 = (double)_nobs/(_nobs+gt._nobs);
_gram.mul(r1);
double r2 = (double)gt._nobs/(_nobs+gt._nobs);
gt._gram.mul(r2);
_gram.add(gt._gram);
_nobs += gt._nobs;
}
}
private static class DivideU extends MRTask<DivideU> {
final double[] _sigma;
public DivideU(double[] sigma) {
_sigma = sigma;
}
@Override public void map(Chunk cs[]) {
assert _sigma.length == cs.length;
for (int col = 0; col < cs.length; col++) {
for(int row = 0; row < cs[0].len(); row++) {
double x = cs[col].atd(row);
cs[col].set(row, x / _sigma[col]);
}
}
}
}
// Compute Y = AG where A is n by p and G is a p by k standard Gaussian matrix
private static class RandSubInit extends FrameTask<RandSubInit> {
final double[][] _gaus; // G' is k by p for convenient multiplication
public RandSubInit(Key<Job> jobKey, DataInfo dinfo, double[][] gaus) {
super(jobKey, dinfo);
_gaus = gaus;
}
@Override protected void processRow(long gid, DataInfo.Row row, NewChunk[] outputs) {
for(int k = 0; k < _gaus.length; k++) {
double y = row.innerProduct(_gaus[k]);
outputs[k].addNum(y);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/svd/SVDModel.java
|
package hex.svd;
import hex.*;
import water.*;
import water.codegen.CodeGeneratorPipeline;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.udf.CFuncRef;
import water.util.JCodeGen;
import water.util.SBPrintStream;
import java.util.ArrayList;
public class SVDModel extends Model<SVDModel, SVDModel.SVDParameters, SVDModel.SVDOutput> {
public static class SVDParameters extends Model.Parameters {
public String algoName() { return "SVD"; }
public String fullName() { return "Singular Value Decomposition"; }
public String javaName() { return SVDModel.class.getName(); }
@Override public long progressUnits() {
switch(_svd_method) {
case GramSVD: return 2;
case Power: return 1 + _nv;
case Randomized: return 5 + _max_iterations;
default: return _nv;
}
}
public DataInfo.TransformType _transform = DataInfo.TransformType.NONE; // Data transformation (demean to compare with PCA)
public Method _svd_method = Method.GramSVD; // Method for computing SVD
public int _nv = 1; // Number of right singular vectors to calculate
public int _max_iterations = 1000; // Maximum number of iterations
// public Key<Frame> _u_key; // Frame key for left singular vectors (U)
public String _u_name;
// public Key<Frame> _v_key; // Frame key for right singular vectors (V)
public String _v_name;
public boolean _keep_u = true; // Should left singular vectors be saved in memory? (Only applies if _only_v = false)
public boolean _save_v_frame = true; // Should right singular vectors be saved as a frame?
public boolean _only_v = false; // For power method (others ignore): Compute only right singular vectors? (Faster if true)
public boolean _use_all_factor_levels = true; // When expanding categoricals, should all factor levels be kept (i.e., the first level is not dropped)?
public boolean _impute_missing = false; // Should missing numeric values be imputed with the column mean?
public enum Method {
GramSVD, Power, Randomized
}
}
public static class SVDOutput extends Model.Output {
// Iterations executed (Power and Randomized methods only)
public int _iterations;
// Right singular vectors (V)
public double[][] _v; // Used internally for PCA and GLRM
public Key<Frame> _v_key;
// Singular values (diagonal of D)
public double[] _d;
// Frame key for left singular vectors (U)
public Key<Frame> _u_key;
// Number of categorical and numeric columns
public int _ncats;
public int _nnums;
// Number of good rows in training frame (not skipped)
public long _nobs;
// Total column variance for expanded and transformed data
public double _total_variance;
// Categorical offset vector
public int[] _catOffsets;
// If standardized, mean of each numeric data column
public double[] _normSub;
// If standardized, one over standard deviation of each numeric data column
public double[] _normMul;
// Permutation matrix mapping training col indices to adaptedFrame
public int[] _permutation;
// Expanded column names of adapted training frame
public String[] _names_expanded;
// variables for building up a scoring history
public ArrayList<Double> _history_average_SEE = new ArrayList<>(); // for randomized SVD
public ArrayList<Double> _history_err = new ArrayList<>(); // for power SVD method
public ArrayList<Double> _history_eigenVectorIndex = new ArrayList<>(); // store which eigenvector we are working on
public ArrayList<Long> _training_time_ms = new ArrayList<>();
public SVDOutput(SVD b) { super(b); }
@Override public ModelCategory getModelCategory() { return ModelCategory.DimReduction; }
}
public SVDModel(Key<SVDModel> selfKey, SVDParameters parms, SVDOutput output) { super(selfKey, parms, output); }
@Override protected Futures remove_impl(Futures fs, boolean cascade) {
Keyed.remove(_output._u_key, fs, true);
Keyed.remove(_output._v_key, fs, true);
return super.remove_impl(fs, cascade);
}
/** Write out K/V pairs */
@Override protected AutoBuffer writeAll_impl(AutoBuffer ab) {
ab.putKey(_output._u_key);
ab.putKey(_output._v_key);
return super.writeAll_impl(ab);
}
@Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
ab.getKey(_output._u_key,fs);
ab.getKey(_output._v_key,fs);
return super.readAll_impl(ab,fs);
}
@Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
return new ModelMetricsSVD.SVDModelMetrics(_parms._nv);
}
public static class ModelMetricsSVD extends ModelMetricsUnsupervised {
public ModelMetricsSVD(Model model, Frame frame, CustomMetric customMetric) {
super(model, frame, 0, Double.NaN, customMetric);
}
// SVD currently does not have any model metrics to compute during scoring
public static class SVDModelMetrics extends MetricBuilderUnsupervised<SVDModelMetrics> {
public SVDModelMetrics(int dims) {
_work = new double[dims];
}
@Override public double[] perRow(double[] preds, float[] dataRow, Model m) { return preds; }
@Override public ModelMetrics makeModelMetrics(Model m, Frame f) {
return m.addModelMetrics(new ModelMetricsSVD(m, f, _customMetric));
}
}
}
@Override protected PredictScoreResult predictScoreImpl(Frame orig, Frame adaptedFr, String destination_key, final Job j, boolean computeMetrics, CFuncRef customMetricFunc) {
Frame adaptFrm = new Frame(adaptedFr);
for(int i = 0; i < _parms._nv; i++)
adaptFrm.add("PC"+String.valueOf(i+1),adaptFrm.anyVec().makeZero());
new MRTask() {
@Override public void map( Chunk chks[] ) {
if (isCancelled() || j != null && j.stop_requested()) return;
double tmp [] = new double[_output._names.length];
double preds[] = new double[_parms._nv];
for( int row = 0; row < chks[0]._len; row++) {
double p[] = score0(chks, row, tmp, preds);
for( int c=0; c<preds.length; c++ )
chks[_output._names.length+c].set(row, p[c]);
}
if (j !=null) j.update(1);
}
}.doAll(adaptFrm);
// Return the projection into right singular vector (V) space
int x = _output._names.length, y = adaptFrm.numCols();
Frame f = adaptFrm.extractFrame(x, y); // this will call vec_impl() and we cannot call the delete() below just yet
f = new Frame(Key.<Frame>make(destination_key), f.names(), f.vecs());
DKV.put(f);
ModelMetrics.MetricBuilder<?> mb = makeMetricBuilder(null);
return new PredictScoreResult(mb, f, f);
}
@Override protected double[] score0(double data[/*ncols*/], double preds[/*nclasses+1*/]) {
int numStart = _output._catOffsets[_output._catOffsets.length-1];
assert data.length == _output._permutation.length;
for(int i = 0; i < _parms._nv; i++) {
preds[i] = 0;
for (int j = 0; j < _output._ncats; j++) {
double tmp = data[_output._permutation[j]];
int last_cat = _output._catOffsets[j+1]-_output._catOffsets[j]-1; // Missing categorical values are mapped to extra (last) factor
int level = Double.isNaN(tmp) ? last_cat : (int)tmp - (_parms._use_all_factor_levels ? 0:1); // Reduce index by 1 if first factor level dropped during training
if (level < 0 || level > last_cat) continue; // Skip categorical level in test set but not in train
preds[i] += _output._v[_output._catOffsets[j]+level][i];
}
int dcol = _output._ncats;
int vcol = numStart;
for (int j = 0; j < _output._nnums; j++) {
preds[i] += (data[_output._permutation[dcol]] - _output._normSub[j]) * _output._normMul[j] * _output._v[vcol][i];
dcol++; vcol++;
}
}
return preds;
}
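// Editor's note: illustrative sketch, not part of the original H2O source. projectNumericRow is a
// hypothetical helper showing the same projection as score0 for a purely numeric row (no
// categoricals): standardize each value, then take the dot product with each retained right
// singular vector (column of v).
private static double[] projectNumericRow(double[] row, double[] normSub, double[] normMul,
                                           double[][] v, int nv) {
  double[] preds = new double[nv];
  for (int i = 0; i < nv; i++)
    for (int j = 0; j < row.length; j++)
      preds[i] += (row[j] - normSub[j]) * normMul[j] * v[j][i];
  return preds;
}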
@Override protected SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileCtx) {
sb = super.toJavaInit(sb, fileCtx);
sb.ip("public boolean isSupervised() { return " + isSupervised() + "; }").nl();
sb.ip("public int nfeatures() { return "+_output.nfeatures()+"; }").nl();
sb.ip("public int nclasses() { return "+_parms._nv+"; }").nl();
if (_output._nnums > 0) {
JCodeGen.toStaticVar(sb, "NORMMUL", _output._normMul, "Standardization/Normalization scaling factor for numerical variables.");
JCodeGen.toStaticVar(sb, "NORMSUB", _output._normSub, "Standardization/Normalization offset for numerical variables.");
}
JCodeGen.toStaticVar(sb, "CATOFFS", _output._catOffsets, "Categorical column offsets.");
JCodeGen.toStaticVar(sb, "PERMUTE", _output._permutation, "Permutation index vector.");
JCodeGen.toStaticVar(sb, "EIGVECS", _output._v, "Eigenvector matrix.");
return sb;
}
@Override protected void toJavaPredictBody(SBPrintStream bodySb,
CodeGeneratorPipeline classCtx,
CodeGeneratorPipeline fileCtx,
final boolean verboseCode) {
bodySb.i().p("java.util.Arrays.fill(preds,0);").nl();
final int cats = _output._ncats;
final int nums = _output._nnums;
bodySb.i().p("final int nstart = CATOFFS[CATOFFS.length-1];").nl();
bodySb.i().p("for(int i = 0; i < ").p(_parms._nv).p("; i++) {").nl();
// Categorical columns
bodySb.i(1).p("for(int j = 0; j < ").p(cats).p("; j++) {").nl();
bodySb.i(2).p("double d = data[PERMUTE[j]];").nl();
bodySb.i(2).p("int last = CATOFFS[j+1]-CATOFFS[j]-1;").nl();
bodySb.i(2).p("int c = Double.isNaN(d) ? last : (int)d").p(_parms._use_all_factor_levels ? ";":"-1;").nl();
bodySb.i(2).p("if(c < 0 || c > last) continue;").nl();
bodySb.i(2).p("preds[i] += EIGVECS[CATOFFS[j]+c][i];").nl();
bodySb.i(1).p("}").nl();
// Numeric columns
if (_output._nnums > 0) {
bodySb.i(1).p("for(int j = 0; j < ").p(nums).p("; j++) {").nl();
bodySb.i(2).p("preds[i] += (data[PERMUTE[j" + (cats > 0 ? "+" + cats : "") + "]]-NORMSUB[j])*NORMMUL[j]*EIGVECS[j" + (cats > 0 ? "+ nstart" : "") + "][i];").nl();
bodySb.i(1).p("}").nl();
}
bodySb.i().p("}").nl();
}
}
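// Hypothetical, illustrative sketch (not part of the original source): the numeric-only core of
// score0() above. Each projection preds[i] is the dot product of the standardized row with the
// i-th right singular vector. The inputs in main() are made-up example values.
class SvdProjectionSketch {
  static double[] project(double[] row, double[] normSub, double[] normMul, double[][] v, int nv) {
    double[] preds = new double[nv];
    for (int i = 0; i < nv; i++)
      for (int j = 0; j < row.length; j++)
        preds[i] += (row[j] - normSub[j]) * normMul[j] * v[j][i]; // standardize, then project onto V
    return preds;
  }
  public static void main(String[] args) {
    double[][] v = {{1, 0}, {0, 1}}; // 2 numeric columns, 2 right singular vectors
    double[] p = project(new double[]{3, 5}, new double[]{1, 1}, new double[]{1, 1}, v, 2);
    System.out.println(p[0] + " " + p[1]); // prints: 2.0 4.0
  }
}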
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/BranchInteractionConstraints.java
|
package hex.tree;
import water.Iced;
import water.util.IcedHashSet;
import water.util.IcedInt;
/**
 * Local branch interaction constraints class that stores information about allowed interactions between column indices
*/
public class BranchInteractionConstraints extends Iced<BranchInteractionConstraints> {
  // Set of column indices allowed for the current split, carrying over information from previous split decisions
IcedHashSet<IcedInt> allowedInteractionIndices;
public BranchInteractionConstraints(IcedHashSet<IcedInt> allowedInteractionIndices){
this.allowedInteractionIndices = allowedInteractionIndices;
}
public boolean isAllowedIndex(int i){
return allowedInteractionIndices.contains(new IcedInt(i));
}
/**
* Important method to decide which indices are allowed for the next level of constraints.
   * It computes the intersection between the current allowed indices and the input indices to make sure the local
   * constraint satisfies the global interaction constraints setting.
* @param set input set
* @return intersection of branch set and input set
*/
public IcedHashSet<IcedInt> intersection(IcedHashSet<IcedInt> set){
IcedHashSet<IcedInt> output = new IcedHashSet<>();
for(IcedInt i: set){
if (allowedInteractionIndices.contains(i)) {
output.add(i);
}
}
return output;
}
/**
   * Decides which column indices are allowed to be used for the next split in the next level of the tree.
* @param ics global interaction constraint object generated from input interaction constraints
* @param colIndex column index of the split to decide allowed indices for the next level of constraint
* @return new branch interaction object for the next level of the tree
*/
public BranchInteractionConstraints nextLevelInteractionConstraints(GlobalInteractionConstraints ics, int colIndex){
assert ics != null : "Interaction constraints: Global interaction constraints object cannot be null.";
assert ics.allowedInteractionContainsColumn(colIndex) : "Input column index should be in the allowed interaction map.";
assert this.allowedInteractionIndices != null : "Interaction constraints: Branch allowed interaction set cannot be null.";
IcedHashSet<IcedInt> allowedInteractions = ics.getAllowedInteractionForIndex(colIndex);
IcedHashSet<IcedInt> intersection = intersection(allowedInteractions);
return new BranchInteractionConstraints(intersection);
}
}
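// Hypothetical, illustrative demo (not part of the original source): how intersection() narrows
// the set of allowed columns as the tree gets deeper. Only APIs already used above are exercised.
class BranchInteractionConstraintsSketch {
  public static void main(String[] args) {
    IcedHashSet<IcedInt> allowed = new IcedHashSet<>();
    allowed.add(new IcedInt(0));
    allowed.add(new IcedInt(1));
    allowed.add(new IcedInt(2));
    BranchInteractionConstraints branch = new BranchInteractionConstraints(allowed);
    // columns allowed to interact with the (hypothetical) split column: {1, 3}
    IcedHashSet<IcedInt> allowedForSplitColumn = new IcedHashSet<>();
    allowedForSplitColumn.add(new IcedInt(1));
    allowedForSplitColumn.add(new IcedInt(3));
    // only index 1 is in both sets, so only column 1 stays allowed one level deeper
    IcedHashSet<IcedInt> next = branch.intersection(allowedForSplitColumn);
    System.out.println(next.contains(new IcedInt(1)) + " " + next.contains(new IcedInt(3))); // true false
  }
}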
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/CalibrationHelper.java
|
package hex.tree;
import hex.Model;
import hex.ModelBuilder;
import hex.ModelCategory;
import hex.glm.GLM;
import hex.glm.GLMModel;
import hex.isotonic.IsotonicRegression;
import hex.isotonic.IsotonicRegressionModel;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import static hex.ModelCategory.Binomial;
public class CalibrationHelper {
public enum CalibrationMethod {
AUTO("auto", -1),
PlattScaling("platt", 1),
IsotonicRegression("isotonic", 2);
private final int _calibVecIdx;
private final String _id;
CalibrationMethod(String id, int calibVecIdx) {
_calibVecIdx = calibVecIdx;
_id = id;
}
private int getCalibratedVecIdx() {
return _calibVecIdx;
}
public String getId() {
return _id;
}
}
public interface ModelBuilderWithCalibration<M extends Model<M , P, O>, P extends Model.Parameters, O extends Model.Output> {
ModelBuilder<M, P, O> getModelBuilder();
Frame getCalibrationFrame();
void setCalibrationFrame(Frame f);
}
public interface ParamsWithCalibration {
Model.Parameters getParams();
Frame getCalibrationFrame();
boolean calibrateModel();
CalibrationMethod getCalibrationMethod();
void setCalibrationMethod(CalibrationMethod calibrationMethod);
}
public interface OutputWithCalibration {
ModelCategory getModelCategory();
Model<?, ?, ?> calibrationModel();
void setCalibrationModel(Model<?, ?, ?> model);
default CalibrationMethod getCalibrationMethod() {
assert isCalibrated();
return calibrationModel() instanceof IsotonicRegressionModel ?
CalibrationMethod.IsotonicRegression : CalibrationMethod.PlattScaling;
}
default boolean isCalibrated() {
return calibrationModel() != null;
}
}
public static void initCalibration(ModelBuilderWithCalibration builder, ParamsWithCalibration parms, boolean expensive) {
// Calibration
Frame cf = parms.getCalibrationFrame(); // User-given calibration set
if (cf != null) {
if (! parms.calibrateModel())
builder.getModelBuilder().warn("_calibration_frame", "Calibration frame was specified but calibration was not requested.");
Frame adaptedCf = builder.getModelBuilder().init_adaptFrameToTrain(cf, "Calibration Frame", "_calibration_frame", expensive);
builder.setCalibrationFrame(adaptedCf);
}
if (parms.calibrateModel()) {
if (builder.getModelBuilder().nclasses() != 2)
builder.getModelBuilder().error("_calibrate_model", "Model calibration is only currently supported for binomial models.");
if (cf == null)
builder.getModelBuilder().error("_calibrate_model", "Calibration frame was not specified.");
}
}
public static <M extends Model<M , P, O>, P extends Model.Parameters, O extends Model.Output> Model<?, ?, ?> buildCalibrationModel(
ModelBuilderWithCalibration<M, P, O> builder, ParamsWithCalibration parms, Job job, M model
) {
final CalibrationMethod calibrationMethod = parms.getCalibrationMethod() == CalibrationMethod.AUTO ?
CalibrationMethod.PlattScaling : parms.getCalibrationMethod();
Key<Frame> calibInputKey = Key.make();
try {
Scope.enter();
job.update(0, "Calibrating probabilities");
Frame calib = builder.getCalibrationFrame();
Vec calibWeights = parms.getParams()._weights_column != null ? calib.vec(parms.getParams()._weights_column) : null;
Frame calibPredict = Scope.track(model.score(calib, null, job, false));
int calibVecIdx = calibrationMethod.getCalibratedVecIdx();
Frame calibInput = new Frame(calibInputKey,
new String[]{"p", "response"}, new Vec[]{calibPredict.vec(calibVecIdx), calib.vec(parms.getParams()._response_column)});
if (calibWeights != null) {
calibInput.add("weights", calibWeights);
}
DKV.put(calibInput);
final ModelBuilder<?, ?, ?> calibrationModelBuilder;
switch (calibrationMethod) {
case PlattScaling:
calibrationModelBuilder = makePlattScalingModelBuilder(calibInput, calibWeights != null);
break;
case IsotonicRegression:
calibrationModelBuilder = makeIsotonicRegressionModelBuilder(calibInput, calibWeights != null);
break;
default:
throw new UnsupportedOperationException("Unsupported calibration method: " + calibrationMethod);
}
return calibrationModelBuilder.trainModel().get();
} finally {
Scope.exit();
DKV.remove(calibInputKey);
}
}
static ModelBuilder<?, ?, ?> makePlattScalingModelBuilder(Frame calibInput, boolean hasWeights) {
Key<Model> calibModelKey = Key.make();
Job<?> calibJob = new Job<>(calibModelKey, ModelBuilder.javaName("glm"), "Platt Scaling (GLM)");
GLM calibBuilder = ModelBuilder.make("GLM", calibJob, calibModelKey);
calibBuilder._parms._intercept = true;
calibBuilder._parms._response_column = "response";
calibBuilder._parms._train = calibInput._key;
calibBuilder._parms._family = GLMModel.GLMParameters.Family.binomial;
calibBuilder._parms._lambda = new double[] {0.0};
if (hasWeights) {
calibBuilder._parms._weights_column = "weights";
}
return calibBuilder;
}
static ModelBuilder<?, ?, ?> makeIsotonicRegressionModelBuilder(Frame calibInput, boolean hasWeights) {
Key<Model> calibModelKey = Key.make();
Job<?> calibJob = new Job<>(calibModelKey, ModelBuilder.javaName("isotonicregression"), "Isotonic Regression Calibration");
IsotonicRegression calibBuilder = ModelBuilder.make("isotonicregression", calibJob, calibModelKey);
calibBuilder._parms._response_column = "response";
calibBuilder._parms._train = calibInput._key;
calibBuilder._parms._out_of_bounds = IsotonicRegressionModel.OutOfBoundsHandling.Clip;
if (hasWeights) {
calibBuilder._parms._weights_column = "weights";
}
return calibBuilder;
}
public static Frame postProcessPredictions(Frame predictFr, Job j, OutputWithCalibration output) {
if (output.calibrationModel() == null) {
return predictFr;
} else if (output.getModelCategory() == Binomial) {
Key<Job> jobKey = j != null ? j._key : null;
Key<Frame> calibInputKey = Key.make();
Frame calibOutput = null;
Frame toUnlock = null;
try {
final Model<?, ?, ?> calibModel = output.calibrationModel();
final int calibVecIdx = output.getCalibrationMethod().getCalibratedVecIdx();
final String[] calibFeatureNames = calibModel._output.features();
assert calibFeatureNames.length == 1;
final Frame calibInput = new Frame(calibInputKey, calibFeatureNames, new Vec[]{predictFr.vec(calibVecIdx)});
calibOutput = calibModel.score(calibInput);
final Vec[] calPredictions;
if (calibModel instanceof GLMModel) {
assert calibOutput._names.length == 3;
calPredictions = calibOutput.remove(new int[]{1, 2});
} else if (calibModel instanceof IsotonicRegressionModel) {
assert calibOutput._names.length == 1;
Vec p1 = calibOutput.remove(0);
Vec p0 = new P0Task().doAll(Vec.T_NUM, p1).outputFrame().lastVec();
calPredictions = new Vec[]{p0, p1};
} else
throw new UnsupportedOperationException("Unsupported calibration model: " + calibModel);
// append calibrated probabilities to the prediction frame
predictFr.write_lock(jobKey);
toUnlock = predictFr;
for (int i = 0; i < calPredictions.length; i++)
predictFr.add("cal_" + predictFr.name(1 + i), calPredictions[i]);
return predictFr.update(jobKey);
} finally {
if (toUnlock != null) {
predictFr.unlock(jobKey);
}
DKV.remove(calibInputKey);
if (calibOutput != null)
calibOutput.remove();
}
} else {
throw H2O.unimpl("Calibration is only supported for binomial models");
}
}
private static class P0Task extends MRTask<P0Task> {
@Override
public void map(Chunk c, NewChunk nc) {
for (int i = 0; i < c._len; i++) {
if (c.isNA(i))
nc.addNA();
else {
double p1 = c.atd(i);
nc.addNum(1.0 - p1);
}
}
}
}
}
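// Hypothetical, illustrative sketch (not part of the original source): what applying the fitted
// Platt-scaling GLM amounts to at scoring time. The binomial GLM trained above has a single
// feature "p" (the raw p1 of the uncalibrated model), so the calibrated probability is
// sigmoid(a * p + b). The coefficients a and b below are made-up example values, not real output.
class PlattScalingSketch {
  static double calibrate(double rawP1, double a, double b) {
    return 1.0 / (1.0 + Math.exp(-(a * rawP1 + b))); // logistic (sigmoid) link of the binomial GLM
  }
  public static void main(String[] args) {
    double calibratedP1 = calibrate(0.80, 4.2, -2.1); // hypothetical coefficients a=4.2, b=-2.1
    System.out.println(calibratedP1);                 // ~0.78
  }
}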
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/CompressedForest.java
|
package hex.tree;
import water.DKV;
import water.Iced;
import water.Key;
/**
* Collection of Compressed Trees
* contains:
* - keys to trees
* - metadata shared among all the trees (eg. domain information)
* The purpose of this class is to avoid replicating large common metadata into each Compressed Tree (eg. domains).
*/
public class CompressedForest extends Iced<CompressedForest> {
public final Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _treeKeys;
public final String[][] _domains;
public CompressedForest(Key<CompressedTree>[][] treeKeys, String[][] domains) {
_treeKeys = treeKeys;
_domains = domains;
}
public final int ntrees() { return _treeKeys.length; }
/**
* Fetches trees from DKV and converts to a node-local structure.
* @return fetched trees
*/
public final LocalCompressedForest fetch() {
int ntrees = _treeKeys.length;
CompressedTree[][] trees = new CompressedTree[ntrees][];
for (int t = 0; t < ntrees; t++) {
Key[] treek = _treeKeys[t];
trees[t] = new CompressedTree[treek.length];
for (int i = 0; i < treek.length; i++)
if (treek[i] != null)
trees[t][i] = DKV.get(treek[i]).get();
}
return new LocalCompressedForest(trees, _domains);
}
/**
* Node-local representation of a collection of trees.
   * It is not meant to be serialized/Iced or sent over the wire.
*/
public static class LocalCompressedForest {
public CompressedTree[][] _trees;
public String[][] _domains;
private LocalCompressedForest(CompressedTree[][] trees, String[][] domains) {
_trees = trees;
_domains = domains;
}
/** Score given tree on the row of data.
* @param data row of data
* @param preds array to hold resulting prediction
* @param tidx index of a tree (points to a representation of a single regression tree, or multi tree) */
public final void scoreTree(double data[], double preds[], int tidx) {
CompressedTree[] ts = _trees[tidx];
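      // ts.length == 1 means a single regression tree per group (prediction goes to preds[0]);
      // for multi-class models there is one tree per class and class c contributes to preds[c+1]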
for( int c=0; c<ts.length; c++ )
if( ts[c] != null )
preds[ts.length==1?0:c+1] += ts[c].score(data, _domains);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/CompressedTree.java
|
package hex.tree;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.*;
import hex.genmodel.algos.tree.SharedTreeMojoModel;
import hex.genmodel.algos.tree.SharedTreeSubgraph;
import water.*;
import water.util.IcedBitSet;
import water.util.SB;
import static hex.genmodel.algos.tree.SharedTreeMojoModel.AuxInfo;
//---------------------------------------------------------------------------
// Note: this description seems to be out-of-date
//
// Highly compressed tree encoding:
// tree: 1B nodeType, 2B colId, 4B splitVal, left-tree-size, left, right
// nodeType: (from lsb):
// 2 bits (1,2) skip-tree-size-size,
// 2 bits (4,8) operator flag (0 --> <, 1 --> ==, 2 --> small (4B) group, 3 --> big (var size) group),
// 1 bit ( 16) left leaf flag,
// 1 bit ( 32) left leaf type flag (0: subtree, 1: small cat, 2: big cat, 3: float)
// 1 bit ( 64) right leaf flag,
// 1 bit (128) right leaf type flag (0: subtree, 1: small cat, 2: big cat, 3: float)
// left, right: tree | prediction
// prediction: 4 bytes of float (or 1 or 2 bytes of class prediction)
//
public class CompressedTree extends Keyed<CompressedTree> {
private static final String KEY_PREFIX = "tree_";
final byte [] _bits;
final long _seed;
public CompressedTree(byte[] bits, long seed, int tid, int cls) {
super(makeTreeKey(tid, cls));
_bits = bits;
_seed = seed;
}
private CompressedTree(Key<CompressedTree> key, byte[] bits, long seed) {
super(key);
_bits = bits;
_seed = seed;
}
public double score(final double row[], final String[][] domains) {
return SharedTreeMojoModel.scoreTree(_bits, row, false, domains);
}
@Deprecated
public String getDecisionPath(final double row[], final String[][] domains) {
double d = SharedTreeMojoModel.scoreTree(_bits, row, true, domains);
return SharedTreeMojoModel.getDecisionPath(d);
}
public <T> T getDecisionPath(final double row[], final String[][] domains, final SharedTreeMojoModel.DecisionPathTracker<T> tr) {
double d = SharedTreeMojoModel.scoreTree(_bits, row, true, domains);
return SharedTreeMojoModel.getDecisionPath(d, tr);
}
public Map<Integer, AuxInfo> toAuxInfos() {
return SharedTreeMojoModel.readAuxInfos(_bits);
}
public int findMaxNodeId() {
return SharedTreeMojoModel.findMaxNodeId(_bits);
}
public CompressedTree updateLeafNodeWeights(double[] leafNodeWeights) {
Map<Integer, AuxInfo> nodeIdToAuxInfo = SharedTreeMojoModel.readAuxInfos(_bits);
List<AuxInfo> auxInfos = new ArrayList<>(nodeIdToAuxInfo.values());
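    // Sort by descending parent id so the deepest nodes are processed first; this way each node's
    // weight is fully accumulated before it is added into its parent (bottom-up propagation).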
auxInfos.sort(Comparator.comparingInt(o -> -o.pid));
for (AuxInfo auxInfo : auxInfos) {
auxInfo.weightL = 0;
auxInfo.weightR = 0;
}
for (AuxInfo auxInfo : auxInfos) {
auxInfo.weightL += (float) leafNodeWeights[auxInfo.nidL];
auxInfo.weightR += (float) leafNodeWeights[auxInfo.nidR];
if (auxInfo.pid >= 0) {
AuxInfo parentInfo = nodeIdToAuxInfo.get(auxInfo.pid);
float nodeWeight = auxInfo.weightL + auxInfo.weightR;
if (parentInfo.nidL == auxInfo.nid)
parentInfo.weightL += nodeWeight;
else
parentInfo.weightR += nodeWeight;
}
}
ByteBuffer bb = ByteBuffer.allocate(_bits.length).order(ByteOrder.nativeOrder());
SharedTreeMojoModel.writeUpdatedAuxInfos(_bits, nodeIdToAuxInfo, bb);
byte[] bits = bb.array();
return new CompressedTree(_key, bits, _seed);
}
public boolean hasZeroWeight() {
return SharedTreeMojoModel.readAuxInfos(_bits)
.values()
.stream()
.anyMatch(auxInfo -> auxInfo.weightL == 0 || auxInfo.weightR == 0);
}
public SharedTreeSubgraph toSharedTreeSubgraph(final CompressedTree auxTreeInfo,
final String[] colNames, final String[][] domains) {
TreeCoords tc = getTreeCoords();
String treeName = SharedTreeMojoModel.treeName(tc._treeId, tc._clazz, domains[domains.length - 1]);
return SharedTreeMojoModel.computeTreeGraph(tc._treeId, treeName, _bits, auxTreeInfo._bits, colNames, domains);
}
public Random rngForChunk(int cidx) {
Random rand = new Random(_seed);
for (int i = 0; i < cidx; i++) rand.nextLong();
long seed = rand.nextLong();
return new Random(seed);
}
public String toString(SharedTreeModel.SharedTreeOutput tm) {
final String[] names = tm._names;
final SB sb = new SB();
new TreeVisitor<RuntimeException>(this) {
@Override protected void pre(int col, float fcmp, IcedBitSet gcmp, int equal, int naSplitDirInt) {
if (naSplitDirInt == DhnasdNaVsRest)
sb.p("!Double.isNaN(" + sb.i().p(names[col]).p(")"));
else if (naSplitDirInt == DhnasdNaLeft)
sb.p("Double.isNaN(" + sb.i().p(names[col]).p(") || "));
else if (equal==1)
sb.p("!Double.isNaN(" + sb.i().p(names[col]).p(") && "));
if (naSplitDirInt != DhnasdNaVsRest) {
sb.i().p(names[col]).p(' ');
if (equal == 0) sb.p("< ").pj(fcmp);
else if (equal == 1) sb.p("!=").pj(fcmp);
else sb.p("in ").p(gcmp);
}
sb.ii(1).nl();
}
@Override protected void post(int col, float fcmp, int equal) {
sb.di(1);
}
@Override protected void leaf(float pred) {
sb.i().p("return ").pj(pred).nl();
}
}.visit();
return sb.toString();
}
public static Key<CompressedTree> makeTreeKey(int treeId, int clazz) {
return Key.makeSystem("tree_" + treeId + "_" + clazz + "_" + Key.rand());
}
/**
* Retrieves tree coordinates in the tree ensemble
* @return instance of TreeCoord
*/
TreeCoords getTreeCoords() {
return TreeCoords.parseTreeCoords(_key);
}
@Override protected long checksum_impl() {
throw new UnsupportedOperationException();
}
static class TreeCoords {
int _treeId;
int _clazz;
private static TreeCoords parseTreeCoords(Key<CompressedTree> ctKey) {
String key = ctKey.toString();
int prefixIdx = key.indexOf(KEY_PREFIX);
if (prefixIdx < 0)
throw new IllegalStateException("Unexpected structure of a CompressedTree key=" + key);
String[] keyParts = key.substring(prefixIdx + KEY_PREFIX.length()).split("_", 3);
TreeCoords tc = new TreeCoords();
tc._treeId = Integer.valueOf(keyParts[0]);
tc._clazz = Integer.valueOf(keyParts[1]);
return tc;
}
}
public long getSeed() {
return _seed;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/Constraints.java
|
package hex.tree;
import hex.Distribution;
import hex.genmodel.utils.DistributionFamily;
import water.Iced;
public class Constraints extends Iced<Constraints> {
private final int[] _cs;
final double _min;
final double _max;
public final Distribution _dist;
private final boolean _use_bounds;
public Constraints(int[] cs, Distribution dist, boolean useBounds) {
this(cs, dist, useBounds, Double.NaN, Double.NaN);
}
private Constraints(int[] cs, Distribution dist, boolean useBounds, double min, double max) {
_cs = cs;
_min = min;
_max = max;
_dist = dist;
_use_bounds = useBounds;
}
public int getColumnConstraint(int col) {
return _cs[col];
}
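  // Propagates a monotonic constraint to a child node: for an "increasing" constraint the left
  // child's predictions are bounded from above by the split bound and the right child's from below;
  // for a "decreasing" constraint the roles of the two children are swapped.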
Constraints withNewConstraint(int col, int way, double bound) {
assert _cs[col] == 1 || _cs[col] == -1;
if (_cs[col] == 1) { // "increasing" constraint
if (way == 0) { // left
return new Constraints(_cs, _dist, _use_bounds, _min, newMaxBound(_max, bound));
} else { // right
return new Constraints(_cs, _dist, _use_bounds, newMinBound(_min, bound), _max);
}
} else { // "decreasing" constraint
if (way == 0) { // left
return new Constraints(_cs, _dist, _use_bounds, newMinBound(_min, bound), _max);
} else { // right
return new Constraints(_cs, _dist, _use_bounds, _min, newMaxBound(_max, bound));
}
}
}
public boolean useBounds() {
return _use_bounds;
}
private static double newMaxBound(double old_max, double proposed_max) {
if (Double.isNaN(old_max))
return proposed_max;
assert !Double.isNaN(proposed_max);
return Math.min(old_max, proposed_max);
}
private static double newMinBound(double old_min, double proposed_min) {
if (Double.isNaN(old_min))
return proposed_min;
assert !Double.isNaN(proposed_min);
return Math.max(old_min, proposed_min);
}
boolean needsGammaDenom() {
return !_dist._family.equals(DistributionFamily.gaussian) && !_dist._family.equals(DistributionFamily.quantile);
}
boolean needsGammaNom() {
return _dist._family.equals(DistributionFamily.tweedie);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/DHistogram.java
|
package hex.tree;
import hex.Distribution;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.uplift.*;
import org.apache.log4j.Logger;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.RandomUtils;
import java.util.Arrays;
import java.util.Random;
import static hex.tree.SharedTreeModel.SharedTreeParameters.HistogramType;
/** A Histogram, computed in parallel over a Vec.
*
 * <p>A {@code DHistogram} bins every value added to it, and computes the
* vec min and max (for use in the next split), and response mean and variance
* for each bin. {@code DHistogram}s are initialized with a min, max and
 * number-of-elements to be added (all of which are generally available from
* a Vec). Bins run from min to max in uniform sizes. If the {@code
* DHistogram} can determine that fewer bins are needed (e.g. boolean columns
* run from 0 to 1, but only ever take on 2 values, so only 2 bins are
* needed), then fewer bins are used.
*
 * <p>{@code DHistogram}s are shared per-node, and atomically updated. There's
* an {@code add} call to help cross-node reductions. The data is stored in
* primitive arrays, so it can be sent over the wire.
*
* <p>If we are successively splitting rows (e.g. in a decision tree), then a
* fresh {@code DHistogram} for each split will dynamically re-bin the data.
* Each successive split will logarithmically divide the data. At the first
* split, outliers will end up in their own bins - but perhaps some central
* bins may be very full. At the next split(s) - if they happen at all -
* the full bins will get split, and again until (with a log number of splits)
* each bin holds roughly the same amount of data. This 'UniformAdaptive' binning
* resolves a lot of problems with picking the proper bin count or limits -
* generally a few more tree levels will equal any fancy but fixed-size binning strategy.
*
* <p>Support for histogram split points based on quantiles (or random points) is
* available as well, via {@code _histoType}.
*
*/
public final class DHistogram extends Iced<DHistogram> {
private static final Logger LOG = Logger.getLogger(DHistogram.class);
public static final int INT_NA = Integer.MIN_VALUE; // integer representation of NA
public final transient String _name; // Column name (for debugging)
public final double _minSplitImprovement;
public final byte _isInt; // 0: float col, 1: int col, 2: categorical & int col
public final boolean _intOpt;
public char _nbin; // Bin count (excluding NA bucket)
public double _step; // Linear interpolation step per bin
public final double _min, _maxEx; // Conservative Min/Max over whole collection. _maxEx is Exclusive.
public final int _minInt; // Integer version of _min. Used in integer optimized histograms.
public final boolean _initNA; // Does the initial histogram have any NAs?
// Needed to correctly count actual number of bins of the initial histogram.
public final double _pred1; // We calculate what would be the SE for a possible fallback predictions _pred1
public final double _pred2; // and _pred2. Currently used for min-max bounds in monotonic GBMs.
protected double [] _vals; // Values w, wY and wYY encoded per bin in a single array.
// If _pred1 or _pred2 are specified they are included as well.
  // If constraints are used and the gamma denominator or nominator needs to be calculated, it will be included.
protected final int _vals_dim; // _vals.length == _vals_dim * _nbin; How many values per bin are encoded in _vals.
// Current possible values are
  // - 3: neither _pred1 nor _pred2 is provided and the gamma denominator is not needed
  // - 5: _pred1 and/or _pred2 is provided, but neither the gamma denominator nor nominator is needed
  // - 6: the gamma denominator is needed
  // - 7: the gamma nominator is needed as well (tweedie constraints)
// also see functions hasPreds() and hasDenominator()
protected final boolean _useUplift;
protected double [] _valsUplift; // if not null always dimension 4:
  // 0 treatment group row count
  // 1 treatment group response sum
  // 2 control group row count
  // 3 control group response sum
protected final int _valsDimUplift = 4;
protected final Divergence _upliftMetric;
private final Distribution _dist;
public double w(int i){ return _vals[_vals_dim*i+0];}
public double wY(int i){ return _vals[_vals_dim*i+1];}
public double wYY(int i){return _vals[_vals_dim*i+2];}
public double wNA() { return _vals[_vals_dim*_nbin+0]; }
public double wYNA() { return _vals[_vals_dim*_nbin+1]; }
public double wYYNA() { return _vals[_vals_dim*_nbin+2]; }
public double[] getRawVals() {
return _vals;
}
/**
* Squared Error for NA bucket and prediction value _pred1
* @return se
*/
public double seP1NA() { return _vals[_vals_dim*_nbin+3]; }
/**
* Squared Error for NA bucket and prediction value _pred2
* @return se
*/
public double seP2NA() { return _vals[_vals_dim*_nbin+4]; }
public double denNA() { return _vals[_vals_dim*_nbin+5]; }
public double nomNA() { return _vals[_vals_dim*_nbin+6]; }
final boolean hasPreds() {
return _vals_dim >= 5;
}
public double numTreatmentNA() { return _valsUplift[_valsDimUplift*_nbin]; }
public double respTreatmentNA() { return _valsUplift[_valsDimUplift*_nbin+1]; }
public double numControlNA() { return _valsUplift[_valsDimUplift*_nbin+2]; }
public double respControlNA() { return _valsUplift[_valsDimUplift*_nbin+3]; }
final boolean hasDenominator() {
return _vals_dim >= 6;
}
final boolean hasNominator() {
return _vals_dim == 7;
}
final boolean useUplift(){
return _useUplift;
}
protected double _min2, _maxIn; // Min/Max, _maxIn is Inclusive.
  public HistogramType _histoType; // histogram type - determines how split points are chosen (uniform, random, quantile-based, ...)
transient double[] _splitPts; // split points between _min and _maxEx (either random or based on quantiles)
transient int _zeroSplitPntPos;
public final boolean _checkFloatSplits;
transient float[] _splitPtsFloat;
public final long _seed;
public transient boolean _absoluteSplitPts;
public Key<HistoSplitPoints> _globalSplitPointsKey; // key under which original top-level quantiles are stored;
final double[] _customSplitPoints; // explicitly given split points (for UniformRobust)
/**
* Split direction for missing values.
*
* Warning: If you change this enum, make sure to synchronize them with `hex.genmodel.algos.tree.NaSplitDir` in
* package `h2o-genmodel`.
*/
public enum NASplitDir {
//never saw NAs in training
None(0), //initial state - should not be present in a trained model
// saw NAs in training
NAvsREST(1), //split off non-NA (left) vs NA (right)
NALeft(2), //NA goes left
NARight(3), //NA goes right
// never NAs in training, but have a way to deal with them in scoring
Left(4), //test time NA should go left
Right(5); //test time NA should go right
private final int value;
NASplitDir(int v) { this.value = v; }
public int value() { return value; }
}
static class HistoSplitPoints extends Keyed<HistoSplitPoints> {
public HistoSplitPoints(Key<HistoSplitPoints> key, double[] splitPts) {
this(key, splitPts, true);
}
public HistoSplitPoints(Key<HistoSplitPoints> key, double[] splitPts, boolean canRefine) {
super(key);
this.splitPts = splitPts;
this.canRefine = canRefine;
}
boolean canRefine;
double[/*nbins*/] splitPts;
}
static class StepOutOfRangeException extends RuntimeException {
public StepOutOfRangeException(String name, double step, int xbins, double maxEx, double min) {
super("column=" + name + " leads to invalid histogram(check numeric range) -> [max=" + maxEx + ", min = " + min + "], step= " + step + ", xbin= " + xbins);
}
}
DHistogram(String name, final int nbins, int nbins_cats, byte isInt, double min, double maxEx, boolean intOpt, boolean initNA,
double minSplitImprovement, SharedTreeModel.SharedTreeParameters.HistogramType histogramType, long seed, Key<HistoSplitPoints> globalSplitPointsKey,
Constraints cs, boolean checkFloatSplits, boolean useUplift, UpliftDRFModel.UpliftDRFParameters.UpliftMetricType upliftMetricType,
double[] customSplitPoints) {
assert nbins >= 1;
assert nbins_cats >= 1;
    assert maxEx > min : "Caller ensures "+maxEx+">"+min+", since if max==min the column "+name+" is constant";
if (cs != null) {
_pred1 = cs._min;
_pred2 = cs._max;
if (!cs.needsGammaDenom() && !cs.needsGammaNom()) {
_vals_dim = Double.isNaN(_pred1) && Double.isNaN(_pred2) ? 3 : 5;
} else if (!cs.needsGammaNom()) {
_vals_dim = 6;
} else {
_vals_dim = 7;
}
_dist = cs._dist;
} else {
_pred1 = Double.NaN;
_pred2 = Double.NaN;
_vals_dim = 3;
_dist = null;
}
_isInt = isInt;
_name = name;
_min = min;
_maxEx = maxEx; // Set Exclusive max
_min2 = Double.MAX_VALUE; // Set min/max to outer bounds
_maxIn= -Double.MAX_VALUE;
_initNA = initNA;
_intOpt = intOpt;
_minInt = (int) min;
_minSplitImprovement = minSplitImprovement;
_histoType = histogramType;
_seed = seed;
if (_histoType == HistogramType.RoundRobin) {
HistogramType[] h = HistogramType.ROUND_ROBIN_CANDIDATES;
_histoType = h[(int)Math.abs(seed % h.length)];
}
assert _histoType != SharedTreeModel.SharedTreeParameters.HistogramType.RoundRobin;
if (_histoType == SharedTreeModel.SharedTreeParameters.HistogramType.AUTO)
_histoType= SharedTreeModel.SharedTreeParameters.HistogramType.UniformAdaptive;
assert(_histoType!= SharedTreeModel.SharedTreeParameters.HistogramType.RoundRobin);
_globalSplitPointsKey = globalSplitPointsKey;
// See if we can show there are fewer unique elements than nbins.
// Common for e.g. boolean columns, or near leaves.
int xbins = isInt == 2 ? nbins_cats : nbins;
if (isInt > 0 && maxEx - min <= xbins) {
assert ((long) min) == min : "Overflow for integer/categorical histogram: minimum value cannot be cast to long without loss: (long)" + min + " != " + min + "!"; // No overflow
xbins = (char) ((long) maxEx - (long) min); // Shrink bins
_step = 1.0f; // Fixed stepsize
} else {
_step = xbins / (maxEx - min); // Step size for linear interpolation, using mul instead of div
if(_step <= 0 || Double.isInfinite(_step) || Double.isNaN(_step))
throw new StepOutOfRangeException(name,_step, xbins, maxEx, min);
}
_nbin = (char) xbins;
_useUplift = useUplift;
if (useUplift) {
switch (upliftMetricType) {
case ChiSquared:
_upliftMetric = new ChiSquaredDivergence();
break;
case Euclidean:
_upliftMetric = new EuclideanDistance();
break;
default:
_upliftMetric = new KLDivergence();
}
} else {
_upliftMetric = null;
}
assert(_nbin>0);
assert(_vals == null);
_checkFloatSplits = checkFloatSplits;
_customSplitPoints = customSplitPoints;
if (LOG.isTraceEnabled()) LOG.trace("Histogram: " + this);
// Do not allocate the big arrays here; wait for scoreCols to pick which cols will be used.
}
// Interpolate d to find bin#
public int bin(final double col_data) {
if(Double.isNaN(col_data)) return _nbin; // NA bucket
if (Double.isInfinite(col_data)) // Put infinity to most left/right bin
if (col_data<0) return 0;
else return _nbin-1;
assert _min <= col_data && col_data < _maxEx : "Coldata " + col_data + " out of range " + this;
// When the model is exposed to new test data, we could have data that is
// out of range of any bin - however this binning call only happens during
// model-building.
int idx1;
double pos = _absoluteSplitPts ? col_data : ((col_data - _min) * _step);
if (_splitPts != null) {
idx1 = pos == 0.0 ? _zeroSplitPntPos : Arrays.binarySearch(_splitPts, pos);
if (idx1 < 0) idx1 = -idx1 - 2;
} else {
idx1 = (int) pos;
}
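    // When float-precision split points are tracked, nudge the bin index so that the chosen bin is
    // consistent with comparing col_data against the float-cast split values (_splitPtsFloat).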
if (_splitPtsFloat != null) {
if (idx1 + 1 < _splitPtsFloat.length) {
float splitAt = _splitPtsFloat[idx1 + 1];
if (col_data >= splitAt) {
idx1++;
}
if (idx1 > 0) {
if (!(col_data >= _splitPtsFloat[idx1])) {
idx1--;
}
}
}
}
if (idx1 == _nbin)
idx1--; // Round-off error allows idx1 to hit upper bound, so truncate
assert 0 <= idx1 && idx1 < _nbin : idx1 + " " + _nbin;
return idx1;
}
public double binAt( int b ) {
if (_absoluteSplitPts)
return _splitPts[b];
return _min + (_splitPts == null ? b : _splitPts[b]) / _step;
}
// number of bins excluding the NA bin
public int nbins() { return _nbin; }
// actual number of bins (possibly including NA bin)
public int actNBins() {
return nbins() + (hasNABin() ? 1 : 0);
}
public double bins(int b) { return w(b); }
  // return the number of non-empty bins (doesn't consider the NA bin)
public int nonEmptyBins() {
if (_vals == null)
return 0;
int nonEmpty = 0;
for (int i = 0; i < _vals.length - _vals_dim; i += _vals_dim) { // don't count NA (last bin)
if (_vals[i] > 0) {
nonEmpty++;
}
}
return nonEmpty;
}
public boolean hasNABin() {
if (_vals == null)
return _initNA; // we are in the initial histogram (and didn't see the data yet)
else
return wNA() > 0;
}
// Big allocation of arrays
public void init() { init(null, null);}
public void init(double[] vals) { init(vals, null);}
public void init(final double[] vals, double[] valsUplift) {
assert _vals == null;
if (_histoType==HistogramType.Random) {
// every node makes the same split points
Random rng = RandomUtils.getRNG((Double.doubleToRawLongBits(((_step+0.324)*_min+8.3425)+89.342*_maxEx) + 0xDECAF*_nbin + 0xC0FFEE*_isInt + _seed));
assert _nbin > 1;
_splitPts = makeRandomSplitPoints(_nbin, rng);
}
else if (_histoType== HistogramType.QuantilesGlobal) {
assert (_splitPts == null);
if (_globalSplitPointsKey != null) {
HistoSplitPoints hq = DKV.getGet(_globalSplitPointsKey);
if (hq != null) {
_splitPts = hq.splitPts;
if (_splitPts!=null) {
if (LOG.isTraceEnabled()) LOG.trace("Obtaining global splitPoints: " + Arrays.toString(_splitPts));
_splitPts = ArrayUtils.limitToRange(_splitPts, _min, _maxEx);
if (hq.canRefine && _splitPts.length > 1 && _splitPts.length < _nbin)
_splitPts = ArrayUtils.padUniformly(_splitPts, _nbin);
if (_splitPts.length <= 1) {
_splitPts = null; //abort, fall back to uniform binning
_histoType = HistogramType.UniformAdaptive;
}
else {
_absoluteSplitPts = true;
_nbin = (char)_splitPts.length;
if (LOG.isTraceEnabled()) LOG.trace("Refined splitPoints: " + Arrays.toString(_splitPts));
}
}
}
}
}
else if (_histoType == HistogramType.UniformRobust) {
if (_customSplitPoints != null) {
defineSplitPointsFromCustomSplitPoints(_customSplitPoints);
} else {
// no custom split points were given - meaning no issue was found with data distribution of the column
// and UniformRobust should behave just like UniformAdaptive (for now)
_histoType = HistogramType.UniformAdaptive;
}
} else { // UniformAdaptive is all that is left
assert _histoType == HistogramType.UniformAdaptive;
}
if (_splitPts != null) {
// Inject canonical representation of zero - convert "negative zero" to 0.0d
// This is for PUBDEV-7161 - Arrays.binarySearch used in bin() method is not able to find a negative zero,
// we always use 0.0d instead
// We also cache the position of zero in the split points for a faster lookup
_zeroSplitPntPos = Arrays.binarySearch(_splitPts, 0.0d);
if (_zeroSplitPntPos < 0) {
int nzPos = Arrays.binarySearch(_splitPts, -0.0d);
if (nzPos >= 0) {
_splitPts[nzPos] = 0.0d;
_zeroSplitPntPos = nzPos;
}
}
}
// otherwise AUTO/UniformAdaptive
_vals = vals == null ? MemoryManager.malloc8d(_vals_dim * _nbin + _vals_dim) : vals;
if(useUplift()) {
_valsUplift = valsUplift == null ? MemoryManager.malloc8d(_valsDimUplift * _nbin + _valsDimUplift) : valsUplift;
}
// this always holds: _vals != null
assert _nbin > 0;
if (_checkFloatSplits) {
_splitPtsFloat = new float[_nbin];
for (int i = 0; i < _nbin; i++) {
_splitPtsFloat[i] = (float) binAt(i);
}
}
assert !_intOpt || _splitPts == null : "Integer-optimization cannot be enabled when split points are defined";
assert !_intOpt || _histoType == HistogramType.UniformAdaptive || _histoType == HistogramType.UniformRobust : "Integer-optimization can only be enabled for histogram type 'UniformAdaptive' or 'UniformRobust'.";
}
void defineSplitPointsFromCustomSplitPoints(double[] customSplitPoints) {
_splitPts = customSplitPoints;
_splitPts = ArrayUtils.limitToRange(_splitPts, _min, _maxEx);
if (_splitPts.length <= 1) {
_splitPts = null; // abort, fall back to uniform binning
_histoType = HistogramType.UniformAdaptive;
}
else {
_absoluteSplitPts = true;
_nbin = (char)(_splitPts.length - 1);
}
}
// Merge two equal histograms together. Done in a F/J reduce, so no
// synchronization needed.
public void add( DHistogram dsh ) {
assert (_vals == null || dsh._vals == null) || (_isInt == dsh._isInt && _nbin == dsh._nbin && _step == dsh._step &&
_min == dsh._min && _maxEx == dsh._maxEx);
if( dsh._vals == null ) return;
if(_vals == null) {
init(dsh._vals, dsh._valsUplift);
} else {
ArrayUtils.add(_vals, dsh._vals);
ArrayUtils.add(_valsUplift, dsh._valsUplift);
}
if (_min2 > dsh._min2) _min2 = dsh._min2;
if (_maxIn < dsh._maxIn) _maxIn = dsh._maxIn;
}
// Inclusive min & max
public double find_min () { return _min2 ; }
public double find_maxIn() { return _maxIn; }
// Exclusive max
public double find_maxEx() { return find_maxEx(_maxIn,_isInt); }
public static double find_maxEx(double maxIn, int isInt ) {
double ulp = Math.ulp(maxIn);
if( isInt > 0 && 1 > ulp ) ulp = 1;
double res = maxIn+ulp;
return Double.isInfinite(res) ? maxIn : res;
}
/**
   * The initial histogram bins are set up from the Vec rollups.
   * @param fr frame with column data
   * @param ncols number of columns
   * @param nbins number of bins
   * @param hs an array of histograms to be initialized
   * @param seed seed for reproducibility
   * @param parms parameters of the model
   * @param globalSplitPointsKey array of global split-points keys
   * @param cs monotone constraints (could be null)
   * @param checkFloatSplits whether bin assignment should be consistent with float-precision split points
   * @param ics global interaction constraints (could be null)
   * @return array of DHistogram objects
*/
public static DHistogram[] initialHist(Frame fr, int ncols, int nbins, DHistogram hs[], long seed, SharedTreeModel.SharedTreeParameters parms,
Key<HistoSplitPoints>[] globalSplitPointsKey,
Constraints cs, boolean checkFloatSplits, GlobalInteractionConstraints ics) {
Vec vecs[] = fr.vecs();
for( int c=0; c<ncols; c++ ) {
long vlen = 0;
if(ics != null && !ics.allowedInteractionContainsColumn(c)){
hs[c] = null;
} else {
Vec v = vecs[c];
final double minIn = v.isCategorical() ? 0 : Math.max(v.min(), -Double.MAX_VALUE); // inclusive vector min
final double maxIn = v.isCategorical() ? v.domain().length - 1 : Math.min(v.max(), Double.MAX_VALUE); // inclusive vector max
final double maxEx = v.isCategorical() ? v.domain().length : find_maxEx(maxIn, v.isInt() ? 1 : 0); // smallest exclusive max
vlen = v.length();
final long nacnt = v.naCnt();
try {
byte type = (byte) (v.isCategorical() ? 2 : (v.isInt() ? 1 : 0));
boolean intOpt = useIntOpt(v, parms, cs);
hs[c] = nacnt == vlen || v.isConst(true) ?
null : make(fr._names[c], nbins, type, minIn, maxEx, intOpt, nacnt > 0, seed, parms, globalSplitPointsKey[c], cs, checkFloatSplits, null);
} catch (StepOutOfRangeException e) {
hs[c] = null;
LOG.warn("Column " + fr._names[c] + " with min = " + v.min() + ", max = " + v.max() + " has step out of range (" + e.getMessage() + ") and is ignored.");
}
}
assert (hs[c] == null || vlen > 0);
}
return hs;
}
public static DHistogram make(String name, final int nbins, byte isInt, double min, double maxEx, boolean intOpt, boolean hasNAs,
long seed, SharedTreeModel.SharedTreeParameters parms, Key<HistoSplitPoints> globalSplitPointsKey,
Constraints cs, boolean checkFloatSplits, double[] customSplitPoints) {
boolean useUplift = isUplift(parms);
UpliftDRFModel.UpliftDRFParameters.UpliftMetricType upliftMetricType = useUplift ?
((UpliftDRFModel.UpliftDRFParameters) parms)._uplift_metric : null;
return new DHistogram(name, nbins, parms._nbins_cats, isInt, min, maxEx, intOpt, hasNAs,
parms._min_split_improvement, parms._histogram_type, seed, globalSplitPointsKey, cs, checkFloatSplits, useUplift, upliftMetricType, customSplitPoints);
}
private static boolean isUplift(SharedTreeModel.SharedTreeParameters parms) {
return parms instanceof UpliftDRFModel.UpliftDRFParameters;
}
/**
* Determines if histogram making can use integer optimization when extracting data.
*
* @param v input Vec
* @param parms algo params
* @param cs constraints specification
* @return can we use integer representation for extracted data?
*/
public static boolean useIntOpt(Vec v, SharedTreeModel.SharedTreeParameters parms, Constraints cs) {
if (isUplift(parms)) {
// Uplift modelling doesn't support integer optimization (different code path)
return false;
}
if (cs != null) {
      // constraints are not handled on the optimized path to avoid higher code complexity
// (the benefit of this optimization would be small anyway as constraints introduce overhead)
return false;
}
if (parms._histogram_type != HistogramType.AUTO &&
parms._histogram_type != HistogramType.UniformAdaptive &&
parms._histogram_type != HistogramType.UniformRobust) {
// we cannot handle non-integer split points in fast-binning
return false;
}
if (v.isCategorical()) {
// small cardinality categoricals are optimized (default for nbin_cats is 1024)
return v.domain().length < parms._nbins_cats;
} else if (v.isInt()) {
double min = v.min();
double max = v.max();
double intLen = max - min; // length of the interval
      // only if every value fits into the smallest number of bins we could make (nbins, default 20)
return intLen < parms._nbins &&
(int) max - (int) min == (int) intLen; // make sure the math can be done in integer domain without overflow
}
return false;
}
// Pretty-print a histogram
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(_name).append(":").append(_min).append("-").append(_maxEx).append(" step=" + (1 / _step) + " nbins=" + nbins() + " actNBins=" + actNBins() + " isInt=" + _isInt);
if( _vals != null ) {
for(int b = 0; b< _nbin; b++ ) {
sb.append(String.format("\ncnt=%f, [%f - %f], mean/var=", w(b),_min+b/_step,_min+(b+1)/_step));
sb.append(String.format("%6.2f/%6.2f,", mean(b), var(b)));
}
sb.append('\n');
}
return sb.toString();
}
double mean(int b) {
double n = w(b);
return n>0 ? wY(b)/n : 0;
}
/**
* compute the sample variance within a given bin
* @param b bin id
* @return sample variance (>= 0)
*/
public double var (int b) {
double n = w(b);
if( n<=1 ) return 0;
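    // weighted version of the textbook shortcut: Var = (sum(w*y*y) - sum(w*y)^2 / sum(w)) / (sum(w) - 1)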
return Math.max(0, (wYY(b) - wY(b)* wY(b)/n)/(n-1)); //not strictly consistent with what is done elsewhere (use n instead of n-1 to get there)
}
void updateHisto(double[] ws, double[] resp, Object cs, double[] ys, double[] preds, int[] rows, int hi, int lo, double[] treatment){
if (_intOpt) {
assert treatment == null : "Integer-optimized histograms cannot be used when treatment is provided";
updateHistoInt(ws, (int[]) cs, ys, rows, hi, lo);
} else
updateHisto(ws, resp, (double[]) cs, ys, preds, rows, hi, lo, treatment);
}
/**
* Update counts in appropriate bins. Not thread safe, assumed to have private copy.
*
* NOTE: Any changes to this method need to be also made in the integer version of this function
* (updateHistoInt).
*
* @param ws observation weights
* @param resp original response (response column of the outer model, needed to calculate Gamma denominator)
* @param cs column data
* @param ys response column of the regression tree (eg. GBM residuals, not the original model response!)
* @param preds current model predictions (optional, provided only if needed)
   * @param rows rows sorted by leaf assignment
* @param hi upper bound on index into rows array to be processed by this call (exclusive)
* @param lo lower bound on index into rows array to be processed by this call (inclusive)
* @param treatment treatment column data
*/
void updateHisto(double[] ws, double resp[], double[] cs, double[] ys, double[] preds, int[] rows, int hi, int lo, double[] treatment){
// Gather all the data for this set of rows, for 1 column and 1 split/NID
// Gather min/max, wY and sum-squares.
for(int r = lo; r< hi; ++r) {
final int k = rows[r];
final double weight = ws == null ? 1 : ws[k];
if (weight == 0)
continue; // Needed for DRF only
final double col_data = cs[k];
if (col_data < _min2) _min2 = col_data;
if (col_data > _maxIn) _maxIn = col_data;
final double y = ys[r]; // uses absolute indexing, ys is optimized for sequential access
// these assertions hold for GBM, but not for DRF
// assert weight != 0 || y == 0;
// assert !Double.isNaN(y);
double wy = weight * y;
double wyy = wy * y;
int b = bin(col_data);
final int binDimStart = _vals_dim*b;
_vals[binDimStart + 0] += weight;
_vals[binDimStart + 1] += wy;
_vals[binDimStart + 2] += wyy;
if (_vals_dim >= 5 && !Double.isNaN(resp[k])) {
if (_dist._family.equals(DistributionFamily.quantile)) {
_vals[binDimStart + 3] += _dist.deviance(weight, y, _pred1);
_vals[binDimStart + 4] += _dist.deviance(weight, y, _pred2);
} else {
_vals[binDimStart + 3] += weight * (_pred1 - y) * (_pred1 - y);
_vals[binDimStart + 4] += weight * (_pred2 - y) * (_pred2 - y);
}
if (_vals_dim >= 6) {
_vals[binDimStart + 5] += _dist.gammaDenom(weight, resp[k], y, preds[k]);
if (_vals_dim == 7) {
_vals[binDimStart + 6] += _dist.gammaNum(weight, resp[k], y, preds[k]);
}
}
}
if(_useUplift) {
// Note: Only for binomial, response should be (0, 1)
double t = treatment[k];
double rs = resp[k];
int binDimStartUplift = _valsDimUplift * b;
_valsUplift[binDimStartUplift] += t; // treatment number
_valsUplift[binDimStartUplift + 1] += t * rs; // treatment number with response == 1
_valsUplift[binDimStartUplift + 2] += (1 - t); // control number
_valsUplift[binDimStartUplift + 3] += (1 - t) * rs; // control number with response == 1
}
}
}
/**
* This is an integer version of method updateHisto - optimized for handling small
* positive integer numbers and low-cardinality categoricals.
*
* NOTE: Any changes to this method need to be also made in the original updateHisto
* function.
*
* @param ws optional vector of weights, indexed indirectly using rows indices
* @param cs chunk data, indexed indirectly using rows indices
* @param ys targets, uses absolute indexing - maintains data co-locality and optimized for sequential access
* @param rows row indices
* @param hi upper boundary in rows array (exclusive)
* @param lo lower boundary in rows array (inclusive)
*/
void updateHistoInt(double[] ws, int[] cs, double[] ys,
int[] rows, final int hi, final int lo){
    // min2_int/maxIn_int are integer versions of the boundaries, cast only once - only int ops inside the loop
int min2_int = _min2 == Double.MAX_VALUE ? Integer.MAX_VALUE : (int) _min2;
    int maxIn_int = _maxIn == -Double.MAX_VALUE ? Integer.MIN_VALUE : (int) _maxIn;
for(int r = lo; r < hi; r++) {
final int k = rows[r];
final double weight = ws == null ? 1 : ws[k];
if (weight == 0)
continue; // Needed for DRF only
final int col_data = cs[k];
final int b;
if (col_data != INT_NA) {
if (col_data < min2_int) min2_int = col_data;
if (col_data > maxIn_int) maxIn_int = col_data;
b = col_data - _minInt;
} else
b = _nbin;
final double y = ys[r]; // uses absolute indexing, ys is optimized for sequential access
double wy = weight * y;
double wyy = wy * y;
final int binDimStart = _vals_dim*b;
_vals[binDimStart + 0] += weight;
_vals[binDimStart + 1] += wy;
_vals[binDimStart + 2] += wyy;
}
_min2 = min2_int;
_maxIn = maxIn_int;
}
/**
* Extracts data from a chunk into a structure that is optimized for given column type
*
* @param chk input chunk
* @param cache optional - already existing instance of the cache
* @param len length of the data
* @param maxChunkSz maximum chunk size on the local node, will determine the size of the cache
* @return extracted data
*/
Object extractData(Chunk chk, Object cache, int len, int maxChunkSz) {
if (cache == null) {
if (_intOpt) {
cache = MemoryManager.malloc4(maxChunkSz);
} else {
cache = MemoryManager.malloc8d(maxChunkSz);
}
}
if (_intOpt)
chk.getIntegers((int[])cache, 0, len, INT_NA);
else
chk.getDoubles((double[])cache, 0, len);
return cache;
}
/**
* Cast bin values (except for sums of weights) to floats to drop least significant bits.
* Improves reproducibility (drop bits most affected by floating point error).
*/
public void reducePrecision(){
if(_vals == null) return;
for(int i = 0; i < _vals.length; i+=_vals_dim) {
_vals[i+1] = (float)_vals[i+1];
_vals[i+2] = (float)_vals[i+2];
}
}
static double[] makeRandomSplitPoints(int nbin, Random rng) {
final double[] splitPts = new double[nbin];
splitPts[0] = 0;
for (int i = 1; i < nbin; i++)
splitPts[i] = rng.nextFloat() * nbin;
Arrays.sort(splitPts);
return splitPts;
}
}
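// Hypothetical, illustrative sketch (not part of the original source): the uniform binning
// arithmetic bin() falls back to when no explicit split points are defined. Names and the
// example values in main() are made up for illustration only.
class UniformBinningSketch {
  // maps colData in [min, maxEx) to a bin index in [0, nbins)
  static int uniformBin(double colData, double min, double maxEx, int nbins) {
    double step = nbins / (maxEx - min);      // same step definition as the DHistogram constructor
    int idx = (int) ((colData - min) * step); // linear interpolation into the bin range
    return idx == nbins ? nbins - 1 : idx;    // truncate round-off at the upper bound, like bin()
  }
  public static void main(String[] args) {
    System.out.println(uniformBin(37.5, 0, 100, 10)); // 10 bins over [0, 100): 37.5 lands in bin 3
  }
}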
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/DTree.java
|
package hex.tree;
import hex.Distribution;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.uplift.Divergence;
import jsr166y.RecursiveAction;
import org.apache.log4j.Logger;
import water.AutoBuffer;
import water.H2O;
import water.Iced;
import water.MemoryManager;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.util.*;
import java.util.*;
import static hex.tree.SharedTreeModel.SharedTreeParameters.HistogramType;
/** A Decision Tree, laid over a Frame of Vecs, and built distributed.
*
* <p>This class defines an explicit Tree structure, as a collection of {@code
* DTree} {@code Node}s. The Nodes are numbered with a unique {@code _nid}.
* Users need to maintain their own mapping from their data to a {@code _nid},
* where the obvious technique is to have a Vec of {@code _nid}s (ints), one
* per each element of the data Vecs.
*
* <p>Each {@code Node} has a {@code DHistogram}, describing summary data
* about the rows. The DHistogram requires a pass over the data to be filled
* in, and we expect to fill in all rows for Nodes at the same depth at the
* same time. i.e., a single pass over the data will fill in all leaf Nodes'
* DHistograms at once.
*
* @author Cliff Click
*/
public class DTree extends Iced {
private static final Logger LOG = Logger.getLogger(DTree.class);
private static final String CONSTRAINT_CONSISTENCY_CHECK = H2O.OptArgs.SYSTEM_PROP_PREFIX + "tree.constraintConsistencyCheck";
private static final boolean DEFAULT_CONSTRAINT_CONSISTENCY_CHECK = false;
final String[] _names; // Column names
final int _ncols; // Active training columns
final long _seed; // RNG seed; drives sampling seeds if necessary
private Node[] _ns; // All the nodes in the tree. Node 0 is the root.
public int _len; // Resizable array
// Public stats about tree
public int _leaves;
public int _depth;
public final int _mtrys; // Number of columns to choose amongst in splits (at every split)
public final int _mtrys_per_tree; // Number of columns to choose amongst in splits (once per tree)
public final transient Random _rand; // RNG for split decisions & sampling
public final transient int[] _cols; // Per-tree selection of columns to consider for splits
public transient SharedTreeModel.SharedTreeParameters _parms;
public boolean _checkConstraintConsistency;
private boolean checkConstraintConsistency(){
String check = System.getProperty(CONSTRAINT_CONSISTENCY_CHECK);
if(check == null){
return DEFAULT_CONSTRAINT_CONSISTENCY_CHECK;
}
return Boolean.parseBoolean(check);
}
// compute the effective number of columns to sample
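  // e.g. with _mtrys=10, _col_sample_rate_change_per_level=0.5 and _depth=2 this samples
  // max(1, (int)(10 * 0.5^2)) = 2 columns (never more than _ncols)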
public int actual_mtries() {
return Math.min(Math.max(1,(int)((double)_mtrys * Math.pow(_parms._col_sample_rate_change_per_level, _depth))),_ncols);
}
public DTree(Frame fr, int ncols, int mtrys, int mtrys_per_tree, long seed, SharedTreeModel.SharedTreeParameters parms) {
_names = fr.names();
_ncols = ncols;
_parms = parms;
_ns = new Node[1];
_mtrys = mtrys;
_mtrys_per_tree = mtrys_per_tree;
_seed = seed;
_rand = RandomUtils.getRNG(seed);
int[] activeCols=new int[_ncols];
for (int i=0;i<activeCols.length;++i)
activeCols[i] = i;
// per-tree column sample if _mtrys_per_tree < _ncols
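    // Partial Fisher-Yates shuffle: move mtrys_per_tree randomly chosen column indices to the tail
    // of activeCols and keep only that tail as this tree's candidate split columns.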
int len = _ncols;
if (mtrys_per_tree < _ncols) {
Random colSampleRNG = RandomUtils.getRNG(_seed*0xDA7A);
for( int i=0; i<mtrys_per_tree; i++ ) {
if( len == 0 ) break;
int idx2 = colSampleRNG.nextInt(len);
int col = activeCols[idx2];
activeCols[idx2] = activeCols[--len];
activeCols[len] = col;
}
activeCols = Arrays.copyOfRange(activeCols,len,activeCols.length);
}
_cols = activeCols;
_checkConstraintConsistency = checkConstraintConsistency();
}
/**
* Copy constructor
   * @param tree tree to copy
*/
public DTree(DTree tree){
_names = tree._names;
_ncols = tree._ncols;
_parms = tree._parms;
_ns = new Node[tree._ns.length];
for(int i=0; i<_ns.length; i++) {
Node node = tree._ns[i];
if(node instanceof UndecidedNode) {
_ns[i] = new UndecidedNode((UndecidedNode)node, this);
} else if(node instanceof DecidedNode){
_ns[i] = new DecidedNode((DecidedNode)node, this);
} else if(node instanceof LeafNode) {
_ns[i] = new LeafNode((LeafNode)node, this);
} else {
_ns[i] = null;
}
}
_mtrys = tree._mtrys;
_mtrys_per_tree = tree._mtrys_per_tree;
_seed = tree._seed;
_rand = tree._rand;
_cols = tree._cols;
_leaves = tree._leaves;
_len = tree._len;
_depth = tree._depth;
_checkConstraintConsistency = tree._checkConstraintConsistency;
}
public final Node root() { return _ns[0]; }
// One-time local init after wire transfer
void init_tree( ) { for( int j=0; j<_len; j++ ) _ns[j]._tree = this; }
// Return Node i
public final Node node( int i ) { return _ns[i]; }
public final UndecidedNode undecided( int i ) { return (UndecidedNode)node(i); }
public final DecidedNode decided( int i ) { return ( DecidedNode)node(i); }
// Get a new node index, growing innards on demand
private synchronized int newIdx(Node n) {
if( _len == _ns.length ) _ns = Arrays.copyOf(_ns,_len<<1);
_ns[_len] = n;
return _len++;
}
public final int len() { return _len; }
// --------------------------------------------------------------------------
// Abstract node flavor
public static abstract class Node extends Iced {
transient protected DTree _tree; // Make transient, lest we clone the whole tree
final public int _pid; // Parent node id, root has no parent and uses NO_PARENT
final protected int _nid; // My node-ID, 0 is root
Node( DTree tree, int pid, int nid ) {
_tree = tree;
_pid=pid;
tree._ns[_nid=nid] = this;
}
Node( DTree tree, int pid ) {
_tree = tree;
_pid=pid;
_nid = tree.newIdx(this);
}
Node( DTree tree, int pid, int nid, boolean copy) {
_tree = tree;
_pid = pid;
if(copy) {
_nid = nid;
} else {
_nid = tree.newIdx(this);
}
}
// Recursively print the decision-line from tree root to this child.
StringBuilder printLine(StringBuilder sb ) {
if( _pid== NO_PARENT) return sb.append("[root]");
DecidedNode parent = _tree.decided(_pid);
parent.printLine(sb).append(" to ");
return parent.printChild(sb,_nid);
}
abstract public StringBuilder toString2(StringBuilder sb, int depth);
abstract protected AutoBuffer compress(AutoBuffer ab, AutoBuffer abAux);
abstract protected int size();
abstract protected int numNodes();
public final int nid() { return _nid; }
public final int pid() { return _pid; }
}
// --------------------------------------------------------------------------
// Records a column, a bin to split at within the column, and the MSE.
public static class Split extends Iced {
final public int _col, _bin;// Column to split, bin where being split
final DHistogram.NASplitDir _nasplit;
final IcedBitSet _bs; // For binary y and categorical x (with >= 4 levels), split into 2 non-contiguous groups
final byte _equal; // Split is 0: <, 2: == with group split (<= 32 levels), 3: == with group split (> 32 levels)
final double _se; // Squared error without a split
final double _se0, _se1; // Squared error of each subsplit
final double _n0, _n1; // (Weighted) Rows in each final split
final double _p0, _p1; // Predicted value for each split
final double _tree_p0, _tree_p1;
final double _p0Treat, _p0Contr, _p1Treat, _p1Contr; // uplift predictions
final double _n0Treat, _n0Contr, _n1Treat, _n1Contr;
final double _upliftGainBefore;
final double _upliftGain;
public Split(int col, int bin, DHistogram.NASplitDir nasplit, IcedBitSet bs, byte equal, double se, double se0, double se1, double n0, double n1, double p0, double p1, double tree_p0, double tree_p1) {
assert nasplit != DHistogram.NASplitDir.None;
assert nasplit != DHistogram.NASplitDir.NAvsREST || bs == null : "Split type NAvsREST shouldn't have a bitset";
assert equal != 1; //no longer done
// FIXME: Disabled for testing PUBDEV-6495:
// assert se > se0+se1 || se==Double.MAX_VALUE; // No point in splitting unless error goes down
assert col >= 0;
assert bin >= 0;
_col = col; _bin = bin; _nasplit = nasplit; _bs = bs; _equal = equal; _se = se;
_n0 = n0; _n1 = n1; _se0 = se0; _se1 = se1;
_p0 = p0; _p1 = p1;
_tree_p0 = tree_p0; _tree_p1 = tree_p1;
_p0Treat = _p0Contr = _p1Treat = _p1Contr = 0;
_n0Treat = _n0Contr = _n1Treat = _n1Contr = 0;
_upliftGainBefore = 0;
_upliftGain = 0;
}
public Split(int col, int bin, DHistogram.NASplitDir nasplit, IcedBitSet bs, byte equal, double se, double se0, double se1, double n0, double n1, double p0, double p1, double tree_p0, double tree_p1,
double p0Treat, double p0Contr, double p1Treat, double p1Contr, double n0Treat, double n0Contr, double n1Treat, double n1Contr, double upliftGainBefore, double upliftGain) {
assert(nasplit!= DHistogram.NASplitDir.None);
assert(equal!=1); //no longer done
// FIXME: Disabled for testing PUBDEV-6495:
// assert se > se0+se1 || se==Double.MAX_VALUE; // No point in splitting unless error goes down
assert(col>=0);
assert(bin>=0);
_col = col; _bin = bin; _nasplit = nasplit; _bs = bs; _equal = equal; _se = se;
_n0 = n0; _n1 = n1; _se0 = se0; _se1 = se1;
_p0 = p0; _p1 = p1;
_tree_p0 = tree_p0; _tree_p1 = tree_p1;
_p0Treat = p0Treat; _p0Contr = p0Contr; _p1Treat = p1Treat; _p1Contr = p1Contr;
_n0Treat = n0Treat; _n0Contr = n0Contr; _n1Treat = n1Treat; _n1Contr = n1Contr;
_upliftGainBefore = upliftGainBefore;
_upliftGain = upliftGain;
}
public final double pre_split_se() { return _se; }
public final double se() { return _se0+_se1; }
public final double preSplitUpliftGain() { return _upliftGainBefore; }
public final double upliftGain() { return _upliftGain; }
public final int col() { return _col; }
public final int bin() { return _bin; }
public final DHistogram.NASplitDir naSplitDir() { return _nasplit; }
public final double n0() { return _n0; }
public final double n1() { return _n1; }
/**
* Returns an optimal numeric split point for numerical splits,
* -1 for bitwise splits and Float.NaN if a split should be abandoned.
* @param hs histograms
* @return "split at" value
*/
float splat(DHistogram[] hs) {
return isNumericSplit() ? splatNumeric(hs[_col]) : -1f; // Split-at value (-1 for group-wise splits)
}
boolean isNumericSplit() {
return _nasplit != DHistogram.NASplitDir.NAvsREST && (_equal == 0 || _equal == 1);
}
      // Split-at dividing point. Don't use step*bin+bmin: due to roundoff
      // error that point can end up slightly higher or lower than the bin
      // min/max, which would allow values outside the stated bin-range into the
      // split sub-bins. Always go for a value which splits the nearest two
      // elements.
float splatNumeric(final DHistogram h) {
assert _nasplit != DHistogram.NASplitDir.NAvsREST : "Shouldn't be called for NA split type 'NA vs REST'";
assert _bin > 0 && _bin < h.nbins();
assert _bs==null : "Dividing point is a bitset, not a bin#, so don't call splat() as result is meaningless";
assert _equal != 1;
assert _equal==0; // not here for bitset splits, just range splits
// Find highest non-empty bin below the split
int x=_bin-1;
while( x >= 0 && h.bins(x)==0 ) x--;
// Find lowest non-empty bin above the split
int n=_bin;
while( n < h.nbins() && h.bins(n)==0 ) n++;
// Lo is the high-side of the low non-empty bin, rounded to int for int columns
// Hi is the low -side of the hi non-empty bin, rounded to int for int columns
// Example: Suppose there are no empty bins, and we are splitting an
// integer column at 48.4 (more than nbins, so step != 1.0, perhaps
// step==1.8). The next lowest non-empty bin is from 46.6 to 48.4, and
// we set lo=48.4. The next highest non-empty bin is from 48.4 to 50.2
// and we set hi=48.4. Since this is an integer column, we round lo to
// 48 (largest integer below the split) and hi to 49 (smallest integer
// above the split). Finally we average them, and split at 48.5.
double lo = h.binAt(x+1);
double hi = h.binAt(n );
if (h._isInt > 0) {
lo = h._step==1 ? lo-1 : Math.floor(lo);
hi = h._step==1 ? hi : Math.ceil (hi);
}
final float splitAt = (float) ((lo + hi) / 2.0);
// abandon split if rounding errors could cause observations being incorrectly
// assigned to child nodes at scoring time
// this will typically happen when bin lengths are very small (eg. 1e-6)
// we will abandon a split if `lo` is not a true lower bound to a float `splitAt`
// (and symmetrically for `hi`)
if (h._checkFloatSplits && lo != hi && (lo > splitAt || hi < splitAt)) {
return Float.NaN;
}
return splitAt;
}
/**
* Prepare children histograms, one per column.
* Typically, histograms are created with a level-dependent binning strategy.
* For the histogram of the current split decision, the children histograms are left/right range-adjusted.
*
     * Any histogram can be null if there is no point in splitting
     * further (such as when there are fewer than min_rows elements, or zero
     * error in the response column). Returns an array of DHistograms (one
     * per column), which are bounded by the split bin-limits. If the column
     * has constant data, or was not being tracked by a prior DHistogram
     * (for being constant data from a prior split), then that column will be
     * null in the returned array.
* @param currentHistos Histograms for all applicable columns computed for the previous split finding process
* @param way 0 (left) or 1 (right)
* @param splat Split point for previous split (if applicable)
* @param parms user-given parameters (will use nbins, min_rows, etc.)
* @return Array of histograms to be used for the next level of split finding
*/
public DHistogram[] nextLevelHistos(DHistogram[] currentHistos, int way, double splat, SharedTreeModel.SharedTreeParameters parms, Constraints cs, BranchInteractionConstraints bcs) {
double n = way==0 ? _n0 : _n1;
if( n < parms._min_rows ) {
if (LOG.isTraceEnabled()) LOG.trace("Not splitting: too few observations left: " + n);
return null; // Too few elements
}
double se = way==0 ? _se0 : _se1;
if( se <= 1e-30 ) {
LOG.trace("Not splitting: pure node (perfect prediction).");
return null; // No point in splitting a perfect prediction
}
// Build a next-gen split point from the splitting bin
int cnt=0; // Count of possible splits
DHistogram nhists[] = new DHistogram[currentHistos.length]; // A new histogram set
boolean checkBranchInteractions = bcs != null;
for(int j = 0; j < currentHistos.length; j++ ) { // For every column in the new split
// Check branch interaction constraint if it is not null
if (checkBranchInteractions && !bcs.isAllowedIndex(j)) {
// Column is denied by branch interaction constraints -> the histogram is set to null
continue;
}
DHistogram h = currentHistos[j]; // old histogram of column
if( h == null )
continue; // Column was not being tracked?
final int adj_nbins = Math.max(h.nbins()>>1,parms._nbins); //update number of bins dependent on level depth
// min & max come from the original column data, since splitting on an
// unrelated column will not change the j'th columns min/max.
// Tighten min/max based on actual observed data for tracked columns
double min, maxEx;
if( h._vals == null || _equal > 1) { // Not tracked this last pass? For bitset, always keep the full range of factors
min = h._min; // Then no improvement over last go
maxEx = h._maxEx;
} else { // Else pick up tighter observed bounds
min = h.find_min(); // Tracked inclusive lower bound
if( h.find_maxIn() == min )
continue; // This column will not split again
maxEx = h.find_maxEx(); // Exclusive max
}
if (_nasplit== DHistogram.NASplitDir.NAvsREST) {
if (way==1) continue; //no histogram needed - we just split NAs away
// otherwise leave the min/max alone, and make another histogram (but this time, there won't be any NAs)
}
// Tighter bounds on the column getting split: exactly each new
// DHistogram's bound are the bins' min & max.
if( _col==j ) {
switch( _equal ) {
case 0: // Ranged split; know something about the left & right sides
if (_nasplit != DHistogram.NASplitDir.NAvsREST) {
if (h._vals[h._vals_dim*_bin] == 0)
throw H2O.unimpl(); // Here I should walk up & down same as split() above.
}
assert _bs==null : "splat not defined for BitSet splits";
double split = splat;
if( h._isInt > 0 ) split = (float)Math.ceil(split);
if (_nasplit != DHistogram.NASplitDir.NAvsREST) {
if (way == 0) maxEx = split;
else min = split;
}
break;
case 1: // Equality split; no change on unequals-side
if( way == 1 )
continue; // but know exact bounds on equals-side - and this col will not split again
break;
case 2: // BitSet (small) split
case 3: // BitSet (big) split
break;
default: throw H2O.fail();
}
}
if( min > maxEx )
continue; // Happens for all-NA subsplits
if( MathUtils.equalsWithinOneSmallUlp(min, maxEx) )
continue; // This column will not split again
if( Double.isInfinite(adj_nbins/(maxEx-min)) )
continue;
if( h._isInt > 0 && !(min+1 < maxEx ) )
continue; // This column will not split again
assert min < maxEx && adj_nbins > 1 : ""+min+"<"+maxEx+" nbins="+adj_nbins;
// only count NAs if we have any going our way (note: NAvsREST doesn't build a histo for the NA direction)
final boolean hasNAs = (_nasplit == DHistogram.NASplitDir.NALeft && way == 0 ||
_nasplit == DHistogram.NASplitDir.NARight && way == 1) && h.hasNABin();
double[] customSplitPoints = h._customSplitPoints;
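        // For UniformRobust histograms whose uniform bins are only sparsely populated (density at or below
        // the threshold), re-derive data-driven split points via GuidedSplitPoints instead of reusing the
        // uniform grid; see GuidedSplitPoints for the exact strategy.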
if (parms._histogram_type == HistogramType.UniformRobust &&
j != _col && // don't apply if we were able to split on the column with the current bins
GuidedSplitPoints.isApplicableTo(h)
) {
final int nonEmptyBins = h.nonEmptyBins();
final double density = nonEmptyBins / ((double) h.nbins());
if (density <= GuidedSplitPoints.LOW_DENSITY_THRESHOLD) {
customSplitPoints = GuidedSplitPoints.makeSplitPoints(h, adj_nbins, min, maxEx);
}
}
nhists[j] = DHistogram.make(h._name, adj_nbins, h._isInt, min, maxEx, h._intOpt, hasNAs,
h._seed*0xDECAF+(way+1), parms,
h._globalSplitPointsKey, cs, h._checkFloatSplits, customSplitPoints);
cnt++; // At least some chance of splitting
}
return cnt == 0 ? null : nhists;
}
public Constraints nextLevelConstraints(Constraints currentConstraints, int way, double splat, SharedTreeModel.SharedTreeParameters parms) {
int constraint = currentConstraints.getColumnConstraint(_col);
if (constraint == 0) {
return currentConstraints; // didn't split on a column with constraints => no need to modify them
}
double mid = (_tree_p0 + _tree_p1) / 2;
return currentConstraints.withNewConstraint(_col, way, mid);
}
@Override public String toString() {
return "Splitting: col=" + _col + " type=" + ((int)_equal == 0 ? " < " : "bitset")
+ ", splitpoint=" + _bin + ", nadir=" + _nasplit.toString() + ", se0=" + _se0 + ", se1=" + _se1 + ", n0=" + _n0 + ", n1=" + _n1;
}
}
// --------------------------------------------------------------------------
// An UndecidedNode: Has a DHistogram which is filled in (in parallel
// with other histograms) in a single pass over the data. Does not contain
// any split-decision.
public static class UndecidedNode extends Node {
public transient DHistogram[] _hs; //(up to) one histogram per column
public transient Constraints _cs;
public transient BranchInteractionConstraints _bics;
public final int _scoreCols[]; // A list of columns to score; could be null for all
public UndecidedNode( DTree tree, int pid, DHistogram[] hs, Constraints cs, BranchInteractionConstraints bics) {
super(tree,pid);
assert hs.length==tree._ncols;
_hs = hs; //these histograms have no bins yet (just constructed)
_cs = cs;
_bics = bics;
_scoreCols = scoreCols();
}
public UndecidedNode(UndecidedNode node, DTree tree){
super(tree, node._pid, node._nid, true);
_hs = node._hs; //these histograms have no bins yet (just constructed)
_cs = node._cs;
_bics = node._bics;
_scoreCols = node._scoreCols;
}
// Pick a random selection of columns to compute best score.
// Can return null for 'all columns'.
public int[] scoreCols() {
DTree tree = _tree;
if (tree.actual_mtries() == _hs.length && tree._mtrys_per_tree == _hs.length)
return null;
// per-tree pre-selected columns
int[] activeCols = tree._cols;
if (LOG.isTraceEnabled()) LOG.trace("For tree with seed " + tree._seed + ", out of " + _hs.length + " cols, the following cols are activated via mtry_per_tree=" + tree._mtrys_per_tree + ": " + Arrays.toString(activeCols));
int[] cols = new int[activeCols.length];
int len=0;
// collect columns that can be split (non-constant, large enough to split, etc.)
for(int i = 0; i< activeCols.length; i++ ) {
int idx = activeCols[i];
assert(idx == i || tree._mtrys_per_tree < _hs.length);
if( _hs[idx]==null ) continue; // Ignore not-tracked cols
assert _hs[idx]._min < _hs[idx]._maxEx && _hs[idx].actNBins() > 1 : "broken histo range "+_hs[idx];
cols[len++] = idx; // Gather active column
}
if (LOG.isTraceEnabled()) LOG.trace("These columns can be split: " + Arrays.toString(Arrays.copyOfRange(cols, 0, len)));
int choices = len; // Number of columns I can choose from
int mtries = tree.actual_mtries();
if (choices > 0) { // It can happen that we have no choices, because this node cannot be split any more (all active columns are constant, for example).
// Draw up to mtry columns at random without replacement.
for (int i = 0; i < mtries; i++) {
if (len == 0) break; // Out of choices!
int idx2 = tree._rand.nextInt(len);
int col = cols[idx2]; // The chosen column
cols[idx2] = cols[--len]; // Compress out of array; do not choose again
cols[len] = col; // Swap chosen in just after 'len'
}
assert len < choices;
}
if (LOG.isTraceEnabled()) LOG.trace("Picking these (mtry=" + mtries + ") columns to evaluate for splitting: " + Arrays.toString(Arrays.copyOfRange(cols, len, choices)));
return Arrays.copyOfRange(cols, len, choices);
}
    // Make the parent of this Node use uninitialized (UNDECIDED_CHILD_NODE_ID) NIDs for its children to
    // prevent the split that this node would otherwise induce. Happens if we find out too late that we
    // have a perfect prediction here, and we want to turn this node into a leaf.
public void doNotSplit( ) {
if( _pid == NO_PARENT) return; // skip root
DecidedNode dn = _tree.decided(_pid);
for( int i=0; i<dn._nids.length; i++ )
if( dn._nids[i]==_nid )
{ dn._nids[i] = ScoreBuildHistogram.UNDECIDED_CHILD_NODE_ID; return; }
throw H2O.fail();
}
@Override public String toString() {
final String colPad=" ";
final int cntW=4, mmmW=4, menW=5, varW=5;
final int colW=cntW+1+mmmW+1+mmmW+1+menW+1+varW;
StringBuilder sb = new StringBuilder();
sb.append("Nid# ").append(_nid).append(", ");
printLine(sb).append("\n");
if( _hs == null ) return sb.append("_hs==null").toString();
for( DHistogram hs : _hs )
if( hs != null )
p(sb,hs._name+String.format(", %4.1f-%4.1f",hs._min,hs._maxEx),colW).append(colPad);
sb.append('\n');
for( DHistogram hs : _hs ) {
if( hs == null ) continue;
p(sb,"cnt" ,cntW).append('/');
p(sb,"min" ,mmmW).append('/');
p(sb,"max" ,mmmW).append('/');
p(sb,"mean",menW).append('/');
p(sb,"var" ,varW).append(colPad);
}
sb.append('\n');
// Max bins
int nbins=0;
for( DHistogram hs : _hs )
if( hs != null && hs.nbins() > nbins ) nbins = hs.nbins();
for( int i=0; i<nbins; i++ ) {
for( DHistogram h : _hs ) {
if( h == null ) continue;
if( i < h.nbins() && h._vals != null ) {
p(sb, h.bins(i),cntW).append('/');
p(sb, h.binAt(i),mmmW).append('/');
p(sb, h.binAt(i+1),mmmW).append('/');
p(sb, h.mean(i),menW).append('/');
p(sb, h.var (i),varW).append(colPad);
} else {
p(sb,"",colW).append(colPad);
}
}
sb.append('\n');
}
sb.append("Nid# ").append(_nid);
return sb.toString();
}
static private StringBuilder p(StringBuilder sb, String s, int w) {
return sb.append(StringUtils.fixedLength(s,w));
}
static private StringBuilder p(StringBuilder sb, double d, int w) {
String s = Double.isNaN(d) ? "NaN" :
((d==Float.MAX_VALUE || d==-Float.MAX_VALUE || d==Double.MAX_VALUE || d==-Double.MAX_VALUE) ? " -" :
(d==0?" 0":Double.toString(d)));
if( s.length() <= w ) return p(sb,s,w);
s = String.format("% 4.2f",d);
if( s.length() > w )
s = String.format("%4.1f",d);
if( s.length() > w )
s = String.format("%4.0f",d);
return p(sb,s,w);
}
@Override public StringBuilder toString2(StringBuilder sb, int depth) {
for( int d=0; d<depth; d++ ) sb.append(" ");
return sb.append("Undecided\n");
}
@Override protected AutoBuffer compress(AutoBuffer ab, AutoBuffer abAux) { throw H2O.fail(); }
@Override protected int size() { throw H2O.fail(); }
@Override protected int numNodes() { throw H2O.fail(); }
}
// --------------------------------------------------------------------------
// Internal tree nodes which split into several children over a single
// column. Includes a split-decision: which child does this Row belong to?
// Does not contain a histogram describing how the decision was made.
public static class DecidedNode extends Node {
public final Split _split; // Split: col, equal/notequal/less/greater, nrows, MSE
public final float _splat; // Split At point: lower bin-edge of split
// _equals\_nids[] \ 0 1
// ----------------+----------
// F | < >=
// T | != ==
public final int _nids[]; // Children NIDS for the split LEFT, RIGHT
transient byte _nodeType; // Complex encoding: see the compressed struct comments
transient int _size = 0; // Compressed byte size of this subtree
transient int _nnodes = 0; // Number of nodes in this subtree
public DecidedNode(DecidedNode node, DTree tree){
super(tree, node._pid, node._nid, true);
_split = node._split;
_splat = node._splat;
_nids = node._nids;
_nodeType = node._nodeType;
_size = node._size;
_nnodes = node._nnodes;
}
// Make a correctly flavored Undecided
public UndecidedNode makeUndecidedNode(DHistogram hs[], Constraints cs, BranchInteractionConstraints bics) {
return new UndecidedNode(_tree, _nid, hs, cs, bics);
}
// Pick the best column from the given histograms
public Split bestCol(UndecidedNode u, DHistogram hs[], Constraints cs) {
DTree.Split best = null;
if( hs == null ) return null;
final int maxCols = u._scoreCols == null /* all cols */ ? hs.length : u._scoreCols.length;
List<FindSplits> findSplits = new ArrayList<>();
//total work is to find the best split across sum_over_cols_to_split(nbins)
long nbinsSum = 0;
for( int i=0; i<maxCols; i++ ) {
int col = u._scoreCols == null ? i : u._scoreCols[i];
if( hs[col]==null || hs[col].actNBins() <= 1 )
continue;
nbinsSum += hs[col].actNBins();
}
// for small work loads, do a serial loop, otherwise, submit work to FJ thread pool
final boolean isSmall = (nbinsSum <= 1024); //heuristic - 50 cols with 20 nbins, or 1 column with 1024 bins, etc.
for( int i=0; i<maxCols; i++ ) {
int col = u._scoreCols == null ? i : u._scoreCols[i];
if( hs[col]==null || hs[col].actNBins() <= 1 )
continue;
FindSplits fs = new FindSplits(hs, cs, col, u._nid);
findSplits.add(fs);
if (isSmall) fs.compute();
}
if (!isSmall) jsr166y.ForkJoinTask.invokeAll(findSplits);
for( FindSplits fs : findSplits) {
DTree.Split s = fs._s;
if( s == null ) continue;
if (best == null || s.se() < best.se()) {
if (hs[s._col]._checkFloatSplits) {
// we evaluate the feasibility of the split only if it brings improvement of SE
// same could be done when building the split (findBestSplitPoint) but the lazy
// evaluation avoids scanning the bins unnecessarily
float splitAt = s.splat(hs);
if (Float.isNaN(splitAt))
continue;
}
best = s;
}
}
return best;
}
public final class FindSplits extends RecursiveAction {
public FindSplits(DHistogram[] hs, Constraints cs, int col, UndecidedNode node) {
this(hs, cs, col, node._nid);
}
private FindSplits(DHistogram[] hs, Constraints cs, int col, int nid) {
_hs = hs; _cs = cs; _col = col; _nid = nid;
_useUplift = _hs[_col].useUplift();
}
final DHistogram[] _hs;
final Constraints _cs;
final int _col;
final int _nid;
DTree.Split _s;
final boolean _useUplift;
@Override public void compute() {
computeSplit();
}
public final DTree.Split computeSplit() {
final double min, max;
final int constraint;
final boolean useBounds;
final Distribution dist;
if (_cs != null) {
min = _cs._min;
max = _cs._max;
constraint = _cs.getColumnConstraint(_col);
useBounds = _cs.useBounds();
dist = _cs._dist;
} else {
min = Double.NaN;
max = Double.NaN;
constraint = 0;
useBounds = false;
dist = null;
}
if(_useUplift){
_s = findBestSplitPointUplift(_hs[_col], _col, _tree._parms._min_rows);
} else {
_s = findBestSplitPoint(_hs[_col], _col, _tree._parms._min_rows, constraint, min, max, useBounds, dist);
}
return _s;
}
}
public DecidedNode(UndecidedNode n, DHistogram hs[], Constraints cs, GlobalInteractionConstraints ics) {
super(n._tree,n._pid,n._nid); // Replace Undecided with this DecidedNode
_nids = new int[2]; // Split into 2 subsets
_split = bestCol(n,hs,cs); // Best split-point for this tree
if( _split == null) {
// Happens because the predictor columns cannot split the responses -
// which might be because all predictor columns are now constant, or
// because all responses are now constant.
_splat = Float.NaN;
Arrays.fill(_nids, ScoreBuildHistogram.UNDECIDED_CHILD_NODE_ID);
return;
}
if(cs != null) {
int constr = cs.getColumnConstraint(_split._col);
if (_tree._checkConstraintConsistency && !cs._dist._family.equals(DistributionFamily.quantile) && nid() != 0 && constr != 0) {
assert constr * _split._tree_p0 <= constr * parentPred() && constr * parentPred() <= constr * _split._tree_p1 :
"Parent prediction and children prediction is not consistent. Parent prediction " + constr * parentPred() +
" should be in interval of the children: " + constr * _split._tree_p0 + " " + constr * _split._tree_p1;
}
}
_splat = _split.splat(hs);
for(int way = 0; way <2; way++ ) { // left / right
// Prepare the next level of constraints if monotone or interaction constraints are set
Constraints ncs = cs != null ? _split.nextLevelConstraints(cs, way, _splat, _tree._parms) : null;
BranchInteractionConstraints nbics = n._bics != null ? n._bics.nextLevelInteractionConstraints(ics, _split._col) : null;
// Create children histograms, not yet populated, but the ranges are set
DHistogram nhists[] = _split.nextLevelHistos(hs, way,_splat, _tree._parms, ncs, nbics); //maintains the full range for NAvsREST
assert nhists==null || nhists.length==_tree._ncols;
// Assign a new (yet undecided) node to each child, and connect this (the parent) decided node and the newly made histograms to it
_nids[way] = nhists == null ? ScoreBuildHistogram.UNDECIDED_CHILD_NODE_ID : makeUndecidedNode(nhists,ncs, nbics)._nid;
}
}
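    // Route a single row to one of the two children at scoring time:
    //  - NAvsREST sends every non-NA value left (bin 0) and NAs right (bin 1);
    //  - a ranged split (_equal == 0) compares the value against _splat;
    //  - a bitset split (_equal >= 2) tests membership of the (integer) level in _bs, treating
    //    out-of-range levels like NAs;
    //  - NAs (and unseen levels) then follow the stored NA direction, defaulting to the right child
    //    when no NA direction was learned during training.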
public int getChildNodeID(Chunk [] chks, int row ) {
double d = chks[_split._col].atd(row);
int bin = -1;
boolean isNA = Double.isNaN(d);
if (!isNA) {
if (_split._nasplit == DHistogram.NASplitDir.NAvsREST)
bin = 0;
else if (_split._equal == 0) {
assert(!Float.isNaN(_splat));
bin = d >= _splat ? 1 : 0;
// else if (_split._equal == 1)
// bin = d == _splat ? 1 : 0;
}
else if (_split._equal >= 2) {
int b = (int)d;
if (_split._bs.isInRange(b)) {
bin = _split._bs.contains(b) ? 1 : 0; // contains goes right
} else {
isNA = true;
}
}
}
// NA handling
if (isNA) {
if (_split._nasplit== DHistogram.NASplitDir.NALeft || _split._nasplit == DHistogram.NASplitDir.Left) {
bin = 0;
} else if (_split._nasplit == DHistogram.NASplitDir.NARight || _split._nasplit == DHistogram.NASplitDir.Right || _split._nasplit == DHistogram.NASplitDir.NAvsREST) {
bin = 1;
} else if (_split._nasplit == DHistogram.NASplitDir.None) {
bin = 1; // if no NAs in training, but NAs in testing -> go right TODO: Pick optimal direction
} else throw H2O.unimpl();
}
return _nids[bin];
}
public double pred( int nid ) {
return nid==0 ? _split._p0 : _split._p1;
}
public double predTreatment( int nid ) {
return nid==0 ? _split._p0Treat : _split._p1Treat;
}
public double predControl( int nid ) {
return nid==0 ? _split._p0Contr : _split._p1Contr;
}
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("DecidedNode:\n");
sb.append("_nid: " + _nid + "\n");
sb.append("_nids (children): " + Arrays.toString(_nids) + "\n");
if (_split!=null)
sb.append("_split:" + _split.toString() + "\n");
sb.append("_splat:" + _splat + "\n");
if( _split == null ) {
sb.append(" col = -1\n");
} else {
int col = _split._col;
if (_split._equal == 1) {
sb.append(_tree._names[col] + " != " + _splat + "\n" +
_tree._names[col] + " == " + _splat + "\n");
} else if (_split._equal == 2 || _split._equal == 3) {
sb.append(_tree._names[col] + " not in " + _split._bs.toString() + "\n" +
_tree._names[col] + " is in " + _split._bs.toString() + "\n");
} else {
sb.append(
_tree._names[col] + " < " + _splat + "\n" +
_splat + " >=" + _tree._names[col] + "\n");
}
}
return sb.toString();
}
StringBuilder printChild( StringBuilder sb, int nid ) {
int i = _nids[0]==nid ? 0 : 1;
assert _nids[i]==nid : "No child nid "+nid+"? " +Arrays.toString(_nids);
sb.append("[").append(_tree._names[_split._col]);
sb.append(_split._equal != 0
? (i==0 ? " != " : " == ")
: (i==0 ? " < " : " >= "));
sb.append((_split._equal == 2 || _split._equal == 3) ? _split._bs.toString() : _splat).append("]");
return sb;
}
@Override public StringBuilder toString2(StringBuilder sb, int depth) {
assert(_nids.length==2);
for( int i=0; i<_nids.length; i++ ) {
for( int d=0; d<depth; d++ ) sb.append(" ");
sb.append(_nid).append(" ");
if( _split._col < 0 ) sb.append("init");
else {
sb.append(_tree._names[_split._col]);
if (_split._nasplit == DHistogram.NASplitDir.NAvsREST) {
if (i==0) sb.append(" not NA");
if (i==1) sb.append(" is NA");
}
else {
if (_split._equal < 2) {
if (_split._nasplit == DHistogram.NASplitDir.NARight || _split._nasplit == DHistogram.NASplitDir.Right || _split._nasplit == DHistogram.NASplitDir.None)
sb.append(_split._equal != 0 ? (i == 0 ? " != " : " == ") : (i == 0 ? " < " : " is NA or >= "));
if (_split._nasplit == DHistogram.NASplitDir.NALeft || _split._nasplit == DHistogram.NASplitDir.Left)
sb.append(_split._equal != 0 ? (i == 0 ? " is NA or != " : " == ") : (i == 0 ? " is NA or < " : " >= "));
} else {
sb.append(i == 0 ? " not in " : " is in ");
}
sb.append((_split._equal == 2 || _split._equal == 3) ? _split._bs.toString() : _splat).append("\n");
}
}
if( _nids[i] >= 0 && _nids[i] < _tree._len )
_tree.node(_nids[i]).toString2(sb,depth+1);
}
return sb;
}
// Size of this subtree; sets _nodeType also
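    // _nodeType bit layout (as encoded below): bits 0-1 hold the size of the left-subtree length field
    // minus one (1-4 bytes), bits 2-3 encode the split flavor (0 = ranged, 4 = equality, 8 = small bitset,
    // 12 = big bitset), mask 48 marks a left leaf child and mask 48<<2 marks a right leaf child.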
@Override public final int size(){
if( _size != 0 ) return _size; // Cached size
assert _nodeType == 0:"unexpected node type: " + _nodeType;
if(_split._equal != 0)
_nodeType |= _split._equal == 1 ? 4 : (_split._equal == 2 ? 8 : 12);
// int res = 7; // 1B node type + flags, 2B colId, 4B float split val
// 1B node type + flags, 2B colId, 4B split val/small group or (2B offset + 4B size) + large group
int res = _split._equal == 3 ? 9 + _split._bs.numBytes() : 7;
// NA handling correction
res++; //1 byte for NA split dir
if (_split._nasplit == DHistogram.NASplitDir.NAvsREST) {
assert _split._equal == 0;
res -= 4; // we don't need to represent the actual split value
}
Node left = _tree.node(_nids[0]);
int lsz = left.size();
res += lsz;
if( left instanceof LeafNode ) _nodeType |= (byte)48;
else {
int slen = lsz < 256 ? 0 : (lsz < 65535 ? 1 : (lsz<(1<<24) ? 2 : 3));
_nodeType |= slen; // Set the size-skip bits
        res += (slen+1); // the left-subtree size field itself takes slen+1 bytes
}
Node right = _tree.node(_nids[1]);
if( right instanceof LeafNode ) _nodeType |= (byte)(48 << 2);
res += right.size();
assert (_nodeType&0x33) != 51;
assert res != 0;
return (_size = res);
}
@Override
protected int numNodes() {
if (_nnodes > 0)
return _nnodes;
_nnodes = 1 + _tree.node(_nids[0]).numNodes() + _tree.node(_nids[1]).numNodes();
return _nnodes;
}
// Compress this tree into the AutoBuffer
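    // Serialized layout written below: [1B _nodeType][2B column id][1B NA direction]
    // [4B split value (ranged/equality) or compressed bitset (group splits); skipped entirely for NAvsREST]
    // [1-4B left-subtree size, omitted when the left child is a leaf][left subtree bytes][right subtree bytes].
    // abAux (if given) receives per-node diagnostics: node id, left-subtree node count, n/pred/SE per side,
    // and the two child nids.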
@Override public AutoBuffer compress(AutoBuffer ab, AutoBuffer abAux) {
int pos = ab.position();
if( _nodeType == 0 ) size(); // Sets _nodeType & _size both
ab.put1(_nodeType); // Includes left-child skip-size bits
assert _split != null; // Not a broken root non-decision?
assert _split._col >= 0;
ab.put2((short)_split._col);
ab.put1((byte)_split._nasplit.value());
// Save split-at-value or group
if (_split._nasplit!= DHistogram.NASplitDir.NAvsREST) {
if (_split._equal == 0 || _split._equal == 1) ab.put4f(_splat);
else if(_split._equal == 2) _split._bs.compress2(ab);
else _split._bs.compress3(ab);
}
if (abAux != null) {
abAux.put4(_nid);
abAux.put4(_tree.node(_nids[0]).numNodes()); // number of nodes in the left subtree; this used to be 'parent node id'
abAux.put4f((float)_split._n0);
abAux.put4f((float)_split._n1);
abAux.put4f((float)_split._p0);
abAux.put4f((float)_split._p1);
abAux.put4f((float)_split._se0);
abAux.put4f((float)_split._se1);
abAux.put4(_nids[0]);
abAux.put4(_nids[1]);
}
Node left = _tree.node(_nids[0]);
if( (_nodeType&48) == 0 ) { // Size bits are optional for left leaves !
int sz = left.size();
if(sz < 256) ab.put1( sz);
else if (sz < 65535) ab.put2((short)sz);
else if (sz < (1<<24)) ab.put3( sz);
else ab.put4( sz); // 1<<31-1
}
// now write the subtree in
left.compress(ab, abAux);
Node rite = _tree.node(_nids[1]);
rite.compress(ab, abAux);
assert _size == ab.position()-pos:"reported size = " + _size + " , real size = " + (ab.position()-pos);
return ab;
}
private boolean isLeftChild(){
int[] parentNids = ((DecidedNode) _tree.node(pid()))._nids;
return parentNids[0] == _nid;
}
public double parentPred(){
Split parentSplit = ((DecidedNode) _tree.node(pid()))._split;
return isLeftChild() ? parentSplit._tree_p0 : parentSplit._tree_p1;
}
}
public final static class LeafNode extends Node {
public float _pred;
public LeafNode( DTree tree, int pid ) { super(tree,pid); tree._leaves++; }
public LeafNode( DTree tree, int pid, int nid ) { super(tree,pid,nid); tree._leaves++; }
public LeafNode( LeafNode node, DTree tree) {
super(tree,node._pid, node._nid, true);
_pred = node._pred;
}
@Override public String toString() { return "Leaf#"+_nid+" = "+_pred; }
@Override public final StringBuilder toString2(StringBuilder sb, int depth) {
for( int d=0; d<depth; d++ ) sb.append(" ");
sb.append(_nid).append(" ");
return sb.append("pred=").append(_pred).append("\n");
}
    // Insert just the prediction: a single 4-byte float.
@Override protected AutoBuffer compress(AutoBuffer ab, AutoBuffer abAux) {
assert !Double.isNaN(_pred); return ab.put4f(_pred);
}
@Override protected int size() { return 4; }
@Override protected int numNodes() { return 0; }
public final double pred() { return _pred; }
// returns prediction calculated while building the regression tree (extract it from Split)
// for some distributions this can be used to calculate the leaf node predictions
public final double getSplitPrediction() {
DTree.DecidedNode parent = (DTree.DecidedNode) _tree.node(_pid);
boolean isLeft = parent._nids[0] == _nid;
return isLeft ? parent._split._tree_p0 : parent._split._tree_p1;
}
}
final static public int NO_PARENT = -1;
static public boolean isRootNode(Node n) { return n._pid == NO_PARENT; }
public transient AutoBuffer _abAux;
// Build a compressed-tree struct
public CompressedTree compress(int tid, int cls, String[][] domains) {
int sz = root().size();
if( root() instanceof LeafNode ) sz += 3; // Oops - tree-stump
AutoBuffer ab = new AutoBuffer(sz);
_abAux = new AutoBuffer();
if( root() instanceof LeafNode ) // Oops - tree-stump
ab.put1(0).put2((char)65535); // Flag it special so the decompress doesn't look for top-level decision
root().compress(ab, _abAux); // Compress whole tree
assert ab.position() == sz;
return new CompressedTree(ab.buf(), _seed,tid,cls);
}
static Split findBestSplitPoint(DHistogram hs, int col, double min_rows, int constraint, double min, double max,
boolean useBounds, Distribution dist) {
if(hs._vals == null) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": histogram not filled yet.");
return null;
}
final int nbins = hs.nbins();
assert nbins >= 1;
final boolean hasPreds = hs.hasPreds();
final boolean hasDenom = hs.hasDenominator();
final boolean hasNomin = hs.hasNominator();
// Histogram arrays used for splitting, these are either the original bins
// (for an ordered predictor), or sorted by the mean response (for an
// unordered predictor, i.e. categorical predictor).
double[] vals = hs._vals;
final int vals_dim = hs._vals_dim;
int idxs[] = null; // and a reverse index mapping
// For categorical (unordered) predictors, sort the bins by average
// prediction then look for an optimal split.
if( hs._isInt == 2 && hs._step == 1 ) {
// Sort the index by average response
idxs = MemoryManager.malloc4(nbins+1); // Reverse index
for( int i=0; i<nbins+1; i++ ) idxs[i] = i; //index in 0..nbins-1
final double[] avgs = MemoryManager.malloc8d(nbins+1);
for( int i=0; i<nbins; i++ ) avgs[i] = hs.w(i)==0 ? -Double.MAX_VALUE /* value doesn't matter - see below for sending empty buckets (unseen levels) into the NA direction */: hs.wY(i) / hs.w(i); // Average response
avgs[nbins] = Double.MAX_VALUE;
ArrayUtils.sort(idxs, avgs);
// Fill with sorted data. Makes a copy, so the original data remains in
// its original order.
vals = MemoryManager.malloc8d(vals_dim*nbins);
for( int i=0; i<nbins; i++ ) {
int id = idxs[i];
vals[vals_dim*i+0] = hs._vals[vals_dim*id+0];
vals[vals_dim*i+1] = hs._vals[vals_dim*id+1];
vals[vals_dim*i+2] = hs._vals[vals_dim*id+2];
if (hasPreds) {
vals[vals_dim * i + 3] = hs._vals[vals_dim * id + 3];
vals[vals_dim * i + 4] = hs._vals[vals_dim * id + 4];
if (hasDenom)
vals[vals_dim * i + 5] = hs._vals[vals_dim * id + 5];
if (hasNomin)
vals[vals_dim * i + 6] = hs._vals[vals_dim * id + 6];
}
        if (LOG.isTraceEnabled()) LOG.trace(vals[vals_dim*i] + " obs have avg response [" + i + "]=" + avgs[id]);
}
}
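    // Rationale: for a squared-error criterion, ordering the categorical levels by their mean response and
    // then scanning only "prefix vs. suffix" splits over that ordering is sufficient to find the best
    // two-way grouping (the classic Fisher/CART ordering result), so the categorical case reduces to the
    // same left/right scan used for numeric bins.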
// Compute mean/var for cumulative bins from 0 to nbins inclusive.
double wlo[] = MemoryManager.malloc8d(nbins+1);
double wYlo[] = MemoryManager.malloc8d(nbins+1);
double wYYlo[] = MemoryManager.malloc8d(nbins+1);
double pr1lo[] = hasPreds ? MemoryManager.malloc8d(nbins+1) : null;
double pr2lo[] = hasPreds ? MemoryManager.malloc8d(nbins+1) : null;
double denlo[] = hasDenom ? MemoryManager.malloc8d(nbins+1) : wlo;
double nomlo[] = hasNomin ? MemoryManager.malloc8d(nbins+1) : wYlo;
for( int b=1; b<=nbins; b++ ) {
int id = vals_dim*(b-1);
double n0 = wlo[b-1], n1 = vals[id+0];
if( n0==0 && n1==0 )
continue;
double m0 = wYlo[b-1], m1 = vals[id+1];
double s0 = wYYlo[b-1], s1 = vals[id+2];
wlo[b] = n0+n1;
wYlo[b] = m0+m1;
wYYlo[b] = s0+s1;
if (hasPreds) {
double p10 = pr1lo[b - 1], p11 = vals[id + 3];
double p20 = pr2lo[b - 1], p21 = vals[id + 4];
pr1lo[b] = p10 + p11;
pr2lo[b] = p20 + p21;
if (hasDenom) {
double d0 = denlo[b - 1], d1 = vals[id + 5];
denlo[b] = d0 + d1;
}
if (hasNomin) {
double d0 = nomlo[b - 1], d1 = vals[id + 6];
nomlo[b] = d0 + d1;
}
}
}
final double wNA = hs.wNA();
double tot = wlo[nbins] + wNA; //total number of (weighted) rows
// Is any split possible with at least min_obs?
if( tot < 2*min_rows ) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": min_rows: total number of observations is " + tot);
return null;
}
// If we see zero variance, we must have a constant response in this
// column. Normally this situation is cut out before we even try to split,
// but we might have NA's in THIS column...
double wYNA = hs.wYNA();
double wYYNA = hs.wYYNA();
double var = (wYYlo[nbins]+wYYNA)*tot - (wYlo[nbins]+wYNA)*(wYlo[nbins]+wYNA);
if( ((float)var) == 0f ) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": var = 0.");
return null;
}
final double denNA = hasDenom ? hs.denNA() : wNA;
final double nomNA = hasNomin ? hs.nomNA() : wYNA;
// Compute mean/var for cumulative bins from nbins to 0 inclusive.
double whi[] = MemoryManager.malloc8d(nbins+1);
double wYhi[] = MemoryManager.malloc8d(nbins+1);
double wYYhi[] = MemoryManager.malloc8d(nbins+1);
double pr1hi[] = hasPreds ? MemoryManager.malloc8d(nbins+1) : null;
double pr2hi[] = hasPreds ? MemoryManager.malloc8d(nbins+1) : null;
double denhi[] = hasDenom ? MemoryManager.malloc8d(nbins+1) : whi;
double nomhi[] = hasNomin ? MemoryManager.malloc8d(nbins+1) : wYhi;
for( int b=nbins-1; b>=0; b-- ) {
double n0 = whi[b+1], n1 = vals[vals_dim*b];
if( n0==0 && n1==0 )
continue;
double m0 = wYhi[b+1], m1 = vals[vals_dim*b+1];
double s0 = wYYhi[b+1], s1 = vals[vals_dim*b+2];
whi[b] = n0+n1;
wYhi[b] = m0+m1;
wYYhi[b] = s0+s1;
if (hasPreds) {
double p10 = pr1hi[b + 1], p11 = vals[vals_dim * b + 3];
double p20 = pr2hi[b + 1], p21 = vals[vals_dim * b + 4];
pr1hi[b] = p10 + p11;
pr2hi[b] = p20 + p21;
if (hasDenom) {
double d0 = denhi[b + 1], d1 = vals[vals_dim * b + 5];
denhi[b] = d0 + d1;
}
if (hasNomin) {
double d0 = nomhi[b + 1], d1 = vals[vals_dim * b + 6];
nomhi[b] = d0 + d1;
}
}
assert MathUtils.compare(wlo[b]+ whi[b]+wNA,tot,1e-5,1e-5);
}
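    // With prefix sums (wlo/wYlo/wYYlo) and suffix sums (whi/wYhi/wYYhi) in hand, the squared error of any
    // candidate split at bin b is available in O(1):
    //   SE_left(b)  = wYYlo[b] - wYlo[b]^2 / wlo[b]
    //   SE_right(b) = wYYhi[b] - wYhi[b]^2 / whi[b]
    // so the scan below is linear in the number of bins.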
double best_seL=Double.MAX_VALUE; // squared error for left side of the best split (so far)
double best_seR=Double.MAX_VALUE; // squared error for right side of the best split (so far)
DHistogram.NASplitDir nasplit = DHistogram.NASplitDir.None;
// squared error of all non-NAs
double seNonNA = wYYhi[0] - wYhi[0]* wYhi[0]/ whi[0]; // Squared Error with no split
if (seNonNA < 0) seNonNA = 0;
double seBefore = seNonNA;
double nLeft = 0;
double nRight = 0;
double predLeft = 0;
double predRight = 0;
double tree_p0 = 0;
double tree_p1 = 0;
// if there are any NAs, then try to split them from the non-NAs
if (wNA>=min_rows) {
double seAll = (wYYhi[0] + wYYNA) - (wYhi[0] + wYNA) * (wYhi[0] + wYNA) / (whi[0] + wNA);
double seNA = wYYNA - wYNA * wYNA / wNA;
if (seNA < 0) seNA = 0;
best_seL = seNonNA;
best_seR = seNA;
nasplit = DHistogram.NASplitDir.NAvsREST;
seBefore = seAll;
nLeft = whi[0]; //all non-NAs
predLeft = wYhi[0];
nRight = wNA;
predRight = wYNA;
if(hasDenom){
tree_p0 = nomhi[0] /denhi[0];
tree_p1 = nomNA / denNA;
} else {
tree_p0 = predLeft / nLeft;
tree_p1 = predRight / nRight;
}
}
// Now roll the split-point across the bins. There are 2 ways to do this:
// split left/right based on being less than some value, or being equal/
// not-equal to some value. Equal/not-equal makes sense for categoricals
// but both splits could work for any integral datatype. Do the less-than
// splits first.
int best=0; // The no-split
byte equal=0; // Ranged check
for( int b=1; b<=nbins-1; b++ ) {
if( vals[vals_dim*b] == 0 ) continue; // Ignore empty splits
if( wlo[b]+wNA < min_rows ) continue;
if( whi[b]+wNA < min_rows ) break; // w1 shrinks at the higher bin#s, so if it fails once it fails always
// We're making an unbiased estimator, so that MSE==Var.
// Then Squared Error = MSE*N = Var*N
// = (wYY/N - wY^2)*N
// = wYY - N*wY^2
// = wYY - N*(wY/N)(wY/N)
// = wYY - wY^2/N
// no NAs
if (wNA==0) {
double selo = wYYlo[b] - wYlo[b] * wYlo[b] / wlo[b];
double sehi = wYYhi[b] - wYhi[b] * wYhi[b] / whi[b];
if (selo < 0) selo = 0; // Roundoff error; sometimes goes negative
if (sehi < 0) sehi = 0; // Roundoff error; sometimes goes negative
if ((selo + sehi < best_seL + best_seR) || // Strictly less error?
// Or tied MSE, then pick split towards middle bins
(selo + sehi == best_seL + best_seR &&
Math.abs(b - (nbins >> 1)) < Math.abs(best - (nbins >> 1)))) {
double tmpPredLeft;
double tmpPredRight;
if(constraint != 0 && dist._family.equals(DistributionFamily.quantile)) {
int quantileBinLeft = 0;
int quantileBinRight = 0;
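              // Quantile distribution under a monotone constraint: walk the cumulative weights to find, on
              // each side of the candidate split, the first bin whose cumulative (weighted) row count
              // exceeds alpha * n; the cumulative weighted response up to that bin (measured from the split
              // bin on the right side) is then used as that side's tree prediction for the constraint check.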
for (int bin = 1; bin <= nbins; bin++) {
// left tree prediction quantile
if (bin <= b) {
double n = wlo[b];
double quantilePosition = dist._quantileAlpha * n;
if(quantilePosition < wlo[bin]){
quantileBinLeft = bin;
bin = b+1;
}
// right tree prediction quantile
} else {
double n = (wlo[nbins] - wlo[b]);
double quantilePosition = dist._quantileAlpha * n;
if (quantilePosition < wlo[bin] - wlo[b]) {
quantileBinRight = bin;
break;
}
}
}
tmpPredLeft = wYlo[quantileBinLeft];
tmpPredRight = wYlo[quantileBinRight] - wYlo[b];
} else {
tmpPredLeft = hasDenom ? nomlo[b] / denlo[b] : wYlo[b] / wlo[b];
tmpPredRight = hasDenom ? nomhi[b] / denhi[b] : wYhi[b] / whi[b];
}
if (constraint == 0 || (constraint * tmpPredLeft <= constraint * tmpPredRight)) {
best_seL = selo;
best_seR = sehi;
best = b;
nLeft = wlo[best];
nRight = whi[best];
predLeft = wYlo[best];
predRight = wYhi[best];
tree_p0 = tmpPredLeft;
tree_p1 = tmpPredRight;
}
}
} else {
// option 1: split the numeric feature and throw NAs to the left
{
double selo = wYYlo[b] + wYYNA - (wYlo[b] + wYNA) * (wYlo[b] + wYNA) / (wlo[b] + wNA);
double sehi = wYYhi[b] - wYhi[b] * wYhi[b] / whi[b];
if (selo < 0) selo = 0; // Roundoff error; sometimes goes negative
if (sehi < 0) sehi = 0; // Roundoff error; sometimes goes negative
if ((selo + sehi < best_seL + best_seR) || // Strictly less error?
// Or tied SE, then pick split towards middle bins
(selo + sehi == best_seL + best_seR &&
Math.abs(b - (nbins >> 1)) < Math.abs(best - (nbins >> 1)))) {
if((wlo[b] + wNA) >= min_rows && whi[b] >= min_rows) {
double tmpPredLeft;
double tmpPredRight;
if(constraint != 0 && dist._family.equals(DistributionFamily.quantile)) {
int quantileBinLeft = 0;
int quantileBinRight = 0;
for (int bin = 1; bin <= nbins; bin++) {
// left tree prediction quantile
if (bin <= b) {
double n = wlo[b];
double quantilePosition = dist._quantileAlpha * n;
if(quantilePosition < wlo[bin]){
quantileBinLeft = bin;
bin = b+1;
}
// right tree prediction quantile
} else {
double n = (wlo[nbins] - wlo[b]);
double quantilePosition = dist._quantileAlpha * n;
if (quantilePosition < wlo[bin] - wlo[b]) {
quantileBinRight = bin;
break;
}
}
}
tmpPredLeft = wYlo[quantileBinLeft] + wYNA;
tmpPredRight = wYlo[quantileBinRight] - wYlo[b];
} else {
tmpPredLeft = hasDenom ? (nomlo[b] + nomNA) / (denlo[b] + denNA) : (wYlo[b] + wYNA) / (wlo[b] + wNA);
tmpPredRight = hasDenom ? nomhi[b] / denhi[b] : wYhi[b] / whi[b];
}
if (constraint == 0 || (constraint * tmpPredLeft <= constraint * tmpPredRight)) {
best_seL = selo;
best_seR = sehi;
best = b;
nLeft = wlo[best] + wNA;
nRight = whi[best];
predLeft = wYlo[best] + wYNA;
predRight = wYhi[best];
nasplit = DHistogram.NASplitDir.NALeft;
tree_p0 = tmpPredLeft;
tree_p1 = tmpPredRight;
}
}
}
}
// option 2: split the numeric feature and throw NAs to the right
{
double selo = wYYlo[b] - wYlo[b] * wYlo[b] / wlo[b];
double sehi = wYYhi[b]+wYYNA - (wYhi[b]+wYNA) * (wYhi[b]+wYNA) / (whi[b]+wNA);
if (selo < 0) selo = 0; // Roundoff error; sometimes goes negative
if (sehi < 0) sehi = 0; // Roundoff error; sometimes goes negative
if ((selo + sehi < best_seL + best_seR) || // Strictly less error?
// Or tied SE, then pick split towards middle bins
(selo + sehi == best_seL + best_seR &&
Math.abs(b - (nbins >> 1)) < Math.abs(best - (nbins >> 1)))) {
if( wlo[b] >= min_rows && (whi[b] + wNA) >= min_rows ) {
double tmpPredLeft;
double tmpPredRight;
if(constraint != 0 && dist._family.equals(DistributionFamily.quantile)) {
int quantileBinLeft = 0;
int quantileBinRight = 0;
for (int bin = 1; bin <= nbins; bin++) {
// left tree prediction quantile
if (bin <= b) {
double n = wlo[b];
double quantilePosition = dist._quantileAlpha * n;
if(quantilePosition < wlo[bin]){
quantileBinLeft = bin;
bin = b+1;
}
// right tree prediction quantile
} else {
double n = (wlo[nbins] - wlo[b]);
double quantilePosition = dist._quantileAlpha * n;
if (quantilePosition < wlo[bin] - wlo[b]) {
quantileBinRight = bin;
break;
}
}
}
tmpPredLeft = wYlo[quantileBinLeft];
tmpPredRight = wYlo[quantileBinRight] - wYlo[b] + wYNA;
} else {
tmpPredLeft = hasDenom ? nomlo[b] / denlo[b] : wYlo[b] / wlo[b];
tmpPredRight = hasDenom ? (nomhi[b] + nomNA) / (denhi[b] + denNA) : (wYhi[b] + wYNA) / (whi[b] + wNA);
}
if (constraint == 0 || (constraint * tmpPredLeft <= constraint * tmpPredRight)) {
best_seL = selo;
best_seR = sehi;
best = b;
nLeft = wlo[best];
nRight = whi[best] + wNA;
predLeft = wYlo[best];
predRight = wYhi[best] + wYNA;
nasplit = DHistogram.NASplitDir.NARight;
tree_p0 = tmpPredLeft;
tree_p1 = tmpPredRight;
}
}
}
}
}
}
if( best==0 && nasplit== DHistogram.NASplitDir.None) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": no optimal split found:\n" + hs);
return null;
}
//if( se <= best_seL+best_se1) return null; // Ultimately roundoff error loses, and no split actually helped
if (!(best_seL+ best_seR < seBefore * (1- hs._minSplitImprovement))) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": not enough relative improvement: " + (1-(best_seL + best_seR) / seBefore) + "\n" + hs);
return null;
}
assert(Math.abs(tot - (nRight + nLeft)) < 1e-5*tot);
if( MathUtils.equalsWithinOneSmallUlp((float)(predLeft / nLeft),(float)(predRight / nRight)) ) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": Predictions for left/right are the same.");
return null;
}
if (nLeft < min_rows || nRight < min_rows) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": split would violate min_rows limit.");
return null;
}
// FIXME (PUBDEV-7553): these asserts do not hold because histogram doesn't skip rows with NA response
// assert hasNomin || nomLeft == predLeft;
// assert hasNomin || nomRight == predRight;
final double node_p0 = predLeft / nLeft;
final double node_p1 = predRight / nRight;
if (constraint != 0) {
if (constraint * tree_p0 > constraint * tree_p1) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": split would violate monotone constraint.");
return null;
}
}
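    // Bound handling: when useBounds is set, a child prediction that falls outside [min, max] is clamped to
    // the violated bound rather than rejecting the split; the pr1*/pr2* accumulators appear to carry the
    // squared error the corresponding side would incur when predicting the lower/upper bound, so the side's
    // SE is swapped for that precomputed value before re-checking the improvement threshold below.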
if (!Double.isNaN(min)) {
if (tree_p0 < min) {
if (! useBounds) {
if (LOG.isTraceEnabled()) LOG.trace("minimum constraint violated in the left split of " + hs._name + ": node will not split");
return null;
}
if (LOG.isTraceEnabled()) LOG.trace("minimum constraint violated in the left split of " + hs._name + ": left node will predict minimum bound: " + min);
tree_p0 = min;
if (nasplit == DHistogram.NASplitDir.NAvsREST) {
best_seL = pr1hi[0];
} else if (nasplit == DHistogram.NASplitDir.NALeft) {
best_seL = pr1lo[best] + hs.seP1NA();
} else {
best_seL = pr1lo[best];
}
}
if (tree_p1 < min) {
if (! useBounds) {
if (LOG.isTraceEnabled()) LOG.trace("minimum constraint violated in the right split of " + hs._name + ": node will not split");
return null;
}
if (LOG.isTraceEnabled()) LOG.trace("minimum constraint violated in the right split of " + hs._name + ": right node will predict minimum bound: " + min);
tree_p1 = min;
if (nasplit == DHistogram.NASplitDir.NAvsREST) {
best_seR = hs.seP1NA();
} else if (nasplit == DHistogram.NASplitDir.NARight) {
best_seR = pr1hi[best] + hs.seP1NA();
} else {
best_seR = pr1hi[best];
}
}
}
if (!Double.isNaN(max)) {
if (tree_p0 > max) {
if (! useBounds) {
if (LOG.isTraceEnabled()) LOG.trace("minimum constraint violated in the left split of " + hs._name + ": node will not split");
return null;
}
if (LOG.isTraceEnabled()) LOG.trace("maximum constraint violated in the left split of " + hs._name + ": left node will predict maximum bound: " + max);
tree_p0 = max;
if (nasplit == DHistogram.NASplitDir.NAvsREST) {
best_seL = pr2hi[0];
} else if (nasplit == DHistogram.NASplitDir.NALeft) {
best_seL = pr2lo[best] + hs.seP2NA();
} else {
best_seL = pr2lo[best];
}
}
if (tree_p1 > max) {
if (! useBounds) {
if (LOG.isTraceEnabled()) LOG.trace("minimum constraint violated in the right split of " + hs._name + ": node will not split");
return null;
}
if (LOG.isTraceEnabled()) LOG.trace("maximum constraint violated in the right split of " + hs._name + ": right node will predict maximum bound: " + max);
tree_p1 = max;
if (nasplit == DHistogram.NASplitDir.NAvsREST) {
best_seR = hs.seP2NA();
} else if (nasplit == DHistogram.NASplitDir.NARight) {
best_seR = pr2hi[best] + hs.seP2NA();
} else {
best_seR = pr2hi[best];
}
}
}
if (!(best_seL + best_seR < seBefore * (1- hs._minSplitImprovement))) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": not enough relative improvement: " + (1-(best_seL + best_seR) / seBefore) + "\n" + hs);
return null;
}
if( MathUtils.equalsWithinOneSmallUlp((float) tree_p0,(float) tree_p1) ) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": Predictions for left/right are the same.");
return null;
}
// For categorical (unordered) predictors, we sorted the bins by average
// prediction then found the optimal split on sorted bins
IcedBitSet bs = null; // In case we need an arbitrary bitset
if (idxs != null // We sorted bins; need to build a bitset
&& nasplit != DHistogram.NASplitDir.NAvsREST) { // NA vs REST don't need a bitset
final int off = (int) hs._min;
bs = new IcedBitSet(nbins, off);
equal = fillBitSet(hs, off, idxs, best, nbins, bs);
if (equal < 0)
return null;
}
// if still undecided (e.g., if there are no NAs in training), pick a good default direction for NAs in test time
if (nasplit == DHistogram.NASplitDir.None) {
nasplit = nLeft > nRight ? DHistogram.NASplitDir.Left : DHistogram.NASplitDir.Right;
}
assert constraint == 0 || constraint * tree_p0 <= constraint * tree_p1;
assert (Double.isNaN(min) || min <= tree_p0) && (Double.isNaN(max) || tree_p0 <= max);
assert (Double.isNaN(min) || min <= tree_p1) && (Double.isNaN(max) || tree_p1 <= max);
Split split = new Split(col, best, nasplit, bs, equal, seBefore, best_seL, best_seR, nLeft, nRight, node_p0, node_p1, tree_p0, tree_p1);
if (LOG.isTraceEnabled()) LOG.trace("splitting on " + hs._name + ": " + split);
return split;
}
static Split findBestSplitPointUplift(DHistogram hs, int col, double min_rows) {
if(hs._valsUplift == null) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": histogram not filled yet.");
return null;
}
final int nbins = hs.nbins();
assert nbins >= 1;
final Divergence upliftMetric = hs._upliftMetric;
// Histogram arrays used for splitting, these are either the original bins
// (for an ordered predictor), or sorted by the mean response (for an
// unordered predictor, i.e. categorical predictor).
double[] vals = hs._vals;
final int vals_dim = hs._vals_dim;
double[] valsUplift = hs._valsUplift;
      // per-bin layout of _valsUplift (valsUpliftDim entries per bin):
      // 0: number of rows where treatment=1
      // 1: number of rows where response=1 AND treatment=1
      // 2: number of rows where treatment=0
      // 3: number of rows where response=1 AND treatment=0
final int valsUpliftDim = 4;
int idxs[] = null; // and a reverse index mapping
// For categorical (unordered) predictors, sort the bins by average
// prediction then look for an optimal split.
if( hs._isInt == 2 && hs._step == 1 ) {
// Sort the index by average response
idxs = MemoryManager.malloc4(nbins+1); // Reverse index
for( int i=0; i<nbins+1; i++ ) idxs[i] = i; //index in 0..nbins-1
final double[] avgs = MemoryManager.malloc8d(nbins+1);
for( int i=0; i<nbins; i++ ) avgs[i] = hs.w(i)==0 ? -Double.MAX_VALUE /* value doesn't matter - see below for sending empty buckets (unseen levels) into the NA direction */: hs.wY(i) / hs.w(i); // Average response
avgs[nbins] = Double.MAX_VALUE;
ArrayUtils.sort(idxs, avgs);
// Fill with sorted data. Makes a copy, so the original data remains in
// its original order.
vals = MemoryManager.malloc8d(vals_dim*nbins);
      valsUplift = MemoryManager.malloc8d(valsUpliftDim*nbins);
for( int i=0; i<nbins; i++ ) {
int id = idxs[i];
vals[vals_dim*i+0] = hs._vals[vals_dim*id+0];
vals[vals_dim*i+1] = hs._vals[vals_dim*id+1];
vals[vals_dim*i+2] = hs._vals[vals_dim*id+2];
        valsUplift[valsUpliftDim * i]     = hs._valsUplift[valsUpliftDim * id];
        valsUplift[valsUpliftDim * i + 1] = hs._valsUplift[valsUpliftDim * id + 1];
        valsUplift[valsUpliftDim * i + 2] = hs._valsUplift[valsUpliftDim * id + 2];
        valsUplift[valsUpliftDim * i + 3] = hs._valsUplift[valsUpliftDim * id + 3];
        if (LOG.isTraceEnabled()) LOG.trace(vals[vals_dim*i] + " obs have avg response [" + i + "]=" + avgs[id]);
}
}
// Compute mean/var for cumulative bins from 0 to nbins inclusive.
double wlo[] = MemoryManager.malloc8d(nbins+1);
double wYlo[] = MemoryManager.malloc8d(nbins+1);
double wYYlo[] = MemoryManager.malloc8d(nbins+1);
double[] numloTreat = MemoryManager.malloc8d(nbins + 1); // cumulative sums of rows where treatment=1
double[] resploTreat = MemoryManager.malloc8d(nbins + 1); // cumulative sums of rows where response=1 AND treatment=1
double[] numloContr = MemoryManager.malloc8d(nbins + 1); // cumulative sums of rows where treatment=0
double[] resploContr = MemoryManager.malloc8d(nbins + 1); // cumulative sums of rows where response=1 AND treatment=0
for( int b = 1; b <= nbins; b++ ) {
int id = vals_dim * (b - 1);
double n0 = wlo[b - 1], n1 = vals[id + 0];
if( n0==0 && n1==0 )
continue;
double m0 = wYlo[b - 1], m1 = vals[id + 1];
double s0 = wYYlo[b - 1], s1 = vals[id + 2];
wlo[b] = n0+n1;
wYlo[b] = m0+m1;
wYYlo[b] = s0+s1;
id = valsUpliftDim * (b - 1);
double nt0 = numloTreat[b - 1], nt1 = valsUplift[id];
numloTreat[b] = nt0 + nt1;
double dt0 = resploTreat[b - 1], dt1 = valsUplift[id + 1];
resploTreat[b] = dt0 + dt1;
double nc0 = numloContr[b - 1], nc1 = valsUplift[id + 2];
numloContr[b] = nc0 + nc1;
double dc0 = resploContr[b - 1], dc1 = valsUplift[id + 3];
resploContr[b] = dc0 + dc1;
}
final double wNA = hs.wNA();
double tot = wlo[nbins] + wNA; //total number of (weighted) rows
// Is any split possible with at least min_obs?
if( tot < 2*min_rows ) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": min_rows: total number of observations is " + tot);
return null;
}
// If we see zero variance, we must have a constant response in this
// column. Normally this situation is cut out before we even try to split,
// but we might have NA's in THIS column...
double wYNA = hs.wYNA();
double wYYNA = hs.wYYNA();
double var = (wYYlo[nbins]+wYYNA)*tot - (wYlo[nbins]+wYNA)*(wYlo[nbins]+wYNA);
if( ((float)var) == 0f ) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": var = 0.");
return null;
}
// Compute mean/var for cumulative bins from nbins to 0 inclusive.
double whi[] = MemoryManager.malloc8d(nbins+1);
double wYhi[] = MemoryManager.malloc8d(nbins+1);
double wYYhi[] = MemoryManager.malloc8d(nbins+1);
double[] numhiTreat = MemoryManager.malloc8d(nbins+1); // cumulative sums of rows where treatment=1
double[] resphiTreat = MemoryManager.malloc8d(nbins+1); // cumulative sums of rows where response=1 AND treatment=1
double[] numhiContr = MemoryManager.malloc8d(nbins+1); // cumulative sums of rows where treatment=0
double[] resphiContr = MemoryManager.malloc8d(nbins+1); // cumulative sums of rows where response=1 AND treatment=0
for( int b = nbins-1; b >= 0; b-- ) {
int id = vals_dim * b;
double n0 = whi[b+1], n1 = vals[id];
if( n0==0 && n1==0 )
continue;
double m0 = wYhi[b + 1], m1 = vals[id + 1];
double s0 = wYYhi[b + 1], s1 = vals[id + 2];
whi[b] = n0+n1;
wYhi[b] = m0+m1;
wYYhi[b] = s0+s1;
id = valsUpliftDim * b;
double nt0 = numhiTreat[b + 1], nt1 = valsUplift[id];
numhiTreat[b] = nt0 + nt1;
double dt0 = resphiTreat[b + 1], dt1 = valsUplift[id + 1];
resphiTreat[b] = dt0 + dt1;
double nc0 = numhiContr[b + 1], nc1 = valsUplift[id + 2];
numhiContr[b] = nc0 + nc1;
double dc0 = resphiContr[b + 1], dc1 = valsUplift[id + 3];
resphiContr[b] = dc0 + dc1;
assert MathUtils.compare(wlo[b]+ whi[b]+wNA,tot,1e-5,1e-5);
}
double best_seL=Double.MAX_VALUE; // squared error for left side of the best split (so far)
double best_seR=Double.MAX_VALUE; // squared error for right side of the best split (so far)
DHistogram.NASplitDir nasplit = DHistogram.NASplitDir.None;
// squared error of all non-NAs
double seNonNA = wYYhi[0] - wYhi[0]* wYhi[0]/ whi[0]; // Squared Error with no split
if (seNonNA < 0) seNonNA = 0;
double seBefore = seNonNA;
double nLeft = 0;
double nRight = 0;
double predLeft = 0;
double predRight = 0;
double tree_p0 = 0;
double tree_p1 = 0;
double numTreatNA = hs.numTreatmentNA();
double respTreatNA = hs.respTreatmentNA();
double numContrNA = hs.numControlNA();
double respContrNA = hs.respControlNA();
    // bookkeeping for the best split found so far:
double bestNLCT1 = 0; // number of rows where treatment=1 in the left child
double bestNLCT0 = 0; // number of rows where treatment=0 in the left child
double bestNRCT1 = 0; // number of rows where treatment=1 in the right child
double bestNRCT0 = 0; // number of rows where treatment=0 in the right child
    double bestPrLY1CT1 = 0; // probability of response=1 given treatment=1 in the left child
    double bestPrLY1CT0 = 0; // probability of response=1 given treatment=0 in the left child
    double bestPrRY1CT1 = 0; // probability of response=1 given treatment=1 in the right child
    double bestPrRY1CT0 = 0; // probability of response=1 given treatment=0 in the right child
double nCT1 = numhiTreat[0]; // number of rows where treatment=1 before split
double nCT0 = numhiContr[0]; // number of rows where treatment=0 before split
double nCT1Y1hi = resphiTreat[0]; // number of rows where treatment=1 and response=1 before split
double nCT0Y1hi = resphiContr[0]; // number of rows where treatment=0 and response=1 before split
// no response in treatment or control group -> can't split
if(nCT1 == 0 || nCT0 == 0 || nCT1Y1hi == 0 || nCT0Y1hi == 0){
return null;
}
double prY1CT1 = nCT1Y1hi/nCT1; // probability treatment=1 AND response=1 before split
double prY1CT0 = nCT0Y1hi/nCT0; // probability treatment=0 AND response=1 before split
double bestUpliftGain = upliftMetric.node(prY1CT1 , prY1CT0);
double upliftGainBefore = bestUpliftGain; // uplift gain before split
// if there are any NAs, then try to split them from the non-NAs
if (wNA>=min_rows) {
double prCT1All = (nCT1 + numTreatNA + 1)/(nCT0 + numContrNA + nCT1 + numTreatNA + 2);
double prCT0All = 1-prCT1All;
double prY1CT1All = (nCT1Y1hi + respTreatNA) / (nCT1 + numTreatNA);
double prY1CT0All = (nCT0Y1hi + respContrNA) / (nCT0 + numContrNA);
double prLCT1 = (nCT1 + 1)/(nCT0 + nCT1 + 2);
double prLCT0 = 1 - prLCT1;
double prL = prLCT1 * prCT1All + prLCT0 * prCT0All;
double prR = 1 - prL;
double nLCT1 = nCT1Y1hi;
double nLCT0 = nCT0Y1hi;
double prLY1CT1 = (nCT1Y1hi + 1) / (numhiTreat[0] + 2);
double prLY1CT0 = (nCT0Y1hi + 1) / (numhiContr[0] + 2);
double nRCT1 = numTreatNA;
double nRCT0 = numContrNA;
double prRY1CT1 = (respTreatNA + 1) / (numTreatNA + 2);
double prRY1CT0 = (respContrNA + 1) / (numContrNA + 2);
bestUpliftGain = upliftMetric.value(prY1CT1All, prY1CT0All, prL, prLY1CT1, prLY1CT0, prR, prRY1CT1, prRY1CT0, prCT1All, prCT0All, prLCT1, prLCT0);
if (bestUpliftGain != Double.POSITIVE_INFINITY) {
bestNLCT1 = nLCT1;
bestNLCT0 = nLCT0;
bestNRCT1 = nRCT1;
bestNRCT0 = nRCT0;
bestPrLY1CT1 = prLY1CT1;
bestPrLY1CT0 = prLY1CT0;
bestPrRY1CT1 = prRY1CT1;
bestPrRY1CT0 = prRY1CT0;
double seAll = (wYYhi[0] + wYYNA) - (wYhi[0] + wYNA) * (wYhi[0] + wYNA) / (whi[0] + wNA);
double seNA = wYYNA - wYNA * wYNA / wNA;
if (seNA < 0) seNA = 0;
best_seL = seNonNA;
best_seR = seNA;
nasplit = DHistogram.NASplitDir.NAvsREST;
seBefore = seAll;
nLeft = whi[0]; //all non-NAs
predLeft = wYhi[0];
nRight = wNA;
predRight = wYNA;
tree_p0 = predLeft / nLeft;
tree_p1 = predRight / nRight;
}
}
// Now roll the split-point across the bins. There are 2 ways to do this:
// split left/right based on being less than some value, or being equal/
// not-equal to some value. Equal/not-equal makes sense for categoricals
// but both splits could work for any integral datatype. Do the less-than
// splits first.
int best=0; // The no-split
byte equal=0; // Ranged check
for( int b=1; b<=nbins-1; b++ ) {
if( vals[vals_dim*b] == 0 ) continue; // Ignore empty splits
if( wlo[b]+wNA < min_rows ) continue;
if( whi[b]+wNA < min_rows ) break; // whi shrinks at the higher bin#s, so if it fails once it fails always
// We're making an unbiased estimator, so that MSE==Var.
// Then Squared Error = MSE*N = Var*N
// = (wYY/N - wY^2)*N
// = wYY - N*wY^2
// = wYY - N*(wY/N)(wY/N)
// = wYY - wY^2/N
// no NAs
if (wNA==0) {
double selo = wYYlo[b] - wYlo[b] * wYlo[b] / wlo[b];
double sehi = wYYhi[b] - wYhi[b] * wYhi[b] / whi[b];
if (selo < 0) selo = 0; // Roundoff error; sometimes goes negative
if (sehi < 0) sehi = 0; // Roundoff error; sometimes goes negative
nCT1 = numhiTreat[b];
nCT0 = numhiContr[b];
double prCT1 = (nCT1 + 1)/(nCT0 + nCT1 + 2);
double prCT0 = 1-prCT1;
double prLCT1 = (numloTreat[b] + 1)/(numloTreat[b] + numhiTreat[b] + 2);
double prLCT0 = 1 - prLCT1;
double prL = prLCT1 * prCT1 + prLCT0 * prCT0;
double prR = 1 - prL;
double nLCT1 = numloTreat[b];
double nLCT0 = numloContr[b];
double prLY1CT1 = (resploTreat[b] + 1) / (numloTreat[b] + 2);
double prLY1CT0 = (resploContr[b] + 1) / (numloContr[b] + 2);
double nRCT1 = numhiTreat[b];
double nRCT0 = numhiContr[b];
double prRY1CT1 = (resphiTreat[b] + 1) / (numhiTreat[b] + 2);
double prRY1CT0 = (resphiContr[b] + 1) / (numhiContr[b] + 2);
double upliftGain = upliftMetric.value(prY1CT1, prY1CT0, prL, prLY1CT1, prLY1CT0, prR, prRY1CT1, prRY1CT0, prCT1, prCT0, prLCT1, prLCT0);
if (upliftGain != Double.POSITIVE_INFINITY && upliftGain > bestUpliftGain) {
double tmpPredLeft = wYlo[b] / wlo[b];
double tmpPredRight = wYhi[b] / whi[b];
best_seL = selo;
best_seR = sehi;
best = b;
nLeft = wlo[best];
nRight = whi[best];
predLeft = wYlo[best];
predRight = wYhi[best];
tree_p0 = tmpPredLeft;
tree_p1 = tmpPredRight;
bestUpliftGain = upliftGain;
bestNLCT1 = nLCT1;
bestNLCT0 = nLCT0;
bestNRCT1 = nRCT1;
bestNRCT0 = nRCT0;
bestPrLY1CT1 = prLY1CT1;
bestPrLY1CT0 = prLY1CT0;
bestPrRY1CT1 = prRY1CT1;
bestPrRY1CT0 = prRY1CT0;
}
} else {
// option 1: split the numeric feature and throw NAs to the left
{
double selo = wYYlo[b] + wYYNA - (wYlo[b] + wYNA) * (wYlo[b] + wYNA) / (wlo[b] + wNA);
double sehi = wYYhi[b] - wYhi[b] * wYhi[b] / whi[b];
if (selo < 0) selo = 0; // Roundoff error; sometimes goes negative
if (sehi < 0) sehi = 0; // Roundoff error; sometimes goes negative
nCT1 = numhiTreat[b] + numTreatNA;
nCT0 = numhiContr[b] + numContrNA;
double prCT1 = (nCT1 + 1)/(nCT0 + nCT1 + 2);
double prCT0 = 1 - prCT1;
double prLCT1 = (numloTreat[b] + numTreatNA + 1)/(numloTreat[b] + numTreatNA + numhiTreat[b] + 2);
double prLCT0 = 1 - prLCT1;
double prL = prLCT1 * prCT1 + prLCT0 * prCT0;
double prR = 1 - prL;
double nLCT1 = numloTreat[b] + numTreatNA;
double nLCT0 = numloContr[b] + numContrNA;
double prLY1CT1 = (resploTreat[b] + respTreatNA + 1) / (numloTreat[b] + numTreatNA + 2);
double prLY1CT0 = (resploContr[b] + respContrNA + 1) / (numloContr[b] + numContrNA + 2);
double nRCT1 = numhiTreat[b];
double nRCT0 = numhiContr[b];
double prRY1CT1 = (resphiTreat[b] + 1) / (numhiTreat[b] + 2);
double prRY1CT0 = (resphiContr[b] + 1) / (numhiContr[b] + 2);
double upliftGain = upliftMetric.value(prY1CT1, prY1CT0, prL, prLY1CT1, prLY1CT0, prR, prRY1CT1, prRY1CT0, prCT1, prCT0, prLCT1, prLCT0);
if (upliftGain != Double.POSITIVE_INFINITY && upliftGain > bestUpliftGain) {
if((wlo[b] + wNA) >= min_rows && whi[b] >= min_rows) {
double tmpPredLeft = (wYlo[b] + wYNA) / (wlo[b] + wNA);
double tmpPredRight = wYhi[b] / whi[b];
best_seL = selo;
best_seR = sehi;
best = b;
nLeft = wlo[best] + wNA;
nRight = whi[best];
predLeft = wYlo[best] + wYNA;
predRight = wYhi[best];
nasplit = DHistogram.NASplitDir.NALeft;
tree_p0 = tmpPredLeft;
tree_p1 = tmpPredRight;
bestUpliftGain = upliftGain;
bestNLCT1 = nLCT1;
bestNLCT0 = nLCT0;
bestNRCT1 = nRCT1;
bestNRCT0 = nRCT0;
bestPrLY1CT1 = prLY1CT1;
bestPrLY1CT0 = prLY1CT0;
bestPrRY1CT1 = prRY1CT1;
bestPrRY1CT0 = prRY1CT0;
}
}
}
// option 2: split the numeric feature and throw NAs to the right
{
double selo = wYYlo[b] - wYlo[b] * wYlo[b] / wlo[b];
double sehi = wYYhi[b]+wYYNA - (wYhi[b]+wYNA) * (wYhi[b]+wYNA) / (whi[b]+wNA);
if (selo < 0) selo = 0; // Roundoff error; sometimes goes negative
if (sehi < 0) sehi = 0; // Roundoff error; sometimes goes negative
nCT1 = numhiTreat[b] + numTreatNA;
nCT0 = numhiContr[b] + numContrNA;
double prCT1 = (nCT1 + 1)/(nCT0 + nCT1 + 2);
double prCT0 = 1 - prCT1;
double prLCT1 = (numloTreat[b] + 1)/(numloTreat[b] + numhiTreat[0] + numTreatNA + 2);
double prLCT0 = 1 - prLCT1;
double prL = prLCT1 * prCT1 + prLCT0 * prCT0;
double prR = 1 - prL;
double nLCT1 = numloTreat[b];
double nLCT0 = numloContr[b];
double prLY1CT1 = (resploTreat[b] + respTreatNA + 1) / (numloTreat[b] + 2);
double prLY1CT0 = (resploContr[b] + respContrNA + 1) / (numloContr[b] + 2);
double nRCT1 = numhiTreat[b] + numTreatNA;
double nRCT0 = numhiContr[b] + numContrNA;
double prRY1CT1 = (resphiTreat[b] + 1) / (numhiTreat[b] + numTreatNA + 2);
double prRY1CT0 = (resphiContr[b] + 1) / (numhiContr[b] + numContrNA + 2);
double upliftGain = upliftMetric.value(prY1CT1, prY1CT0, prL, prLY1CT1, prLY1CT0, prR, prRY1CT1, prRY1CT0, prCT1, prCT0, prLCT1, prLCT0);
if (upliftGain != Double.POSITIVE_INFINITY && upliftGain > bestUpliftGain) {
if( wlo[b] >= min_rows && (whi[b] + wNA) >= min_rows ) {
double tmpPredLeft = wYlo[b] / wlo[b];
double tmpPredRight = (wYhi[b] + wYNA) / (whi[b] + wNA);
best_seL = selo;
best_seR = sehi;
best = b;
nLeft = wlo[best];
nRight = whi[best] + wNA;
predLeft = wYlo[best];
predRight = wYhi[best] + wYNA;
nasplit = DHistogram.NASplitDir.NARight;
tree_p0 = tmpPredLeft;
tree_p1 = tmpPredRight;
bestUpliftGain = upliftGain;
bestNLCT1 = nLCT1;
bestNLCT0 = nLCT0;
bestNRCT1 = nRCT1;
bestNRCT0 = nRCT0;
bestPrLY1CT1 = prLY1CT1;
bestPrLY1CT0 = prLY1CT0;
bestPrRY1CT1 = prRY1CT1;
bestPrRY1CT0 = prRY1CT0;
}
}
}
}
}
if( best==0 && nasplit== DHistogram.NASplitDir.None) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": no optimal split found:\n" + hs);
return null;
}
if (!(best_seL+ best_seR < seBefore * (1- hs._minSplitImprovement))) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": not enough relative improvement: " + (1-(best_seL + best_seR) / seBefore) + "\n" + hs);
return null;
}
assert(Math.abs(tot - (nRight + nLeft)) < 1e-5*tot);
if( MathUtils.equalsWithinOneSmallUlp((float)(predLeft / nLeft),(float)(predRight / nRight)) ) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": Predictions for left/right are the same.");
return null;
}
if (nLeft < min_rows || nRight < min_rows) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": split would violate min_rows limit.");
return null;
}
final double node_p0 = predLeft / nLeft;
final double node_p1 = predRight / nRight;
if( MathUtils.equalsWithinOneSmallUlp((float) tree_p0,(float) tree_p1) ) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": Predictions for left/right are the same.");
return null;
}
// For categorical (unordered) predictors, we sorted the bins by average
// prediction then found the optimal split on sorted bins
IcedBitSet bs = null; // In case we need an arbitrary bitset
if( idxs != null ) { // We sorted bins; need to build a bitset
final int off = (int) hs._min;
bs = new IcedBitSet(nbins, off);
equal = fillBitSet(hs, off, idxs, best, nbins, bs);
if (equal < 0)
return null;
}
// if still undecided (e.g., if there are no NAs in training), pick a good default direction for NAs in test time
if (nasplit == DHistogram.NASplitDir.None) {
nasplit = nLeft > nRight ? DHistogram.NASplitDir.Left : DHistogram.NASplitDir.Right;
}
Split split = new Split(col, best, nasplit, bs, equal, seBefore, best_seL, best_seR, nLeft, nRight, node_p0, node_p1, tree_p0, tree_p1, bestPrLY1CT1, bestPrLY1CT0, bestPrRY1CT1, bestPrRY1CT0, bestNLCT1, bestNLCT0, bestNRCT1, bestNRCT0, upliftGainBefore, bestUpliftGain);
if (LOG.isTraceEnabled()) LOG.trace("splitting on " + hs._name + ": " + split);
return split;
}
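// Illustrative sketch (not part of the original source): the split search above leans on two small
// identities. The per-side squared error is computed as SE = wYY - wY*wY/w (the derivation is in the
// comment inside the bin loop), and the treatment/control response rates are add-one (Laplace)
// smoothed as (count + 1) / (total + 2) before being handed to the uplift metric. The helper methods
// below restate both in isolation; their names are hypothetical and they are not used by the code above.
private static double squaredErrorSketch(double w, double wY, double wYY) {
double se = wYY - wY * wY / w; // SE = MSE*N = (wYY/N - (wY/N)^2) * N
return se < 0 ? 0 : se; // clamp round-off noise, as the real code does
}
private static double smoothedRateSketch(double positives, double total) {
return (positives + 1) / (total + 2); // add-one smoothing keeps the rate defined even for empty groups
}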
private static byte fillBitSet(DHistogram hs, int off, int[] idxs, int best, int nbins, IcedBitSet bs) {
for( int i=best; i<nbins; i++ )
bs.set(idxs[i] + off);
// Throw empty (unseen) categorical buckets into the majority direction (should behave like NAs during testing)
int nonEmptyThatWentRight = 0;
int nonEmptyThatWentLeft = 0;
for (int i=0; i<nbins; i++) {
if (hs.w(i) > 0) {
if (bs.contains(i + off))
nonEmptyThatWentRight++;
else
nonEmptyThatWentLeft++;
}
}
boolean shouldGoLeft = nonEmptyThatWentLeft >= nonEmptyThatWentRight;
for (int i=0; i<nbins; i++) {
assert(bs.isInRange(i + off));
if (hs.w(i) == 0) {
if (bs.contains(i + off) && shouldGoLeft) {
bs.clear(i + off);
}
if (!bs.contains(i + off) && !shouldGoLeft) {
bs.set(i + off);
}
}
}
if (bs.cardinality()==0 || bs.cardinality()==bs.size()) {
if (LOG.isTraceEnabled()) LOG.trace("can't split " + hs._name + ": no separation of categoricals possible");
return -1;
}
return (byte)(bs.max() <= 32 ? 2 : 3); // Flag for bitset split; also check max size
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/DTreeScorer.java
|
package hex.tree;
import water.*;
public abstract class DTreeScorer<T extends DTreeScorer<T>> extends MRTask<T> {
protected final int _ncols;
protected final int _nclass;
protected final int _skip;
protected final CompressedForest _cforest;
protected transient CompressedForest.LocalCompressedForest _forest;
protected SharedTree _st;
public DTreeScorer(int ncols, int nclass, SharedTree st, CompressedForest cforest) {
_ncols = ncols;
_nclass = nclass;
_cforest = cforest;
_st = st;
_skip = _st.numSpecialCols();
}
protected int ntrees() { return _cforest.ntrees(); }
@Override protected final void setupLocal() {
_forest = _cforest.fetch();
}
protected void score0(double data[], double preds[], int tidx) { _forest.scoreTree(data, preds, tidx); }
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/ExactSplitPoints.java
|
package hex.tree;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.util.IcedDouble;
import water.util.IcedHashSet;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
/**
* Finds exact split points for low-cardinality columns.
*/
public class ExactSplitPoints extends MRTask<ExactSplitPoints> {
private final int _maxCardinality;
private final IcedHashSet<IcedDouble>[] _values;
static double[][] splitPoints(Frame trainFr, int maxCardinality) {
final Frame fr = new Frame();
final int[] frToTrain = new int[trainFr.numCols()];
for (int i = 0; i < trainFr.numCols(); ++i) {
if (!trainFr.vec(i).isNumeric() || trainFr.vec(i).isCategorical() ||
trainFr.vec(i).isBinary() || trainFr.vec(i).isConst()) {
continue;
}
frToTrain[fr.numCols()] = i;
fr.add(trainFr.name(i), trainFr.vec(i));
}
IcedHashSet<IcedDouble>[] values = new ExactSplitPoints(maxCardinality, fr.numCols())
.doAll(fr)._values;
double[][] splitPoints = new double[trainFr.numCols()][];
for (int i = 0; i < values.length; i++) {
if (values[i] == null) {
continue;
}
double[] vals = new double[values[i].size()];
int valsSize = 0;
for (IcedDouble wrapper : values[i]) {
vals[valsSize++] = wrapper._val;
}
assert valsSize == vals.length;
Arrays.sort(vals);
assert isUniqueSequence(vals);
splitPoints[frToTrain[i]] = vals;
}
return splitPoints;
}
static boolean isUniqueSequence(double[] seq) {
if (seq.length == 1)
return true;
double lastValue = seq[0];
for (int i = 1; i < seq.length; i++) {
if (lastValue >= seq[i])
return false;
lastValue = seq[i];
}
return true;
}
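// Example (illustrative only): isUniqueSequence(new double[]{1.0, 2.0, 3.5}) returns true, while
// isUniqueSequence(new double[]{1.0, 1.0, 2.0}) returns false because the sequence is not strictly increasing.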
@SuppressWarnings("unchecked")
private ExactSplitPoints(int maxCardinality, int nCols) {
_maxCardinality = maxCardinality;
_values = new IcedHashSet[nCols];
for (int i = 0; i < _values.length; i++) {
_values[i] = new IcedHashSet<>();
}
}
@Override
public void map(Chunk[] cs) {
Set<IcedDouble> localValues = new HashSet<>(_maxCardinality);
for (int col = 0; col < cs.length; col++) {
localValues.clear();
if (_values[col] == null)
continue;
Chunk c = cs[col];
IcedDouble wrapper = new IcedDouble();
for (int i = 0; i < c._len; i++) {
double num = c.atd(i);
if (Double.isNaN(num))
continue;
if (wrapper._val == num)
continue;
wrapper.setVal(num);
if (localValues.add(wrapper)) {
if (localValues.size() > _maxCardinality) {
_values[col] = null;
break;
}
wrapper = new IcedDouble();
}
}
merge(col, localValues);
}
}
private void merge(int col, Collection<IcedDouble> localValues) {
final Set<IcedDouble> allValues = _values[col];
if (allValues == null)
return;
allValues.addAll(localValues);
if (allValues.size() > _maxCardinality) {
_values[col] = null;
}
}
@Override
public void reduce(ExactSplitPoints mrt) {
if (mrt._values != _values) { // merging with a result from a different node
for (int col = 0; col < _values.length; col++) {
if (_values[col] == null || mrt._values[col] == null)
_values[col] = null;
else {
merge(col, mrt._values[col]);
}
}
} // else: nothing to do on the same node
}
@Override
protected void postGlobal() {
for (int col = 0; col < _values.length; col++) {
if (_values[col] != null && _values[col].size() > _maxCardinality) {
_values[col] = null;
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/FriedmanPopescusH.java
|
package hex.tree;
import hex.genmodel.algos.tree.SharedTreeNode;
import hex.genmodel.algos.tree.SharedTreeSubgraph;
import water.DKV;
import water.Key;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Rapids;
import water.rapids.Val;
import water.util.ArrayUtils;
import water.util.VecUtils;
import java.util.*;
/**
* Calculates Friedman and Popescu's H statistics, in order to test for the presence of an interaction between specified variables in h2o gbm and xgb models.
* H varies from 0 to 1. It will have a value of 0 if the model exhibits no interaction between specified variables and a correspondingly larger value for a
* stronger interaction effect between them. NaN is returned if a computation is spoiled by weak main effects and rounding errors.
* This statistic can be calculated only for numerical variables. Missing values are supported.
*
* See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", *Ann. Appl. Stat.*
* **2**:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, section 8.1.
*
* Reference implementation: https://pypi.org/project/sklearn-gbmi/
* */
public class FriedmanPopescusH {
public static double h(Frame frame, String[] vars, double learnRate, SharedTreeSubgraph[][] sharedTreeSubgraphs) {
Frame filteredFrame = filterFrame(frame, vars);
int[] modelIds = getModelIds(frame.names(), vars);
Map<String, Frame> fValues = new HashMap<>();
int numCols = filteredFrame.numCols();
int[] colIds = new int[numCols];
for (int i = 0; i < numCols; i++) {
colIds[i] = i;
}
for (int i = numCols; i > 0; i--) {
List<int[]> currCombinations = combinations(colIds, i);
for (int j = 0; j < currCombinations.size(); j++) {
int[] currCombination = currCombinations.get(j);
String[] cols = getCurrCombinationCols(currCombination, vars);
int[] currModelIds = getCurrentCombinationModelIds(currCombination, modelIds);
fValues.put(Arrays.toString(currCombination), computeFValues(currModelIds, filteredFrame, cols, learnRate, sharedTreeSubgraphs));
}
}
return computeHValue(fValues, filteredFrame, colIds);
}
static int[] getCurrentCombinationModelIds(int[] currCombination, int[] modelIds) {
int[] currCombinationCols = new int[currCombination.length];
for (int i = 0; i < currCombination.length; i++) {
currCombinationCols[i] = modelIds[currCombination[i]];
}
return currCombinationCols;
}
static double computeHValue(Map<String, Frame> fValues, Frame filteredFrame, int[] inds) {
if (filteredFrame._key == null)
filteredFrame._key = Key.make();
Frame uniqueWithCounts = uniqueRowsWithCounts(filteredFrame);
long uniqHeight = uniqueWithCounts.numRows();
Vec numerEls = Vec.makeZero(uniqHeight);
Vec denomEls = Vec.makeZero(uniqHeight);
for (long i = 0; i < uniqHeight; i++) {
int sign = 1;
for (int n = inds.length; n > 0; n--) {
List<int[]> currCombinations = combinations(inds, n);
for (int j = 0; j < currCombinations.size(); j++) {
double fValue = findFValue(i, (int[])currCombinations.toArray()[j], fValues.get(Arrays.toString((int[])currCombinations.toArray()[j])), filteredFrame);
numerEls.set(i, numerEls.at(i) + (float)sign * (float)fValue);
}
sign *= -1;
}
denomEls.set(i, (float)fValues.get(Arrays.toString(inds)).vec(0).at(i));
}
double numer = new Transform(2).doAll(numerEls, uniqueWithCounts.vec("nrow")).result;
double denom = new Transform(2).doAll(denomEls, uniqueWithCounts.vec("nrow")).result;
return numer < denom ? Math.sqrt(numer/denom) : Double.NaN;
}
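// Illustrative sketch (not part of the original source): with equal row weights, Friedman and Popescu's
// statistic for a pair of features j,k reduces to
//   H^2 = sum_i [F_jk(x_i) - F_j(x_i) - F_k(x_i)]^2 / sum_i F_jk(x_i)^2,
// which is what computeHValue accumulates above, except that it weights every unique row by its
// count ("nrow") and alternates signs over all sub-combinations of the selected features. The helper
// below is a simplified, unweighted restatement for exactly two features; its name and arguments are hypothetical.
static double hStatisticSketch(double[] fJoint, double[] fJ, double[] fK) {
double numer = 0, denom = 0;
for (int i = 0; i < fJoint.length; i++) {
double d = fJoint[i] - fJ[i] - fK[i]; // residual of the joint partial dependence after removing main effects
numer += d * d;
denom += fJoint[i] * fJoint[i];
}
return numer < denom ? Math.sqrt(numer / denom) : Double.NaN; // mirror the numer < denom guard used above
}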
private static class Transform extends MRTask<Transform> {
double result;
int power;
Transform(int power) {
this.power = power;
}
@Override public void map( Chunk[] bvs ) {
result = 0;
int len = bvs[0]._len;
for (int i = 0; i < len; i++) {
result += Math.pow(bvs[0].atd(i), 2) * bvs[1].atd(i);
}
}
@Override public void reduce(Transform mrt ) {
result += mrt.result;
}
}
static double[] getValueToFindFValueFor(int[] currCombination, Frame filteredFrame, long i) {
int combinationLength = currCombination.length;
double[] value = new double[combinationLength];
for (int j = 0; j < combinationLength; j++) {
value[j] = filteredFrame.vec(currCombination[j]).at(i);
}
return value;
}
static double findFValue(long i, int[] currCombination, Frame currFValues, Frame filteredFrame) {
double[] valueToFindFValueFor = getValueToFindFValueFor(currCombination, filteredFrame, i);
String[] currNames = getCurrCombinationNames(currCombination, filteredFrame.names());
FindFValue findFValueTask = new FindFValue(valueToFindFValueFor, currNames, currFValues._names, 1e-5);
Double result = findFValueTask.doAll(currFValues).result;
if (null == result) {
throw new RuntimeException("FValue was not found!" + Arrays.toString(currCombination) + "value: " + Arrays.toString(valueToFindFValueFor));
} else {
return result.doubleValue();
}
}
static class FindFValue extends MRTask<FindFValue> {
double[] valueToFindFValueFor;
String[] currNames;
String[] currFValuesNames;
double eps;
public Double result;
long resultIndex = Long.MAX_VALUE;
FindFValue(double[] valueToFindFValueFor, String[] currNames, String[] currFValuesNames, double eps) {
this.valueToFindFValueFor = valueToFindFValueFor;
this.currNames = currNames;
this.currFValuesNames = currFValuesNames;
this.eps = eps;
}
@Override public void map(Chunk[] cs) {
int count = 0;
if (cs[0].start() > resultIndex) return;
for (int iRow = 0; iRow < cs[0].len(); iRow++) {
for (int k = 0; k < valueToFindFValueFor.length; k++) {
int id = ArrayUtils.find(currFValuesNames, currNames[k]);
if (Double.isNaN(valueToFindFValueFor[k]) && Double.isNaN(cs[id].atd(iRow))){
count++;
}
if (Math.abs(valueToFindFValueFor[k] - cs[id].atd(iRow)) < eps) {
count++;
}
}
if (count == valueToFindFValueFor.length) {
if (cs[0].start()+iRow < resultIndex) {
result = cs[0].atd(iRow);
resultIndex = cs[0].start()+iRow;
}
break;
} else {
count = 0;
}
}
}
@Override
public void reduce(FindFValue mrt) {
if (null != mrt && null != mrt.result) {
if (this.resultIndex > mrt.resultIndex) {
this.result = mrt.result;
this.resultIndex = mrt.resultIndex;
}
}
}
}
static String[] getCurrCombinationNames(int[] currCombination, String[] names) {
String[] currNames = new String[currCombination.length];
for (int j = 0; j < currCombination.length; j++) {
currNames[j] = names[currCombination[j]];
}
return currNames;
}
static String[] getCurrCombinationCols(int[] currCombination, String[] vars) {
String[] currCombinationCols = new String[currCombination.length];
for (int i = 0; i < currCombination.length; i++) {
currCombinationCols[i] = vars[currCombination[i]];
}
return currCombinationCols;
}
static int findFirstNumericalColumn(Frame frame) {
for (int i = 0; i < frame.names().length; i++) {
if (frame.vec(i).isNumeric())
return i;
}
return -1;
}
static Frame uniqueRowsWithCounts(Frame frame) {
DKV.put(frame);
StringBuilder sb = new StringBuilder("(GB ");
String[] cols = frame.names();
sb.append(frame._key.toString());
sb.append(" [");
for (int i = 0; i < cols.length; i++) {
if (i != 0) sb.append(",");
sb.append(i);
}
sb.append("] ");
int i = findFirstNumericalColumn(frame);
if (i == -1) {
frame.add("nrow", Vec.makeOne(frame.numRows()));
return frame;
}
sb.append(" nrow ").append(i).append(" \"all\")");
Val val = Rapids.exec(sb.toString());
DKV.remove(frame._key);
return val.getFrame();
}
static Frame computeFValues(int[] modelIds, Frame filteredFrame, String[] cols, double learnRate, SharedTreeSubgraph[][] sharedTreeSubgraphs) {
// filter frame -> only curr combination cols will be used
filteredFrame = filterFrame(filteredFrame, cols);
filteredFrame = new Frame(Key.make(), filteredFrame.names(), filteredFrame.vecs());
Frame uniqueWithCounts = uniqueRowsWithCounts(filteredFrame);
Frame uncenteredFvalues = new Frame(partialDependence(modelIds, uniqueWithCounts, learnRate, sharedTreeSubgraphs).vec(0));
VecUtils.DotProduct multiply = new VecUtils.DotProduct().doAll(uniqueWithCounts.vec("nrow"), uncenteredFvalues.vec(0));
final double meanUncenteredFValue = multiply.result / filteredFrame.numRows();
try (Vec.Writer uncenteredFValuesWriter = uncenteredFvalues.vec(0).open()) {
Vec.Reader uncenteredFValuesReader = uncenteredFvalues.vec(0).new Reader();
for (int i = 0; i < uncenteredFvalues.numRows(); i++) {
uncenteredFValuesWriter.set(i, uncenteredFValuesReader.at(i) - meanUncenteredFValue);
}
}
return uncenteredFvalues.add(uniqueWithCounts);
}
static Frame partialDependence(int[] modelIds, Frame uniqueWithCounts, double learnRate, SharedTreeSubgraph[][] sharedTreeSubgraphs) {
Frame result = new Frame();
int nclasses = sharedTreeSubgraphs[0].length;
int ntrees = sharedTreeSubgraphs.length;
for (int treeClass = 0; treeClass < nclasses; treeClass++) {
Vec pdp = Vec.makeZero(uniqueWithCounts.numRows());
for (int i = 0; i < ntrees; i++) {
try(Vec.Writer pdpWriter = pdp.open()) {
SharedTreeSubgraph sharedTreeSubgraph = sharedTreeSubgraphs[i][treeClass];
Vec currTreePdp = partialDependenceTree(sharedTreeSubgraph, modelIds, learnRate, uniqueWithCounts);
Vec.Reader currTreePdpReader = currTreePdp.new Reader();
Vec.Reader pdpReader = pdp.new Reader();
for (long j = 0; j < uniqueWithCounts.numRows(); j++) {
pdpWriter.set(j, pdpReader.at(j) + currTreePdpReader.at(j));
}
}
}
result.add("pdp_C" + treeClass , pdp);
}
return result;
}
public static double[] add(double[] first, double[] second) {
int length = Math.min(first.length, second.length);
double[] result = new double[length];
for (int i = 0; i < length; i++) {
result[i] = first[i] + second[i];
}
return result;
}
static Frame filterFrame(Frame frame, String[] cols) {
Frame frame1 = new Frame();
frame1.add(cols, frame.vecs(cols));
return frame1;
}
static int[] getModelIds(String[] frameNames, String[] vars) {
int[] modelIds = new int[vars.length];
Arrays.fill(modelIds, -1);
for (int i = 0; i < vars.length; i++) {
for (int j = 0; j < frameNames.length; j++) {
if (vars[i].equals(frameNames[j])) {
modelIds[i] = j;
}
}
if (modelIds[i] == -1) {
throw new RuntimeException("Column " + vars[i] + " is not present in the input frame!");
}
}
return modelIds;
}
static List<int[]> combinations(int[] vals, int combinationSize) {
List<int[]> overallResult = new ArrayList<>();
combinations(vals, combinationSize, 0, new int[combinationSize], overallResult);
return overallResult;
}
private static void combinations(int[] arr, int len, int startPosition, int[] result, List<int[]> overallResult) {
if (len == 0) {
overallResult.add(result.clone());
return;
}
for (int i = startPosition; i <= arr.length-len; i++){
result[result.length - len] = arr[i];
combinations(arr, len - 1, i + 1, result, overallResult);
}
}
/**
* For each row of the input grid a tree traversal is performed.
* Each traversal starts from the root with weight 1.0.
*
* At each non-terminal node that splits on a target variable either
* the left child or the right child is visited based on the feature
* value of the current sample and the weight is not modified.
* At each non-terminal node that splits on a complementary feature
* both children are visited and the weight is multiplied by the fraction
* of training samples which went to each child.
*
* At each terminal node the value of the node is multiplied by the
* current weight (weights sum to 1 for all visited terminal nodes).
*
* @param tree tree to traverse
* @param targetFeature the set of target features for which the partial dependence should be evaluated
* @param learnRate constant scaling factor for the leaf predictions
* @param grid the grid points on which the partial dependence should be evaluated
*
* @return Vec with the resulting partial dependence values for each point of the input grid
*/
static Vec partialDependenceTree(SharedTreeSubgraph tree, int[] targetFeature, double learnRate, Frame grid) {
Vec outVec = Vec.makeZero(grid.numRows());
int stackSize;
SharedTreeNode[] nodeStackAr = new SharedTreeNode[tree.nodesArray.size() * 2];
Double[] weightStackAr = new Double[tree.nodesArray.size() * 2];
Arrays.fill(weightStackAr, 1.0);
double totalWeight;
SharedTreeNode currNode;
double currWeight;
try(Vec.Writer outVecWriter = outVec.open()) {
Vec.Reader gridReaders[] = new Vec.Reader[grid.numCols()];
for (int i = 0; i < grid.numCols(); i++) {
gridReaders[i] = grid.vec(i).new Reader();
}
for (long i = 0; i < grid.numRows(); i++) {
stackSize = 1;
nodeStackAr[0] = tree.rootNode;
weightStackAr[0] = 1.0;
totalWeight = 0.0;
double result = 0;
while (stackSize > 0) {
// get top node on stack
stackSize -= 1;
currNode = nodeStackAr[stackSize];
if (currNode.isLeaf()) {
result += weightStackAr[stackSize] * currNode.getPredValue() * learnRate;
totalWeight += weightStackAr[stackSize];
} else {
// non-terminal node:
int featureId = ArrayUtils.find(targetFeature, currNode.getColId());
if (featureId >= 0) {
// split feature in target set
// push left or right child on stack
if (gridReaders[featureId].at(i) <= currNode.getSplitValue()) {
// left
nodeStackAr[stackSize] = currNode.getLeftChild();
} else {
nodeStackAr[stackSize] = currNode.getRightChild();
}
stackSize += 1;
} else {
double left_sample_frac;
// split feature complement set
// push both children onto stack
currWeight = weightStackAr[stackSize];
// push left
nodeStackAr[stackSize] = currNode.getLeftChild();
left_sample_frac = currNode.getLeftChild().getWeight() / currNode.getWeight();
weightStackAr[stackSize] = currWeight * left_sample_frac;
stackSize++;
// push right
nodeStackAr[stackSize] = currNode.getRightChild();
weightStackAr[stackSize] = currWeight * (1.0 - left_sample_frac);
stackSize++;
}
}
}
outVecWriter.set(i, result);
if (!(0.999 < totalWeight && totalWeight < 1.001)) {
throw new RuntimeException("Total weight should be 1.0 but was " + totalWeight);
}
}
}
return outVec;
}
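// Hypothetical usage sketch (identifiers are illustrative, not taken from the original source):
//   SharedTreeSubgraph tree = ...;        // one tree of the ensemble
//   Frame grid = ...;                     // unique rows of the target features
//   int[] targetFeature = {0, 2};         // model column ids the partial dependence is evaluated over
//   Vec pdp = partialDependenceTree(tree, targetFeature, 0.1 /* learn rate */, grid);
// Each output element is the weighted sum of leaf values reachable from the corresponding grid row,
// where splits on non-target features send fractional weight down both children.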
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/GlobalInteractionConstraints.java
|
package hex.tree;
import water.Iced;
import water.util.ArrayUtils;
import water.util.IcedHashMap;
import water.util.IcedHashSet;
import water.util.IcedInt;
import java.util.*;
/**
* Class to process global interaction constraints information and use this information to
* make split decisions in a tree.
*/
public class GlobalInteractionConstraints extends Iced<GlobalInteractionConstraints> {
// Map where the key is a column index and the value is a set of column indices which can interact with the key column
private IcedHashMap<IcedInt, IcedHashSet<IcedInt>> allowedInteractionMap;
public GlobalInteractionConstraints(String[][] userFeatureInteractions, String[] treeFeatureNames){
this.allowedInteractionMap = new IcedHashMap<>();
parseInteractionsIndices(userFeatureInteractions, treeFeatureNames);
// There should always be at least one column index in the map as a key
assert this.allowedInteractionMap != null;
assert this.allowedInteractionMap.size() != 0;
}
/**
* Parse the input interaction constraints String array into a Map for easy use during split decisions.
* @param userInteractionConstraints input interaction constraints String array
* @param columnNames column names from the training dataset, used to match indices correctly
*/
private void parseInteractionsIndices(String[][] userInteractionConstraints, String[] columnNames){
IcedHashSet<IcedInt> interactions;
for (String[] list : userInteractionConstraints) {
interactions = new IcedHashSet<>();
for (int i = 0; i < list.length; i++) {
String item = list[i];
// first try to find the exact column name
int start = ArrayUtils.findWithPrefix(columnNames, item);
// otherwise find the start index and add indices until the end index
assert start != -1 : "Column name should be among the defined column names.";
if (start > -1) { // find exact position - no encoding
interactions.add(new IcedInt(start));
} else { // find the first occurrence of the name with a prefix - encoded column
start = - start - 2;
assert columnNames[start].startsWith(item): "The column name should be found correctly.";
// iterate until find all encoding indices
int end = start;
while (end < columnNames.length && columnNames[end].startsWith(item)) {
interactions.add(new IcedInt(end));
end++;
}
}
}
addInteractionsSetToMap(interactions);
}
}
private void addInteractionsSetToMap(IcedHashSet<IcedInt> interactions){
for (IcedInt index : interactions) {
if (!allowedInteractionMap.containsKey(index)) {
allowedInteractionMap.put(index, interactions);
} else {
IcedHashSet<IcedInt> set = new IcedHashSet<>();
set.addAll(allowedInteractionMap.get(index));
set.addAll(interactions);
allowedInteractionMap.put(index, set);
}
}
}
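// Worked example (illustrative only): with column names {"A","B","C"} and user constraints
// {{"A","B"}, {"B","C"}}, parseInteractionsIndices builds the index sets {0,1} and {1,2};
// addInteractionsSetToMap then merges them per key, yielding
//   0 -> {0,1},  1 -> {0,1,2},  2 -> {1,2}
// so column B (index 1) is allowed to interact with all three columns.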
public IcedHashSet<IcedInt> getAllowedInteractionForIndex(int columnIndex){
return allowedInteractionMap.get(new IcedInt(columnIndex));
}
public boolean allowedInteractionContainsColumn(int columnIndex){
return allowedInteractionMap.containsKey(new IcedInt(columnIndex));
}
public IcedHashSet<IcedInt> getAllAllowedColumnIndices(){
IcedHashSet<IcedInt> indices = new IcedHashSet<>();
indices.addAll(allowedInteractionMap.keySet());
return indices;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/GlobalQuantilesCalc.java
|
package hex.tree;
import hex.quantile.Quantile;
import hex.quantile.QuantileModel;
import water.DKV;
import water.Job;
import water.Key;
import water.fvec.Frame;
import water.util.ArrayUtils;
/**
* Helper class for calculating split points used when histogram type is "QuantilesGlobal"
*/
class GlobalQuantilesCalc {
/**
* Calculates split points for histogram type = QuantilesGlobal.
*
* @param trainFr (adapted) training frame
* @param weightsColumn name of column containing observation weights (optional)
* @param priorSplitPoints optional pre-existing split points for some columns
* @param N number of bins
* @param nbins_top_level number of top-level bins
* @return array of split points for each feature column of the input training frame
*/
static double[][] splitPoints(Frame trainFr, String weightsColumn,
double[][] priorSplitPoints, final int N, int nbins_top_level) {
final int[] frToTrain = new int[trainFr.numCols()];
final Frame fr = collectColumnsForQuantile(trainFr, weightsColumn, priorSplitPoints, frToTrain);
final double[][] splitPoints = new double[trainFr.numCols()][];
if (fr.numCols() == 0 || weightsColumn != null && fr.numCols() == 1 && weightsColumn.equals(fr.name(0))) {
return splitPoints;
}
Key<Frame> tmpFrameKey = Key.make();
DKV.put(tmpFrameKey, fr);
QuantileModel qm = null;
try {
QuantileModel.QuantileParameters p = new QuantileModel.QuantileParameters();
p._train = tmpFrameKey;
p._weights_column = weightsColumn;
p._combine_method = QuantileModel.CombineMethod.INTERPOLATE;
p._probs = new double[N];
for (int i = 0; i < N; ++i) //compute quantiles such that they span from (inclusive) min...maxEx (exclusive)
p._probs[i] = i * 1. / N;
Job<QuantileModel> job = new Quantile(p).trainModel();
qm = job.get();
job.remove();
double[][] origQuantiles = qm._output._quantiles;
//pad the quantiles until we have nbins_top_level bins
for (int q = 0; q < origQuantiles.length; q++) {
if (origQuantiles[q].length <= 1) {
continue;
}
final int i = frToTrain[q];
// make the quantiles split points unique
splitPoints[i] = ArrayUtils.makeUniqueAndLimitToRange(origQuantiles[q], fr.vec(q).min(), fr.vec(q).max());
if (splitPoints[i].length <= 1) //not enough split points left - fall back to regular binning
splitPoints[i] = null;
else
splitPoints[i] = ArrayUtils.padUniformly(splitPoints[i], nbins_top_level);
assert splitPoints[i] == null || splitPoints[i].length > 1;
}
return splitPoints;
} finally {
DKV.remove(tmpFrameKey);
if (qm != null) {
qm.delete();
}
}
}
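// Example (illustrative only): with N = 4 the requested quantile probabilities are {0.0, 0.25, 0.5, 0.75};
// the resulting per-column quantiles are then de-duplicated, limited to the column's observed range and
// padded uniformly up to nbins_top_level split points (columns left with a single point fall back to regular binning).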
static Frame collectColumnsForQuantile(Frame trainFr, String weightsColumn, double[][] priorSplitPoints,
int[] frToTrainMap) {
final Frame fr = new Frame();
final int weightsIdx = trainFr.find(weightsColumn);
for (int i = 0; i < trainFr.numCols(); ++i) {
if (i != weightsIdx) {
if (priorSplitPoints != null && priorSplitPoints[i] != null) {
continue;
}
if (!trainFr.vec(i).isNumeric() || trainFr.vec(i).isCategorical() ||
trainFr.vec(i).isBinary() || trainFr.vec(i).isConst()) {
continue;
}
}
frToTrainMap[fr.numCols()] = i;
fr.add(trainFr.name(i), trainFr.vec(i));
}
return fr;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/GuidedSplitPoints.java
|
package hex.tree;
import water.util.ArrayUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* Implements a method for finding new histogram bin split-points based on the result of previous binning.
* Idea:
* We take the non-empty bins and look at the squared error they have accumulated. Based on the target bin number, we discard
* the empty bins and use the freed-up space to refine the non-empty bins. Splitting of non-empty bins
* is guided by Squared Error accumulated in the bin. Bins with higher SE are split more than the bins with lower SE.
* Sub-bins (bins created from a single original bin) are refined uniformly.
*
* If uniform splitting fails in this iteration (= the distribution of values is significantly skewed), next iteration
* will attempt to correct the issue by repeating the procedure with new bins (we are recursively refining the promising
* bins as we get deeper in the tree).
*/
public class GuidedSplitPoints {
static final double LOW_DENSITY_THRESHOLD = 0.2;
static boolean isApplicableTo(DHistogram h) {
return h._vals != null && // observations were not yet binned, we don't have the data to guide the splitting
h._isInt != 2 && // categorical columns have a specific handling
!h._intOpt; // integer optimized columns have a single value per bin, no point in refining such bins
}
static double[] makeSplitPoints(DHistogram h, final int targetNBins, final double min, final double maxEx) {
// Collect bins to consider for refining
final List<BinDescriptor> bins = extractNonEmptyBins(h);
// Budget is given by target number of bins in the new layer, we keep all non-empty bins
final int totalBudget = targetNBins - bins.size() - 2; // how many bins we have to allocate (save 2 spots for min/max)
if (bins.isEmpty() || totalBudget <= 0)
return null;
int budgetLeft = totalBudget; // how many bins do we have left to redistribute
double totalSE = 0;
for (BinDescriptor bin : bins) {
totalSE += bin._se;
}
// For each bin find out how many new bins we can split it into
int[] newBinCounts = new int[bins.size()];
Collections.sort(bins); // sort by SE descending
for (int b = 0; budgetLeft > 0 && b < newBinCounts.length; b++) {
BinDescriptor bin = bins.get(b);
// distribute the budget proportionally to SE
int newBins = Math.min((int) Math.ceil(totalBudget * bin._se / totalSE), budgetLeft);
budgetLeft -= newBins;
newBinCounts[b] = newBins;
}
// Define new split-points
final double[] customSplitPoints = new double[targetNBins - budgetLeft];
int i = 0;
for (int b = 0; b < newBinCounts.length; b++) {
BinDescriptor bin = bins.get(b);
customSplitPoints[i++] = bin._start;
double stepSize = (bin._end - bin._start) / (1 + newBinCounts[b]);
for (int s = 0; s < newBinCounts[b]; s++) {
customSplitPoints[i] = customSplitPoints[i - 1] + stepSize;
i++;
}
}
customSplitPoints[i++] = min; // This is based on QuantilesGlobal - DHistogram assumes min/max will be among the split-points
customSplitPoints[i++] = h._maxIn;
assert i == customSplitPoints.length;
Arrays.sort(customSplitPoints);
return ArrayUtils.makeUniqueAndLimitToRange(customSplitPoints, min, maxEx);
}
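// Worked example (illustrative only): suppose targetNBins leaves totalBudget = 6 extra bins and there are
// two non-empty bins with SE 3.0 and 1.0 (totalSE = 4.0). After sorting by SE, the first bin receives
// min(ceil(6 * 3.0/4.0), 6) = 5 new sub-bins and the second min(ceil(6 * 1.0/4.0), remaining budget 1) = 1,
// so the higher-error bin is refined much more finely; each selected bin is then subdivided uniformly.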
static List<BinDescriptor> extractNonEmptyBins(DHistogram h) {
final int nonEmptyBins = h.nonEmptyBins();
final List<BinDescriptor> bins = new ArrayList<>(nonEmptyBins);
for (int i = 0; i < h.nbins(); i++) {
double weight = h.w(i);
if (weight > 0) {
BinDescriptor bin = BinDescriptor.fromBin(h, i);
bins.add(bin);
}
}
return bins;
}
static class BinDescriptor implements Comparable<BinDescriptor> {
final double _start;
final double _end;
final double _se;
final double _weight;
public BinDescriptor(double start, double end, double se, double weight) {
_start = start;
_end = end;
_se = Math.max(se, 0); // rounding errors can cause SE to be negative
_weight = weight;
}
@Override
public int compareTo(BinDescriptor o) {
return -Double.compare(_se, o._se);
}
static BinDescriptor fromBin(DHistogram h, int i) {
double w = h.w(i);
double wY = h.wY(i);
double wYY = h.wYY(i);
double se = w != 0 ? wYY - wY * wY / w : 0;
return new BinDescriptor(h.binAt(i), h.binAt(i + 1), se, w);
}
// IntelliJ generated //
@Override
public String toString() {
return "BinDescriptor{" +
"_start=" + _start +
", _end=" + _end +
", _se=" + _se +
", _weight=" + _weight +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
BinDescriptor that = (BinDescriptor) o;
if (Double.compare(that._start, _start) != 0) return false;
if (Double.compare(that._end, _end) != 0) return false;
if (Double.compare(that._se, _se) != 0) return false;
return Double.compare(that._weight, _weight) == 0;
}
@Override
public int hashCode() {
int result;
long temp;
temp = Double.doubleToLongBits(_start);
result = (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_end);
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_se);
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(_weight);
result = 31 * result + (int) (temp ^ (temp >>> 32));
return result;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/MojoUtils.java
|
package hex.tree;
import hex.genmodel.algos.tree.SharedTreeMojoModel;
public abstract class MojoUtils {
public static CompressedTree[][] extractCompressedTrees(SharedTreeMojoModel mojo) {
final int ntrees = mojo.getNTreeGroups();
final int ntreesPerGroup = mojo.getNTreesPerGroup();
final int nclasses = mojo.nclasses();
CompressedTree[][] trees = new CompressedTree[ntrees][];
for (int t = 0; t < ntrees; t++) {
CompressedTree[] tc = new CompressedTree[nclasses];
for (int c = 0; c < ntreesPerGroup; c++) {
tc[c] = new CompressedTree(mojo.treeBytes(t, c), -1L, t, c);
}
trees[t] = tc;
}
return trees;
}
public static boolean isUsingBinomialOpt(SharedTreeMojoModel mojo, CompressedTree[][] trees) {
if (mojo.nclasses() != 2) {
return false;
}
for (CompressedTree[] group : trees) {
if (group.length != 2 || group[1] != null)
return false;
}
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/PathResult.java
|
package hex.tree;
class PathResult {
StringBuilder path;
int nodeId;
PathResult(int nodeId) {
path = new StringBuilder();
this.nodeId = nodeId;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/ReconstructTreeState.java
|
package hex.tree;
import java.util.Arrays;
import java.util.Random;
import water.fvec.C0DChunk;
import water.fvec.Chunk;
/**
* Computes OOB scores over all trees and rows
* and reconstructs the <code>ntree_id, oobt</code> fields in the given frame.
*
* <p>It prepares per-tree votes and also marks
* rows which were considered out-of-bag.</p>
*/
/* package */ public class ReconstructTreeState extends DTreeScorer<ReconstructTreeState> {
/* @IN */ final protected double _rate;
/* @IN */ final protected boolean _OOBEnabled;
public ReconstructTreeState(int ncols, int nclass, SharedTree st, double rate, CompressedForest cforest, boolean oob) {
super(ncols,nclass,st,cforest);
_rate = rate;
_OOBEnabled = oob;
}
@Override public void map(Chunk[] chks) {
double[] data = new double[_ncols];
double [] preds = new double[_nclass+1];
int ntrees = ntrees();
Chunk weight = _st.hasWeightCol() ? _st.chk_weight(chks) : new C0DChunk(1, chks[0]._len);
Chunk oobt = _st.chk_oobt(chks);
Chunk resp = _st.chk_resp(chks);
for( int tidx=0; tidx<ntrees; tidx++) { // tree
// OOB RNG for this tree
Random rng = rngForTree(_forest._trees[tidx], oobt.cidx());
for (int row = 0; row< oobt._len; row++) {
double w = weight.atd(row);
if (w==0) continue;
double y = resp.atd(row);
if (Double.isNaN(y)) continue;
boolean rowIsOOB = _OOBEnabled && rng.nextFloat() >= _rate;
if( !_OOBEnabled || rowIsOOB) {
// Make a prediction
for (int i=0;i<_ncols;i++) data[i] = chks[i].atd(row);
Arrays.fill(preds, 0);
score0(data, preds, tidx);
if (_nclass==1) preds[1]=preds[0]; // Only for regression, keep consistency
// Write tree predictions
for (int c=0;c<_nclass;c++) { // over all class
double prediction = preds[1+c];
if (preds[1+c] != 0) {
Chunk ctree = _st.chk_tree(chks, c);
double wcount = oobt.atd(row);
if (_OOBEnabled && _nclass >= 2)
ctree.set(row, (float) (ctree.atd(row)*wcount + prediction)/(wcount+w)); //store avg prediction
else
ctree.set(row, (float) (ctree.atd(row) + prediction));
}
}
// Mark oob row and store number of trees voting for this row
if (rowIsOOB)
oobt.set(row, oobt.atd(row)+w);
}
}
}
_st = null;
}
private Random rngForTree(CompressedTree[] ts, int cidx) {
return ts[0].rngForChunk(cidx); // k-class set of trees shares the same random number
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/Sample.java
|
package hex.tree;
import water.MRTask;
import water.fvec.C4VolatileChunk;
import water.fvec.Chunk;
import water.util.RandomUtils;
import java.util.Random;
// Deterministic sampling
public class Sample extends MRTask<Sample> {
final long _seed;
final int _useMarker;
final int _ignoreMarker;
final double _rate;
final double[] _rate_per_class;
public Sample(DTree tree, double rate, double[] rate_per_class) {
this(tree._seed, rate, rate_per_class, 0, ScoreBuildHistogram.OUT_OF_BAG);
}
public Sample(long seed, double rate, double[] rate_per_class, int useMarker,int ignoreMarker) {
_seed = seed;
_useMarker = useMarker;
_ignoreMarker = ignoreMarker;
_rate = rate;
_rate_per_class = rate_per_class;
}
@Override
public void map(Chunk nids, Chunk ys) {
C4VolatileChunk nids2 = (C4VolatileChunk) nids;
Random rand = RandomUtils.getRNG(_seed);
int [] is = nids2.getValues();
for (int row = 0; row < nids._len; row++) {
boolean skip = ys.isNA(row);
if (!skip) {
double rate = _rate_per_class==null ? _rate : _rate_per_class[(int)ys.at8(row)];
rand.setSeed(_seed + row + nids.start()); //seeding is independent of chunking
skip = rand.nextFloat() >= rate; //float is good enough, half as much cost
}
if (skip) is[row] = _ignoreMarker; // Flag row as being ignored by sampling
else if (_useMarker != 0) is[row] = _useMarker;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/Score.java
|
package hex.tree;
import hex.*;
import hex.genmodel.GenModel;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.gbm.GBMModel;
import hex.tree.uplift.UpliftDRFModel;
import org.apache.log4j.Logger;
import water.Iced;
import water.Key;
import water.fvec.C0DChunk;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.udf.CFuncRef;
/** Score the tree columns, and produce a confusion matrix and AUC
*/
public class Score extends CMetricScoringTask<Score> {
private static final Logger LOG = Logger.getLogger(Score.class);
final SharedTree _bldr;
final boolean _is_train; // Scoring on pre-scored training data vs full-score data
final boolean _oob; // Computed on OOB
final Key<Vec> _kresp; // Response vector key (might be either train or validation)
final ModelCategory _mcat; // Model category (Binomial, Regression, etc)
final boolean _computeGainsLift;
final ScoreIncInfo _sii; // Incremental scoring (on a validation dataset), null indicates full scoring
final Frame _preds; // Prediction cache (typically not too many Vecs => it is not too costly to embed the object in MRTask)
final ScoreExtension _ext; // Optional extension to customize scoring (eg. for Uplift)
/** Output parameter: Metric builder */
ModelMetrics.MetricBuilder _mb;
/** Compute ModelMetrics on the testing dataset.
* It expects an already adapted validation dataset which is adapted to the model
* and contains a response which is adapted to the confusion matrix domain.
*/
public Score(SharedTree bldr, boolean is_train, boolean oob, Vec kresp, ModelCategory mcat, boolean computeGainsLift, Frame preds, CFuncRef customMetricFunc) {
this(bldr, is_train, null, oob, kresp, mcat, computeGainsLift, preds, customMetricFunc);
}
public Score(SharedTree bldr, ScoreIncInfo sii, boolean oob, Vec kresp, ModelCategory mcat, boolean computeGainsLift, Frame preds, CFuncRef customMetricFunc) {
this(bldr, false, sii, oob, kresp, mcat, computeGainsLift, preds, customMetricFunc);
}
private Score(SharedTree bldr, boolean is_train, ScoreIncInfo sii, boolean oob, Vec kresp, ModelCategory mcat, boolean computeGainsLift, Frame preds, CFuncRef customMetricFunc) {
super(customMetricFunc);
_bldr = bldr; _is_train = is_train; _sii = sii; _oob = oob; _kresp = kresp != null ? kresp._key : null; _mcat = mcat; _computeGainsLift = computeGainsLift;
_preds = computeGainsLift ? preds : null; // don't keep the prediction cache if we don't need to compute gainslift
assert _kresp != null || !_bldr.isSupervised();
assert (! _is_train) || (_sii == null);
_ext = _bldr.makeScoreExtension();
}
@Override public void map(Chunk allchks[]) {
final Chunk[] chks = getScoringChunks(allchks);
Chunk ys; // Response
if (_bldr.isSupervised()) {
ys = _bldr.chk_resp(chks);
} else if (_bldr.isResponseOptional() && _kresp != null) {
ys = _kresp.get().chunkForChunkIdx(chks[0].cidx());
} else {
ys = new C0DChunk(0, chks[0]._len); // Dummy response to simplify code
}
SharedTreeModel<?, ?, ?> m = _bldr._model;
Chunk weightsChunk = m._output.hasWeights() ? chks[m._output.weightsIdx()] : null;
Chunk offsetChunk = m._output.hasOffset() ? chks[m._output.offsetIdx()] : null;
// Because of adaptation - the validation set has at least as many
// classes as the training set (it may have more). The Confusion Matrix
// needs to be at least as big as the training set domain.
final String[] domain;
if (m._parms._distribution == DistributionFamily.quasibinomial) {
domain = ((GBMModel) m)._output._quasibinomialDomains;
} else {
domain = _kresp != null ? _kresp.get().domain() : null;
}
final int nclass = _bldr.nclasses();
_mb = m.makeMetricBuilder(domain);
// If this is a score-on-train AND DRF, then oobColIdx makes sense,
// otherwise this field is unused.
final int oobColIdx = _bldr.idx_oobt();
final double[] cdists = _mb._work; // Temp working array for class distributions
// If working a validation set, need to push thru official model scoring
// logic which requires a temp array to hold the features.
final double[] tmp = _is_train && _bldr._ntrees > 0 ? null : new double[_bldr._ncols];
// Score all Rows
final int[] responseComplements = _ext == null ? new int[0] : _ext.getResponseComplements(m);
final float[] val = new float[1 + responseComplements.length];
for( int row=0; row<ys._len; row++ ) {
if( ys.isNA(row) ) continue; // Ignore missing response vars only if it was actual NA
// Ignore rows that were never out-of-bag (= were always used to build trees so far)
if( _oob && chks[oobColIdx].atd(row)==0 ) continue;
double weight = weightsChunk!=null?weightsChunk.atd(row):1;
if (weight == 0) continue; //ignore holdout rows
double offset = offsetChunk!=null?offsetChunk.atd(row):0;
if( _is_train ) // Passed in the model-specific columns
_bldr.score2(chks, weight, offset, cdists, row); // Use the training data directly (per-row predictions already made)
else if (_sii != null)
m.score0Incremental(_sii, chks, offset, row, tmp, cdists); // Incremental scoring (only use new trees)
else // Must score "the hard way"
m.score0(chks, offset, row, tmp, cdists);
// fill tmp with training data for null model - to have proper tie breaking
if (_is_train && _bldr._ntrees == 0)
for( int i=0; i< tmp.length; i++ )
tmp[i] = chks[i].atd(row);
if (_ext != null) {
cdists[0] = _ext.getPrediction(cdists);
} else if (nclass > 2) { // Fill in prediction for multinomial
cdists[0] = GenModel.getPredictionMultinomial(cdists, m._output._priorClassDist, tmp);
} else if (nclass == 2) {
// for binomial the predicted class is not needed
// and it cannot even be returned because the threshold is calculated based on model metrics that are not known yet
// (we are just building the metrics)
cdists[0] = -1;
}
val[0] = (float)ys.atd(row);
if (responseComplements.length > 0) {
for (int i = 0; i < responseComplements.length; i++) {
val[1 + i] = (float) chks[responseComplements[i]].atd(row);
}
}
_mb.perRow(cdists, val, weight, offset, m);
if (_preds != null) {
_mb.cachePrediction(cdists, allchks, row, chks.length, m);
}
// Compute custom metric if necessary
customMetricPerRow(cdists, val, weight, offset, m);
}
}
// scoring chunks are those chunks that make the input to one of the scoring functions
private Chunk[] getScoringChunks(Chunk[] allChunks) {
if (_preds == null)
return allChunks;
Chunk[] chks = new Chunk[allChunks.length - _preds.numCols()];
System.arraycopy(allChunks, 0, chks, 0, chks.length);
return chks;
}
@Override
protected boolean modifiesVolatileVecs() {
return _sii != null || _preds != null;
}
@Override public void reduce(Score t) {
super.reduce(t);
_mb.reduce(t._mb);
}
// We need to satisfy MB invariant
@Override protected void postGlobal() {
super.postGlobal();
if(_mb != null) {
_mb.postGlobal(getComputedCustomMetric());
if (null != cFuncRef)
_mb._CMetricScoringTask = (CMetricScoringTask) this;
}
}
ModelMetrics scoreAndMakeModelMetrics(SharedTreeModel model, Frame fr, Frame adaptedFr, boolean buildTreeOneNode) {
Frame input = _preds != null ? new Frame(adaptedFr).add(_preds) : adaptedFr;
return doAll(input, buildTreeOneNode)
.makeModelMetrics(model, fr, adaptedFr, _preds);
}
// Run after the doAll scoring to convert the MetricsBuilder to a ModelMetrics
private ModelMetrics makeModelMetrics(SharedTreeModel model, Frame fr, Frame adaptedFr, Frame preds) {
ModelMetrics mm;
if ((model._output.nclasses() == 2 && _computeGainsLift) || _ext != null) {
assert preds != null : "Predictions were pre-created";
mm = _mb.makeModelMetrics(model, fr, adaptedFr, preds);
} else {
boolean calculatePreds = preds == null && model.isDistributionHuber();
// FIXME: PUBDEV-4992 we should avoid doing full scoring!
if (calculatePreds) {
LOG.warn("Going to calculate predictions from scratch. This can be expensive for large models! See PUBDEV-4992");
preds = model.score(fr);
}
mm = _mb.makeModelMetrics(model, fr, null, preds);
if (calculatePreds && (preds != null))
preds.remove();
}
return mm;
}
static Frame makePredictionCache(SharedTreeModel model, Vec templateVec, String[] domain) {
ModelMetrics.MetricBuilder mb = model.makeMetricBuilder(domain);
return mb.makePredictionCache(model, templateVec);
}
public static class ScoreIncInfo extends Iced<ScoreIncInfo> {
public final int _startTree;
public final int _workspaceColIdx;
public final int _workspaceColCnt;
public final int _predsAryOffset;
public ScoreIncInfo(int startTree, int workspaceColIdx, int workspaceColCnt, int predsAryOffset) {
_startTree = startTree;
_workspaceColIdx = workspaceColIdx;
_workspaceColCnt = workspaceColCnt;
_predsAryOffset = predsAryOffset;
}
}
public static abstract class ScoreExtension extends Iced<ScoreExtension> {
/**
* Get prediction from per class-probabilities or algo-specific data
*
* @param cdist prediction array
* @return prediction
*/
protected abstract double getPrediction(double[] cdist);
/**
* Return indices of columns that need to be extracted from Frame chunks in addition to response
* @param m instance of SharedTreeModel
* @return training frame column indices
*/
protected abstract int[] getResponseComplements(SharedTreeModel<?, ?, ?> m);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/ScoreBuildHistogram.java
|
package hex.tree;
import hex.genmodel.utils.DistributionFamily;
import water.H2O.H2OCountedCompleter;
import water.MRTask;
/** Score and Build Histogram
*
* <p>Fuse 2 conceptual passes into one:
*
* <dl>
*
* <dt>Pass 1:</dt><dd>Score a prior partially-built tree model, and make new Node assignments to
* every row. This involves pulling out the current assigned DecidedNode,
* "scoring" the row against that Node's decision criteria, and assigning the
* row to a new child UndecidedNode (and giving it an improved prediction).</dd>
*
* <dt>Pass 2:</dt><dd>Build new summary DHistograms on the new child UndecidedNodes
* every row got assigned into. Collect counts, mean, variance, min,
* max per bin, per column.</dd>
* </dl>
*
* <p>The result is a set of DHistogram arrays; one DHistogram array for each
* unique 'leaf' in the tree being histogramed in parallel. These have node
* ID's (nids) from 'leaf' to 'tree._len'. Each DHistogram array is for all
* the columns in that 'leaf'.
*
* <p>The other result is a prediction "score" for the whole dataset, based on
* the previous passes' DHistograms.
*/
public class ScoreBuildHistogram extends MRTask<ScoreBuildHistogram> {
final int _k; // Which tree
final int _ncols;// Active feature columns
final int _nbins;// Numerical columns: Number of bins in each histogram
final DTree _tree; // Read-only, shared (except at the histograms in the Nodes)
final int _leaf; // Number of active leaves (per tree)
// Histograms for every tree, split & active column
DHistogram[/*tree-relative node-id*/][/*column*/] _hcs;
final DistributionFamily _family;
final int _weightIdx;
final int _workIdx;
final int _nidIdx;
final int _treatmentIdx;
public ScoreBuildHistogram(H2OCountedCompleter cc, int k, int ncols, int nbins, DTree tree, int leaf, DHistogram[][] hcs, DistributionFamily family, int weightIdx, int workIdx, int nidIdx, int treatmentIdx) {
super(cc);
_k = k;
_ncols= ncols;
_nbins= nbins;
_tree = tree;
_leaf = leaf;
_hcs = hcs;
_family = family;
_weightIdx = weightIdx;
_workIdx = workIdx;
_nidIdx = nidIdx;
_treatmentIdx = treatmentIdx;
}
/** Marker for already decided row. */
static public final int DECIDED_ROW = -1;
/** Marker for sampled out rows */
static public final int OUT_OF_BAG = -2;
/** Marker for a fresh tree */
static public final int UNDECIDED_CHILD_NODE_ID = -1;
static public final int FRESH = 0;
static public boolean isOOBRow(int nid) { return nid <= OUT_OF_BAG; }
static public boolean isDecidedRow(int nid) { return nid == DECIDED_ROW; }
static public int oob2Nid(int oobNid) { return -oobNid + OUT_OF_BAG; }
static public int nid2Oob(int nid) { return -nid + OUT_OF_BAG; }
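// Worked example (illustration only): a row sitting in node nid=3 that gets sampled out of a tree is
// stored as nid2Oob(3) = -3 + OUT_OF_BAG = -5; isOOBRow(-5) is true (-5 <= -2) and
// oob2Nid(-5) = 5 + OUT_OF_BAG = 3 recovers the node id. A row whose path ended in a decision is
// stored as DECIDED_ROW = -1, for which only isDecidedRow() returns true.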
@Override public void reduce( ScoreBuildHistogram sbh ) {
// Merge histograms
if( sbh._hcs == _hcs )
return; // Local histograms all shared; free to merge
// Distributed histograms need a little work
for( int i=0; i<_hcs.length; i++ ) {
DHistogram[] hs1 = _hcs[i], hs2 = sbh._hcs[i];
if( hs1 == null ) _hcs[i] = hs2;
else if( hs2 != null )
for( int j=0; j<hs1.length; j++ )
if( hs1[j] == null ) hs1[j] = hs2[j];
else if( hs2[j] != null )
hs1[j].add(hs2[j]);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/ScoreBuildHistogram2.java
|
package hex.tree;
import hex.genmodel.utils.DistributionFamily;
import jsr166y.CountedCompleter;
import water.*;
import water.fvec.*;
import water.util.ArrayUtils;
import water.util.IcedBitSet;
import water.util.Log;
import water.util.VecUtils;
import static hex.tree.SharedTree.ScoreBuildOneTree;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
/**
* Created by tomas on 10/28/16.
*
* Score and Build Histogram.
*
* This is an updated version ditching histogram sharing (still optional) to improve performance on multi-CPU systems (witnessed speedups of up to 4x).
*
* NOTE: unlike a standard MRTask, launch via dfork2 instead of doAll/dfork. It runs a custom 2-phase local map/reduce task.
*
* <p>Fuse 2 conceptual passes into one (MRTask):
*
* <dl>
*
* <dt>Pass 1:</dt><dd>Score a prior partially-built tree model, and make new Node assignments to
* every row. This involves pulling out the current assigned DecidedNode,
* "scoring" the row against that Node's decision criteria, and assigning the
* row to a new child UndecidedNode (and giving it an improved prediction).</dd>
*
* <dt>Pass 2:</dt><dd>Build new summary DHistograms on the new child UndecidedNodes
* every row got assigned into. Collect counts, mean, variance, min,
* max per bin, per column.</dd>
* </dl>
*
* The 2 passes are executed (locally) in sequence.
*
* <p>The result is a set of DHistogram arrays; one DHistogram array for each
* unique 'leaf' in the tree being histogrammed in parallel. These have node
* ID's (nids) from 'leaf' to 'tree._len'. Each DHistogram array is for all
* the columns in that 'leaf'.
*
* <p>The other result is a prediction "score" for the whole dataset, based on
* the previous passes' DHistograms.
*
*
* No CAS update:
*
* Sharing the histograms proved to be a performance problem on larger multi-CPU machines with many running threads; CAS was the bottleneck.
*
* To remove the CAS while minimizing the memory overhead (private copies of histograms), phase 2 is parallelized both over columns (primary) and rows (secondary).
* Parallelization over different columns precedes parallelization within each column to reduce the number of extra histogram copies made.
*
* The expected number of per-column tasks running in parallel (and hence histogram copies) is given by
*
* expected(nthreads-per-column) = max(1, H2O.NUMCPUS - num_cols)
*
*/
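// A minimal usage sketch (mirroring ScoreBuildOneTree.compute2 in SharedTree, shown for illustration):
// the task is launched via dfork2() rather than doAll()/dfork(), e.g.
//   new ScoreBuildHistogram2(completer, treeNum, k, ncols, nbins, tree, leafOffset, hcs, family,
//                            respIdx, weightIdx, predsIdx, workIdx, nidIdx, treatmentIdx).dfork2(fr2);
// and the caller waits on the passed-in CountedCompleter (here a ScoreBuildOneTree) rather than on the
// task itself.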
public class ScoreBuildHistogram2 extends ScoreBuildHistogram {
transient int [] _cids;
transient Chunk[][] _chks;
transient double [][] _ys;
transient double [][] _ws;
transient int [][] _nhs;
transient int [][] _rss;
Frame _fr2;
final int _numLeafs;
final IcedBitSet _activeCols;
final int _respIdx;
final int _predsIdx;
final boolean _reproducibleHistos;
// only for debugging purposes
final boolean _reduceHistoPrecision; // if enabled, allows testing that histograms are 100% reproducible when reproducible histograms are enabled
transient Consumer<DHistogram[][]> _hcsMonitor;
final int _treatmentIdx;
public ScoreBuildHistogram2(ScoreBuildOneTree sb, int treeNum, int k, int ncols, int nbins, DTree tree, int leaf,
DHistogram[][] hcs, DistributionFamily family,
int respIdx, int weightIdx, int predsIdx, int workIdx, int nidIdxs, int treatmentIdx) {
super(sb, k, ncols, nbins, tree, leaf, hcs, family, weightIdx, workIdx, nidIdxs, treatmentIdx);
_numLeafs = _hcs.length;
_respIdx = respIdx;
_predsIdx = predsIdx;
_treatmentIdx = treatmentIdx;
int hcslen = _hcs.length;
IcedBitSet activeCols = new IcedBitSet(ncols);
for (int n = 0; n < hcslen; n++) {
int [] acs = _tree.undecided(n + _leaf)._scoreCols;
if(acs != null) {
for (int c : acs) // Columns to score (null, or a list of selected cols)
activeCols.set(c);
} else {
activeCols = null;
break;
}
}
_activeCols = activeCols;
_hcs = ArrayUtils.transpose(_hcs);
// override defaults using debugging parameters where applicable
SharedTree.SharedTreeDebugParams dp = sb._st.getDebugParams();
_reproducibleHistos = tree._parms.forceStrictlyReproducibleHistograms() || dp._reproducible_histos;
_reduceHistoPrecision = !dp._keep_orig_histo_precision;
if (_reproducibleHistos && treeNum == 0 && k == 0 && leaf == 0) {
Log.info("Using a deterministic way of building histograms");
}
_hcsMonitor = dp.makeDHistogramMonitor(treeNum, k, leaf);
}
void dfork2(Frame fr) {
_fr2 = fr;
asyncExecOnAllNodes();
}
@Override public void map(Chunk [] chks){
// Even though this is an MRTask over a Frame, map(Chunk [] chks) should not be called for this task.
// Instead, we do a custom 2-stage local pass (launched from setupLocal) using LocalMR.
//
// There are 2 reasons for that:
// a) We have 2 local passes. The 1st pass scores the trees and sorts rows; the 2nd pass starts after the 1st pass is done and computes the histograms.
//    Conceptually these are two tasks, but since we do not need a global result we do both passes inside a single task - no need to add extra communication overhead here.
// b) To reduce the memory overhead in pass 2 (in case we're making private DHistogram copies).
//    There is a private copy made for each task. MRTask forks one task per set of chunks (one chunk index across all Vecs) and we do not want to make too many copies.
//    By reusing the same DHistogram for multiple chunks we save memory and calls to reduce.
//
throw H2O.unimpl();
}
// Pass 1: Score a prior partially-built tree model, and make new Node
// assignments to every row. This involves pulling out the current
// assigned DecidedNode, "scoring" the row against that Node's decision
// criteria, and assigning the row to a new child UndecidedNode (and
// giving it an improved prediction).
protected int[] score_decide(Chunk chks[], int nnids[]) {
int [] res = nnids.clone();
for( int row=0; row<nnids.length; row++ ) { // Over all rows
int nid = nnids[row]; // Get Node to decide from
if( isDecidedRow(nid)) { // already done
res[row] -= _leaf;
continue;
}
// Score row against current decisions & assign new split
boolean oob = isOOBRow(nid);
if( oob ) nid = oob2Nid(nid); // sampled away - we track the position in the tree
DTree.DecidedNode dn = _tree.decided(nid);
if( dn._split == null ) { // Might have a leftover non-split
if( DTree.isRootNode(dn) ) { res[row] = nid - _leaf; continue; }
nid = dn._pid; // Use the parent split decision then
int xnid = oob ? nid2Oob(nid) : nid;
nnids[row] = xnid;
res[row] = xnid - _leaf;
dn = _tree.decided(nid); // Parent steers us
}
assert !isDecidedRow(nid);
nid = dn.getChildNodeID(chks,row); // Move down the tree 1 level
if( !isDecidedRow(nid) ) {
if( oob ) nid = nid2Oob(nid); // Re-apply OOB encoding
nnids[row] = nid;
}
res[row] = nid-_leaf;
}
return res;
}
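// Worked example (illustration only): with _leaf = 3, a sampled-out row stored as nid = -5 (the OOB
// encoding of node 3) is decoded via oob2Nid(-5) = 3, routed by the decided node's getChildNodeID()
// to, say, child 7, re-encoded as nid2Oob(7) = -9 and written back to nnids. The returned
// res[row] = -9 - _leaf = -12 is negative, so the OOB row is later skipped when rows are grouped
// per leaf for histogram building.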
@Override
public void setupLocal() {
addToPendingCount(1);
// Init all the internal tree fields after shipping over the wire
_tree.init_tree();
Vec v = _fr2.anyVec();
assert(v!=null);
_cids = VecUtils.getLocalChunkIds(v);
_chks = new Chunk[_cids.length][_fr2.numCols()];
_ys = new double[_cids.length][];
_ws = new double[_cids.length][];
_nhs = new int[_cids.length][];
_rss = new int[_cids.length][];
long [] espc = v.espc();
int largestChunkSz = 0;
for(int i = 1; i < espc.length; ++i){
int sz = (int)(espc[i] - espc[i-1]);
if(sz > largestChunkSz) largestChunkSz = sz;
}
final int fLargestChunkSz = largestChunkSz;
final AtomicInteger cidx = new AtomicInteger(0);
// First do the phase 1 on all local data
new LocalMR(new MrFun(){
// more or less copied from ScoreBuildHistogram
private void map(int id, Chunk [] chks) {
final C4VolatileChunk nids = (C4VolatileChunk) chks[_nidIdx];
// Pass 1: Score a prior partially-built tree model, and make new Node
// assignments to every row. This involves pulling out the current
// assigned DecidedNode, "scoring" the row against that Node's decision
// criteria, and assigning the row to a new child UndecidedNode (and
// giving it an improved prediction).
int [] nnids;
if( _leaf > 0) // Prior pass exists?
nnids = score_decide(chks,nids.getValues());
else { // Just flag all the NA rows
nnids = new int[nids._len];
int [] is = nids.getValues();
for (int row = 0; row < nids._len; row++) {
if (isDecidedRow(is[row]))
nnids[row] = DECIDED_ROW;
}
}
// Pass 2: accumulate all rows, cols into histograms
// Sort the rows by NID, so we visit all the same NIDs in a row
// Find the count of unique NIDs in this chunk
int nh[] = (_nhs[id] = new int[_numLeafs + 1]);
for (int i : nnids)
if (i >= 0)
nh[i + 1]++;
// Rollup the histogram of rows-per-NID in this chunk
for (int i = 0; i <_numLeafs; i++) nh[i + 1] += nh[i];
// Splat the rows into NID-groups
int rows[] = (_rss[id] = new int[nnids.length]);
for (int row = 0; row < nnids.length; row++)
if (nnids[row] >= 0)
rows[nh[nnids[row]]++] = row;
}
@Override
protected void map(int id) {
Vec[] vecs = _fr2.vecs();
for(id = cidx.getAndIncrement(); id < _cids.length; id = cidx.getAndIncrement()) {
int cidx = _cids[id];
Chunk [] chks = _chks[id];
for (int i = 0; i < chks.length; ++i)
chks[i] = vecs[i].chunkForChunkIdx(cidx);
map(id,chks);
chks[_nidIdx].close(cidx,_fs);
Chunk resChk = chks[_workIdx];
int len = resChk.len();
final double[] y;
if(resChk instanceof C8DVolatileChunk){
y = ((C8DVolatileChunk)resChk).getValues();
} else
y = resChk.getDoubles(MemoryManager.malloc8d(len), 0, len);
int[] nh = _nhs[id];
_ys[id] = MemoryManager.malloc8d(len);
// Important optimization that helps to avoid cache misses when working on larger datasets
// `y` has original order corresponding to row order
// In binning we are accessing data semi-randomly - we only touch values/rows that are in the given
// node. These are not necessarily next to each other in memory. This is done on a per-feature basis.
// To optimize for sequential access we reorder the target so that values corresponding to the same node
// are co-located. Observed speed-up is up to 50% for larger datasets.
// See DHistogram#updateHisto for reference.
for (int n = 0; n < nh.length; n++) {
final int lo = (n == 0 ? 0 : nh[n - 1]);
final int hi = nh[n];
if (hi == lo)
continue;
for (int i = lo; i < hi; i++) {
_ys[id][i] = y[_rss[id][i]];
}
}
// Only allocate weights if the weight column is actually used. It is faster to handle the null case
// in binning than to represent the weights using a constant array (it would still need to be in memory
// and accessed frequently - a waste of CPU cache).
if (_weightIdx != -1) {
_ws[id] = chks[_weightIdx].getDoubles(MemoryManager.malloc8d(len), 0, len);
}
}
}
},new H2O.H2OCountedCompleter(this){
public void onCompletion(CountedCompleter cc){
final int ncols = _ncols;
final int [] active_cols = _activeCols == null?null:new int[Math.max(1,_activeCols.cardinality())];
final int nactive_cols = active_cols == null?ncols:active_cols.length;
ScoreBuildHistogram2.this.addToPendingCount(1+nactive_cols);
if(active_cols != null) {
int j = 0;
for (int i = 0; i < ncols; ++i)
if (_activeCols.contains(i))
active_cols[j++] = i;
}
// A LocalMR task (over columns) launching nested LocalMR tasks (over the number of workers) for each column.
// We want FJ to start processing all the columns before parallelizing within a column, to reduce memory overhead.
// (running a single column in n threads means n copies of the histogram)
// This is how it works:
//    1) The outer task walks down its tree, forking tasks with an exponentially decreasing number of columns until reaching its left-most leaf for column 0.
//       At this point, the local FJ queue for this thread has a task for processing half of the columns at the bottom, followed by a task for 1/4 of the columns, and so on.
//       Other threads start stealing work from the bottom.
//    2) It forks the leaf task and (because it is polling from the top) executes the LocalMR for column 0.
// This way the columns should be distributed as equally as possible without resorting to a shared priority queue.
final int numWrks = _hcs.length * nactive_cols < 16 * 1024 ? H2O.NUMCPUS : Math.min(H2O.NUMCPUS, Math.max(4 * H2O.NUMCPUS / nactive_cols, 1));
final int rem = H2O.NUMCPUS - numWrks * ncols;
new LocalMR(new MrFun() {
@Override
protected void map(int c) {
c = active_cols == null ? c : active_cols[c];
final int nthreads = numWrks + (c < rem ? 1 : 0);
WorkAllocator workAllocator = _reproducibleHistos ? new RangeWorkAllocator(_cids.length, nthreads) : new SharedPoolWorkAllocator(_cids.length);
ComputeHistoThread computeHistoThread = new ComputeHistoThread(_hcs.length == 0?new DHistogram[0]:_hcs[c],c,fLargestChunkSz,workAllocator);
LocalMR mr = new LocalMR(computeHistoThread, nthreads, ScoreBuildHistogram2.this);
if (_reproducibleHistos) {
mr = mr.withNoPrevTaskReuse();
assert mr.isReproducible();
}
mr.fork();
}
},nactive_cols,ScoreBuildHistogram2.this).fork();
}
}).fork();
}
private static void mergeHistos(DHistogram [] hcs, DHistogram [] hcs2){
// Distributed histograms need a little work
for( int i=0; i< hcs.length; i++ ) {
DHistogram hs1 = hcs[i], hs2 = hcs2[i];
if( hs1 == null ) hcs[i] = hs2;
else if( hs2 != null )
hs1.add(hs2);
}
}
interface WorkAllocator {
int getMaxId(int subsetId);
int allocateWork(int subsetId);
}
static class SharedPoolWorkAllocator implements WorkAllocator {
final int _workAmount;
final AtomicInteger _id;
SharedPoolWorkAllocator(int workAmount) {
_workAmount = workAmount;
_id = new AtomicInteger();
}
@Override
public int getMaxId(int subsetId) {
return _workAmount;
}
@Override
public int allocateWork(int subsetId) {
return _id.getAndIncrement();
}
}
static class RangeWorkAllocator implements WorkAllocator {
final int _workAmount;
final int[] _rangePositions;
final int _rangeLength;
RangeWorkAllocator(int workAmount, int nWorkers) {
_workAmount = workAmount;
_rangePositions = new int[nWorkers];
_rangeLength = (int) Math.ceil(workAmount / (double) nWorkers);
int p = 0;
for (int i = 0; i < _rangePositions.length; i++) {
_rangePositions[i] = p;
p += _rangeLength;
}
}
@Override
public int getMaxId(int subsetId) {
return Math.min((subsetId + 1) * _rangeLength, _workAmount);
}
@Override
public int allocateWork(int subsetId) {
return _rangePositions[subsetId]++;
}
}
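// Worked example (illustration only): RangeWorkAllocator(10, 3) gives _rangeLength = ceil(10/3) = 4
// and starting positions {0, 4, 8}, so worker 0 processes chunks 0..3, worker 1 chunks 4..7 and
// worker 2 chunks 8..9 (getMaxId(2) = min(3*4, 10) = 10). This fixed, per-worker range is part of
// what makes the reproducible-histogram mode deterministic, while SharedPoolWorkAllocator hands out
// chunk ids first-come-first-served from a single atomic counter.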
private class ComputeHistoThread extends MrFun<ComputeHistoThread> {
final int _maxChunkSz;
final int _col;
final DHistogram [] _lh;
WorkAllocator _allocator;
ComputeHistoThread(DHistogram [] hcs, int col, int maxChunkSz, WorkAllocator allocator){
_lh = hcs; _col = col; _maxChunkSz = maxChunkSz;
_allocator = allocator;
}
@Override
public ComputeHistoThread makeCopy() {
return new ComputeHistoThread(ArrayUtils.deepClone(_lh),_col,_maxChunkSz,_allocator);
}
@Override
protected void map(int id){
Object cs = null;
double[] resp = null;
double[] preds = null;
double[] treatment = null;
final int maxWorkId = _allocator.getMaxId(id);
for(int i = _allocator.allocateWork(id); i < maxWorkId; i = _allocator.allocateWork(id)) {
if (cs == null) { // chunk data cache doesn't exist yet
if (_respIdx >= 0)
resp = MemoryManager.malloc8d(_maxChunkSz);
if (_predsIdx >= 0)
preds = MemoryManager.malloc8d(_maxChunkSz);
if (_treatmentIdx >= 0)
treatment = MemoryManager.malloc8d(_maxChunkSz);
}
cs = computeChunk(i, cs, _ws[i], resp, preds, treatment);
}
}
private Object computeChunk(int id, Object cs, double[] ws, double[] resp, double[] preds, double[] treatment){
int [] nh = _nhs[id];
int [] rs = _rss[id];
Chunk resChk = _chks[id][_workIdx];
int len = resChk._len;
double [] ys = ScoreBuildHistogram2.this._ys[id];
final int hcslen = _lh.length;
boolean extracted = false;
for (int n = 0; n < hcslen; n++) {
int sCols[] = _tree.undecided(n + _leaf)._scoreCols; // Columns to score (null, or a list of selected cols)
if (sCols == null || ArrayUtils.find(sCols, _col) >= 0) {
DHistogram h = _lh[n];
int hi = nh[n];
int lo = (n == 0 ? 0 : nh[n - 1]);
if (hi == lo || h == null) continue; // Ignore untracked columns in this split
if (h._vals == null) h.init();
if (! extracted) {
cs = h.extractData(_chks[id][_col], cs, len, _maxChunkSz);
if (h._vals_dim >= 6) {
_chks[id][_respIdx].getDoubles(resp, 0, len);
if (h._vals_dim == 7) {
_chks[id][_predsIdx].getDoubles(preds, 0, len);
}
}
if(h._useUplift){
_chks[id][_respIdx].getDoubles(resp, 0, len);
_chks[id][_treatmentIdx].getDoubles(treatment, 0, len);
}
extracted = true;
}
h.updateHisto(ws, resp, cs, ys, preds, rs, hi, lo, treatment);
}
}
return cs;
}
@Override
protected void reduce(ComputeHistoThread cc) {
assert _lh != cc._lh;
mergeHistos(_lh, cc._lh);
}
}
@Override public void postGlobal(){
_hcs = ArrayUtils.transpose(_hcs);
for(DHistogram [] ary:_hcs)
for(DHistogram dh:ary) {
if (dh == null)
continue;
if (_reduceHistoPrecision)
dh.reducePrecision();
}
if (_hcsMonitor != null)
_hcsMonitor.accept(_hcs);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/SharedTree.java
|
package hex.tree;
import hex.*;
import hex.genmodel.GenModel;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.gbm.GBMModel;
import hex.util.CheckpointUtils;
import hex.util.LinearAlgebraUtils;
import jsr166y.CountedCompleter;
import org.apache.log4j.Logger;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.udf.CFuncRef;
import water.util.*;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.function.Consumer;
public abstract class SharedTree<
M extends SharedTreeModel<M,P,O>,
P extends SharedTreeModel.SharedTreeParameters,
O extends SharedTreeModel.SharedTreeOutput>
extends ModelBuilder<M,P,O>
implements CalibrationHelper.ModelBuilderWithCalibration<M, P, O> {
private static final Logger LOG = Logger.getLogger(SharedTree.class);
private static final boolean DEBUG_PUBDEV_6686 = Boolean.getBoolean(H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.pubdev6686");
public boolean shouldReorder(Vec v) {
return _parms._categorical_encoding == Model.Parameters.CategoricalEncodingScheme.SortByResponse
&& v.cardinality() > _parms._nbins_cats; // no need to sort categoricals with fewer than nbins_cats - they will be sorted in every leaf anyway
}
protected int _mtry;
protected int _mtry_per_tree;
protected GlobalInteractionConstraints _ics;
public static final int MAX_NTREES = 100000;
public SharedTree(P parms ) { super(parms ); /*only call init in leaf classes*/ }
public SharedTree(P parms, Key<M> key) { super(parms,key); /*only call init in leaf classes*/ }
public SharedTree(P parms, Job job ) { super(parms,job); /*only call init in leaf classes*/ }
public SharedTree(P parms, boolean startup_once) { super(parms,startup_once); /*only call init in leaf classes*/ }
// Number of trees requested, including prior trees from a checkpoint
protected int _ntrees;
// The in-progress model being built
protected M _model;
// Number of columns in training set, not counting the response column
protected int _ncols;
// Initially predicted value (for zero trees)
protected double _initialPrediction;
// Sum of variable empirical improvement in squared-error. The value is not scaled.
protected transient float[/*nfeatures*/] _improvPerVar;
protected Random _rand;
private transient Frame _calib;
protected final Frame validWorkspace() { return _validWorkspace; }
protected transient Frame _validWorkspace;
protected transient int _lastScoredTree = 0;
protected transient Frame _trainPredsCache;
protected transient Frame _validPredsCache;
private transient SharedTreeDebugParams _debugParms;
public boolean isSupervised(){return true;}
public boolean isUplift() {return false;}
public boolean providesVarImp() {
return isSupervised();
}
protected Score.ScoreExtension makeScoreExtension() {
return null;
}
@Override public boolean haveMojo() { return true; }
@Override public boolean havePojo() {
if (_parms == null)
return true;
return _parms._offset_column == null; // offset column is not supported for POJO
}
public boolean scoreZeroTrees(){return true;}
@Override protected boolean computePriorClassDistribution(){ return true;}
@Override
public ToEigenVec getToEigenVec() {
return LinearAlgebraUtils.toEigen;
}
@Override
protected void ignoreInvalidColumns(int npredictors, boolean expensive) {
// Drop invalid columns
new FilterCols(npredictors) {
@Override protected boolean filter(Vec v, String name) {
return (v.max() > Float.MAX_VALUE ); }
}.doIt(_train,"Dropping columns with too large numeric values: ",expensive);
}
/** Initialize the ModelBuilder, validating all arguments and preparing the
* training frame. This call is expected to be overridden in the subclasses
* and each subclass will start with "super.init();". This call is made
* by the front-end whenever the GUI is clicked, and needs to be fast;
* heavy-weight prep needs to wait for the trainModel() call.
*
* Validate the requested ntrees; precompute actual ntrees. Validate
* the number of classes to predict on; validate a checkpoint. */
@Override public void init(boolean expensive) {
super.init(expensive);
if (H2O.ARGS.client && _parms._build_tree_one_node)
error("_build_tree_one_node", "Cannot run on a single node in client mode.");
if( _parms._min_rows < 0 )
error("_min_rows", "Requested min_rows must be greater than 0");
if (_parms._categorical_encoding == Model.Parameters.CategoricalEncodingScheme.OneHotInternal) {
error("_categorical_encoding", "Cannot use OneHotInternal categorical encoding for tree methods.");
}
if( _parms._ntrees < 0 || _parms._ntrees > MAX_NTREES)
error("_ntrees", "Requested ntrees must be between 1 and " + MAX_NTREES);
_ntrees = _parms._ntrees; // Total trees in final model
if( _parms.hasCheckpoint() ) { // Asking to continue from checkpoint?
Value cv = DKV.get(_parms._checkpoint);
if( cv != null ) { // Look for prior model
SharedTreeModel<M, P, O> checkpointModel = CheckpointUtils.getAndValidateCheckpointModel(this, SharedTreeModel.SharedTreeParameters.CHECKPOINT_NON_MODIFIABLE_FIELDS, cv);
// Compute number of trees to build for this checkpoint
_ntrees = _parms._ntrees - checkpointModel._output._ntrees; // Needed trees
}
}
if (_parms._nbins <= 1) error ("_nbins", "nbins must be > 1.");
if (_parms._nbins >= 1<<16) error ("_nbins", "nbins must be < " + (1<<16));
if (_parms._nbins_cats <= 1) error ("_nbins_cats", "nbins_cats must be > 1.");
if (_parms._nbins_cats >= 1<<16) error ("_nbins_cats", "nbins_cats must be < " + (1<<16));
if (_parms._nbins_top_level < _parms._nbins) error ("_nbins_top_level", "nbins_top_level must be >= nbins (" + _parms._nbins + ").");
if (_parms._nbins_top_level >= 1<<16) error ("_nbins_top_level", "nbins_top_level must be < " + (1<<16));
if (_parms._max_depth < 0) error("_max_depth", "_max_depth must be >= 0.");
if (_parms._max_depth == 0) _parms._max_depth = Integer.MAX_VALUE;
if (_parms._min_rows <=0) error ("_min_rows", "_min_rows must be > 0.");
if (_parms._r2_stopping!=Double.MAX_VALUE) warn("_r2_stopping", "_r2_stopping is no longer supported - please use stopping_rounds, stopping_metric and stopping_tolerance instead.");
if (_parms._score_tree_interval < 0) error ("_score_tree_interval", "_score_tree_interval must be >= 0.");
if (_parms._in_training_checkpoints_tree_interval <= 0) error ("_in_training_checkpoints_tree_interval", "_in_training_checkpoints_tree_interval must be > 0.");
validateRowSampleRate();
if (_parms._min_split_improvement < 0)
error("_min_split_improvement", "min_split_improvement must be >= 0, but is " + _parms._min_split_improvement + ".");
if (!(0.0 < _parms._col_sample_rate_per_tree && _parms._col_sample_rate_per_tree <= 1.0))
error("_col_sample_rate_per_tree", "col_sample_rate_per_tree should be in interval [0,1] but it is " + _parms._col_sample_rate_per_tree + ".");
if( !(0. < _parms._col_sample_rate_change_per_level && _parms._col_sample_rate_change_per_level <= 2) )
error("_col_sample_rate_change_per_level", "col_sample_rate_change_per_level must be > 0" +
" and <= 2");
if (_train != null) {
double sumWeights = _train.numRows() * (_weights != null ? _weights.mean() : 1);
if (sumWeights < 2*_parms._min_rows ) // Need at least 2*min_rows weighted rows to split even once
error("_min_rows", "The dataset size is too small to split for min_rows=" + _parms._min_rows
+ ": must have at least " + 2*_parms._min_rows + " (weighted) rows, but have only " + sumWeights + ".");
}
if( _train != null )
_ncols = _train.numCols()-(isSupervised()?1:0)-numSpecialCols();
CalibrationHelper.initCalibration(this, _parms, expensive);
_orig_projection_array = LinearAlgebraUtils.toEigenProjectionArray(_origTrain, _train, expensive);
_parms._use_best_cv_iteration = isSupervised() && H2O.getSysBoolProperty(
"sharedtree.crossvalidation.useBestCVIteration", _parms._use_best_cv_iteration);
_parms._parallel_main_model_building = H2O.getSysBoolProperty(
"sharedtree.crossvalidation.parallelMainModelBuilding", _parms._parallel_main_model_building);
if (_parms._max_runtime_secs > 0 && _parms._parallel_main_model_building) {
_parms._parallel_main_model_building = false;
warn("_parallel_main_model_building",
"Parallel main model will be disabled because max_runtime_secs is specified.");
}
if (_parms._use_best_cv_iteration && _parms._parallel_main_model_building) {
_parms._parallel_main_model_building = false;
warn("_parallel_main_model_building",
"Parallel main model will be disabled because use_best_cv_iteration is specified.");
}
if (_parms._build_tree_one_node) {
warn("_build_tree_one_node", "Single-node tree building is not supported in this version of H2O.");
}
if (!StringUtils.isNullOrEmpty(_parms._in_training_checkpoints_dir)) {
if (!H2O.getPM().isWritableDirectory(_parms._in_training_checkpoints_dir)) {
error("_in_training_checkpoints_dir", "In training checkpoints directory path must point to a writable path.");
}
}
}
protected void validateRowSampleRate() {
if (!(0.0 < _parms._sample_rate && _parms._sample_rate <= 1.0))
error("_sample_rate", "sample_rate should be in interval ]0,1] but it is " + _parms._sample_rate + ".");
if (_parms._sample_rate_per_class != null) {
warn("_sample_rate", "_sample_rate is ignored if _sample_rate_per_class is specified.");
if (_parms._sample_rate_per_class.length != nclasses()) error("_sample_rate_per_class", "_sample_rate_per_class must have " + nclasses() + " values (one per class).");
for (int i=0;i<_parms._sample_rate_per_class.length;++i) {
if (!(0.0 < _parms._sample_rate_per_class[i] && _parms._sample_rate_per_class[i] <= 1.0))
error("_sample_rate_per_class", "sample_rate_per_class for class " + response().domain()[i] + " should be in interval ]0,1] but it is " + _parms._sample_rate_per_class[i] + ".");
}
}
}
@Override
protected void checkEarlyStoppingReproducibility() {
if (_parms._score_tree_interval == 0 && !_parms._score_each_iteration) {
warn("_stopping_rounds", "early stopping is enabled but neither score_tree_interval or score_each_iteration are defined. Early stopping will not be reproducible!");
}
}
// --------------------------------------------------------------------------
// Top-level tree-algo driver
abstract protected class Driver extends ModelBuilder<M,P,O>.Driver {
@Override public void computeImpl() {
_model = null; // Resulting model!
try {
init(true); // Do any expensive tests & conversions now
if( error_count() > 0 )
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(SharedTree.this);
// Create a New Model or continuing from a checkpoint
if (_parms.hasCheckpoint()) {
// Get the model to continue
M model = DKV.get(_parms._checkpoint).<M>get().deepClone(_result);
// Override original parameters by new parameters
model._parms = _parms;
// We create a new model
_model = model.delete_and_lock(_job);
} else { // New Model
// Compute the zero-tree error - guessing only the class distribution.
// MSE is stddev squared when guessing for regression.
// For classification, guess the largest class.
M model = makeModel(dest(), _parms);
_model = model.delete_and_lock(_job); // and clear & write-lock it (smashing any prior)
_model._output._init_f = _initialPrediction;
}
final boolean isQuasibinomial = _parms._distribution == DistributionFamily.quasibinomial;
// Get the actual response domain
final String[] actualDomain;
if (isQuasibinomial) {
// Quasibinomial GBM can have different domains than {0, 1}
actualDomain = new VecUtils.CollectDoubleDomain(null,2)
.doAll(_response).stringDomain(_response.isInt());
((GBMModel)_model)._output._quasibinomialDomains = actualDomain;
} else if (isSupervised()) {
// Regular supervised case, most common
actualDomain = _response.domain();
} else {
// Unsupervised, no domain
actualDomain = null;
}
// Compute the print-out response domain; makes for nicer printouts
assert (_nclass > 1 && actualDomain != null) || (_nclass==1 && actualDomain==null);
final String[] domain = _nclass == 1 ? new String[] {"r"} : actualDomain; // For regression, give a name to class 0
// Compute class distribution, used to for initial guesses and to
// upsample minority classes (if asked for).
if( _nclass>1 ) { // Classification?
// Handle imbalanced classes by stratified over/under-sampling.
// initWorkFrame sets the modeled class distribution, and
// model.score() corrects the probabilities back using the
// distribution ratios
if(_model._output.isClassifier() && _parms._balance_classes ) {
float[] trainSamplingFactors = new float[_train.lastVec().domain().length]; //leave initialized to 0 -> will be filled up below
if (_parms._class_sampling_factors != null) {
if (_parms._class_sampling_factors.length != _train.lastVec().domain().length)
throw new IllegalArgumentException("class_sampling_factors must have " + _train.lastVec().domain().length + " elements");
trainSamplingFactors = _parms._class_sampling_factors.clone(); //clone: don't modify the original
}
boolean verboseSampling = Boolean.getBoolean(H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.sharedTree.sampleFrameStratified.verbose");
Frame stratified;
if(isQuasibinomial) {
stratified = water.util.MRUtils.sampleFrameStratified(_train, _train.lastVec(), _train.vec(_model._output.weightsName()), trainSamplingFactors, (long) (_parms._max_after_balance_size * _train.numRows()), _parms._seed, true, verboseSampling, domain);
} else {
stratified = water.util.MRUtils.sampleFrameStratified(_train, _train.lastVec(), _train.vec(_model._output.weightsName()), trainSamplingFactors, (long) (_parms._max_after_balance_size * _train.numRows()), _parms._seed, true, verboseSampling, null);
}
if (stratified != _train) {
_train = stratified;
_response = stratified.vec(_parms._response_column);
_weights = stratified.vec(_parms._weights_column);
// Recompute distribution since the input frame was modified
if (isQuasibinomial){
MRUtils.ClassDistQuasibinomial cdmt2 = _weights != null ?
new MRUtils.ClassDistQuasibinomial(domain).doAll(_response, _weights) : new MRUtils.ClassDistQuasibinomial(domain).doAll(_response);
_model._output._distribution = cdmt2.dist();
_model._output._modelClassDist = cdmt2.relDist();
_model._output._domains[_model._output._domains.length - 1] = domain; // replace the response domain (last entry) with the quasibinomial domain
} else {
MRUtils.ClassDist cdmt2 = _weights != null ?
new MRUtils.ClassDist(_nclass).doAll(_response, _weights) : new MRUtils.ClassDist(_nclass).doAll(_response);
_model._output._distribution = cdmt2.dist();
_model._output._modelClassDist = cdmt2.relDist();
}
}
}
LOG.info("Prior class distribution: " + Arrays.toString(_model._output._priorClassDist));
LOG.info("Model class distribution: " + Arrays.toString(_model._output._modelClassDist));
if (_parms._sample_rate_per_class != null) {
LOG.info("Sample rates per tree (this affects the distribution of probabilities):");
for (int i = 0; i < nclasses(); ++i)
LOG.info(" sample rate for class '" + response().domain()[i] + "' : " + _parms._sample_rate_per_class[i]);
}
}
// top-level quantiles for all columns
// non-numeric columns get a vector full of NAs
if (_parms._histogram_type == SharedTreeModel.SharedTreeParameters.HistogramType.QuantilesGlobal
|| _parms._histogram_type == SharedTreeModel.SharedTreeParameters.HistogramType.RoundRobin) {
_job.update(1, "Computing top-level histogram split-points.");
final Timer exactT = new Timer();
final double[][] exactSplitPoints = ExactSplitPoints.splitPoints(_train, _parms._nbins);
LOG.info("Calculating exact (low cardinality) histogram split-points took " + exactT);
final Timer quantileT = new Timer();
final double[][] quantileSplitPoints = GlobalQuantilesCalc.splitPoints(_train, _parms._weights_column,
exactSplitPoints, _parms._nbins, _parms._nbins_top_level);
Futures fs = new Futures();
int qCnt = 0, eCnt = 0;
for (int i = 0; i < quantileSplitPoints.length; i++) {
assert exactSplitPoints[i] == null || quantileSplitPoints[i] == null;
Key<DHistogram.HistoSplitPoints> key = getGlobalSplitPointsKey(i);
if (key == null)
continue;
boolean useQuantiles = exactSplitPoints[i] == null;
double[] sp = useQuantiles ? quantileSplitPoints[i] : exactSplitPoints[i];
if (sp != null) {
if (useQuantiles) { qCnt++; } else { eCnt++; }
DKV.put(new DHistogram.HistoSplitPoints(key, sp, useQuantiles), fs);
}
}
fs.blockForPending();
LOG.info("Split-points are defined using " + eCnt + " exact sets of points and " + qCnt + " sets of quantile values.");
LOG.info("Calculating top-level histogram split-points took " + quantileT);
}
// Also add to the basic working Frame these sets:
// nclass Vecs of current forest results (sum across all trees)
// nclass Vecs of working/temp data
// nclass Vecs of NIDs, allowing 1 tree per class
String [] twNames = new String[_nclass*2];
for(int i = 0; i < _nclass; ++i){
twNames[i] = "Tree_" + domain[i];
twNames[_nclass+i] = "Work_" + domain[i];
}
Vec [] twVecs = templateVec().makeVolatileDoubles(_nclass*2);
_train.add(twNames,twVecs);
// One tree per class; each tree needs its own NIDs column. For empty classes use a -1
// NID, signifying an empty regression tree.
String [] names = new String[_nclass];
final int [] cons = new int[_nclass];
for( int i=0; i<_nclass; i++ ) {
names[i] = "NIDs_" + domain[i];
cons[i] = isSupervised() && _model._output._distribution[i] == 0 ? -1 : 0;
}
Vec [] vs = templateVec().makeVolatileInts(cons);
_train.add(names, vs);
// Append number of trees participating in on-the-fly scoring
_train.add("OUT_BAG_TREES", templateVec().makeZero());
if (_valid != null) {
_validWorkspace = makeValidWorkspace();
String[] vdomain = isQuasibinomial ? actualDomain : vresponse().domain();
_validPredsCache = Score.makePredictionCache(_model, vresponse(), vdomain);
}
_trainPredsCache = Score.makePredictionCache(_model, templateVec(), actualDomain);
// Variable importance: squared-error-improvement-per-variable-per-split
_improvPerVar = new float[_ncols];
_rand = RandomUtils.getRNG(_parms._seed);
SharedTreeDebugParams debugParms = getDebugParams();
if (! debugParms.isDefault()) {
LOG.warn("Model will be trained with debug parameters enabled: " + debugParms.toJsonString());
}
initializeModelSpecifics();
resumeFromCheckpoint(SharedTree.this);
scoreAndBuildTrees(doOOBScoring());
postProcessModel();
} finally {
if (_eventPublisher != null) {
_eventPublisher.onAllIterationsComplete();
}
if( _model!=null ) _model.unlock(_job);
for (Key<?> k : getGlobalSplitPointsKeys()) Keyed.remove(k);
if (_validWorkspace != null) {
_validWorkspace.remove();
_validWorkspace = null;
}
if (_validPredsCache != null) {
_validPredsCache.remove();
_validPredsCache = null;
}
if (_trainPredsCache != null) {
_trainPredsCache.remove();
_trainPredsCache = null;
}
}
}
/** Vec to be used as template to create workspaces */
private Vec templateVec() {
return isSupervised() ? _response : _train.anyVec();
}
// Abstract methods implemented by the tree builders
abstract protected M makeModel(Key<M> modelKey, P parms);
abstract protected boolean doOOBScoring();
abstract protected boolean buildNextKTrees();
abstract protected void initializeModelSpecifics();
protected void doInTrainingCheckpoint() {
throw new UnsupportedOperationException("In training checkpoints are not supported for this algorithm");
}
// Common methods for all tree builders
protected Frame makeValidWorkspace() { return null; }
// Helpers to store split-points in DKV - keep a cache on each node (instead of sending around over and over)
protected Key<DHistogram.HistoSplitPoints> getGlobalSplitPointsKey(int i) {
if (_model==null || _model._key == null || _parms._histogram_type!= SharedTreeModel.SharedTreeParameters.HistogramType.QuantilesGlobal
&& _parms._histogram_type!= SharedTreeModel.SharedTreeParameters.HistogramType.RoundRobin) return null;
return Key.makeSystem(_model._key+"_splits_col_"+i);
}
protected Key<DHistogram.HistoSplitPoints>[] getGlobalSplitPointsKeys() {
@SuppressWarnings("unchecked")
Key<DHistogram.HistoSplitPoints>[] keys = new Key[_ncols];
for (int i=0;i<keys.length;++i)
keys[i] = getGlobalSplitPointsKey(i);
return keys;
}
/**
* Restore the workspace from a previous model (checkpoint)
*/
protected final void resumeFromCheckpoint(SharedTree st) {
if( !_parms.hasCheckpoint() ) return;
// Reconstruct the working tree state from the checkpoint
Timer t = new Timer();
int ntreesFromCheckpoint = ((SharedTreeModel.SharedTreeParameters) _parms._checkpoint.get()._parms)._ntrees;
new ReconstructTreeState(_ncols, _nclass, st /*large, but cleaner code this way*/, _parms._sample_rate,
new CompressedForest(_model._output._treeKeys, _model._output._domains), doOOBScoring())
.doAll(_train, _parms._build_tree_one_node);
for (int i = 0; i < ntreesFromCheckpoint; i++) _rand.nextLong(); //for determinism
LOG.info("Reconstructing OOB stats from checkpoint took " + t);
if (LOG.isTraceEnabled()) LOG.trace(_train.toTwoDimTable());
}
/**
* Build more trees, as specified by the model parameters
* @param oob Whether or not Out-Of-Bag scoring should be performed
*/
protected final void scoreAndBuildTrees(boolean oob) {
int[] scoredNum = new int[0];
if (_coordinator != null) {
_coordinator.initStoppingParameters();
}
for( int tid=0; tid< _ntrees; tid++) {
// During first iteration model contains 0 trees, then 1-tree, ...
final boolean scored = doScoringAndSaveModel(false, oob, _parms._build_tree_one_node);
if (scored) {
scoredNum = ArrayUtils.append(scoredNum, tid);
if (ScoreKeeper.stopEarly(_model._output.scoreKeepers(), _parms._stopping_rounds, getProblemType(), _parms._stopping_metric, _parms._stopping_tolerance, "model's last", true)) {
if (_parms._is_cv_model && _parms._use_best_cv_iteration) {
ScoreKeeper[] sk = _model._output.scoreKeepers();
int best = ScoreKeeper.best(sk, _parms._stopping_rounds, _parms._stopping_metric);
if (best != sk.length - 1) {
int bestNTrees = scoredNum[best];
LOG.info(_desc + " built total of " + scoredNum[scoredNum.length - 1] +
" trees, however the best score was obtained using only ntrees=" + bestNTrees +
". Trimming model to " + bestNTrees + " trees.");
_model._output.trimTo(bestNTrees);
_model.update(_job);
}
} else if (!_parms._is_cv_model) {
LOG.info("Stopping early and setting actual ntrees to the " + _model._output._ntrees);
_parms._ntrees = _model._output._ntrees;
}
_job.update(_ntrees-_model._output._ntrees); // finish the progress bar
LOG.info(_model.toString()); // we don't know if doScoringAndSaveModel printed the model or not
return;
}
}
boolean manualCheckpointsInterval = tid > 0 && tid % _parms._in_training_checkpoints_tree_interval == 0;
if (!StringUtils.isNullOrEmpty(_parms._in_training_checkpoints_dir) && manualCheckpointsInterval) {
doInTrainingCheckpoint();
}
Timer kb_timer = new Timer();
boolean converged = buildNextKTrees();
LOG.info((tid + 1) + ". tree was built in " + kb_timer.toString());
if (_eventPublisher != null) {
_eventPublisher.onIterationComplete();
}
_job.update(1);
if (_model._output._treeStats._max_depth==0) {
LOG.warn("Nothing to split on: Check that response and distribution are meaningful (e.g., you are not using laplace/quantile regression with a binary response).");
}
if (converged || timeout()) {
_job.update(_parms._ntrees-tid-1); // add remaining trees to progress bar
break; // If timed out, do the final scoring
}
if (stop_requested()) throw new Job.JobCancelledException(_job);
if (tid == _ntrees - 1 && _coordinator != null) {
_coordinator.updateParameters();
}
}
// Final scoring (skip if job was cancelled)
doScoringAndSaveModel(true, oob, _parms._build_tree_one_node);
}
}
private void postProcessModel() {
// Model Calibration (only for the final model, not CV models)
if (_parms.calibrateModel() && (!_parms._is_cv_model)) {
_model._output.setCalibrationModel(
CalibrationHelper.buildCalibrationModel(SharedTree.this, _parms, _job, _model)
);
_model.update(_job);
}
}
protected ScoreKeeper.ProblemType getProblemType() {
assert isSupervised();
return ScoreKeeper.ProblemType.forSupervised(isClassifier(), isUplift());
}
// --------------------------------------------------------------------------
// Build an entire layer of all K trees
protected DHistogram[][][] buildLayer(final Frame fr, final int nbins, final DTree ktrees[], final int leafs[], final DHistogram hcs[][][], boolean build_tree_one_node) {
// Build K trees, one per class.
// Build up the next-generation tree splits from the current histograms.
// Nearly all leaves will split one more level. This loop nest is
// O( #active_splits * #bins * #ncols )
// but is NOT over all the data.
ScoreBuildOneTree sb1ts[] = new ScoreBuildOneTree[_nclass];
Vec vecs[] = fr.vecs();
for( int k=0; k<_nclass; k++ ) {
final DTree tree = ktrees[k]; // Tree for class K
if( tree == null ) continue;
// Build a frame with just a single tree (& work & nid) columns, so the
// nested MRTask ScoreBuildHistogram in ScoreBuildOneTree does not try
// to close other trees' Vecs when run in parallel.
final String[] fr2cols = Arrays.copyOf(fr._names,_ncols+1);
final Vec[] fr2vecs = Arrays.copyOf(vecs,_ncols+1);
if (DEBUG_PUBDEV_6686) {
boolean hasNull = false;
for (Vec v : fr2vecs) {
if (v == null) {
hasNull = true;
break;
}
}
if (hasNull) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < fr2vecs.length; i++) {
sb.append(fr2cols[i]).append(":").append(fr2vecs[i] == null).append("; ");
}
LOG.warn("A null Vec found in `fr2=" + fr._key + "`: " + sb.toString());
LOG.warn("Training frame: " + _train._key + "; model: " + _result);
LOG.warn("Params: " + _parms.toJsonString());
}
}
Frame fr2 = new Frame(fr2cols, fr2vecs); //predictors, weights and the actual response
if (isSupervised() && fr2.find(_parms._response_column) == -1) {
fr2.add(_parms._response_column, fr.vec(_parms._response_column));
}
// Add temporary workspace vectors (optional weights are taken over from fr)
int respIdx = fr2.find(_parms._response_column);
int weightIdx = fr2.find(_parms._weights_column);
int treatmentIdx = -1;
int predsIdx = fr2.numCols(); fr2.add(fr._names[idx_tree(k)],vecs[idx_tree(k)]); //tree predictions
int workIdx = fr2.numCols(); fr2.add(fr._names[idx_work(k)],vecs[idx_work(k)]); //target value to fit (copy of actual response for DRF, residual for GBM)
int nidIdx = fr2.numCols(); fr2.add(fr._names[idx_nids(k)],vecs[idx_nids(k)]); //node indices for tree construction
if (LOG.isTraceEnabled()) LOG.trace("Building a layer for class " + k + ":\n" + fr2.toTwoDimTable());
// Async tree building
// step 1: build histograms
// step 2: split nodes
H2O.submitTask(sb1ts[k] = new ScoreBuildOneTree(this,k, nbins, tree, leafs, hcs, fr2, build_tree_one_node, _improvPerVar, _model._parms._distribution,
respIdx, weightIdx, predsIdx, workIdx, nidIdx, treatmentIdx));
}
// Block for all K trees to complete.
boolean did_split=false;
for( int k=0; k<_nclass; k++ ) {
final DTree tree = ktrees[k]; // Tree for class K
if( tree == null ) continue;
sb1ts[k].join();
if( sb1ts[k]._did_split ) did_split=true;
if (LOG.isTraceEnabled()) {
LOG.trace("Done with this layer for class " + k + ":\n" + new Frame(
new String[]{"TREE", "WORK", "NIDS"},
new Vec[]{
vecs[idx_tree(k)],
vecs[idx_work(k)],
vecs[idx_nids(k)]
}
).toTwoDimTable());
}
}
// The layer is done.
return did_split ? hcs : null;
}
protected static class ScoreBuildOneTree extends H2OCountedCompleter {
final SharedTree _st;
final int _k; // The tree
final int _nbins; // Numerical columns: Number of histogram bins
final DTree _tree;
final int _leafOffsets[/*nclass*/]; //Index of the first leaf node. Leaf indices range from _leafOffsets[k] to _tree._len-1
final DHistogram _hcs[/*nclass*/][][];
final Frame _fr2;
final boolean _build_tree_one_node;
final float[] _improvPerVar; // Squared Error improvement per variable per split
final DistributionFamily _family;
final int _respIdx; // index of the actual response column for the whole model (not the residuals!)
final int _weightIdx;
final int _predsIdx;
final int _workIdx;
final int _nidIdx;
final int _treatmentIdx;
final GlobalInteractionConstraints _ics;
public boolean _did_split;
public ScoreBuildOneTree(SharedTree st, int k, int nbins, DTree tree, int leafs[], DHistogram hcs[][][], Frame fr2, boolean build_tree_one_node, float[] improvPerVar, DistributionFamily family,
int respIdx, int weightIdx, int predsIdx, int workIdx, int nidIdx, int treatmentIdx) {
_st = st;
_k = k;
_nbins= nbins;
_tree = tree;
_leafOffsets = leafs;
_hcs = hcs;
_fr2 = fr2;
_build_tree_one_node = build_tree_one_node;
_improvPerVar = improvPerVar;
_family = family;
_respIdx = respIdx;
_weightIdx = weightIdx;
_predsIdx = predsIdx;
_workIdx = workIdx;
_nidIdx = nidIdx;
_treatmentIdx = treatmentIdx;
_ics = st._ics;
}
@Override public void compute2() {
// Fuse 2 conceptual passes into one:
// Pass 1: Score a prior DHistogram, and make new Node assignments
// to every row. This involves pulling out the current assigned Node,
// "scoring" the row against that Node's decision criteria, and assigning
// the row to a new child Node (and giving it an improved prediction).
// Pass 2: Build new summary DHistograms on the new child Nodes every row
// got assigned into. Collect counts, mean, variance, min, max per bin,
// per column.
int treeNum = ((SharedTreeModel.SharedTreeOutput) _st._model._output)._ntrees;
new ScoreBuildHistogram2(this, treeNum, _k, _st._ncols, _nbins, _tree, _leafOffsets[_k], _hcs[_k], _family,
_respIdx, _weightIdx, _predsIdx, _workIdx, _nidIdx, _treatmentIdx).dfork2(_fr2);
}
@Override public void onCompletion(CountedCompleter caller) {
ScoreBuildHistogram sbh = (ScoreBuildHistogram) caller;
final int leafOffset = _leafOffsets[_k];
int tmax = _tree.len(); // Number of total splits in tree K
for (int leaf = leafOffset; leaf < tmax; leaf++) { // Visit all the new splits (leaves)
DTree.UndecidedNode udn = _tree.undecided(leaf);
if (LOG.isTraceEnabled()) LOG.trace((_st._nclass==1?"Regression":("Class "+_st._response.domain()[_k]))+",\n Undecided node:"+udn);
// Replace the Undecided with the Split decision
DTree.DecidedNode dn = _st.makeDecided(udn, sbh._hcs[leaf - leafOffset], udn._cs);
if (LOG.isTraceEnabled()) LOG.trace(dn + "\n" + dn._split);
if (dn._split == null) udn.doNotSplit();
else {
_did_split = true;
DTree.Split s = dn._split; // Accumulate squared error improvements per variable
float improvement;
if(_st.isUplift()){
// gain after split should be higher, gain can be negative
improvement = (float) Math.abs(s.upliftGain() - s.preSplitUpliftGain());
} else {
improvement = (float) (s.pre_split_se() - s.se());
}
assert (improvement >= 0);
AtomicUtils.FloatArray.add(_improvPerVar, s.col(), improvement);
}
}
_leafOffsets[_k] = tmax; // Setup leafs for next tree level
int new_leafs = _tree.len() - tmax; //new_leafs can be 0 if no actual splits were made
_hcs[_k] = new DHistogram[new_leafs][/*ncol*/];
for (int nl = tmax; nl < _tree.len(); nl++)
_hcs[_k][nl - tmax] = _tree.undecided(nl)._hs;
// if (_did_split && new_leafs > 0) _tree._depth++;
if (_did_split) _tree._depth++; //
}
}
// --------------------------------------------------------------------------
// Convenience accessor for a complex chunk layout.
// Wish I could name the array elements nicer...
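// A sketch of the working-frame layout these accessors assume (derived from the index formulas
// below and from the Tree_/Work_/NIDs_/OUT_BAG_TREES columns added in Driver.computeImpl; the
// exact positions of response/weights/offset/treatment come from the model output):
//
//   [0 .. _ncols-1]                          predictor columns
//   [_ncols .. _ncols+specials]              response (if supervised) and special columns
//   idx_tree(c) = _ncols + 1 + specials + c  per-class "Tree_<class>" accumulated predictions
//   idx_work(c) = idx_tree(c) + _nclass      per-class "Work_<class>" targets/residuals to fit
//   idx_nids(c) = idx_work(c) + _nclass      per-class "NIDs_<class>" current node assignments
//   idx_oobt()  = idx_nids(0) + _nclass      "OUT_BAG_TREES" - trees participating in OOB scoring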
protected int idx_weight( ) { return _model._output.weightsIdx(); }
protected int idx_offset( ) { return _model._output.offsetIdx(); }
protected int idx_resp( ) { return _model._output.responseIdx(); }
protected int idx_tree(int c) { return _ncols+(isSupervised()?1:0)+c+numSpecialCols(); }
protected int idx_work(int c) { return idx_tree(c) + _nclass; }
protected int idx_nids(int c) { return idx_work(c) + _nclass; }
protected int idx_oobt() { return idx_nids(0) + _nclass; }
protected int idx_treatment() { return _model._output.treatmentIdx(); }
public Chunk chk_weight( Chunk chks[] ) { return chks[idx_weight()]; }
protected Chunk chk_offset( Chunk chks[] ) { return chks[idx_offset()]; }
public Chunk chk_resp(Chunk chks[]) { return chks[idx_resp()]; }
public Chunk chk_tree(Chunk chks[], int c) { return chks[idx_tree(c)]; }
protected Chunk chk_work( Chunk chks[], int c ) { return chks[idx_work(c)]; }
protected Chunk chk_nids( Chunk chks[], int c ) { return chks[idx_nids(c)]; }
protected Chunk chk_oobt(Chunk chks[]) { return chks[idx_oobt()]; }
protected final Vec vec_weight(Frame fr ) { return fr.vecs()[idx_weight()]; }
protected final Vec vec_offset(Frame fr ) { return fr.vecs()[idx_offset()]; }
protected final Vec vec_resp( Frame fr ) { return fr.vecs()[idx_resp() ]; }
protected final Vec vec_tree( Frame fr, int c) { return fr.vecs()[idx_tree(c)]; }
protected final Vec vec_work( Frame fr, int c) { return fr.vecs()[idx_work(c)]; }
protected final Vec vec_nids( Frame fr, int c) { return fr.vecs()[idx_nids(c)]; }
protected final Vec vec_oobt( Frame fr ) { return fr.vecs()[idx_oobt()]; }
protected static class FrameMap extends Iced<FrameMap> {
public int responseIndex;
public int offsetIndex;
public int weightIndex;
public int tree0Index;
public int work0Index;
public int nids0Index;
public int oobtIndex;
public int treatmentIndex;
public FrameMap() {} // For Externalizable interface
public FrameMap(SharedTree t) {
responseIndex = t.idx_resp();
offsetIndex = t.idx_offset();
weightIndex = t.idx_weight();
tree0Index = t.idx_tree(0);
work0Index = t.idx_work(0);
nids0Index = t.idx_nids(0);
oobtIndex = t.idx_oobt();
treatmentIndex = t.idx_treatment();
}
}
protected double[] data_row( Chunk chks[], int row, double[] data) {
assert data.length == _ncols;
for(int f=0; f<_ncols; f++) data[f] = chks[f].atd(row);
return data;
}
// Builder-specific decision node
protected DTree.DecidedNode makeDecided( DTree.UndecidedNode udn, DHistogram hs[], Constraints cs) {
return new DTree.DecidedNode(udn, hs, cs, _ics);
}
// Read the 'tree' columns, do model-specific math and put the results in the
// fs[] array, and return the sum. Dividing any fs[] element by the sum
// turns the results into a probability distribution.
abstract protected double score1( Chunk chks[], double weight, double offset, double fs[/*nclass*/], int row );
// Call builder specific score code and then correct probabilities
// if it is necessary.
void score2(Chunk chks[], double weight, double offset, double fs[/*nclass*/], int row ) {
double sum = score1(chks, weight, offset, fs, row);
if( isClassifier()) {
if( !Double.isInfinite(sum) && sum>0f && sum!=1f) ArrayUtils.div(fs, sum);
if (_parms._balance_classes)
GenModel.correctProbabilities(fs, _model._output._priorClassDist, _model._output._modelClassDist);
}
}
// --------------------------------------------------------------------------
transient long _timeLastScoreStart, _timeLastScoreEnd, _firstScore;
protected final boolean doScoringAndSaveModel(boolean finalScoring, boolean oob, boolean build_tree_one_node ) {
long now = System.currentTimeMillis();
if( _firstScore == 0 ) _firstScore=now;
long sinceLastScore = now-_timeLastScoreStart;
boolean updated = false;
// the update message is prefixed with the model description (main model / CV model x/y) - CVs run in parallel and the updates would otherwise be confusing
_job.update(0,_desc + ": Built " + _model._output._ntrees + " trees so far (out of " + _parms._ntrees + ").");
boolean timeToScore = (now-_firstScore < _parms._initial_score_interval) || // Score every time within the initial scoring interval (4 secs by default)
// Throttle scoring to keep the cost sane; limit it to a 10% duty cycle and at most once per score interval (4 secs by default)
(sinceLastScore > _parms._score_interval && // Limit scoring updates to once per score interval
(double)(_timeLastScoreEnd-_timeLastScoreStart)/sinceLastScore < 0.1); //10% duty cycle
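// Worked example (illustration only, using the 4-second defaults mentioned above): if the last
// scoring pass took 1s and started 15s ago, the duty cycle is 1/15 ~ 6.7% < 10% and the score
// interval has elapsed, so timeToScore is true; if it took 3s and only 10s have elapsed, the
// duty cycle is 30% and scoring is skipped until it drops below 10% again.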
boolean manualInterval = _parms._score_tree_interval > 0 && _model._output._ntrees % _parms._score_tree_interval == 0;
// Now model already contains tid-trees in serialized form
if( _parms._score_each_iteration || finalScoring || // always score under these circumstances
(timeToScore && _parms._score_tree_interval == 0) || // use time-based duty-cycle heuristic only if the user didn't specify _score_tree_interval
manualInterval) {
checkMemoryFootPrint();
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(SharedTree.this);
// If validation is specified we use a model for scoring, so we need to
// update it! First we save model with trees (i.e., make them available
// for scoring) and then update it with resulting error
_model.update(_job);
updated = true;
LOG.info("============================================================== ");
O out = _model._output;
_timeLastScoreStart = now;
final boolean printout = (_parms._score_each_iteration || finalScoring || sinceLastScore > _parms._score_interval);
// Score on training data
_job.update(0,"Scoring the model.");
_model._output._job = _job; // to allow to share the job for quantiles task
Score sc = new Score(this,_model._output._ntrees>0/*score 0-tree model from scratch*/,oob,response(),_model._output.getModelCategory(),true,_trainPredsCache, CFuncRef.from(_parms._custom_metric_func));
ModelMetrics mm = sc.scoreAndMakeModelMetrics(_model, _parms.train(), train(), build_tree_one_node);
out._training_metrics = mm;
if (oob) out._training_metrics._description = "Metrics reported on Out-Of-Bag training samples";
out._scored_train[out._ntrees].fillFrom(mm);
// Score again on validation data
if( _parms._valid != null) {
Frame v = new Frame(valid());
Score.ScoreIncInfo sii;
if (validWorkspace() != null) {
v = v.add(validWorkspace());
sii = new Score.ScoreIncInfo(_lastScoredTree, valid().numCols(), validWorkspace().numCols(), _nclass > 1 ? 1 : 0 /* skip class for classification problems */);
} else
sii = null;
Score scv = new Score(this, sii,false, vresponse(), _model._output.getModelCategory(), true, _validPredsCache, CFuncRef.from(_parms._custom_metric_func));
ModelMetrics mmv = scv.scoreAndMakeModelMetrics(_model, _parms.valid(), v, build_tree_one_node);
_lastScoredTree = _model._output._ntrees;
out._validation_metrics = mmv;
out._validation_metrics._description = "Validation metrics";
if (_model._output._ntrees>0 || scoreZeroTrees()) //don't score the 0-tree model - the error is too large
out._scored_valid[out._ntrees].fillFrom(mmv);
}
out._model_summary = createModelSummaryTable(out._ntrees, out._treeStats);
out._scoring_history = createScoringHistoryTable();
if (out._ntrees > 0 && providesVarImp()) { // Compute variable importances
out._varimp = new hex.VarImp(_improvPerVar, out._names);
out._variable_importances = hex.ModelMetrics.calcVarImp(out._varimp);
}
addCustomInfo(out);
if (printout) {
LOG.info(_model.toString());
}
_timeLastScoreEnd = System.currentTimeMillis();
}
// Double update - after either scoring or variable importance
if( updated ) _model.update(_job);
return updated;
}
@Override
public ModelBuilder getModelBuilder() {
return this;
}
@Override
public final Frame getCalibrationFrame() {
return _calib;
}
@Override
public void setCalibrationFrame(Frame f) {
_calib = f;
}
@Override
protected boolean canLearnFromNAs() {
return true;
}
protected void addCustomInfo(O out) {
// nothing by default - can be overridden in subclasses
}
protected TwoDimTable createScoringHistoryTable() {
O out = _model._output;
return createScoringHistoryTable(out, out._scored_train, out._scored_valid, _job,
out._training_time_ms, _parms._custom_metric_func != null,
_parms._custom_distribution_func != null);
}
public static TwoDimTable createScoringHistoryTable(Model.Output _output,
ScoreKeeper[] _scored_train,
ScoreKeeper[] _scored_valid,
Job job, long[] _training_time_ms,
boolean hasCustomMetric,
boolean hasCustomDistribution) {
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Timestamp"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Duration"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Number of Trees"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Training RMSE"); colTypes.add("double"); colFormat.add("%.5f");
if (_output.getModelCategory() == ModelCategory.Regression) {
colHeaders.add("Training MAE"); colTypes.add("double"); colFormat.add("%.5f");
if (!hasCustomDistribution) {
colHeaders.add("Training Deviance");
colTypes.add("double");
colFormat.add("%.5f");
}
}
if (_output.isClassifier()) {
colHeaders.add("Training LogLoss"); colTypes.add("double"); colFormat.add("%.5f");
}
if (_output.getModelCategory() == ModelCategory.Binomial) {
colHeaders.add("Training AUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training pr_auc"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training Lift"); colTypes.add("double"); colFormat.add("%.5f");
}
if(_output.isClassifier()){
colHeaders.add("Training Classification Error"); colTypes.add("double"); colFormat.add("%.5f");
}
if (_output.getModelCategory() == ModelCategory.Multinomial) {
colHeaders.add("Training AUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training pr_auc"); colTypes.add("double"); colFormat.add("%.5f");
}
if (hasCustomMetric) {
colHeaders.add("Training Custom"); colTypes.add("double"); colFormat.add("%.5f");
}
if (_output._validation_metrics != null) {
colHeaders.add("Validation RMSE"); colTypes.add("double"); colFormat.add("%.5f");
if (_output.getModelCategory() == ModelCategory.Regression) {
colHeaders.add("Validation MAE"); colTypes.add("double"); colFormat.add("%.5f");
if (!hasCustomDistribution) {
colHeaders.add("Validation Deviance");
colTypes.add("double");
colFormat.add("%.5f");
}
}
if (_output.isClassifier()) {
colHeaders.add("Validation LogLoss"); colTypes.add("double"); colFormat.add("%.5f");
}
if (_output.getModelCategory() == ModelCategory.Binomial) {
colHeaders.add("Validation AUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation pr_auc"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation Lift"); colTypes.add("double"); colFormat.add("%.5f");
}
if(_output.isClassifier()){
colHeaders.add("Validation Classification Error"); colTypes.add("double"); colFormat.add("%.5f");
}
if (_output.getModelCategory() == ModelCategory.Multinomial) {
colHeaders.add("Validation AUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation pr_auc"); colTypes.add("double"); colFormat.add("%.5f");
}
if (hasCustomMetric) {
colHeaders.add("Validation Custom"); colTypes.add("double"); colFormat.add("%.5f");
}
}
int rows = 0;
for( int i = 0; i<_scored_train.length; i++ ) {
if (i != 0 && _scored_train[i].isEmpty() && (_scored_valid == null || _scored_valid[i].isEmpty())) continue;
rows++;
}
TwoDimTable table = new TwoDimTable(
"Scoring History", null,
new String[rows],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
int row = 0;
for( int i = 0; i<_scored_train.length; i++ ) {
if (i != 0 && _scored_train[i].isEmpty() && (_scored_valid == null || _scored_valid[i].isEmpty())) continue;
int col = 0;
DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss");
table.set(row, col++, fmt.print(_training_time_ms[i]));
table.set(row, col++, PrettyPrint.msecs(_training_time_ms[i] - job.start_time(), true));
table.set(row, col++, i);
ScoreKeeper st = _scored_train[i];
table.set(row, col++, st._rmse);
if (_output.getModelCategory() == ModelCategory.Regression) {
table.set(row, col++, st._mae);
if (!hasCustomDistribution) {
table.set(row, col++, st._mean_residual_deviance);
}
}
if (_output.isClassifier()) table.set(row, col++, st._logloss);
if (_output.getModelCategory() == ModelCategory.Binomial) {
table.set(row, col++, st._AUC);
table.set(row, col++, st._pr_auc);
table.set(row, col++, st._lift);
}
if (_output.isClassifier()) table.set(row, col++, st._classError);
if (_output.getModelCategory() == ModelCategory.Multinomial) {
table.set(row, col++, st._AUC);
table.set(row, col++, st._pr_auc);
}
if (hasCustomMetric) table.set(row, col++, st._custom_metric);
if (_output._validation_metrics != null) {
st = _scored_valid[i];
table.set(row, col++, st._rmse);
if (_output.getModelCategory() == ModelCategory.Regression) {
table.set(row, col++, st._mae);
if (!hasCustomDistribution) {
table.set(row, col++, st._mean_residual_deviance);
}
}
if (_output.isClassifier()) table.set(row, col++, st._logloss);
if (_output.getModelCategory() == ModelCategory.Binomial) {
table.set(row, col++, st._AUC);
table.set(row, col++, st._pr_auc);
table.set(row, col++, st._lift);
}
if (_output.isClassifier()) table.set(row, col++, st._classError);
if (_output.getModelCategory() == ModelCategory.Multinomial) {
table.set(row, col++, st._AUC);
table.set(row, col++, st._pr_auc);
}
if (hasCustomMetric) table.set(row, col++, st._custom_metric);
}
row++;
}
return table;
}
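  // Sketch of the resulting table (values hypothetical): one row per scoring event, skipping rows whose
  // ScoreKeepers are empty (row 0, the zero-tree model, is always kept); e.g. for a gaussian regression model:
  //   Timestamp            Duration   Number of Trees  Training RMSE  Training MAE  Training Deviance
  //   2024-01-01 10:00:05  4.1 sec    10               0.91234        0.70012       0.83236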
public static TwoDimTable createModelSummaryTable(int ntrees, TreeStats treeStats) {
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Number of Trees"); colTypes.add("long"); colFormat.add("%d");
if (treeStats!=null) {
colHeaders.add("Number of Internal Trees"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Model Size in Bytes"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Min. Depth"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Max. Depth"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Mean Depth"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Min. Leaves"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Max. Leaves"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Mean Leaves"); colTypes.add("double"); colFormat.add("%.5f");
}
final int rows = 1;
TwoDimTable table = new TwoDimTable(
"Model Summary", null,
new String[rows],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
int row = 0;
int col = 0;
table.set(row, col++, ntrees);
if (treeStats!=null) {
table.set(row, col++, treeStats._num_trees); //internal number of trees (more for multinomial)
table.set(row, col++, treeStats._byte_size);
table.set(row, col++, treeStats._min_depth);
table.set(row, col++, treeStats._max_depth);
table.set(row, col++, treeStats._mean_depth);
table.set(row, col++, treeStats._min_leaves);
table.set(row, col++, treeStats._max_leaves);
table.set(row, col++, treeStats._mean_leaves);
}
return table;
}
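  // Sketch of the single-row summary (values hypothetical): for a 50-tree binomial model the row might read
  //   Number of Trees=50, Number of Internal Trees=50, Model Size in Bytes=2_500_000,
  //   Min./Max./Mean Depth=5/5/5.0, Min./Max./Mean Leaves=12/32/24.3
  // (for a K-class multinomial model "Number of Internal Trees" is roughly K times "Number of Trees").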
/**
* Compute the *actual* byte size of a tree model in the KV store
*/
private static class ComputeModelSize extends MRTask<ComputeModelSize> {
long _model_mem_size; //OUTPUT
final int trees_so_far; //INPUT
final public Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _treeKeys; //INPUT
public ComputeModelSize(int trees_so_far, Key<CompressedTree>[][] _treeKeys) {
this.trees_so_far = trees_so_far;
this._treeKeys = _treeKeys;
}
@Override protected void setupLocal() {
_model_mem_size = 0;
for (int i=0; i< trees_so_far; ++i) {
Key<CompressedTree>[] per_class = _treeKeys[i];
for (int j=0; j<per_class.length; ++j) {
if (per_class[j] == null) continue;
if (!per_class[j].home()) continue;
// only look at homed tree keys
_model_mem_size += DKV.get(per_class[j])._max;
}
}
}
@Override public void reduce(ComputeModelSize cms){
if (cms != null)
_model_mem_size += cms._model_mem_size;
}
}
@Override protected void checkMemoryFootPrint_impl() {
if (_model._output._ntrees == 0) return;
int trees_so_far = _model._output._ntrees; //existing trees
long model_mem_size = new ComputeModelSize(trees_so_far, _model._output._treeKeys).doAllNodes()._model_mem_size;
_model._output._treeStats._byte_size = model_mem_size;
double avg_tree_mem_size = (double)model_mem_size / trees_so_far;
LOG.debug("Average tree size (for all classes): " + PrettyPrint.bytes((long) avg_tree_mem_size));
// all the compressed trees are stored on the driver node
long max_mem = H2O.SELF._heartbeat.get_free_mem();
if (_parms._ntrees * avg_tree_mem_size > max_mem) {
String msg = "The tree model will not fit in the driver node's memory ("
+ PrettyPrint.bytes((long)avg_tree_mem_size)
+ " per tree x " + _parms._ntrees + " > "
+ PrettyPrint.bytes(max_mem)
+ ") - try decreasing ntrees and/or max_depth or increasing min_rows!";
error("_ntrees", msg);
}
}
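  // Illustrative arithmetic for the check above (numbers assumed): with an average compressed tree of
  // ~50 KB and _ntrees = 10_000, the projected driver-side footprint is 50 KB * 10_000 ≈ 500 MB;
  // if the driver node reports less free heap than that, the builder records the _ntrees error above.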
/**
   * Compute the initial value for a given distribution
* @return initial value
*/
protected double getInitialValue() {
return new InitialValue(_parms, _nclass).doAll(
_response,
hasWeightCol() ? _weights : _response.makeCon(1),
hasOffsetCol() ? _offset : _response.makeCon(0)
).initialValue();
}
// Helper MRTask to compute the initial value
private static class InitialValue extends MRTask<InitialValue> {
public InitialValue(Model.Parameters parms, int nclass) {
_nclass = nclass;
_dist = DistributionFactory.getDistribution(parms);
}
private Distribution _dist;
final private int _nclass;
private double _num;
private double _denom;
@Override
protected void setupLocal() {
super.setupLocal();
_dist.reset();
}
public double initialValue() {
if (_dist._family == DistributionFamily.multinomial || (_dist._family == DistributionFamily.custom && _nclass > 2))
return -0.5*DistributionFactory.getDistribution(DistributionFamily.bernoulli).link(_num/_denom);
else return _dist.link(_num / _denom);
}
@Override public void map(Chunk response, Chunk weight, Chunk offset) {
for (int i=0;i<response._len;++i) {
if (response.isNA(i)) continue;
double w = weight.atd(i);
if (w == 0) continue;
double y = response.atd(i);
double o = offset.atd(i);
_num += _dist.initFNum(w,o,y);
_denom += _dist.initFDenom(w,o,y);
}
}
@Override public void reduce(InitialValue mrt) {
_num += mrt._num;
_denom += mrt._denom;
}
}
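  // Worked example of the reduction above (data assumed): for bernoulli with zero offsets and 30 positive
  // responses out of 100 unweighted rows, _num/_denom reduces to p = 0.3 and the link gives
  // f0 = log(p/(1-p)) = log(0.3/0.7) ≈ -0.847, so a 0-tree model predicts p = 1/(1+exp(0.847)) ≈ 0.3.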
@Override protected boolean cv_canBuildMainModelInParallel() {
assert !_parms._parallel_main_model_building || _parms._max_runtime_secs == 0 :
"Parallel main model building shouldn't be be enabled when max_runtime_secs is specified.";
return _parms._parallel_main_model_building;
}
@Override public void cv_computeAndSetOptimalParameters(ModelBuilder<M, P, O>[] cvModelBuilders) {
// Extract stopping conditions from each CV model, and compute the best stopping answer
if (!cv_initStoppingParameters())
return; // No exciting changes to stopping conditions
_parms._ntrees = computeOptimalNTrees(cvModelBuilders);
warn("_ntrees", "Setting optimal _ntrees to " + _parms._ntrees + " for cross-validation main model based on early stopping of cross-validation models.");
warn("_stopping_rounds", "Disabling convergence-based early stopping for cross-validation main model.");
if (_parms._main_model_time_budget_factor == 0)
warn("_max_runtime_secs", "Disabling maximum allowed runtime for cross-validation main model.");
}
private int computeOptimalNTrees(ModelBuilder<M, P, O>[] cvModelBuilders) {
int totalNTrees = 0;
for(ModelBuilder<M, P, O> mb : cvModelBuilders) {
M model = DKV.getGet(mb.dest());
if (model == null)
continue;
totalNTrees += model._output._ntrees;
}
return (int)((double)totalNTrees / cvModelBuilders.length);
}
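  // Example (hypothetical CV runs): if five CV models stopped early at 180, 200, 190, 210 and 170 trees,
  // the main model is rebuilt with (180+200+190+210+170)/5 = 190 trees.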
@Override protected final boolean cv_updateOptimalParameters(ModelBuilder<M, P, O>[] cvModelBuilders) {
final int ntreesOld = _ntrees;
_ntrees = computeOptimalNTrees(cvModelBuilders);
_parms._ntrees = _ntrees;
return _ntrees > ntreesOld;
}
@Override protected final boolean cv_initStoppingParameters() {
if( _parms._stopping_rounds == 0 && _parms._max_runtime_secs == 0)
return false;
_parms._stopping_rounds = 0;
setMaxRuntimeSecsForMainModel();
_ntrees = 1;
_parms._ntrees = _ntrees;
return true;
}
SharedTreeDebugParams getDebugParams() {
if (_debugParms == null) {
_debugParms = new SharedTreeDebugParams();
}
return _debugParms;
}
/**
* Modify algorithm inner workings - only meant for development
*
* @param debugParms instance of SharedTreeDebugParams
*/
public void setDebugParams(SharedTreeDebugParams debugParms) {
_debugParms = debugParms;
}
public static class SharedTreeDebugParams extends Iced<SharedTreeDebugParams> {
static SharedTreeDebugParams DEFAULT = new SharedTreeDebugParams(false);
public boolean _reproducible_histos;
public boolean _keep_orig_histo_precision;
public String _histo_monitor_class;
public SharedTreeDebugParams(boolean initFromSysProps) {
if (initFromSysProps) {
_reproducible_histos = H2O.getSysBoolProperty("tree.SharedTree.reproducibleHistos", DEFAULT._reproducible_histos);
_keep_orig_histo_precision = H2O.getSysBoolProperty("tree.SharedTree.keepOrigHistoPrecision", DEFAULT._keep_orig_histo_precision);
_histo_monitor_class = H2O.getSysProperty("tree.SharedTree.histoMonitorClass", DEFAULT._histo_monitor_class);
}
}
public SharedTreeDebugParams() {
this(true);
}
boolean isDefault() {
return this.equals(DEFAULT);
}
@SuppressWarnings("unchecked")
public Consumer<DHistogram[][]> makeDHistogramMonitor(int treeNum, int k, int leaf) {
if (_histo_monitor_class == null) {
return null;
}
try {
Class<?> histoMonitorClass = Class.forName(_histo_monitor_class);
Constructor<?> histoMonitorConstructor = histoMonitorClass.getConstructor(int.class, int.class, int.class);
Object histoMonitor = histoMonitorConstructor.newInstance(treeNum, k, leaf);
return (Consumer<DHistogram[][]>) histoMonitor;
} catch (Exception e) {
throw new IllegalStateException("Failed initialize Histogram Monitor Class: " + _histo_monitor_class, e);
}
}
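    // Hypothetical monitor wired in via the system property tree.SharedTree.histoMonitorClass
    // (sketch only; the class name and its behavior are assumptions, the required (int,int,int)
    // constructor and Consumer interface follow from the reflection code above):
    //   public class MyHistoMonitor implements java.util.function.Consumer<DHistogram[][]> {
    //     private final int _tree, _k, _leaf;
    //     public MyHistoMonitor(int treeNum, int k, int leaf) { _tree = treeNum; _k = k; _leaf = leaf; }
    //     @Override public void accept(DHistogram[][] histos) { /* e.g. log bin counts per column */ }
    //   }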
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SharedTreeDebugParams that = (SharedTreeDebugParams) o;
if (_reproducible_histos != that._reproducible_histos) return false;
if (_keep_orig_histo_precision != that._keep_orig_histo_precision) return false;
return _histo_monitor_class != null ? _histo_monitor_class.equals(that._histo_monitor_class) : that._histo_monitor_class == null;
}
@Override
public int hashCode() {
int result = (_reproducible_histos ? 1 : 0);
result = 31 * result + (_keep_orig_histo_precision ? 1 : 0);
result = 31 * result + (_histo_monitor_class != null ? _histo_monitor_class.hashCode() : 0);
return result;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/SharedTreeModel.java
|
package hex.tree;
import hex.*;
import hex.genmodel.CategoricalEncoding;
import hex.genmodel.algos.tree.SharedTreeMojoModel;
import hex.genmodel.algos.tree.SharedTreeNode;
import hex.genmodel.algos.tree.SharedTreeSubgraph;
import hex.tree.uplift.UpliftDRFModel;
import hex.util.LinearAlgebraUtils;
import org.apache.log4j.Logger;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.parser.BufferedString;
import water.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import static hex.genmodel.GenModel.createAuxKey;
import static hex.genmodel.algos.tree.SharedTreeMojoModel.__INTERNAL_MAX_TREE_DEPTH;
import static hex.tree.SharedTree.createModelSummaryTable;
public abstract class SharedTreeModel<
M extends SharedTreeModel<M, P, O>,
P extends SharedTreeModel.SharedTreeParameters,
O extends SharedTreeModel.SharedTreeOutput
> extends Model<M, P, O> implements Model.LeafNodeAssignment, Model.GetMostImportantFeatures, Model.FeatureFrequencies, Model.UpdateAuxTreeWeights {
private static final Logger LOG = Logger.getLogger(SharedTreeModel.class);
@Override
public String[] getMostImportantFeatures(int n) {
if (_output == null) return null;
TwoDimTable vi = _output._variable_importances;
if (vi==null) return null;
n = Math.min(n, vi.getRowHeaders().length);
String[] res = new String[n];
System.arraycopy(vi.getRowHeaders(), 0, res, 0, n);
return res;
}
@Override public ToEigenVec getToEigenVec() { return LinearAlgebraUtils.toEigen; }
public abstract static class SharedTreeParameters extends Model.Parameters implements Model.GetNTrees, CalibrationHelper.ParamsWithCalibration {
public int _ntrees=50; // Number of trees in the final model. Grid Search, comma sep values:50,100,150,200
public int _max_depth = 5; // Maximum tree depth. Grid Search, comma sep values:5,7
public double _min_rows = 10; // Fewest allowed observations in a leaf (in R called 'nodesize'). Grid Search, comma sep values
public int _nbins = 20; // Numerical (real/int) cols: Build a histogram of this many bins, then split at the best point
public int _nbins_cats = 1024; // Categorical (factor) cols: Build a histogram of this many bins, then split at the best point
public double _min_split_improvement = 1e-5; // Minimum relative improvement in squared error reduction for a split to happen
public enum HistogramType {
AUTO, UniformAdaptive, Random, QuantilesGlobal, RoundRobin, UniformRobust;
public static HistogramType[] ROUND_ROBIN_CANDIDATES = {
        AUTO, // Note: the inclusion of AUTO means UniformAdaptive effectively has a higher chance of being used
UniformAdaptive, Random, QuantilesGlobal
};
}
public HistogramType _histogram_type = HistogramType.AUTO; // What type of histogram to use for finding optimal split points
public double _r2_stopping = Double.MAX_VALUE; // Stop when the r^2 metric equals or exceeds this value
public int _nbins_top_level = 1<<10; //hardcoded maximum top-level number of bins for real-valued columns
public boolean _build_tree_one_node = false;
public int _score_tree_interval = 0; // score every so many trees (no matter what)
    public int _initial_score_interval = 4000; // period (in ms) during which every scoring request is honored; replaces the previously hard-coded 4000 ms
    public int _score_interval = 4000; // minimum interval (in ms) between subsequent scorings; replaces the previously hard-coded 4000 ms
public double _sample_rate = 0.632; //fraction of rows to sample for each tree
public double[] _sample_rate_per_class; //fraction of rows to sample for each tree, per class
public boolean useRowSampling() {
return _sample_rate < 1 || _sample_rate_per_class != null;
}
// Platt scaling (by default)
public boolean _calibrate_model;
public Key<Frame> _calibration_frame;
public CalibrationHelper.CalibrationMethod _calibration_method = CalibrationHelper.CalibrationMethod.AUTO;
@Override public long progressUnits() { return _ntrees + (_histogram_type==HistogramType.QuantilesGlobal || _histogram_type==HistogramType.RoundRobin ? 1 : 0); }
public double _col_sample_rate_change_per_level = 1.0f; //relative change of the column sampling rate for every level
public double _col_sample_rate_per_tree = 1.0f; //fraction of columns to sample for each tree
public boolean useColSampling() {
return _col_sample_rate_change_per_level != 1.0f || _col_sample_rate_per_tree != 1.0f;
}
public boolean isStochastic() {
return useRowSampling() || useColSampling();
}
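    // Example with the defaults above: _sample_rate = 0.632 makes useRowSampling() true while both column
    // sampling rates stay at 1.0, so isStochastic() is true purely because of row subsampling.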
public boolean _parallel_main_model_building = false;
public boolean _use_best_cv_iteration = true; // when early stopping is enabled, cv models will pick the iteration that produced the best score instead of the stopping iteration
public String _in_training_checkpoints_dir;
public int _in_training_checkpoints_tree_interval = 1; // save model checkpoint every so many trees (no matter what)
/** Fields which can NOT be modified if checkpoint is specified.
* FIXME: should be defined in Schema API annotation
*/
static final String[] CHECKPOINT_NON_MODIFIABLE_FIELDS = { "_build_tree_one_node", "_sample_rate", "_max_depth", "_min_rows", "_nbins", "_nbins_cats", "_nbins_top_level"};
@Override
public int getNTrees() {
return _ntrees;
}
@Override
public Frame getCalibrationFrame() {
return _calibration_frame == null ? null : _calibration_frame.get();
}
@Override
public boolean calibrateModel() {
return _calibrate_model;
}
@Override
public CalibrationHelper.CalibrationMethod getCalibrationMethod() {
return _calibration_method;
}
@Override
public void setCalibrationMethod(CalibrationHelper.CalibrationMethod calibrationMethod) {
_calibration_method = calibrationMethod;
}
@Override
public Parameters getParams() {
return this;
}
/**
* Do we need to enable strictly deterministic way of building histograms?
*
* Used eg. when monotonicity constraints in GBM are enabled, by default disabled
*
* @return true if histograms should be built in deterministic way
*/
public boolean forceStrictlyReproducibleHistograms() {
return false;
}
}
@Override public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
switch(_output.getModelCategory()) {
case Binomial: return new ModelMetricsBinomial.MetricBuilderBinomial(domain);
case Multinomial: return new ModelMetricsMultinomial.MetricBuilderMultinomial(_output.nclasses(),domain, _parms._auc_type);
case Regression: return new ModelMetricsRegression.MetricBuilderRegression();
case BinomialUplift: return new ModelMetricsBinomialUplift.MetricBuilderBinomialUplift(domain, ((UpliftDRFModel.UpliftDRFOutput)_output)._defaultAuucThresholds);
default: throw H2O.unimpl();
}
}
public abstract static class SharedTreeOutput extends Model.Output implements Model.GetNTrees, CalibrationHelper.OutputWithCalibration {
/** InitF value (for zero trees)
* f0 = mean(yi) for gaussian
     *  f0 = log(p/(1-p)) for bernoulli, where p = mean(yi)
*
* For GBM bernoulli, the initial prediction for 0 trees is
* p = 1/(1+exp(-f0))
*
* From this, the mse for 0 trees (null model) can be computed as follows:
* mean((yi-p)^2)
* */
public double _init_f;
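    // Worked example of the formulas above (class balance assumed): with 40% positives, the bernoulli
    // init is f0 = log(0.4/0.6) ≈ -0.405, the 0-tree prediction is p = 1/(1+exp(0.405)) = 0.4, and the
    // null-model MSE is mean((yi-p)^2) = 0.4*0.6^2 + 0.6*0.4^2 = 0.24.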
/** Number of trees actually in the model (as opposed to requested) */
public int _ntrees;
    /** More in-depth tree stats */
public final TreeStats _treeStats;
/** Trees get big, so store each one separately in the DKV. */
public Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _treeKeys;
public Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _treeKeysAux;
public ScoreKeeper[/*ntrees+1*/] _scored_train;
public ScoreKeeper[/*ntrees+1*/] _scored_valid;
public ScoreKeeper[] scoreKeepers() {
ArrayList<ScoreKeeper> skl = new ArrayList<>();
ScoreKeeper[] ska = _validation_metrics != null ? _scored_valid : _scored_train;
for( ScoreKeeper sk : ska )
if (!sk.isEmpty())
skl.add(sk);
return skl.toArray(new ScoreKeeper[skl.size()]);
}
/** Training time */
public long[/*ntrees+1*/] _training_time_ms = {System.currentTimeMillis()};
/**
* Variable importances computed during training
*/
public TwoDimTable _variable_importances;
public VarImp _varimp;
@Override
public TwoDimTable getVariableImportances() {
return _variable_importances;
}
public Model<?, ?, ?> _calib_model;
public SharedTreeOutput( SharedTree b) {
super(b);
_ntrees = 0; // No trees yet
_treeKeys = new Key[_ntrees][]; // No tree keys yet
_treeKeysAux = new Key[_ntrees][]; // No tree keys yet
_treeStats = new TreeStats();
_scored_train = new ScoreKeeper[]{new ScoreKeeper(Double.NaN)};
_scored_valid = new ScoreKeeper[]{new ScoreKeeper(Double.NaN)};
_modelClassDist = _priorClassDist;
}
@Override
public TwoDimTable createInputFramesInformationTable(ModelBuilder modelBuilder) {
SharedTreeParameters params = (SharedTreeParameters) modelBuilder._parms;
TwoDimTable table = super.createInputFramesInformationTable(modelBuilder);
table.set(2, 0, "calibration_frame");
table.set(2, 1, params.getCalibrationFrame() != null ? params.getCalibrationFrame().checksum() : -1);
table.set(2, 2, params.getCalibrationFrame() != null ? Arrays.toString(params.getCalibrationFrame().anyVec().espc()) : -1);
return table;
}
@Override
public int getInformationTableNumRows() {
return super.getInformationTableNumRows() + 1;// +1 row for calibration frame
}
// Append next set of K trees
public void addKTrees( DTree[] trees) {
// DEBUG: Print the generated K trees
//SharedTree.printGenerateTrees(trees);
assert nclasses()==trees.length;
// Compress trees and record tree-keys
_treeKeys = Arrays.copyOf(_treeKeys ,_ntrees+1);
_treeKeysAux = Arrays.copyOf(_treeKeysAux ,_ntrees+1);
Key[] keys = _treeKeys[_ntrees] = new Key[trees.length];
Key[] keysAux = _treeKeysAux[_ntrees] = new Key[trees.length];
Futures fs = new Futures();
for( int i=0; i<nclasses(); i++ ) if( trees[i] != null ) {
CompressedTree ct = trees[i].compress(_ntrees,i,_domains);
DKV.put(keys[i]=ct._key,ct,fs);
_treeStats.updateBy(trees[i]); // Update tree shape stats
CompressedTree ctAux = new CompressedTree(trees[i]._abAux.buf(),-1,-1,-1);
keysAux[i] = ctAux._key = Key.make(createAuxKey(ct._key.toString()));
DKV.put(ctAux,fs);
}
_ntrees++;
// 1-based for errors; _scored_train[0] is for zero trees, not 1 tree
_scored_train = ArrayUtils.copyAndFillOf(_scored_train, _ntrees+1, new ScoreKeeper());
_scored_valid = _scored_valid != null ? ArrayUtils.copyAndFillOf(_scored_valid, _ntrees+1, new ScoreKeeper()) : null;
_training_time_ms = ArrayUtils.copyAndFillOf(_training_time_ms, _ntrees+1, System.currentTimeMillis());
fs.blockForPending();
}
public void trimTo(final int ntrees) {
Futures fs = new Futures();
for (int i = ntrees; i < _treeKeys.length; i++) {
for (int tc = 0; tc < _treeKeys[i].length; tc++) {
if (_treeKeys[i][tc] == null)
continue;
DKV.remove(_treeKeys[i][tc], fs);
DKV.remove(_treeKeysAux[i][tc], fs);
}
}
_ntrees = ntrees;
_treeKeys = Arrays.copyOf(_treeKeys ,_ntrees);
_treeKeysAux = Arrays.copyOf(_treeKeysAux ,_ntrees);
// 1-based for errors; _scored_train[0] is for zero trees, not 1 tree
_scored_train = Arrays.copyOf(_scored_train, _ntrees + 1);
_scored_valid = _scored_valid != null ? Arrays.copyOf(_scored_valid, _ntrees + 1) : null;
_training_time_ms = Arrays.copyOf(_training_time_ms, _ntrees + 1);
_model_summary = createModelSummaryTable(_ntrees, _treeStats);
fs.blockForPending();
}
@Override
public int getNTrees() {
return _ntrees;
}
@Override
public Model<?, ?, ?> calibrationModel() {
return _calib_model;
}
@Override
public void setCalibrationModel(Model<?, ?, ?> model) {
_calib_model = model;
}
public CompressedTree ctree(int tnum, int knum ) { return _treeKeys[tnum][knum].get(); }
public String toStringTree ( int tnum, int knum ) { return ctree(tnum,knum).toString(this); }
}
public SharedTreeModel(Key<M> selfKey, P parms, O output) {
super(selfKey, parms, output);
}
protected String[] makeAllTreeColumnNames() {
int classTrees = 0;
for (int i = 0; i < _output._treeKeys[0].length; ++i) {
if (_output._treeKeys[0][i] != null) classTrees++;
}
final int outputcols = _output._treeKeys.length * classTrees;
final String[] names = new String[outputcols];
int col = 0;
for (int tidx = 0; tidx < _output._treeKeys.length; tidx++) {
Key[] keys = _output._treeKeys[tidx];
for (int c = 0; c < keys.length; c++) {
if (keys[c] != null) {
names[col++] = "T" + (tidx + 1) + (keys.length == 1 ? "" : (".C" + (c + 1)));
}
}
}
return names;
}
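  // Example of the generated names (shapes assumed): a model that stores a single tree per iteration
  // yields "T1", "T2", ...; a 3-class model with per-class trees yields "T1.C1", "T1.C2", "T1.C3",
  // "T2.C1", ...; one column is produced per (tree, class) pair with a non-null tree key.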
@Override
public Frame scoreLeafNodeAssignment(Frame frame, LeafNodeAssignmentType type, Key<Frame> destination_key) {
Frame adaptFrm = new Frame(frame);
adaptTestForTrain(adaptFrm, true, false);
final String[] names = makeAllTreeColumnNames();
AssignLeafNodeTaskBase task = AssignLeafNodeTaskBase.make(_output, type);
return task.execute(adaptFrm, names, destination_key);
}
@Override
public UpdateAuxTreeWeightsReport updateAuxTreeWeights(Frame frame, String weightsColumn) {
if (weightsColumn == null) {
throw new IllegalArgumentException("Weights column name is not defined");
}
Frame adaptFrm = new Frame(frame);
Vec weights = adaptFrm.remove(weightsColumn);
if (weights == null) {
throw new IllegalArgumentException("Input frame doesn't contain weights column `" + weightsColumn + "`");
}
adaptTestForTrain(adaptFrm, true, false);
// keep features only and re-introduce weights column at the end of the frame
Frame featureFrm = new Frame(_output.features(), frame.vecs(_output.features()));
featureFrm.add(weightsColumn, weights);
UpdateAuxTreeWeightsTask t = new UpdateAuxTreeWeightsTask(_output).doAll(featureFrm);
UpdateAuxTreeWeights.UpdateAuxTreeWeightsReport report = new UpdateAuxTreeWeights.UpdateAuxTreeWeightsReport();
report._warn_trees = t._warnTrees;
report._warn_classes = t._warnClasses;
return report;
}
public static class BufStringDecisionPathTracker implements SharedTreeMojoModel.DecisionPathTracker<BufferedString> {
private final byte[] _buf = new byte[__INTERNAL_MAX_TREE_DEPTH];
private final BufferedString _bs = new BufferedString(_buf, 0, 0);
private int _pos = 0;
@Override
public boolean go(int depth, boolean right) {
_buf[depth] = right ? (byte) 'R' : (byte) 'L';
if (right) _pos = depth;
return true;
}
@Override
public BufferedString terminate() {
_bs.setLen(_pos);
_pos = 0;
return _bs;
}
@Override
public BufferedString invalidPath() {
return null;
}
}
private static abstract class AssignLeafNodeTaskBase extends MRTask<AssignLeafNodeTaskBase> {
final Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _treeKeys;
final String[][] _domains;
AssignLeafNodeTaskBase(SharedTreeOutput output) {
_treeKeys = output._treeKeys;
_domains = output._domains;
}
protected abstract void initMap();
protected abstract void assignNode(final int tidx, final int cls, final CompressedTree tree, final double[] input,
final NewChunk out);
@Override
public void map(Chunk[] chks, NewChunk[] ncs) {
double[] input = new double[chks.length];
initMap();
for (int row = 0; row < chks[0]._len; row++) {
for (int i = 0; i < chks.length; i++)
input[i] = chks[i].atd(row);
int col = 0;
for (int tidx = 0; tidx < _treeKeys.length; tidx++) {
Key[] keys = _treeKeys[tidx];
for (int cls = 0; cls < keys.length; cls++) {
Key key = keys[cls];
if (key != null) {
CompressedTree tree = DKV.get(key).get();
assignNode(tidx, cls, tree, input, ncs[col++]);
}
}
}
assert (col == ncs.length);
}
}
protected abstract Frame execute(Frame adaptFrm, String[] names, Key<Frame> destKey);
private static AssignLeafNodeTaskBase make(SharedTreeOutput modelOutput, LeafNodeAssignmentType type) {
switch (type) {
case Path:
return new AssignTreePathTask(modelOutput);
case Node_ID:
return new AssignLeafNodeIdTask(modelOutput);
default:
throw new UnsupportedOperationException("Unknown leaf node assignment type: " + type);
}
}
}
private static class AssignTreePathTask extends AssignLeafNodeTaskBase {
private transient BufStringDecisionPathTracker _tr;
private AssignTreePathTask(SharedTreeOutput output) {
super(output);
}
@Override
protected void initMap() {
_tr = new BufStringDecisionPathTracker();
}
@Override
protected void assignNode(int tidx, int cls, CompressedTree tree, double[] input,
NewChunk nc) {
BufferedString pred = tree.getDecisionPath(input, _domains, _tr);
nc.addStr(pred);
}
@Override
protected Frame execute(Frame adaptFrm, String[] names, Key<Frame> destKey) {
Frame res = doAll(names.length, Vec.T_STR, adaptFrm).outputFrame(destKey, names, null);
// convert to categorical
Vec vv;
Vec[] nvecs = new Vec[res.vecs().length];
boolean hasInvalidPaths = false;
for(int c=0;c<res.vecs().length;++c) {
vv = res.vec(c);
try {
hasInvalidPaths = hasInvalidPaths || vv.naCnt() > 0;
nvecs[c] = vv.toCategoricalVec();
} catch (Exception e) {
VecUtils.deleteVecs(nvecs, c);
throw e;
}
}
res.delete();
res = new Frame(destKey, names, nvecs);
if (destKey != null) {
DKV.put(res);
}
if (hasInvalidPaths) {
LOG.warn("Some of the leaf node assignments were skipped (NA), " +
"only tree-paths up to length 64 are supported.");
}
return res;
}
}
private static class AssignLeafNodeIdTask extends AssignLeafNodeTaskBase {
final Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _auxTreeKeys;
private AssignLeafNodeIdTask(SharedTreeOutput output) {
super(output);
_auxTreeKeys = output._treeKeysAux;
}
@Override
protected void initMap() {
}
@Override
protected void assignNode(int tidx, int cls, CompressedTree tree, double[] input, NewChunk nc) {
CompressedTree auxTree = _auxTreeKeys[tidx][cls].get();
assert auxTree != null;
final double d = SharedTreeMojoModel.scoreTree(tree._bits, input, true, _domains);
final int nodeId = SharedTreeMojoModel.getLeafNodeId(d, auxTree._bits);
nc.addNum(nodeId, 0);
}
@Override
protected Frame execute(Frame adaptFrm, String[] names, Key<Frame> destKey) {
Frame result = doAll(names.length, Vec.T_NUM, adaptFrm).outputFrame(destKey, names, null);
if (result.vec(0).min() < 0) {
LOG.warn("Some of the observations were not assigned a Leaf Node ID (-1), " +
"only tree-paths up to length 64 are supported.");
}
return result;
}
}
private static class UpdateAuxTreeWeightsTask extends MRTask<UpdateAuxTreeWeightsTask> {
// IN
private final Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _treeKeys;
private final Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _auxTreeKeys;
private final String[][] _domains;
// WORKING
private transient int[/*treeId*/][/*classId*/] _maxNodeIds;
// OUT
private double[/*treeId*/][/*classId*/][/*leafNodeId*/] _leafNodeWeights;
private int[] _warnTrees;
private int[] _warnClasses;
private UpdateAuxTreeWeightsTask(SharedTreeOutput output) {
_treeKeys = output._treeKeys;
_auxTreeKeys = output._treeKeysAux;
_domains = output._domains;
}
@Override
protected void setupLocal() {
_maxNodeIds = new int[_auxTreeKeys.length][];
for (int treeId = 0; treeId < _auxTreeKeys.length; treeId++) {
Key<CompressedTree>[] classAuxTreeKeys = _auxTreeKeys[treeId];
_maxNodeIds[treeId] = new int[classAuxTreeKeys.length];
for (int classId = 0; classId < classAuxTreeKeys.length; classId++) {
if (classAuxTreeKeys[classId] == null) {
_maxNodeIds[treeId][classId] = -1;
continue;
}
CompressedTree tree = classAuxTreeKeys[classId].get();
assert tree != null;
_maxNodeIds[treeId][classId] = tree.findMaxNodeId();
}
}
}
protected void initMap() {
_leafNodeWeights = new double[_maxNodeIds.length][][];
for (int treeId = 0; treeId < _maxNodeIds.length; treeId++) {
int[] classMaxNodeIds = _maxNodeIds[treeId];
_leafNodeWeights[treeId] = new double[classMaxNodeIds.length][];
for (int classId = 0; classId < classMaxNodeIds.length; classId++) {
if (classMaxNodeIds[classId] < 0)
continue;
_leafNodeWeights[treeId][classId] = new double[classMaxNodeIds[classId] + 1];
}
}
}
@Override
public void map(Chunk[] chks) {
double[] input = new double[chks.length - 1];
initMap();
for (int row = 0; row < chks[0]._len; row++) {
double weight = chks[input.length].atd(row);
if (weight == 0 || Double.isNaN(weight))
continue;
for (int i = 0; i < input.length; i++)
input[i] = chks[i].atd(row);
for (int tidx = 0; tidx < _treeKeys.length; tidx++) {
Key<CompressedTree>[] keys = _treeKeys[tidx];
for (int cls = 0; cls < keys.length; cls++) {
Key<CompressedTree> key = keys[cls];
if (key != null) {
CompressedTree tree = DKV.get(key).get();
CompressedTree auxTree = _auxTreeKeys[tidx][cls].get();
assert auxTree != null;
final double d = SharedTreeMojoModel.scoreTree(tree._bits, input, true, _domains);
final int nodeId = SharedTreeMojoModel.getLeafNodeId(d, auxTree._bits);
_leafNodeWeights[tidx][cls][nodeId] += weight;
}
}
}
}
}
@Override
public void reduce(UpdateAuxTreeWeightsTask mrt) {
ArrayUtils.add(_leafNodeWeights, mrt._leafNodeWeights);
}
@Override
protected void postGlobal() {
_warnTrees = new int[0];
_warnClasses = new int[0];
Futures fs = new Futures();
for (int treeId = 0; treeId < _leafNodeWeights.length; treeId++) {
double[][] classWeights = _leafNodeWeights[treeId];
for (int classId = 0; classId < classWeights.length; classId++) {
double[] nodeWeights = classWeights[classId];
if (nodeWeights == null)
continue;
CompressedTree auxTree = _auxTreeKeys[treeId][classId].get();
assert auxTree != null;
CompressedTree updatedTree = auxTree.updateLeafNodeWeights(nodeWeights);
assert auxTree._key.equals(updatedTree._key);
DKV.put(updatedTree, fs);
if (updatedTree.hasZeroWeight()) {
_warnTrees = ArrayUtils.append(_warnTrees, treeId);
_warnClasses = ArrayUtils.append(_warnClasses, classId);
}
}
}
fs.blockForPending();
assert _warnTrees.length == _warnClasses.length;
}
}
@Override
public Frame scoreFeatureFrequencies(Frame frame, Key<Frame> destination_key) {
Frame adaptFrm = new Frame(frame);
adaptTestForTrain(adaptFrm, true, false);
// remove non-feature columns
adaptFrm.remove(_parms._response_column);
adaptFrm.remove(_parms._fold_column);
adaptFrm.remove(_parms._weights_column);
adaptFrm.remove(_parms._offset_column);
if(_parms._treatment_column != null){
adaptFrm.remove(_parms._treatment_column);
}
assert adaptFrm.numCols() == _output.nfeatures();
return new ScoreFeatureFrequenciesTask(_output)
.doAll(adaptFrm.numCols(), Vec.T_NUM, adaptFrm)
.outputFrame(destination_key, adaptFrm.names(), null);
}
private static class ComputeSharedTreesFun extends MrFun<ComputeSharedTreesFun> {
final Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _treeKeys;
final Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _auxTreeKeys;
final String[] _names;
final String[][] _domains;
transient SharedTreeSubgraph[/*_ntrees*/][/*_nclass*/] _trees;
ComputeSharedTreesFun(SharedTreeSubgraph[][] trees,
Key<CompressedTree>[][] treeKeys, Key<CompressedTree>[][] auxTreeKeys,
String[] names, String[][] domains) {
_trees = trees;
_treeKeys = treeKeys;
_auxTreeKeys = auxTreeKeys;
_names = names;
_domains = domains;
}
@Override
protected void map(int t) {
for (int c = 0; c < _treeKeys[t].length; c++) {
if (_treeKeys[t][c] == null)
continue;
_trees[t][c] = SharedTreeMojoModel.computeTreeGraph(0, "T",
_treeKeys[t][c].get()._bits, _auxTreeKeys[t][c].get()._bits, _names, _domains);
}
}
}
private static class ScoreFeatureFrequenciesTask extends MRTask<ScoreFeatureFrequenciesTask> {
final Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _treeKeys;
final Key<CompressedTree>[/*_ntrees*/][/*_nclass*/] _auxTreeKeys;
final String _domains[][];
transient SharedTreeSubgraph[/*_ntrees*/][/*_nclass*/] _trees;
ScoreFeatureFrequenciesTask(SharedTreeOutput output) {
_treeKeys = output._treeKeys;
_auxTreeKeys = output._treeKeysAux;
_domains = output._domains;
}
@Override
protected void setupLocal() {
_trees = new SharedTreeSubgraph[_treeKeys.length][];
for (int t = 0; t < _treeKeys.length; t++) {
_trees[t] = new SharedTreeSubgraph[_treeKeys[t].length];
}
MrFun<?> getSharedTreesFun = new ComputeSharedTreesFun(_trees, _treeKeys, _auxTreeKeys, _fr.names(), _domains);
H2O.submitTask(new LocalMR(getSharedTreesFun, _trees.length)).join();
}
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
double[] input = new double[cs.length];
int[] output = new int[ncs.length];
for (int r = 0; r < cs[0]._len; r++) {
for (int i = 0; i < cs.length; i++)
input[i] = cs[i].atd(r);
Arrays.fill(output, 0);
for (int t = 0; t < _treeKeys.length; t++) {
for (int c = 0; c < _treeKeys[t].length; c++) {
if (_treeKeys[t][c] == null)
continue;
double d = SharedTreeMojoModel.scoreTree(_treeKeys[t][c].get()._bits, input, true, _domains);
String decisionPath = SharedTreeMojoModel.getDecisionPath(d);
SharedTreeNode n = _trees[t][c].walkNodes(decisionPath);
updateStats(n, output);
}
}
for (int i = 0; i < ncs.length; i++) {
ncs[i].addNum(output[i]);
}
}
}
private void updateStats(final SharedTreeNode leaf, int[] stats) {
SharedTreeNode n = leaf.getParent();
while (n != null) {
stats[n.getColId()]++;
n = n.getParent();
}
}
}
@Override
protected Frame postProcessPredictions(Frame adaptedFrame, Frame predictFr, Job j) {
return CalibrationHelper.postProcessPredictions(predictFr, j, _output);
}
protected double[] score0Incremental(Score.ScoreIncInfo sii, Chunk chks[], double offset, int row_in_chunk, double[] tmp, double[] preds) {
return score0(chks, offset, row_in_chunk, tmp, preds); // by default delegate to non-incremental implementation
}
@Override protected double[] score0(double[] data, double[] preds, double offset) {
return score0(data, preds, offset, _output._treeKeys.length);
}
@Override protected double[] score0(double[/*ncols*/] data, double[/*nclasses+1*/] preds) {
return score0(data, preds, 0.0);
}
protected double[] score0(double[] data, double[] preds, double offset, int ntrees) {
Arrays.fill(preds,0);
return score0(data, preds, offset, 0, ntrees);
}
protected double[] score0(double[] data, double[] preds, double offset, int startTree, int ntrees) {
// Prefetch trees into the local cache if it is necessary
// Invoke scoring
for( int tidx=startTree; tidx<ntrees; tidx++ )
score0(data, preds, tidx);
return preds;
}
// Score per line per tree
private void score0(double[] data, double[] preds, int treeIdx) {
Key[] keys = _output._treeKeys[treeIdx];
for( int c=0; c<keys.length; c++ ) {
if (keys[c] != null) {
double pred = DKV.get(keys[c]).<CompressedTree>get().score(data,_output._domains);
assert (!Double.isInfinite(pred));
preds[keys.length == 1 ? 0 : c + 1] += pred;
}
}
}
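  // Note on the accumulation above: for a K-class model each class-c tree adds its score to preds[c+1]
  // (preds[0] is later filled with the predicted label for classifiers), while a single-tree-per-iteration
  // model (keys.length == 1) accumulates directly into preds[0].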
/** Performs deep clone of given model. */
protected M deepClone(Key<M> result) {
M newModel = IcedUtils.deepCopy(self());
newModel._key = result;
// Do not clone model metrics
newModel._output.clearModelMetrics(false);
newModel._output._training_metrics = null;
newModel._output._validation_metrics = null;
// Clone trees
Key[][] treeKeys = newModel._output._treeKeys;
for (int i = 0; i < treeKeys.length; i++) {
for (int j = 0; j < treeKeys[i].length; j++) {
if (treeKeys[i][j] == null) continue;
CompressedTree ct = DKV.get(treeKeys[i][j]).get();
CompressedTree newCt = IcedUtils.deepCopy(ct);
newCt._key = CompressedTree.makeTreeKey(i, j);
DKV.put(treeKeys[i][j] = newCt._key,newCt);
}
}
// Clone Aux info
Key[][] treeKeysAux = newModel._output._treeKeysAux;
if (treeKeysAux!=null) {
for (int i = 0; i < treeKeysAux.length; i++) {
for (int j = 0; j < treeKeysAux[i].length; j++) {
if (treeKeysAux[i][j] == null) continue;
CompressedTree ct = DKV.get(treeKeysAux[i][j]).get();
CompressedTree newCt = IcedUtils.deepCopy(ct);
newCt._key = Key.make(createAuxKey(treeKeys[i][j].toString()));
DKV.put(treeKeysAux[i][j] = newCt._key,newCt);
}
}
}
return newModel;
}
@Override protected Futures remove_impl(Futures fs, boolean cascade) {
for (Key[] ks : _output._treeKeys)
for (Key k : ks)
Keyed.remove(k, fs, true);
for (Key[] ks : _output._treeKeysAux)
for (Key k : ks)
Keyed.remove(k, fs, true);
if (_output._calib_model != null)
_output._calib_model.remove(fs);
return super.remove_impl(fs, cascade);
}
/** Write out K/V pairs */
@Override protected AutoBuffer writeAll_impl(AutoBuffer ab) {
for (Key<CompressedTree>[] ks : _output._treeKeys)
for (Key<CompressedTree> k : ks)
ab.putKey(k);
for (Key<CompressedTree>[] ks : _output._treeKeysAux)
for (Key<CompressedTree> k : ks)
ab.putKey(k);
return super.writeAll_impl(ab);
}
@Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
for (Key<CompressedTree>[] ks : _output._treeKeys)
for (Key<CompressedTree> k : ks)
ab.getKey(k,fs);
for (Key<CompressedTree>[] ks : _output._treeKeysAux)
for (Key<CompressedTree> k : ks)
ab.getKey(k,fs);
return super.readAll_impl(ab,fs);
}
@SuppressWarnings("unchecked") // `M` is really the type of `this`
private M self() { return (M)this; }
/**
* Converts a given tree of the ensemble to a user-understandable representation.
* @param tidx tree index
* @param cls tree class
* @return instance of SharedTreeSubgraph
*/
public SharedTreeSubgraph getSharedTreeSubgraph(final int tidx, final int cls) {
if (tidx < 0 || tidx >= _output._ntrees) {
throw new IllegalArgumentException("Invalid tree index: " + tidx +
". Tree index must be in range [0, " + (_output._ntrees -1) + "].");
}
Key<CompressedTree> treeKey = _output._treeKeysAux[tidx][cls];
if (treeKey == null)
return null;
final CompressedTree auxCompressedTree = treeKey.get();
return _output._treeKeys[tidx][cls].get().toSharedTreeSubgraph(auxCompressedTree, _output._names, _output._domains);
}
@Override
public boolean isFeatureUsedInPredict(String featureName) {
if (featureName.equals(_output.responseName())) return false;
int featureIdx = ArrayUtils.find(_output._varimp._names, featureName);
return featureIdx != -1 && (double) _output._varimp._varimp[featureIdx] != 0d;
}
//--------------------------------------------------------------------------------------------------------------------
// Serialization into a POJO
//--------------------------------------------------------------------------------------------------------------------
public boolean binomialOpt() {
return true;
}
@Override
public CategoricalEncoding getGenModelEncoding() {
switch (_parms._categorical_encoding) {
case AUTO:
case Enum:
case SortByResponse:
return CategoricalEncoding.AUTO;
case OneHotExplicit:
return CategoricalEncoding.OneHotExplicit;
case Binary:
return CategoricalEncoding.Binary;
case EnumLimited:
return CategoricalEncoding.EnumLimited;
case Eigen:
return CategoricalEncoding.Eigen;
case LabelEncoder:
return CategoricalEncoding.LabelEncoder;
default:
return null;
}
}
protected SharedTreePojoWriter makeTreePojoWriter() {
throw new UnsupportedOperationException("POJO is not supported for model " + _parms.algoName() + ".");
}
@Override
protected final PojoWriter makePojoWriter() {
CategoricalEncoding encoding = getGenModelEncoding();
if (encoding == null) {
throw new IllegalArgumentException("Only default, SortByResponse, EnumLimited and 1-hot explicit scheme is supported for POJO/MOJO");
}
return makeTreePojoWriter();
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/SharedTreeModelWithContributions.java
|
package hex.tree;
import hex.ContributionsWithBackgroundFrameTask;
import hex.DistributionFactory;
import hex.Model;
import hex.genmodel.algos.tree.*;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.Log;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public abstract class SharedTreeModelWithContributions<
M extends SharedTreeModel<M, P, O>,
P extends SharedTreeModel.SharedTreeParameters,
O extends SharedTreeModel.SharedTreeOutput
> extends SharedTreeModel<M, P, O> implements Model.Contributions {
public SharedTreeModelWithContributions(Key<M> selfKey, P parms, O output) {
super(selfKey, parms, output);
}
@Override
public Frame scoreContributions(Frame frame, Key<Frame> destination_key) {
return scoreContributions(frame, destination_key, null);
}
protected Frame removeSpecialColumns(Frame frame) {
Frame adaptFrm = new Frame(frame);
adaptTestForTrain(adaptFrm, true, false);
// remove non-feature columns
adaptFrm.remove(_parms._response_column);
adaptFrm.remove(_parms._fold_column);
adaptFrm.remove(_parms._weights_column);
adaptFrm.remove(_parms._offset_column);
return adaptFrm;
}
protected Frame removeSpecialNNonNumericColumns(Frame frame) {
Frame adaptFrm = removeSpecialColumns(frame);
// remove non-numeric columns
int numCols = adaptFrm.numCols()-1;
for (int index=numCols; index>=0; index--) {
if (!adaptFrm.vec(index).isNumeric())
adaptFrm.remove(index);
}
return adaptFrm;
}
@Override
public Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j) {
if (_output.nclasses() > 2) {
throw new UnsupportedOperationException(
"Calculating contributions is currently not supported for multinomial models.");
}
Frame adaptFrm = removeSpecialColumns(frame);
final String[] outputNames = ArrayUtils.append(adaptFrm.names(), "BiasTerm");
return getScoreContributionsTask(this)
.withPostMapAction(JobUpdatePostMap.forJob(j))
.doAll(outputNames.length, Vec.T_NUM, adaptFrm)
.outputFrame(destination_key, outputNames, null);
}
@Override
public Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j, ContributionsOptions options) {
if (_output.nclasses() > 2) {
throw new UnsupportedOperationException(
"Calculating contributions is currently not supported for multinomial models.");
}
//FIXME: Original in DRF and GBM corresponds to Compact in XGBoost
// if (options._outputFormat == ContributionsOutputFormat.Compact) {
// throw new UnsupportedOperationException(
// "Only output_format \"Original\" is supported for this model.");
// }
if (!options.isSortingRequired()) {
return scoreContributions(frame, destination_key, j);
}
Frame adaptFrm = removeSpecialColumns(frame);
final String[] contribNames = ArrayUtils.append(adaptFrm.names(), "BiasTerm");
final ContributionComposer contributionComposer = new ContributionComposer();
int topNAdjusted = contributionComposer.checkAndAdjustInput(options._topN, adaptFrm.names().length);
int bottomNAdjusted = contributionComposer.checkAndAdjustInput(options._bottomN, adaptFrm.names().length);
int outputSize = Math.min((topNAdjusted+bottomNAdjusted)*2, adaptFrm.names().length*2);
String[] names = new String[outputSize+1];
byte[] types = new byte[outputSize+1];
String[][] domains = new String[outputSize+1][contribNames.length];
composeScoreContributionTaskMetadata(names, types, domains, adaptFrm.names(), options);
return getScoreContributionsSoringTask(this, options)
.withPostMapAction(JobUpdatePostMap.forJob(j))
.doAll(types, adaptFrm)
.outputFrame(destination_key, names, domains);
}
protected abstract ScoreContributionsWithBackgroundTask getScoreContributionsWithBackgroundTask(SharedTreeModel model, Frame fr, Frame backgroundFrame, boolean expand, int[] catOffsets, ContributionsOptions options);
@Override
public Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j, ContributionsOptions options, Frame backgroundFrame) {
if (backgroundFrame == null)
return scoreContributions(frame, destination_key, j, options);
assert !options.isSortingRequired();
if (_output.nclasses() > 2) {
throw new UnsupportedOperationException(
"Calculating contributions is currently not supported for multinomial models.");
}
Log.info("Starting contributions calculation for " + this._key + "...");
try (Scope.Safe s = Scope.safe(frame, backgroundFrame)) {
Frame scoreContribution;
if (options._outputFormat == ContributionsOutputFormat.Compact || _output._domains == null) {
Frame adaptedFrame = Scope.track(removeSpecialColumns(frame));
Frame adaptedBgFrame = Scope.track(removeSpecialColumns(backgroundFrame));
DKV.put(adaptedFrame);
DKV.put(adaptedBgFrame);
final String[] outputNames = ArrayUtils.append(adaptedFrame.names(), "BiasTerm");
scoreContribution = getScoreContributionsWithBackgroundTask(this, adaptedFrame, adaptedBgFrame, false, null, options)
.runAndGetOutput(j, destination_key, outputNames);
} else {
Frame adaptedFrame = Scope.track(removeSpecialColumns(frame));
Frame adaptedBgFrame = Scope.track(removeSpecialColumns(backgroundFrame));
DKV.put(adaptedFrame);
DKV.put(adaptedBgFrame);
assert Parameters.CategoricalEncodingScheme.Enum.equals(_parms._categorical_encoding) : "Unsupported categorical encoding. Only enum is supported.";
int[] catOffsets = new int[_output._domains.length + 1];
String[] outputNames;
int nCols = 1;
for (int i = 0; i < _output._domains.length; i++) {
if (!(_output._names[i].equals(_parms._response_column) ||
_output._names[i].equals(_parms._fold_column) ||
_output._names[i].equals(_parms._weights_column) ||
_output._names[i].equals(_parms._offset_column))) {
if (null == _output._domains[i]) {
catOffsets[i + 1] = catOffsets[i] + 1; // numeric
} else {
catOffsets[i + 1] = catOffsets[i] + _output._domains[i].length + 1; // +1 for missing(NA)
}
nCols++;
}
}
catOffsets = Arrays.copyOf(catOffsets, nCols);
outputNames = new String[catOffsets[catOffsets.length - 1] + 1];
outputNames[catOffsets[catOffsets.length - 1]] = "BiasTerm";
int l = 0;
for (int i = 0; i < _output._names.length; i++) {
if (!(_output._names[i].equals(_parms._response_column) ||
_output._names[i].equals(_parms._fold_column) ||
_output._names[i].equals(_parms._weights_column) ||
_output._names[i].equals(_parms._offset_column))) {
if (null == _output._domains[i]) {
outputNames[l++] = _output._names[i];
} else {
for (int k = 0; k < _output._domains[i].length; k++) {
outputNames[l++] = _output._names[i] + "." + _output._domains[i][k];
}
outputNames[l++] = _output._names[i] + ".missing(NA)";
}
}
}
scoreContribution = getScoreContributionsWithBackgroundTask(this, adaptedFrame, adaptedBgFrame, true, catOffsets, options)
.runAndGetOutput(j, destination_key, outputNames);
}
return Scope.untrack(scoreContribution);
} finally {
Log.info("Finished contributions calculation for " + this._key + "...");
}
}
protected abstract ScoreContributionsTask getScoreContributionsTask(SharedTreeModel model);
protected abstract ScoreContributionsTask getScoreContributionsSoringTask(SharedTreeModel model, ContributionsOptions options);
public class ScoreContributionsTask extends MRTask<ScoreContributionsTask> {
protected final Key<SharedTreeModel> _modelKey;
protected transient SharedTreeModel _model;
protected transient SharedTreeOutput _output;
protected transient TreeSHAPPredictor<double[]> _treeSHAP;
public ScoreContributionsTask(SharedTreeModel model) {
_modelKey = model._key;
}
@Override
@SuppressWarnings("unchecked")
protected void setupLocal() {
_model = _modelKey.get();
assert _model != null;
_output = (SharedTreeOutput) _model._output; // Need to cast to SharedTreeModel to access ntrees, treeKeys, & init_f params
assert _output != null;
List<TreeSHAPPredictor<double[]>> treeSHAPs = new ArrayList<>(_output._ntrees);
for (int treeIdx = 0; treeIdx < _output._ntrees; treeIdx++) {
for (int treeClass = 0; treeClass < _output._treeKeys[treeIdx].length; treeClass++) {
if (_output._treeKeys[treeIdx][treeClass] == null) {
continue;
}
SharedTreeSubgraph tree = _model.getSharedTreeSubgraph(treeIdx, treeClass);
SharedTreeNode[] nodes = tree.getNodes();
treeSHAPs.add(new TreeSHAP<>(nodes));
}
}
assert treeSHAPs.size() == _output._ntrees; // for now only regression and binomial to keep the output sane
_treeSHAP = new TreeSHAPEnsemble<>(treeSHAPs, (float) _output._init_f);
}
protected void fillInput(Chunk chks[], int row, double[] input, float[] contribs) {
for (int i = 0; i < chks.length; i++) {
input[i] = chks[i].atd(row);
}
Arrays.fill(contribs, 0);
}
@Override
public void map(Chunk chks[], NewChunk[] nc) {
assert chks.length == nc.length - 1; // calculate contribution for each feature + the model bias
double[] input = MemoryManager.malloc8d(chks.length);
float[] contribs = MemoryManager.malloc4f(nc.length);
TreeSHAPPredictor.Workspace workspace = _treeSHAP.makeWorkspace();
for (int row = 0; row < chks[0]._len; row++) {
fillInput(chks, row, input, contribs);
// calculate Shapley values
_treeSHAP.calculateContributions(input, contribs, 0, -1, workspace);
doModelSpecificComputation(contribs);
// Add contribs to new chunk
addContribToNewChunk(contribs, nc);
}
}
protected void doModelSpecificComputation(float[] contribs) {/*For children*/}
protected void addContribToNewChunk(float[] contribs, NewChunk[] nc) {
for (int i = 0; i < nc.length; i++) {
nc[i].addNum(contribs[i]);
}
}
}
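  // Property of the TreeSHAP output above (standard TreeSHAP behavior, noted here as a reminder):
  // for each row the per-feature contributions plus the trailing BiasTerm column sum to the model's
  // raw prediction in link space, e.g. for binomial the log-odds f(x), not the final probability.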
public class ScoreContributionsSortingTask extends ScoreContributionsTask {
private final int _topN;
private final int _bottomN;
private final boolean _compareAbs;
public ScoreContributionsSortingTask(SharedTreeModel model, ContributionsOptions options) {
super(model);
_topN = options._topN;
_bottomN = options._bottomN;
_compareAbs = options._compareAbs;
}
protected void fillInput(Chunk[] chks, int row, double[] input, float[] contribs, int[] contribNameIds) {
super.fillInput(chks, row, input, contribs);
for (int i = 0; i < contribNameIds.length; i++) {
contribNameIds[i] = i;
}
}
@Override
public void map(Chunk chks[], NewChunk[] nc) {
double[] input = MemoryManager.malloc8d(chks.length);
float[] contribs = MemoryManager.malloc4f(chks.length+1);
int[] contribNameIds = MemoryManager.malloc4(chks.length+1);
TreeSHAPPredictor.Workspace workspace = _treeSHAP.makeWorkspace();
for (int row = 0; row < chks[0]._len; row++) {
fillInput(chks, row, input, contribs, contribNameIds);
// calculate Shapley values
_treeSHAP.calculateContributions(input, contribs, 0, -1, workspace);
doModelSpecificComputation(contribs);
ContributionComposer contributionComposer = new ContributionComposer();
int[] contribNameIdsSorted = contributionComposer.composeContributions(
contribNameIds, contribs, _topN, _bottomN, _compareAbs);
// Add contribs to new chunk
addContribToNewChunk(contribs, contribNameIdsSorted, nc);
}
}
protected void addContribToNewChunk(float[] contribs, int[] contribNameIdsSorted, NewChunk[] nc) {
for (int i = 0, inputPointer = 0; i < nc.length-1; i+=2, inputPointer++) {
nc[i].addNum(contribNameIdsSorted[inputPointer]);
nc[i+1].addNum(contribs[contribNameIdsSorted[inputPointer]]);
}
nc[nc.length-1].addNum(contribs[contribs.length-1]); // bias
}
}
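/** Computes interventional (baseline) SHAP contributions: every row of the scored frame is explained against rows of a background frame. */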
public class ScoreContributionsWithBackgroundTask extends ContributionsWithBackgroundFrameTask<ScoreContributionsWithBackgroundTask> {
protected final Key<SharedTreeModel> _modelKey;
protected transient SharedTreeModel _model;
protected transient SharedTreeOutput _output;
protected transient TreeSHAPPredictor<double[]> _treeSHAP;
protected boolean _expand;
protected boolean _outputSpace;
protected int[] _catOffsets;
public ScoreContributionsWithBackgroundTask(Key<Frame> frKey, Key<Frame> backgroundFrameKey, boolean perReference, SharedTreeModel model, boolean expand, int[] catOffsets, boolean outputSpace) {
super(frKey, backgroundFrameKey, perReference);
_modelKey = model._key;
_expand = expand;
_catOffsets = catOffsets;
_outputSpace = outputSpace;
}
@Override
@SuppressWarnings("unchecked")
protected void setupLocal() {
_model = _modelKey.get();
assert _model != null;
_output = (SharedTreeOutput) _model._output; // Need to cast to SharedTreeOutput to access _ntrees, _treeKeys & _init_f
assert _output != null;
List<TreeSHAPPredictor<double[]>> treeSHAPs = new ArrayList<>(_output._ntrees);
for (int treeIdx = 0; treeIdx < _output._ntrees; treeIdx++) {
for (int treeClass = 0; treeClass < _output._treeKeys[treeIdx].length; treeClass++) {
if (_output._treeKeys[treeIdx][treeClass] == null) {
continue;
}
SharedTreeSubgraph tree = _model.getSharedTreeSubgraph(treeIdx, treeClass);
SharedTreeNode[] nodes = tree.getNodes();
treeSHAPs.add(new TreeSHAP<>(nodes));
}
}
assert treeSHAPs.size() == _output._ntrees; // for now only regression and binomial to keep the output sane
_treeSHAP = new TreeSHAPEnsemble<>(treeSHAPs, (float) _output._init_f);
}
protected void fillInput(Chunk chks[], int row, double[] input) {
for (int i = 0; i < chks.length; i++) {
input[i] = chks[i].atd(row);
}
}
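// For every scored row, contributions are evaluated against every background row, producing one output row per (row, background-row) pair.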
@Override
public void map(Chunk[] cs, Chunk[] bgCs, NewChunk[] nc) {
assert cs.length <= nc.length - 1; // calculate contribution for each feature + the model bias; nc can be bigger due to expanding cat.vars
double[] input = MemoryManager.malloc8d(cs.length);
double[] inputBg = MemoryManager.malloc8d(bgCs.length);
double[] contribs = MemoryManager.malloc8d(nc.length);
for (int row = 0; row < cs[0]._len; row++) {
fillInput(cs, row, input);
for (int bgRow = 0; bgRow < bgCs[0]._len; bgRow++) {
Arrays.fill(contribs, 0);
fillInput(bgCs, bgRow, inputBg);
// calculate Shapley values
_treeSHAP.calculateInterventionalContributions(input, inputBg, contribs, _catOffsets, _expand);
doModelSpecificComputation(contribs);
// Add contribs to new chunk
addContribToNewChunk(contribs, nc);
}
}
}
protected void doModelSpecificComputation(double[] contribs) {/*For children*/}
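// Contributions are computed in the link space; when output space is requested, they are rescaled so that their sum equals the difference between the inverse-linked prediction and the inverse-linked bias term.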
protected void addContribToNewChunk(double[] contribs, NewChunk[] nc) {
double transformationRatio = 1;
double biasTerm = contribs[contribs.length - 1];
if (_outputSpace) {
final double linkSpaceX = Arrays.stream(contribs).sum();
final double linkSpaceBg = biasTerm;
final double outSpaceX = DistributionFactory.getDistribution(_parms).linkInv(linkSpaceX);
final double outSpaceBg = DistributionFactory.getDistribution(_parms).linkInv(linkSpaceBg);
transformationRatio = Math.abs(linkSpaceX - linkSpaceBg) < 1e-6 ? 0 : (outSpaceX - outSpaceBg) / (linkSpaceX - linkSpaceBg);
biasTerm = outSpaceBg;
}
for (int i = 0; i < nc.length - 1; i++) {
nc[i].addNum(contribs[i] * transformationRatio);
}
nc[nc.length - 1].addNum(biasTerm);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/SharedTreeMojoWriter.java
|
package hex.tree;
import hex.Model;
import hex.ModelMojoWriter;
import hex.glm.GLMModel;
import hex.isotonic.IsotonicRegressionModel;
import water.DKV;
import water.Key;
import water.Value;
import java.io.IOException;
/**
* Shared Mojo definition file for DRF and GBM models.
*/
public abstract class SharedTreeMojoWriter<
M extends SharedTreeModel<M, P, O>,
P extends SharedTreeModel.SharedTreeParameters,
O extends SharedTreeModel.SharedTreeOutput
> extends ModelMojoWriter<M, P, O> {
public SharedTreeMojoWriter() {}
public SharedTreeMojoWriter(M model) {
super(model);
}
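/** Serializes the model into MOJO key-value pairs and blobs: tree counts, optional calibration model, categorical-encoding metadata, and the compressed (and auxiliary) tree bytes. */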
@Override
protected void writeModelData() throws IOException {
assert model._output._treeKeys.length == model._output._ntrees;
int nclasses = model._output.nclasses();
int ntreesPerClass = model.binomialOpt() && nclasses == 2 ? 1 : nclasses;
writekv("n_trees", model._output._ntrees);
writekv("n_trees_per_class", ntreesPerClass);
if (model._output.isCalibrated()) {
final CalibrationHelper.CalibrationMethod calibMethod = model._output.getCalibrationMethod();
final Model<?, ?, ?> calibModel = model._output.calibrationModel();
writekv("calib_method", calibMethod.getId());
switch (calibMethod) {
case PlattScaling:
double[] beta = ((GLMModel) calibModel).beta();
assert beta.length == nclasses; // n-1 coefficients + 1 intercept
writekv("calib_glm_beta", beta);
break;
case IsotonicRegression:
IsotonicRegressionModel isotonic = (IsotonicRegressionModel) calibModel;
write(isotonic.toIsotonicCalibrator());
break;
default:
throw new UnsupportedOperationException("MOJO is not (yet) supported for calibration model " + calibMethod);
}
}
writekv("_genmodel_encoding", model.getGenModelEncoding());
String[] origNames = model._output._origNames;
if (origNames != null) {
int nOrigNames = origNames.length;
writekv("_n_orig_names", nOrigNames);
writeStringArray(origNames, "_orig_names");
}
if (model._output._origDomains != null) {
int nOrigDomainValues = model._output._origDomains.length;
writekv("_n_orig_domain_values", nOrigDomainValues);
for (int i=0; i < nOrigDomainValues; i++) {
String[] currOrigDomain = model._output._origDomains[i];
writekv("_m_orig_domain_values_" + i, currOrigDomain == null ? 0 : currOrigDomain.length);
if (currOrigDomain != null) {
writeStringArray(currOrigDomain, "_orig_domain_values_" + i);
}
}
}
writekv("_orig_projection_array", model._output._orig_projection_array);
for (int i = 0; i < model._output._ntrees; i++) {
for (int j = 0; j < ntreesPerClass; j++) {
Key<CompressedTree> key = model._output._treeKeys[i][j];
Value ctVal = key != null ? DKV.get(key) : null;
if (ctVal == null)
continue; //throw new H2OKeyNotFoundArgumentException("CompressedTree " + key + " not found");
CompressedTree ct = ctVal.get();
// assume ct._seed is useless and need not be persisted
writeblob(String.format("trees/t%02d_%03d.bin", j, i), ct._bits);
if (model._output._treeKeysAux!=null) {
key = model._output._treeKeysAux[i][j];
ctVal = key != null ? DKV.get(key) : null;
if (ctVal != null) {
ct = ctVal.get();
writeblob(String.format("trees/t%02d_%03d_aux.bin", j, i), ct._bits);
}
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/SharedTreePojoWriter.java
|
package hex.tree;
import hex.Model;
import hex.PojoWriter;
import hex.genmodel.CategoricalEncoding;
import water.Key;
import water.codegen.CodeGeneratorPipeline;
import water.exceptions.JCodeSB;
import water.util.JCodeGen;
import water.util.PojoUtils;
import water.util.SB;
import water.util.SBPrintStream;
public abstract class SharedTreePojoWriter implements PojoWriter {
// common for all models
protected final Key<?> _modelKey;
protected final Model.Output _output;
// specific to tree based models
protected final CategoricalEncoding _encoding;
protected final boolean _binomialOpt;
protected final CompressedTree[/*_ntrees*/][/*_nclass*/] _trees;
protected final TreeStats _treeStats; // optional (can be null)
protected SharedTreePojoWriter(Key<?> modelKey, Model.Output output,
CategoricalEncoding encoding, boolean binomialOpt, CompressedTree[][] trees,
TreeStats treeStats) {
_modelKey = modelKey;
_output = output;
_encoding = encoding;
_binomialOpt = binomialOpt;
_trees = trees;
_treeStats = treeStats;
}
@Override
public boolean toJavaCheckTooBig() {
return _treeStats == null || _treeStats._num_trees * _treeStats._mean_leaves > 1000000;
}
@Override
public SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileContext) {
sb.nl();
sb.ip("public boolean isSupervised() { return true; }").nl();
sb.ip("public int nfeatures() { return " + _output.nfeatures() + "; }").nl();
sb.ip("public int nclasses() { return " + _output.nclasses() + "; }").nl();
if (_encoding == CategoricalEncoding.Eigen) {
sb.ip("public double[] getOrigProjectionArray() { return " + PojoUtils.toJavaDoubleArray(_output._orig_projection_array) + "; }").nl();
}
if (_encoding != CategoricalEncoding.AUTO) {
sb.ip("public hex.genmodel.CategoricalEncoding getCategoricalEncoding() { return hex.genmodel.CategoricalEncoding." +
_encoding.name() + "; }").nl();
}
return sb;
}
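// Generates the scoring body: one per-forest score0 call per tree, plus (in the file context) a forest class that delegates to the per-class tree classes produced by TreeJCodeGen.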
@Override
public void toJavaPredictBody(SBPrintStream body,
CodeGeneratorPipeline classCtx, CodeGeneratorPipeline fileCtx,
final boolean verboseCode) {
final int nclass = _output.nclasses();
body.ip("java.util.Arrays.fill(preds,0);").nl();
final String mname = JCodeGen.toJavaId(_modelKey.toString());
// One forest-per-GBM-tree, with a real-tree-per-class
for (int t=0; t < _trees.length; t++) {
// Generate score method for given tree
toJavaForestName(body.i(),mname,t).p(".score0(data,preds);").nl();
final int treeIdx = t;
fileCtx.add(out -> {
try {
// Generate a class implementing a tree
out.nl();
toJavaForestName(out.ip("class "), mname, treeIdx).p(" {").nl().ii(1);
out.ip("public static void score0(double[] fdata, double[] preds) {").nl().ii(1);
for (int c = 0; c < nclass; c++) {
if (_trees[treeIdx][c] == null) continue;
if (!(_binomialOpt && c == 1 && nclass == 2)) // Binomial optimization
toJavaTreeName(out.ip("preds[").p(nclass == 1 ? 0 : c + 1).p("] += "), mname, treeIdx, c).p(".score0(fdata);").nl();
}
out.di(1).ip("}").nl(); // end of function
out.di(1).ip("}").nl(); // end of forest class
// Generate the pre-tree classes afterwards
for (int c = 0; c < nclass; c++) {
if (_trees[treeIdx][c] == null) continue;
if (!(_binomialOpt && c == 1 && nclass == 2)) { // Binomial optimization
String javaClassName = toJavaTreeName(new SB(), mname, treeIdx, c).toString();
SB sb = new SB();
new TreeJCodeGen(_output, _trees[treeIdx][c], sb, javaClassName, verboseCode).generate();
out.p(sb);
}
}
} catch (Exception e) {
throw new RuntimeException("Internal error creating the POJO.", e);
}
});
}
toJavaUnifyPreds(body);
}
protected abstract void toJavaUnifyPreds(SBPrintStream body);
private static <T extends JCodeSB<T>> T toJavaTreeName(T sb, String mname, int t, int c ) {
return sb.p(mname).p("_Tree_").p(t).p("_class_").p(c);
}
private static <T extends JCodeSB<T>> T toJavaForestName(T sb, String mname, int t ) {
return sb.p(mname).p("_Forest_").p(t);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/TreeHandler.java
|
package hex.tree;
import hex.Model;
import hex.genmodel.algos.tree.SharedTreeGraph;
import hex.genmodel.algos.tree.SharedTreeNode;
import hex.genmodel.algos.tree.SharedTreeSubgraph;
import hex.genmodel.algos.tree.SharedTreeGraphConverter;
import hex.schemas.TreeV3;
import water.Keyed;
import water.MemoryManager;
import water.api.Handler;
import java.util.*;
import java.util.stream.IntStream;
import static hex.tree.TreeUtils.getResponseLevelIndex;
/**
* Handling requests for various model trees
*/
public class TreeHandler extends Handler {
private static final int NO_CHILD = -1;
public enum PlainLanguageRules {AUTO, TRUE, FALSE}
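/** Fetches a single tree of a tree-based model and converts it into the client-facing representation (children, thresholds, features, NA directions, predictions, optional plain-language rules). */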
public TreeV3 getTree(final int version, final TreeV3 args) {
if (args.tree_number < 0) {
throw new IllegalArgumentException("Invalid tree number: " + args.tree_number + ". Tree number must be >= 0.");
}
final Keyed possibleModel = args.model.key().get();
if (possibleModel == null) throw new IllegalArgumentException("Given model does not exist: " + args.model.key().toString());
else if (!(possibleModel instanceof SharedTreeModel) && !(possibleModel instanceof SharedTreeGraphConverter)) {
throw new IllegalArgumentException("Given model is not tree-based.");
}
final SharedTreeSubgraph sharedTreeSubgraph;
if (possibleModel instanceof SharedTreeGraphConverter) {
final SharedTreeGraphConverter treeBackedModel = (SharedTreeGraphConverter) possibleModel;
final SharedTreeGraph sharedTreeGraph = treeBackedModel.convert(args.tree_number, args.tree_class);
assert sharedTreeGraph.subgraphArray.size() == 1;
sharedTreeSubgraph = sharedTreeGraph.subgraphArray.get(0);
if (! ((Model)possibleModel)._output.isClassifier()) {
args.tree_class = null; // Class may not be provided by the user, should be always filled correctly on output. NULL for regression.
}
} else {
final SharedTreeModel model = (SharedTreeModel) possibleModel;
final SharedTreeModel.SharedTreeOutput sharedTreeOutput = (SharedTreeModel.SharedTreeOutput) model._output;
final int treeClass = getResponseLevelIndex(args.tree_class, sharedTreeOutput);
sharedTreeSubgraph = model.getSharedTreeSubgraph(args.tree_number, treeClass);
// Class may not be provided by the user, should be always filled correctly on output. NULL for regression.
args.tree_class = sharedTreeOutput.isClassifier() ? sharedTreeOutput.classNames()[treeClass] : null;
}
final TreeProperties treeProperties = convertSharedTreeSubgraph(sharedTreeSubgraph, args.plain_language_rules);
args.left_children = treeProperties._leftChildren;
args.right_children = treeProperties._rightChildren;
args.descriptions = treeProperties._descriptions;
args.root_node_id = sharedTreeSubgraph.rootNode.getNodeNumber();
args.thresholds = treeProperties._thresholds;
args.features = treeProperties._features;
args.nas = treeProperties._nas;
args.levels = treeProperties.levels;
args.predictions = treeProperties._predictions;
args.tree_decision_path = treeProperties._treeDecisionPath;
args.decision_paths = treeProperties._decisionPaths;
return args;
}
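// Renders the whole tree as nested plain-language if/else rules, one padded line per condition or prediction.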
private static String getLanguageRepresentation(SharedTreeSubgraph sharedTreeSubgraph) {
return getNodeRepresentation(sharedTreeSubgraph.rootNode, new StringBuilder(), 0).toString();
}
private static StringBuilder getNodeRepresentation(SharedTreeNode node, StringBuilder languageRepresentation, int padding) {
if (node.getRightChild() != null) {
languageRepresentation.append(getConditionLine(node, padding));
languageRepresentation.append(getNewPaddedLine(padding));
languageRepresentation = getNodeRepresentation(node.getRightChild(), languageRepresentation, padding +1);
languageRepresentation.append(getNewPaddedLine(padding));
languageRepresentation.append(getElseLine(node));
languageRepresentation.append(getNewPaddedLine(padding));
languageRepresentation = getNodeRepresentation(node.getLeftChild(), languageRepresentation, padding + 1);
languageRepresentation.append(getNewPaddedLine(padding));
languageRepresentation.append("}");
} else {
languageRepresentation.append(getNewPaddedLine(padding));
if (Float.compare(node.getPredValue(),Float.NaN) != 0) {
languageRepresentation.append("Predicted value: " + node.getPredValue());
} else {
languageRepresentation.append("Predicted value: NaN");
}
languageRepresentation.append(getNewPaddedLine(padding));
}
return languageRepresentation;
}
private static StringBuilder getNewPaddedLine(int padding) {
StringBuilder line = new StringBuilder("\n");
for(int i = 0; i < padding; i++) {
line.append("\t");
}
return line;
}
private static StringBuilder getElseLine(SharedTreeNode node) {
StringBuilder elseLine = new StringBuilder();
if (node.getDomainValues() == null) {
elseLine.append("} else {");
} else {
SharedTreeNode leftChild = node.getLeftChild();
elseLine.append("} else if ( ").append(node.getColName()).append(" is in [ ");
BitSet inclusiveLevelsSet = leftChild.getInclusiveLevels();
if (inclusiveLevelsSet != null) {
String stringToParseInclusiveLevelsFrom = inclusiveLevelsSet.toString();
int inclusiveLevelsLength = inclusiveLevelsSet.toString().length();
if (inclusiveLevelsLength > 2) {
// get rid of curly braces:
stringToParseInclusiveLevelsFrom = stringToParseInclusiveLevelsFrom.substring(1, inclusiveLevelsLength - 1);
String[] inclusiveLevels = stringToParseInclusiveLevelsFrom.split(",");
for (String index : inclusiveLevels) {
elseLine.append(node.getDomainValues()[Integer.parseInt(index.trim())] + " ");
}
} else {
elseLine.append("Missing set of levels for underlying node");
}
}
elseLine.append("]) {");
}
return elseLine;
}
private static StringBuilder getConditionLine(SharedTreeNode node, int padding) {
StringBuilder conditionLine = new StringBuilder();
if (padding != 0) {
conditionLine.append(getNewPaddedLine(padding));
}
if (node.getDomainValues() == null) {
if (Float.compare(node.getSplitValue(),Float.NaN) == 0) {
conditionLine.append("If ( " + node.getColName() + " is NaN ) {");
} else {
conditionLine.append("If ( " + node.getColName() + " >= " + node.getSplitValue());
if ("RIGHT".equals(getNaDirection(node))) {
conditionLine.append(" or ").append(node.getColName()).append(" is NaN ) {");
} else {
conditionLine.append(" ) {");
}
}
} else {
conditionLine.append("If ( " + node.getColName() + " is in [ ");
// get inclusive levels:
SharedTreeNode rightChild = node.getRightChild();
String stringToParseInclusiveLevelsFrom = rightChild.getInclusiveLevels().toString();
int inclusiveLevelsLength = rightChild.getInclusiveLevels().toString().length();
if (inclusiveLevelsLength > 2) {
// get rid of curly braces:
stringToParseInclusiveLevelsFrom = stringToParseInclusiveLevelsFrom.substring(1, inclusiveLevelsLength - 1);
String[] inclusiveLevels = stringToParseInclusiveLevelsFrom.split(",");
Arrays.stream(inclusiveLevels)
.map(String::trim)
.map(Integer::parseInt)
.forEach(index -> conditionLine.append(node.getDomainValues()[index] + " "));
} else {
conditionLine.append("Missing set of levels for underlying node");
}
conditionLine.append("]) {");
}
return conditionLine;
}
/**
* Converts H2O-3's internal representation of a tree in a form of {@link SharedTreeSubgraph} to a format
* expected by H2O clients.
*
* @param sharedTreeSubgraph An instance of {@link SharedTreeSubgraph} to convert
* @return An instance of {@link TreeProperties} with some attributes possibly empty if suitable. Never null.
*/
static TreeProperties convertSharedTreeSubgraph(final SharedTreeSubgraph sharedTreeSubgraph, PlainLanguageRules plainLanguageRules) {
Objects.requireNonNull(sharedTreeSubgraph);
final TreeProperties treeprops = new TreeProperties();
treeprops._leftChildren = MemoryManager.malloc4(sharedTreeSubgraph.nodesArray.size());
treeprops._rightChildren = MemoryManager.malloc4(sharedTreeSubgraph.nodesArray.size());
treeprops._descriptions = new String[sharedTreeSubgraph.nodesArray.size()];
treeprops._thresholds = MemoryManager.malloc4f(sharedTreeSubgraph.nodesArray.size());
treeprops._features = new String[sharedTreeSubgraph.nodesArray.size()];
treeprops._nas = new String[sharedTreeSubgraph.nodesArray.size()];
treeprops._predictions = MemoryManager.malloc4f(sharedTreeSubgraph.nodesArray.size());
treeprops._leafNodeAssignments = new String[sharedTreeSubgraph.nodesArray.size()];
treeprops._decisionPaths = new String[sharedTreeSubgraph.nodesArray.size()];
treeprops._leftChildrenNormalized = MemoryManager.malloc4(sharedTreeSubgraph.nodesArray.size());
treeprops._rightChildrenNormalized = MemoryManager.malloc4(sharedTreeSubgraph.nodesArray.size());
// Set root node's children, there is no guarantee the root node will be number 0
treeprops._rightChildren[0] = sharedTreeSubgraph.rootNode.getRightChild() != null ? sharedTreeSubgraph.rootNode.getRightChild().getNodeNumber() : -1;
treeprops._leftChildren[0] = sharedTreeSubgraph.rootNode.getLeftChild() != null ? sharedTreeSubgraph.rootNode.getLeftChild().getNodeNumber() : -1;
treeprops._thresholds[0] = sharedTreeSubgraph.rootNode.getSplitValue();
treeprops._features[0] = sharedTreeSubgraph.rootNode.getColName();
treeprops._nas[0] = getNaDirection(sharedTreeSubgraph.rootNode);
treeprops.levels = new int[sharedTreeSubgraph.nodesArray.size()][];
if (plainLanguageRules.equals(PlainLanguageRules.AUTO)) {
/* 255 = number of nodes for complete binary tree of depth 7 (2^(k+1)−1) */
plainLanguageRules = sharedTreeSubgraph.nodesArray.size() < 256 ? PlainLanguageRules.TRUE : PlainLanguageRules.FALSE;
}
if (plainLanguageRules.equals(PlainLanguageRules.TRUE)) {
treeprops._treeDecisionPath = getLanguageRepresentation(sharedTreeSubgraph);
treeprops._decisionPaths[0] = "Predicted value: " + sharedTreeSubgraph.rootNode.getPredValue();
treeprops._leftChildrenNormalized[0] = sharedTreeSubgraph.rootNode.getLeftChild() != null ? sharedTreeSubgraph.rootNode.getLeftChild().getNodeNumber() : -1;
treeprops._rightChildrenNormalized[0] = sharedTreeSubgraph.rootNode.getRightChild() != null ? sharedTreeSubgraph.rootNode.getRightChild().getNodeNumber() : -1;
}
treeprops._domainValues = new String[sharedTreeSubgraph.nodesArray.size()][];
treeprops._domainValues[0] = sharedTreeSubgraph.rootNode.getDomainValues();
List<SharedTreeNode> nodesToTraverse = new ArrayList<>();
nodesToTraverse.add(sharedTreeSubgraph.rootNode);
append(treeprops._rightChildren, treeprops._leftChildren,
treeprops._descriptions, treeprops._thresholds, treeprops._features, treeprops._nas,
treeprops.levels, treeprops._predictions, nodesToTraverse, -1, false, treeprops._domainValues);
if (plainLanguageRules.equals(PlainLanguageRules.TRUE)) fillLanguagePathRepresentation(treeprops, sharedTreeSubgraph.rootNode);
return treeprops;
}
private static void append(final int[] rightChildren, final int[] leftChildren, final String[] nodesDescriptions,
final float[] thresholds, final String[] splitColumns, final String[] naHandlings,
final int[][] levels, final float[] predictions,
final List<SharedTreeNode> nodesToTraverse, int pointer, boolean visitedRoot,
String[][] domainValues) {
if(nodesToTraverse.isEmpty()) return;
List<SharedTreeNode> discoveredNodes = new ArrayList<>();
for (SharedTreeNode node : nodesToTraverse) {
pointer++;
final SharedTreeNode leftChild = node.getLeftChild();
final SharedTreeNode rightChild = node.getRightChild();
if(visitedRoot){
fillnodeDescriptions(node, nodesDescriptions, thresholds, splitColumns, levels, predictions,
naHandlings, pointer, domainValues);
} else {
StringBuilder rootDescriptionBuilder = new StringBuilder();
rootDescriptionBuilder.append("*** WARNING: This property is deprecated! *** ");
rootDescriptionBuilder.append("Root node has id ");
rootDescriptionBuilder.append(node.getNodeNumber());
rootDescriptionBuilder.append(" and splits on column '");
rootDescriptionBuilder.append(node.getColName());
rootDescriptionBuilder.append("'. ");
fillNodeSplitTowardsChildren(rootDescriptionBuilder, node);
nodesDescriptions[pointer] = rootDescriptionBuilder.toString();
visitedRoot = true;
}
if (leftChild != null) {
discoveredNodes.add(leftChild);
leftChildren[pointer] = leftChild.getNodeNumber();
} else {
leftChildren[pointer] = NO_CHILD;
}
if (rightChild != null) {
discoveredNodes.add(rightChild);
rightChildren[pointer] = rightChild.getNodeNumber();
} else {
rightChildren[pointer] = NO_CHILD;
}
}
append(rightChildren, leftChildren, nodesDescriptions, thresholds, splitColumns, naHandlings, levels, predictions,
discoveredNodes, pointer, true, domainValues);
}
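// Re-numbers children in traversal order so that decision paths (keyed by node number) can be matched back to positions in the flattened arrays.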
private static List<Integer> extractInternalIds(TreeProperties properties) {
int nodeId = 0;
List<Integer> nodeIds = new ArrayList<>();
nodeIds.add(nodeId);
for (int i = 0; i < properties._leftChildren.length; i++) {
if (properties._leftChildren[i] != -1) {
nodeId++;
nodeIds.add(properties._leftChildren[i]);
properties._leftChildrenNormalized[i] = nodeId;
} else {
properties._leftChildrenNormalized[i] = -1;
}
if (properties._rightChildren[i] != -1) {
nodeId++;
nodeIds.add(properties._rightChildren[i]);
properties._rightChildrenNormalized[i] = nodeId;
} else {
properties._rightChildrenNormalized[i] = -1;
}
}
return nodeIds;
}
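// Builds the textual condition for stepping from a parent node towards one of its subtrees, covering categorical splits, numerical splits and NA handling.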
static String getCondition(SharedTreeNode node, String from) {
StringBuilder sb = new StringBuilder();
if (node.getDomainValues() != null) {
sb.append("If (");
sb.append(node.getColName());
sb.append(" is in [");
BitSet inclusiveLevels;
if (from.equals("R")) {
inclusiveLevels = node.getLeftChild().getInclusiveLevels();
} else {
inclusiveLevels = node.getRightChild().getInclusiveLevels();
}
if (inclusiveLevels != null) {
String stringToParseInclusiveLevelsFrom = inclusiveLevels.toString();
int inclusiveLevelsLength = stringToParseInclusiveLevelsFrom.length();
if (inclusiveLevelsLength > 2) {
stringToParseInclusiveLevelsFrom = stringToParseInclusiveLevelsFrom.substring(1, inclusiveLevelsLength - 1);
String[] inclusiveLevelsStr = stringToParseInclusiveLevelsFrom.split(",");
for (String level : inclusiveLevelsStr) {
sb.append(node.getDomainValues()[Integer.parseInt(level.replaceAll("\\s", ""))]).append(" ");
}
}
} else {
sb.append(" ");
}
sb.append("]) -> ");
} else {
if (Float.compare(node.getSplitValue(), Float.NaN) == 0) {
String sign;
if ("R".equals(from)) {
sign = " is not ";
} else {
sign = " is ";
}
sb.append("If ( ").append(node.getColName()).append(sign).append("NaN )");
} else {
String sign;
boolean useNan = false;
String nanString = " or " + node.getColName() + " is NaN";
if ("R".equals(from)) {
sign = " < ";
if (node.getLeftChild().isInclusiveNa()) {
useNan = true;
}
} else {
sign = " >= ";
if (node.getRightChild().isInclusiveNa()) {
useNan = true;
}
}
sb.append("If ( " ).append(node.getColName()).append(sign).append(node.getSplitValue());
if (useNan) {
sb.append(nanString);
}
sb.append(" ) -> ");
}
}
return sb.toString();
}
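// Recursively collects one root-to-leaf decision path per leaf; each PathResult carries the leaf's node number and the accumulated conditions ending in its prediction.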
private static List<PathResult> findPaths(SharedTreeNode node) {
if (node == null)
return new ArrayList<>();
List<PathResult> result = new ArrayList<>();
List<PathResult> leftSubtree = findPaths(node.getLeftChild());
List<PathResult> rightSubtree = findPaths(node.getRightChild());
for (int i = 0; i < leftSubtree.size(); i++){
PathResult leftResult = leftSubtree.get(i);
PathResult newResult = leftResult;
newResult.path.insert(0, getCondition(node, "R"));
result.add(newResult);
}
for (int i = 0; i < rightSubtree.size(); i++){
PathResult rightResult = rightSubtree.get(i);
PathResult newResult = rightResult;
newResult.path.insert(0, getCondition(node, "L"));
result.add(newResult);
}
if (result.size() == 0) {
result.add(new PathResult(node.getNodeNumber()));
result.get(0).path.append("Prediction: ").append(node.getPredValue());
}
return result;
}
private static void fillLanguagePathRepresentation(TreeProperties properties, SharedTreeNode root) {
List<Integer> nodeIds = extractInternalIds(properties);
List<PathResult> paths = findPaths(root);
for (PathResult path : paths) {
properties._decisionPaths[nodeIds.indexOf(path.nodeId)] = path.path.toString();
}
}
private static void fillnodeDescriptions(final SharedTreeNode node, final String[] nodeDescriptions,
final float[] thresholds, final String[] splitColumns, final int[][] levels,
final float[] predictions, final String[] naHandlings, final int pointer, final String[][] domainValues) {
final StringBuilder nodeDescriptionBuilder = new StringBuilder();
int[] nodeLevels = node.getParent().isBitset() ? extractNodeLevels(node) : null;
nodeDescriptionBuilder.append("*** WARNING: This property is deprecated! *** ");
nodeDescriptionBuilder.append("Node has id ");
nodeDescriptionBuilder.append(node.getNodeNumber());
if (node.getColName() != null && !node.isLeaf()) { // only internal nodes split on a column
nodeDescriptionBuilder.append(" and splits on column '");
nodeDescriptionBuilder.append(node.getColName());
nodeDescriptionBuilder.append("'. ");
} else {
nodeDescriptionBuilder.append(" and is a terminal node. ");
}
fillNodeSplitTowardsChildren(nodeDescriptionBuilder, node);
if (!Float.isNaN(node.getParent().getSplitValue())) {
nodeDescriptionBuilder.append(" Parent node split threshold is ");
nodeDescriptionBuilder.append(node.getParent().getSplitValue());
nodeDescriptionBuilder.append(". Prediction: ");
nodeDescriptionBuilder.append(node.getPredValue());
nodeDescriptionBuilder.append(".");
} else if (node.getParent().isBitset()) {
nodeLevels = extractNodeLevels(node);
nodeDescriptionBuilder.append(" Parent node split on column [");
nodeDescriptionBuilder.append(node.getParent().getColName());
if(nodeLevels != null) {
nodeDescriptionBuilder.append("]. Inherited categorical levels from parent split: ");
for (int nodeLevelsindex = 0; nodeLevelsindex < nodeLevels.length; nodeLevelsindex++) {
nodeDescriptionBuilder.append(node.getParent().getDomainValues()[nodeLevels[nodeLevelsindex]]);
if (nodeLevelsindex != nodeLevels.length - 1) nodeDescriptionBuilder.append(",");
}
} else {
nodeDescriptionBuilder.append("]. No categoricals levels inherited from parent.");
}
} else {
nodeDescriptionBuilder.append("Split value is NA.");
}
nodeDescriptions[pointer] = nodeDescriptionBuilder.toString();
splitColumns[pointer] = node.getColName();
naHandlings[pointer] = getNaDirection(node);
levels[pointer] = nodeLevels;
predictions[pointer] = node.getPredValue();
thresholds[pointer] = node.getSplitValue();
domainValues[pointer] = node.getDomainValues();
}
private static void fillNodeSplitTowardsChildren(final StringBuilder nodeDescriptionBuilder, final SharedTreeNode node){
if (!Float.isNaN(node.getSplitValue())) {
nodeDescriptionBuilder.append("Split threshold is ");
if (node.getLeftChild() != null) {
nodeDescriptionBuilder.append(" < ");
nodeDescriptionBuilder.append(node.getSplitValue());
nodeDescriptionBuilder.append(" to the left node (");
nodeDescriptionBuilder.append(node.getLeftChild().getNodeNumber());
nodeDescriptionBuilder.append(")");
}
if (node.getRightChild() != null) { // right child exists; its node number is appended below
if (node.getLeftChild() != null) nodeDescriptionBuilder.append(", ");
nodeDescriptionBuilder.append(" >= ");
nodeDescriptionBuilder.append(node.getSplitValue());
nodeDescriptionBuilder.append(" to the right node (");
nodeDescriptionBuilder.append(node.getRightChild().getNodeNumber());
nodeDescriptionBuilder.append(")");
}
nodeDescriptionBuilder.append(".");
} else if (node.isBitset()) {
fillNodeCategoricalSplitDescription(nodeDescriptionBuilder, node);
}
}
private static int[] extractNodeLevels(final SharedTreeNode node) {
final BitSet childInclusiveLevels = node.getInclusiveLevels();
final int cardinality = childInclusiveLevels.cardinality();
if (cardinality > 0) {
int[] nodeLevels = MemoryManager.malloc4(cardinality);
int bitsignCounter = 0;
for (int i = childInclusiveLevels.nextSetBit(0); i >= 0; i = childInclusiveLevels.nextSetBit(i + 1)) {
nodeLevels[bitsignCounter] = i;
bitsignCounter++;
}
return nodeLevels;
}
return null;
}
private static void fillNodeCategoricalSplitDescription(final StringBuilder nodeDescriptionBuilder, final SharedTreeNode node) {
final SharedTreeNode leftChild = node.getLeftChild();
final SharedTreeNode rightChild = node.getRightChild();
final int[] leftChildLevels = leftChild != null ? extractNodeLevels(leftChild) : null; // guard against missing children before extracting levels
final int[] rightChildLevels = rightChild != null ? extractNodeLevels(rightChild) : null;
if (leftChild != null) {
nodeDescriptionBuilder.append(" Left child node (");
nodeDescriptionBuilder.append(leftChild.getNodeNumber());
nodeDescriptionBuilder.append(") inherits categorical levels: ");
if (leftChildLevels != null) {
for (int nodeLevelsindex = 0; nodeLevelsindex < leftChildLevels.length; nodeLevelsindex++) {
nodeDescriptionBuilder.append(node.getDomainValues()[leftChildLevels[nodeLevelsindex]]);
if (nodeLevelsindex != leftChildLevels.length - 1) nodeDescriptionBuilder.append(",");
}
}
}
if (rightChild != null) {
nodeDescriptionBuilder.append(". Right child node (");
nodeDescriptionBuilder.append(rightChild.getNodeNumber());
nodeDescriptionBuilder.append(") inherits categorical levels: ");
if (rightChildLevels != null) {
for (int nodeLevelsindex = 0; nodeLevelsindex < rightChildLevels.length; nodeLevelsindex++) {
nodeDescriptionBuilder.append(node.getDomainValues()[rightChildLevels[nodeLevelsindex]]);
if (nodeLevelsindex != rightChildLevels.length - 1) nodeDescriptionBuilder.append(",");
}
}
}
nodeDescriptionBuilder.append(". ");
}
private static String getNaDirection(final SharedTreeNode node) {
final boolean leftNa = node.getLeftChild() != null && node.getLeftChild().isInclusiveNa();
final boolean rightNa = node.getRightChild() != null && node.getRightChild().isInclusiveNa();
assert !(leftNa && rightNa); // NA rows may be routed left or right, but never to both children
if (leftNa) {
return "LEFT";
} else if (rightNa) {
return "RIGHT";
}
return null; // No direction
}
public static class TreeProperties {
public int[] _leftChildren;
public int[] _rightChildren;
public String[] _descriptions; // General node description, most likely to contain serialized threshold or inclusive dom. levels
public float[] _thresholds;
public String[] _features;
public int[][] levels; // Categorical levels, points to a list of categoricals that is already existing within the model on the client.
public String[] _nas;
public float[] _predictions; // Prediction values on terminal nodes
public String _treeDecisionPath;
public String[] _leafNodeAssignments;
public String[] _decisionPaths;
private int[] _leftChildrenNormalized;
private int[] _rightChildrenNormalized;
private String[][] _domainValues;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/TreeJCodeGen.java
|
package hex.tree;
import hex.Model;
import water.util.IcedBitSet;
import water.util.SB;
/** A tree code generator producing Java code representation of the tree:
*
* - A generated class contains score0 method
* - if score0 method is too long, it redirects prediction to a new subclass's score0 method
*/
class TreeJCodeGen extends TreeVisitor<RuntimeException> {
public static final int MAX_NODES = (1 << 12) / 4; // limit for the number of decision nodes visited per generated class
//public static final int MAX_NODES = 5; // limit for a number decision nodes
private static final int MAX_DEPTH = 70;
private static final int MAX_CONSTANT_POOL_SIZE = (1 << 16) - 4096; // Keep some space for method and string constants
private static final int MAX_METHOD_SIZE = (1 << 16) - 4096;
// FIXME: the dataset gbm_test/30k_cattest.csv produces trees ~ 100 depth
//
// Simulate stack since we need to preserve each info per generated class
final SB _sbs [] = new SB [MAX_DEPTH];
final int _nodesCnt[]= new int[MAX_DEPTH];
final SB _grpSplits[] = new SB[MAX_DEPTH];
final int _grpSplitsCnt[] = new int[MAX_DEPTH];
final int _constantPool[] = new int[MAX_DEPTH];
final int _staticInit[] = new int[MAX_DEPTH];
final String _javaClassName;
final Model.Output _output;
SB _sb;
SB _csb;
SB _grpsplit;
int _subtrees = 0;
int _grpCnt = 0;
int _constantPoolSize = 0;
int _staticInitSize = 0;
final private boolean _verboseCode;
public TreeJCodeGen(Model.Output output, CompressedTree ct, SB sb, String javaClassName, boolean verboseCode) {
super(ct);
_output = output;
_sb = sb;
_csb = new SB();
_grpsplit = new SB();
_verboseCode = verboseCode;
_javaClassName = javaClassName;
}
// code preamble
protected void preamble(SB sb, int subtree) throws RuntimeException {
String subt = subtree > 0 ? "_" + String.valueOf(subtree) : "";
sb.p("class ").p(_javaClassName).p(subt).p(" {").nl().ii(1);
sb.ip("static final double score0").p("(double[] data) {").nl().ii(1); // predict method for one tree
sb.ip("double pred = ");
}
// close the code
protected void closure(SB sb) throws RuntimeException {
sb.p(";").nl();
sb.ip("return pred;").nl().di(1);
sb.ip("}").p(" // constant pool size = ").p(_constantPoolSize).p("B, number of visited nodes = ").p(_nodes).p(", static init size = ").p(_staticInitSize).p("B");
sb.nl(); // close the method
// Append actual group splits
_sb.p(_grpsplit);
sb.di(1).ip("}").nl().nl(); // close the class
}
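// Emits one nested ternary decision per internal node; when the generated method grows past the node/constant-pool/static-initializer limits, the remainder of the tree is redirected into a freshly started subclass.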
@Override protected void pre(int col, float fcmp, IcedBitSet gcmp, int equal, int naSplitDirInt) {
// Check for method size and number of constants generated in constant pool
if (_nodes > MAX_NODES || _constantPoolSize > MAX_CONSTANT_POOL_SIZE || _staticInitSize > MAX_METHOD_SIZE ) {
_sb.p(_javaClassName).p('_').p(_subtrees).p(".score0").p("(data)");
_nodesCnt[_depth] = _nodes;
_sbs[_depth] = _sb;
_grpSplits[_depth] = _grpsplit;
_grpSplitsCnt[_depth] = _grpCnt;
_constantPool[_depth] = _constantPoolSize;
_staticInit[_depth] = _staticInitSize;
_sb = new SB();
_nodes = 0;
_grpsplit = new SB();
_grpCnt = 0;
_constantPoolSize = 0;
_staticInitSize = 0;
preamble(_sb, _subtrees);
_subtrees++;
}
// Generates array for group splits
if ((equal == 2 || equal == 3) && gcmp != null) { // group-split array is needed for both bitset split encodings
_grpsplit.i(1).p("// ").p(gcmp.toString()).nl();
_grpsplit.i(1).p("public static final byte[] GRPSPLIT").p(_grpCnt).p(" = new byte[] ").p(gcmp.toStrArray()).p(";").nl();
_constantPoolSize += gcmp.numBytes() + 3; // Each byte stored in split (NOT TRUE) and field reference and field name (Utf8) and NameAndType
_staticInitSize += 6 + gcmp.numBytes() * 6; // byte size of instructions to create an array and load all byte values (upper bound = dup, bipush, bipush, bastore = 5bytes)
}
// Generates decision
_sb.ip(" (");
// Generate column names only if necessary
String colName = _verboseCode ? " /* " + _output._names[col] + " */" : "";
String[][] domains = _output._domains;
// size of the training domains (i.e., one larger than the max number of "seen" categorical IDs)
int limit = (domains != null && domains[col] != null) ? domains[col].length : Integer.MAX_VALUE;
assert(equal!=1);
if(equal == 0) {
// for the special case of a split of a categorical column if there's not enough bins to resolve the levels,
// we treat the categorical levels as ordinal integer levels, and split at a certain point (<=, not using a bitset)
// => need to add the out-of-bound check explicitly here to handle unseen categoricals
if (naSplitDirInt == DhnasdNaVsRest) {
_sb.p("!Double.isNaN(data[").p(col).p("])");
if (limit != Integer.MAX_VALUE)
_sb.p(" && (data[").p(col).p("] < " + limit + ") ");
}
else if (naSplitDirInt == DhnasdNaLeft || naSplitDirInt == DhnasdLeft) {
_sb.p("Double.isNaN(data[").p(col).p("]) ");
if (limit != Integer.MAX_VALUE)
_sb.p("|| (data[").p(col).p("] >= " + limit + ") ");
_sb.p("|| ");
}
if (naSplitDirInt != DhnasdNaVsRest) {
_sb.p("data[").p(col);
_sb.p(colName);
_sb.p("] < ").pj(fcmp);
_constantPoolSize += 2; // * bytes for generated float which is represented as double because of cast (Double occupies 2 slots in constant pool)
}
} else {
assert naSplitDirInt != DhnasdNaVsRest : "NAvsREST splits are expected to be represented with equal==0";
boolean leftward = naSplitDirInt == DhnasdNaLeft || naSplitDirInt == DhnasdLeft;
if (leftward) {
_sb.p("Double.isNaN(data[").p(col).p(colName).p("]) || !"); //NAs (or out of range) go left
gcmp.toJavaRangeCheck(_sb, col);
if (limit != Integer.MAX_VALUE) {
_sb.p(" || (data[").p(col).p("] >= " + limit + ")");
}
_sb.p(" || ");
} else {
_sb.p("!Double.isNaN(data[").p(col).p(colName).p("]) && ");
}
_sb.p("(");
gcmp.toJavaRangeCheck(_sb, col);
_sb.p(" && ");
if (limit != Integer.MAX_VALUE) {
_sb.p("(data[").p(col).p("] < " + limit + ")");
}
_sb.p(" && ");
gcmp.toJava(_sb, "GRPSPLIT" + _grpCnt, col);
_sb.p(")");
_grpCnt++;
}
_sb.p(" ? ").ii(2).nl();
}
@Override protected void leaf( float pred ) {
_sb.i().pj(pred);
// We are generating float which occupies single slot in constant pool, however
// left side of final expression is double, hence javac directly stores double in constant pool (2places)
_constantPoolSize += 2;
}
@Override
protected void mid(int col, float fcmp, int equal) throws RuntimeException {
_sb.p(" : ").nl();
}
@Override protected void post(int col, float fcmp, int equal ) {
_sb.p(')').di(2);
if (_sbs[_depth]!=null) { // Top of stack - finalize the class generate into _sb
closure(_sb);
_csb.p(_sb);
_sb = _sbs[_depth];
_nodes = _nodesCnt[_depth];
_sbs[_depth] = null;
_grpsplit = _grpSplits[_depth];
_grpCnt = _grpSplitsCnt[_depth];
_grpSplits[_depth] = null;
_constantPoolSize = _constantPool[_depth];
_staticInitSize = _staticInit[_depth];
}
}
public void generate() {
preamble(_sb, _subtrees++); // TODO: Need to pass along group split BitSet
visit();
closure(_sb);
_sb.p(_csb);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/TreeStats.java
|
package hex.tree;
import water.Iced;
public class TreeStats extends Iced {
public int _min_depth = 0;
public int _max_depth = 0;
public float _mean_depth;
public int _min_leaves = 0;
public int _max_leaves = 0;
public float _mean_leaves;
public long _byte_size;
public int _num_trees = 0;
long _sum_depth = 0;
long _sum_leaves = 0;
public boolean isValid() { return _min_depth <= _max_depth; }
public void updateBy(DTree tree) {
if( tree == null ) return;
if( _min_depth == 0 || _min_depth > tree._depth ) _min_depth = tree._depth;
if( _max_depth == 0 || _max_depth < tree._depth ) _max_depth = tree._depth;
if( _min_leaves == 0 || _min_leaves > tree._leaves) _min_leaves = tree._leaves;
if( _max_leaves == 0 || _max_leaves < tree._leaves) _max_leaves = tree._leaves;
_sum_depth += tree._depth;
_sum_leaves += tree._leaves;
_num_trees++;
updateMeans();
}
public void setNumTrees(int i) { _num_trees = i; }
@Override
public String toString() {
return "TreeStats{" +
"_min_depth=" + _min_depth +
", _max_depth=" + _max_depth +
", _mean_depth=" + _mean_depth +
'}';
}
private void updateMeans() {
_mean_depth = ((float) _sum_depth / _num_trees);
_mean_leaves = ((float) _sum_leaves / _num_trees);
}
public void mergeWith(TreeStats otherTreeStats) {
if (otherTreeStats._min_depth < this._min_depth) this._min_depth = otherTreeStats._min_depth;
if (otherTreeStats._max_depth > this._max_depth) this._max_depth = otherTreeStats._max_depth;
if (otherTreeStats._min_leaves < this._min_leaves) this._min_leaves = otherTreeStats._min_leaves;
if (otherTreeStats._max_leaves > this._max_leaves) this._max_leaves = otherTreeStats._max_leaves;
this._byte_size += otherTreeStats._byte_size;
this._num_trees += otherTreeStats._num_trees;
this._sum_depth += otherTreeStats._sum_depth;
this._sum_leaves += otherTreeStats._sum_leaves;
updateMeans();
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/TreeUtils.java
|
package hex.tree;
import hex.KeyValue;
import hex.ModelBuilder;
import hex.ModelCategory;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import java.util.HashSet;
import java.util.Set;
public class TreeUtils {
public static void checkMonotoneConstraints(ModelBuilder<?, ?, ?> mb, Frame train, KeyValue[] constraints) {
// we check that there are no duplicate definitions and constraints are defined only for numerical columns
Set<String> constrained = new HashSet<>();
for (KeyValue constraint : constraints) {
if (constrained.contains(constraint.getKey())) {
mb.error("_monotone_constraints", "Feature '" + constraint.getKey() + "' has multiple constraints.");
continue;
}
constrained.add(constraint.getKey());
Vec v = train.vec(constraint.getKey());
if (v == null) {
mb.error("_monotone_constraints", "Invalid constraint - there is no column '" + constraint.getKey() + "' in the training frame.");
} else if (v.get_type() != Vec.T_NUM) {
mb.error("_monotone_constraints", "Invalid constraint - column '" + constraint.getKey() +
"' has type " + v.get_type_str() + ". Only numeric columns can have monotonic constraints.");
}
}
}
public static void checkInteractionConstraints(ModelBuilder<?, ?, ?> mb, Frame train, String[][] constraints) {
for (String[] constraintsSet : constraints) {
for (String constraint : constraintsSet) {
if(mb._parms._ignored_columns != null && ArrayUtils.find(mb._parms._ignored_columns, constraint) != -1) {
mb.error("_interaction_constraints", "Column with the name '" + constraint + "' is set in ignored columns and cannot be used in interaction.");
} else {
Vec v = train.vec(constraint);
if (v == null) {
mb.error("_interaction_constraints", "Invalid interaction constraint - there is no column '" + constraint + "' in the training frame.");
}
}
if(constraint.equals(mb._parms._response_column)){
mb.error("'_interaction_constraints'", "Column with the name '" + constraint + "' is used as response column and cannot be used in interaction.");
}
if(constraint.equals(mb._parms._weights_column)){
mb.error("'_interaction_constraints'","Column with the name '" + constraint + "' is used as weights column and cannot be used in interaction.");
}
if(constraint.equals(mb._parms._fold_column)){
mb.error("_interaction_constraints", "Column with the name '" + constraint + "' is used as fold column and cannot be used in interaction.");
}
}
}
}
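/** Resolves the user-supplied class label to the index of the corresponding per-class tree: always 0 for regression and binomial models, otherwise the position of the label in the response domain. */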
public static int getResponseLevelIndex(final String categorical, final SharedTreeModel.SharedTreeOutput sharedTreeOutput) {
final String trimmedCategorical = categorical != null ? categorical.trim() : ""; // Trim the categorical once - input from the user
if (! sharedTreeOutput.isClassifier()) {
if (!trimmedCategorical.isEmpty())
throw new IllegalArgumentException("There are no tree classes for " + sharedTreeOutput.getModelCategory() + ".");
return 0; // There is only one tree for non-classification models
}
final String[] responseColumnDomain = sharedTreeOutput._domains[sharedTreeOutput.responseIdx()];
if (sharedTreeOutput.getModelCategory() == ModelCategory.Binomial) {
if (!trimmedCategorical.isEmpty() && !trimmedCategorical.equals(responseColumnDomain[0])) {
throw new IllegalArgumentException("For binomial, only one tree class has been built per each iteration: " + responseColumnDomain[0]);
} else {
return 0;
}
} else {
for (int i = 0; i < responseColumnDomain.length; i++) {
// User is supposed to enter the name of the categorical level correctly, not ignoring case
if (trimmedCategorical.equals(responseColumnDomain[i]))
return i;
}
throw new IllegalArgumentException("There is no such tree class. Given categorical level does not exist in response column: " + trimmedCategorical);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/TreeVisitor.java
|
package hex.tree;
import water.AutoBuffer;
import water.util.IcedBitSet;
/** Abstract visitor class for serialized trees.*/
public abstract class TreeVisitor<T extends Exception> {
protected static final int DhnasdNaVsRest = DHistogram.NASplitDir.NAvsREST.value();
protected static final int DhnasdNaLeft = DHistogram.NASplitDir.NALeft.value();
protected static final int DhnasdLeft = DHistogram.NASplitDir.Left.value();
// Override these methods to get walker behavior.
protected void pre (int col, float fcmp, IcedBitSet gcmp, int equal, int naSplitDirInt) throws T { }
protected void mid ( int col, float fcmp, int equal ) throws T { }
protected void post( int col, float fcmp, int equal ) throws T { }
protected void leaf( float pred ) throws T { }
long result( ) { return 0; } // Override to return simple results
protected final CompressedTree _ct;
private final AutoBuffer _ts;
private final IcedBitSet _gcmp; // Large-count categorical bit-set splits
protected int _depth; // actual depth
protected int _nodes; // number of visited nodes
public TreeVisitor( CompressedTree ct ) {
_ts = new AutoBuffer((_ct=ct)._bits);
_gcmp = new IcedBitSet(0);
}
// Call either the single-class leaf or the full-prediction leaf
private void leaf2( int mask ) throws T {
assert (mask==0 || ( (mask&16)==16 && (mask&32)==32) ) : "Unknown mask: " + mask; // Is a leaf or a special leaf on the top of tree
leaf(_ts.get4f());
}
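// Decodes one serialized node: node type, split column (65535 marks a leaf), NA direction, split value or categorical bitset, and the skip offset; then recursively visits the left and right subtrees.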
public final void visit() throws T {
int nodeType = _ts.get1();
int col = _ts.get2();
if( col==65535 ) { leaf2(nodeType); return; }
int equal = (nodeType&12) >> 2;
int naSplitDirInt = _ts.get1();
float fcmp = -1;
if (naSplitDirInt != DhnasdNaVsRest) {
// Extract value or group to split on
if (equal == 0 || equal == 1)
fcmp = _ts.get4f();
else {
if (equal == 2) _gcmp.fill2(_ct._bits, _ts);
else _gcmp.fill3(_ct._bits, _ts);
}
}
// Compute the amount to skip.
int lmask = nodeType & 0x33;
int rmask = (nodeType & 0xC0) >> 2;
int skip = 0;
switch(lmask) {
case 0: skip = _ts.get1(); break;
case 1: skip = _ts.get2(); break;
case 2: skip = _ts.get3(); break;
case 3: skip = _ts.get4(); break;
case 48: skip = 4; break; // skip is always 4 for direct leaves (see DecidedNode.size() and LeafNode.size() methods)
default: assert false:"illegal lmask value " + lmask;
}
pre(col, fcmp, _gcmp, equal, naSplitDirInt); // Pre-walk
_depth++;
if( (lmask & 0x10)==16 ) leaf2(lmask); else visit();
mid(col, fcmp, equal); // Mid-walk
if( (rmask & 0x10)==16 ) leaf2(rmask); else visit();
_depth--;
post(col, fcmp, equal);
_nodes++;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/drf/DRF.java
|
package hex.tree.drf;
import hex.Model;
import hex.ModelCategory;
import hex.PojoWriter;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.drf.DrfMojoModel;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.*;
import hex.tree.DTree.DecidedNode;
import hex.tree.DTree.LeafNode;
import hex.tree.DTree.UndecidedNode;
import water.Job;
import water.Key;
import water.MRTask;
import water.fvec.C0DChunk;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.util.ArrayUtils;
import java.util.Arrays;
import java.util.Random;
import static hex.genmodel.GenModel.getPrediction;
import static hex.tree.drf.TreeMeasuresCollector.asSSE;
import static hex.tree.drf.TreeMeasuresCollector.asVotes;
/** Distributed Random Forest
*/
public class DRF extends SharedTree<hex.tree.drf.DRFModel, hex.tree.drf.DRFModel.DRFParameters, hex.tree.drf.DRFModel.DRFOutput> {
private static final double ONEBOUND=1+1e-12; // due to fixed precision
private static final double ZEROBOUND=-1e-12; // due to fixed precision
@Override public ModelCategory[] can_build() {
return new ModelCategory[]{
ModelCategory.Regression,
ModelCategory.Binomial,
ModelCategory.Multinomial,
};
}
// Called from an http request
public DRF( hex.tree.drf.DRFModel.DRFParameters parms ) { super(parms ); init(false); }
public DRF( hex.tree.drf.DRFModel.DRFParameters parms, Key<DRFModel> key) { super(parms, key); init(false); }
public DRF( hex.tree.drf.DRFModel.DRFParameters parms, Job job ) { super(parms, job); init(false); }
public DRF(boolean startup_once) { super(new hex.tree.drf.DRFModel.DRFParameters(),startup_once); }
/** Start the DRF training Job on an F/J thread. */
@Override protected Driver trainModelImpl() { return new DRFDriver(); }
@Override public boolean scoreZeroTrees() { return false; }
/** Initialize the ModelBuilder, validating all arguments and preparing the
* training frame. This call is expected to be overridden in the subclasses
* and each subclass will start with "super.init();". This call is made
* by the front-end whenever the GUI is clicked, and needs to be fast;
* heavy-weight prep needs to wait for the trainModel() call.
*/
@Override public void init(boolean expensive) {
super.init(expensive);
// Initialize local variables
if( _parms._mtries < 1 && _parms._mtries != -1 && _parms._mtries != -2 )
error("_mtries", "mtries must be -1 (converted to sqrt(features)) or -2 (All features) or >= 1 but it is " + _parms._mtries);
if( _train != null ) {
int ncols = _train.numCols();
if( _parms._mtries != -1 && _parms._mtries != -2 && !(1 <= _parms._mtries && _parms._mtries < ncols /*ncols includes the response*/))
error("_mtries","Computed mtries should be -1 or -2 or in interval [1,"+ncols+"[ but it is " + _parms._mtries);
}
DistributionFamily[] allowed_distributions = new DistributionFamily[] {
DistributionFamily.AUTO,
DistributionFamily.bernoulli,
DistributionFamily.multinomial,
DistributionFamily.gaussian,
};
if (!ArrayUtils.contains(allowed_distributions, _parms._distribution))
error("_distribution", _parms._distribution.name() + " distribution is not supported for DRF in current H2O.");
if (_parms._distribution == DistributionFamily.AUTO) {
if (_nclass == 1) _parms._distribution = DistributionFamily.gaussian;
if (_nclass >= 2) _parms._distribution = DistributionFamily.multinomial;
}
if (_parms._sample_rate == 1f && _valid == null && _parms._nfolds == 0)
warn("_sample_rate", "Sample rate is 100% and no validation dataset and no cross-validation. There are no out-of-bag data to compute error estimates on the training data!");
if (hasOffsetCol())
error("_offset_column", "Offsets are not yet supported for DRF.");
}
// ----------------------
private class DRFDriver extends Driver {
@Override protected boolean doOOBScoring() { return true; }
// --- Private data handled only on master node
// Classification or Regression:
// Tree votes/SSE of individual trees on OOB rows
public transient TreeMeasuresCollector.TreeMeasures _treeMeasuresOnOOB;
// Tree votes/SSE per individual features on permutated OOB rows
public transient TreeMeasuresCollector.TreeMeasures[/*features*/] _treeMeasuresOnSOOB;
// Variable importance based on tree split decisions
private transient float[/*nfeatures*/] _improvPerVar;
private void initTreeMeasurements() {
_improvPerVar = new float[_ncols];
final int ntrees = _parms._ntrees;
// Preallocate tree votes
if (_model._output.isClassifier()) {
_treeMeasuresOnOOB = new TreeMeasuresCollector.TreeVotes(ntrees);
_treeMeasuresOnSOOB = new TreeMeasuresCollector.TreeVotes[_ncols];
for (int i=0; i<_ncols; i++) _treeMeasuresOnSOOB[i] = new TreeMeasuresCollector.TreeVotes(ntrees);
} else {
_treeMeasuresOnOOB = new TreeMeasuresCollector.TreeSSE(ntrees);
_treeMeasuresOnSOOB = new TreeMeasuresCollector.TreeSSE[_ncols];
for (int i=0; i<_ncols; i++) _treeMeasuresOnSOOB[i] = new TreeMeasuresCollector.TreeSSE(ntrees);
}
}
@Override protected void initializeModelSpecifics() {
_mtry_per_tree = Math.max(1, (int)(_parms._col_sample_rate_per_tree * _ncols));
if (!(1 <= _mtry_per_tree && _mtry_per_tree <= _ncols)) throw new IllegalArgumentException("Computed mtry_per_tree should be in interval <1,"+_ncols+"> but it is " + _mtry_per_tree);
if(_parms._mtries==-2){ //mtries set to -2 would use all columns in each split regardless of what column has been dropped during train
_mtry = _ncols;
}else if(_parms._mtries==-1) {
_mtry = (isClassifier() ? Math.max((int) Math.sqrt(_ncols), 1) : Math.max(_ncols / 3, 1)); // classification: mtry=sqrt(_ncols), regression: mtry=_ncols/3
}else{
_mtry = _parms._mtries;
}
if (!(1 <= _mtry && _mtry <= _ncols)) {
throw new IllegalArgumentException("Computed mtry should be in interval <1," + _ncols + "> but it is " + _mtry);
}
if (_model != null && _model.evalAutoParamsEnabled) {
_model.initActualParamValuesAfterOutputSetup(isClassifier());
}
_initialPrediction = isClassifier() ? 0 : getInitialValue();
// Initialize TreeVotes for classification, MSE arrays for regression
initTreeMeasurements();
/** Fill work columns:
* - classification: set 1 in the corresponding wrk col according to row response
* - regression: copy response into work column (there is only 1 work column)
*/
new MRTask() {
@Override public void map(Chunk chks[]) {
Chunk cy = chk_resp(chks);
for (int i = 0; i < cy._len; i++) {
if (cy.isNA(i)) continue;
if (isClassifier()) {
int cls = (int) cy.at8(i);
chk_work(chks, cls).set(i, 1L);
} else {
float pred = (float) cy.atd(i);
chk_work(chks, 0).set(i, pred);
}
}
}
}.doAll(_train);
}
// --------------------------------------------------------------------------
// Build the next random k-trees representing tid-th tree
@Override protected boolean buildNextKTrees() {
// We're going to build K (nclass) trees - each focused on correcting
// errors for a single class.
final DTree[] ktrees = new DTree[_nclass];
// Define a "working set" of leaf splits, from leafs[i] to tree._len for each tree i
int[] leafs = new int[_nclass];
// Assign rows to nodes - fill the "NIDs" column(s)
growTrees(ktrees, leafs, _rand);
// Move rows into the final leaf rows - fill "Tree" and OUT_BAG_TREES columns and zap the NIDs column
CollectPreds cp = new CollectPreds(ktrees,leafs,_model.defaultThreshold()).doAll(_train,_parms._build_tree_one_node);
if (isClassifier()) asVotes(_treeMeasuresOnOOB).append(cp.rightVotes, cp.allRows); // Track right votes over OOB rows for this tree
else /* regression */ asSSE (_treeMeasuresOnOOB).append(cp.sse, cp.allRows);
// Grow the model by K-trees
_model._output.addKTrees(ktrees);
return false; //never stop early
}
// Assumes that the "Work" column are filled with horizontalized (0/1) class memberships per row (or copy of regression response)
private void growTrees(DTree[] ktrees, int[] leafs, Random rand) {
// Initial set of histograms. All trees; one leaf per tree (the root
// leaf); all columns
DHistogram hcs[][][] = new DHistogram[_nclass][1/*just root leaf*/][_ncols];
// Adjust real bins for the top-levels
int adj_nbins = Math.max(_parms._nbins_top_level,_parms._nbins);
// Use the same seed for all k-trees. NOTE: this is only to make a fair
// view for all k-trees
long rseed = rand.nextLong();
// Initially set up as if an empty split had just happened
for (int k = 0; k < _nclass; k++) {
if (_model._output._distribution[k] != 0) { // Ignore missing classes
// The Boolean Optimization
// This optimization assumes the 2nd tree of a 2-class system is the
// inverse of the first (and that the same columns were picked)
if( k==1 && _nclass==2 && _model.binomialOpt()) continue;
ktrees[k] = new DTree(_train, _ncols, _mtry, _mtry_per_tree, rseed, _parms);
new UndecidedNode(ktrees[k], -1, DHistogram.initialHist(_train, _ncols, adj_nbins, hcs[k][0], rseed, _parms, getGlobalSplitPointsKeys(), null, true, null), null, null); // The "root" node
}
}
// Sample - mark the lines by putting 'OUT_OF_BAG' into nid(<klass>) vector
Sample ss[] = new Sample[_nclass];
for( int k=0; k<_nclass; k++)
if (ktrees[k] != null) ss[k] = new Sample(ktrees[k], _parms._sample_rate, _parms._sample_rate_per_class).dfork(null,new Frame(vec_nids(_train,k),vec_resp(_train)), _parms._build_tree_one_node);
for( int k=0; k<_nclass; k++)
if( ss[k] != null ) ss[k].getResult();
// ----
// One Big Loop till the ktrees are of proper depth.
// Adds a layer to the trees each pass.
int depth=0;
for( ; depth<_parms._max_depth; depth++ ) {
hcs = buildLayer(_train, _parms._nbins, ktrees, leafs, hcs, _parms._build_tree_one_node);
// If we did not make any new splits, then the tree is split-to-death
if( hcs == null ) break;
}
// Each tree bottomed-out in a DecidedNode; go 1 more level and insert
// LeafNodes to hold predictions.
for( int k=0; k<_nclass; k++ ) {
DTree tree = ktrees[k];
if( tree == null ) continue;
int leaf = leafs[k] = tree.len();
for( int nid=0; nid<leaf; nid++ ) {
if( tree.node(nid) instanceof DecidedNode ) {
DecidedNode dn = tree.decided(nid);
if( dn._split == null ) { // No decision here, no row should have this NID now
if( nid==0 ) { // Handle the trivial non-splitting tree
LeafNode ln = new LeafNode(tree, -1, 0);
ln._pred = (float)(isClassifier() ? _model._output._priorClassDist[k] : _initialPrediction);
}
continue;
}
for( int i=0; i<dn._nids.length; i++ ) {
int cnid = dn._nids[i];
if( cnid == -1 || // Bottomed out (predictors or responses known constant)
tree.node(cnid) instanceof UndecidedNode || // Or chopped off for depth
(tree.node(cnid) instanceof DecidedNode && // Or not possible to split
((DecidedNode)tree.node(cnid))._split==null) ) {
LeafNode ln = new LeafNode(tree,nid);
ln._pred = (float)dn.pred(i); // Set prediction into the leaf
dn._nids[i] = ln.nid(); // Mark a leaf here
}
}
}
}
} // -- k-trees are done
}
// Collect and write predictions into leafs.
private class CollectPreds extends MRTask<CollectPreds> {
/* @IN */ final DTree _trees[]; // Read-only, shared (except at the histograms in the Nodes)
/* @IN */ double _threshold; // Classification decision threshold used by getPrediction
/* @OUT */ double rightVotes; // number of right votes over OOB rows (performed by this tree) represented by DTree[] _trees
/* @OUT */ double allRows; // number of all OOB rows (sampled by this tree)
/* @OUT */ float sse; // Sum of squares for this tree only
CollectPreds(DTree trees[], int leafs[], double threshold) { _trees=trees; _threshold = threshold; }
final boolean importance = true;
@Override public void map( Chunk[] chks ) {
final Chunk y = importance ? chk_resp(chks) : null; // Response
final double[] rpred = importance ? new double[1+_nclass] : null; // Row prediction
final double[] rowdata = importance ? new double[_ncols] : null; // Pre-allocated row data
final Chunk oobt = chk_oobt(chks); // Out-of-bag rows counter over all trees
final Chunk weights = hasWeightCol() ? chk_weight(chks) : new C0DChunk(1, chks[0]._len); // Row weights (constant 1 when no weight column is used)
// Iterate over all rows
for( int row=0; row<oobt._len; row++ ) {
double weight = weights.atd(row);
final boolean wasOOBRow = ScoreBuildHistogram.isOOBRow((int)chk_nids(chks,0).at8(row));
// For all trees (i.e., k classes)
for( int k=0; k<_nclass; k++ ) {
final Chunk nids = chk_nids(chks, k); // Node-ids for this tree/class
if (weight!=0) {
final DTree tree = _trees[k];
if (tree == null) continue; // Empty class is ignored
int nid = (int) nids.at8(row); // Get Node to decide from
// Update only out-of-bag rows
// This is out-of-bag row - but we would like to track on-the-fly prediction for the row
if (wasOOBRow) {
final Chunk ct = chk_tree(chks, k); // k-tree working column holding votes for given row
nid = ScoreBuildHistogram.oob2Nid(nid);
if (tree.node(nid) instanceof UndecidedNode) // If we bottomed out the tree
nid = tree.node(nid).pid(); // Then take parent's decision
int leafnid;
if (tree.root() instanceof LeafNode) {
leafnid = 0;
} else {
DecidedNode dn = tree.decided(nid); // Must have a decision point
if (dn._split == null) // Unable to decide?
dn = tree.decided(tree.node(nid).pid()); // Then take parent's decision
leafnid = dn.getChildNodeID(chks,row); // Decide down to a leafnode
}
// Setup Tree(i) - on the fly prediction of i-tree for row-th row
// - for classification: cumulative number of votes for this row
// - for regression: cumulative sum of prediction of each tree - has to be normalized by number of trees
double prediction = ((LeafNode) tree.node(leafnid)).pred(); // Prediction for this k-class and this row
if (importance) rpred[1 + k] = (float) prediction; // for both regression and classification
ct.set(row, (float) (ct.atd(row) + prediction));
}
}
// reset help column for this row and this k-class
nids.set(row, 0);
} /* end of k-trees iteration */
// For this tree this row is out-of-bag - i.e., a tree voted for this row
if (wasOOBRow) oobt.set(row, oobt.atd(row) + weight); // track number of trees
if (importance && weight!=0) {
if (wasOOBRow && !y.isNA(row)) {
if (isClassifier()) {
int treePred = getPrediction(rpred, _model._output._priorClassDist, data_row(chks, row, rowdata), _threshold);
int actuPred = (int) y.at8(row);
if (treePred==actuPred) rightVotes+=weight; // No miss !
} else { // regression
double treePred = rpred[1];
double actuPred = y.atd(row);
sse += (actuPred-treePred)*(actuPred-treePred);
}
allRows+=weight;
}
}
}
}
@Override public void reduce(CollectPreds mrt) {
rightVotes += mrt.rightVotes;
allRows += mrt.allRows;
sse += mrt.sse;
}
}
@Override protected DRFModel makeModel( Key modelKey, DRFModel.DRFParameters parms) {
return new DRFModel(modelKey,parms,new DRFModel.DRFOutput(DRF.this));
}
}
// Read the 'tree' columns, do model-specific math and put the results in the
// fs[] array, and return the sum. Dividing any fs[] element by the sum
// turns the results into a probability distribution.
@Override protected double score1( Chunk chks[], double weight, double offset, double fs[/*nclass*/], int row ) {
double sum = 0;
if (_nclass > 2 || (_nclass == 2 && !_model.binomialOpt())) {
for (int k = 0; k < _nclass; k++)
sum += (fs[k+1] = weight * chk_tree(chks, k).atd(row) / chk_oobt(chks).atd(row));
}
else if (_nclass==2 && _model.binomialOpt()) {
fs[1] = weight * chk_tree(chks, 0).atd(row) / chk_oobt(chks).atd(row);
if (fs[1]>1 && fs[1]<=ONEBOUND)
fs[1] = 1.0;
else if (fs[1]<0 && fs[1]>=ZEROBOUND)
fs[1] = 0.0;
assert(fs[1] >= 0 && fs[1] <= 1);
fs[2] = 1. - fs[1];
}
else { //regression
// average over the trees that voted for this row (only trees for which the row was out-of-bag)
sum += (fs[0] = weight * chk_tree(chks, 0).atd(row) / chk_oobt(chks).atd(row) );
fs[1] = 0;
}
return sum;
}
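// Worked example for score1 (editor's addition, hypothetical counts): with 3 classes,
// weight = 1 and a row that was out-of-bag for 40 trees, the accumulated per-class tree
// columns might read {18, 10, 12}. Then fs[1] = 18/40 = 0.45, fs[2] = 10/40 = 0.25,
// fs[3] = 12/40 = 0.30 and sum = 1.0; dividing each fs[k] by the returned sum (as the
// comment above notes) yields a proper probability distribution even when the raw ratios
// do not add up to exactly one. With the binomial optimization only one tree column is
// kept and fs[2] is recovered as 1 - fs[1].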
@Override
public PojoWriter makePojoWriter(Model<?, ?, ?> genericModel, MojoModel mojoModel) {
DrfMojoModel drfMojoModel = (DrfMojoModel) mojoModel;
CompressedTree[][] trees = MojoUtils.extractCompressedTrees(drfMojoModel);
boolean binomialOpt = MojoUtils.isUsingBinomialOpt(drfMojoModel, trees);
return new DrfPojoWriter(genericModel, drfMojoModel.getCategoricalEncoding(), binomialOpt, trees, drfMojoModel._balanceClasses);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/drf/DRFModel.java
|
package hex.tree.drf;
import hex.genmodel.utils.DistributionFamily;
import hex.tree.*;
import hex.util.EffectiveParametersUtils;
import water.Job;
import water.Key;
import water.MemoryManager;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.util.ArrayUtils;
import water.util.MathUtils;
import java.util.Arrays;
public class DRFModel extends SharedTreeModelWithContributions<DRFModel, DRFModel.DRFParameters, DRFModel.DRFOutput> {
public static class DRFParameters extends SharedTreeModelWithContributions.SharedTreeParameters {
public String algoName() { return "DRF"; }
public String fullName() { return "Distributed Random Forest"; }
public String javaName() { return DRFModel.class.getName(); }
public boolean _binomial_double_trees = false;
public int _mtries = -1; //number of columns to use per split. default depends on the algorithm and problem (classification/regression)
public DRFParameters() {
super();
// Set DRF-specific defaults (can differ from SharedTreeModel's defaults)
_max_depth = 20;
_min_rows = 1;
}
}
public static class DRFOutput extends SharedTreeModelWithContributions.SharedTreeOutput {
public DRFOutput( DRF b) { super(b); }
}
public DRFModel(Key<DRFModel> selfKey, DRFParameters parms, DRFOutput output ) {
super(selfKey, parms, output);
}
@Override
public void initActualParamValues() {
super.initActualParamValues();
EffectiveParametersUtils.initFoldAssignment(_parms);
EffectiveParametersUtils.initHistogramType(_parms);
EffectiveParametersUtils.initCategoricalEncoding(_parms, Parameters.CategoricalEncodingScheme.Enum);
EffectiveParametersUtils.initCalibrationMethod(_parms);
}
public void initActualParamValuesAfterOutputSetup(boolean isClassifier) {
EffectiveParametersUtils.initStoppingMetric(_parms, isClassifier);
}
@Override
public Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j) {
if (_parms._binomial_double_trees) {
throw new UnsupportedOperationException(
"Calculating contributions is currently not supported for model with binomial_double_trees parameter set.");
}
return super.scoreContributions(frame, destination_key, j);
}
@Override
public Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j, ContributionsOptions options) {
if (_parms._binomial_double_trees) {
throw new UnsupportedOperationException(
"Calculating contributions is currently not supported for model with binomial_double_trees parameter set.");
}
return super.scoreContributions(frame, destination_key, j, options);
}
@Override
protected SharedTreeModelWithContributions<DRFModel, DRFParameters, DRFOutput>.ScoreContributionsWithBackgroundTask getScoreContributionsWithBackgroundTask(SharedTreeModel model, Frame fr, Frame backgroundFrame, boolean expand, int[] catOffsets, ContributionsOptions options) {
return new ScoreContributionsWithBackgroundTaskDRF(fr, backgroundFrame, options._outputPerReference, this, expand, catOffsets);
}
@Override
protected ScoreContributionsTask getScoreContributionsTask(SharedTreeModel model) {
return new ScoreContributionsTaskDRF(this);
}
@Override
protected ScoreContributionsTask getScoreContributionsSoringTask(SharedTreeModel model, ContributionsOptions options) {
return new ScoreContributionsSoringTaskDRF(this, options);
}
@Override public boolean binomialOpt() { return !_parms._binomial_double_trees; }
/** Bulk scoring API for one row. Chunks are all compatible with the model,
* and expect the last Chunks are for the final distribution and prediction.
* Default method is to just load the data into the tmp array, then call
* subclass scoring logic. */
@Override protected double[] score0(double[] data, double[] preds, double offset, int ntrees) {
super.score0(data, preds, offset, ntrees);
int N = _output._ntrees;
if (_output.nclasses() == 1) { // regression - compute avg over all trees
if (N>=1) preds[0] /= N;
} else { // classification
if (_output.nclasses() == 2 && binomialOpt()) {
if (N>=1) {
preds[1] /= N; //average probability
}
preds[2] = 1. - preds[1];
} else {
double sum = MathUtils.sum(preds);
if (sum > 0) MathUtils.div(preds, sum);
}
}
return preds;
}
@Override
public double score(double[] data) {
double[] pred = score0(data, new double[_output.nclasses() + 1], 0, _output._ntrees);
score0PostProcessSupervised(pred, data);
return pred[0];
}
@Override
protected SharedTreePojoWriter makeTreePojoWriter() {
CompressedForest compressedForest = new CompressedForest(_output._treeKeys, _output._domains);
CompressedForest.LocalCompressedForest localCompressedForest = compressedForest.fetch();
return new DrfPojoWriter(this, localCompressedForest._trees);
}
public class ScoreContributionsTaskDRF extends ScoreContributionsTask {
public ScoreContributionsTaskDRF(SharedTreeModel model) {
super(model);
}
@Override
public void addContribToNewChunk(float[] contribs, NewChunk[] nc) {
for (int i = 0; i < nc.length; i++) {
// Prediction of DRF tree ensemble is an average prediction of all trees. So, divide contribs by ntrees
if (_output.nclasses() == 1) { //Regression
nc[i].addNum(contribs[i] /_output._ntrees);
} else { //Binomial
float featurePlusBiasRatio = (float)1 / (_output._varimp.numberOfUsedVariables() + 1); // + 1 for bias term
nc[i].addNum(contribs[i] != 0 ? (featurePlusBiasRatio - (contribs[i] / _output._ntrees)) : 0);
}
}
}
}
public class ScoreContributionsWithBackgroundTaskDRF extends ScoreContributionsWithBackgroundTask {
public ScoreContributionsWithBackgroundTaskDRF(Frame fr, Frame backgroundFrame, boolean perReference, SharedTreeModel model, boolean expand, int[] catOffsets) {
super(fr._key, backgroundFrame._key, perReference, model, expand, catOffsets, false);
}
@Override
public void doModelSpecificComputation(double[] contribs) {
// Prediction of DRF tree ensemble is an average prediction of all trees. So, divide contribs by ntrees
if (_output.nclasses() == 1) { //Regression
for (int i = 0; i < contribs.length; i++) {
contribs[i] = contribs[i] / _output._ntrees;
}
} else { //Binomial
/* Sum of contributions + biasTerm (contribs[contribs.length-1]) gives us prediction for P(Y==0) but the user is
interested in knowing P(Y==1) = 1 - P(Y==0).
Since SHAP should satisfy the dummy property - if a feature is not used it should not have any contribution, we
cannot just do 1/nfeatures - (contribs[i]/ntrees).
In the contribs array we have the contributions and the BiasTerm; the difference between the two is that the BiasTerm
corresponds to the prediction of the background data point, hence it doesn't satisfy the dummy property and should
always be involved in the conversion.
Another property that should be satisfied is the following:
Let's denote contribution of feature a of data point x on background data point b as contribution(a|x,b), then
contribution(a|x,b) == - contribution(a|b,x). In other words, if contribution(a|x,b) shifts the response
from f(b) to f(x), then contribution(a|b,x) should move the response by the same magnitude but in the opposite
direction (from f(x) to f(b)).
Let's derive (hopefully) the correct formula:
$$P(Y=0|x) = 0.3$$
$$P(Y=0|b) = 0.45$$
$$ \sum\phi_i =P(Y=0|x) - P(Y=0|b) = -0.15$$
Above we call $P(Y=0|b)$ the "bias term", and it should be obvious that it corresponds to the prediction of the background sample.
If we rewrite it we can see that the sum "lifts" the prediction from the background sample prediction to the prediction for the point that we calculate the SHAP for:
$$P(Y=0|x) = \sum\phi_i + P(Y=0|b)$$
Now if we are interested in the contributions to P(Y=1|x) we can calculate those probabilities like:
$$P(Y=1|x) = 1-P(Y=0|x) = 0.7$$
$$P(Y=1|b) = 1- P(Y=0|b) = 0.55$$
Now the important part comes in:
$$P(Y=1|x) - P(Y=1|b) = 1- P(Y=0|x) - 1 + P(Y=0|b) = - (P(Y=0|x) - P(Y=0|b)) = - \sum\phi_i = 0.15$$
So the contributions for $P(Y=1|x)$ sum up to the negative value of contributions for $P(Y=0|x)$.
And the second important thing is that the bias term should now correspond to $P(Y=1|b) = 1 - P(Y=0|b)$.
So I think the only place where we should subtract from a constant is the bias term.
More details can be found in this thread: https://github.com/h2oai/h2o-3/issues/15657#issuecomment-1652287487
*/
for (int i = 0; i < contribs.length-1; i++) {
contribs[i] = -(contribs[i] / _output._ntrees);
}
contribs[contribs.length-1] = 1 - (contribs[contribs.length-1]/_output._ntrees);
}
}
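// Numeric check of the conversion above (editor's addition; the values come from the
// derivation in the comment): suppose that after dividing by _ntrees the per-feature
// contributions to P(Y=0|x) are {-0.10, -0.05} and the bias term is 0.45, i.e.
// P(Y=0|x) = -0.10 - 0.05 + 0.45 = 0.30. After the flip the contributions become
// {0.10, 0.05} and the bias term 1 - 0.45 = 0.55, which sum to 0.70 = P(Y=1|x), as required.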
}
public class ScoreContributionsSoringTaskDRF extends ScoreContributionsSortingTask {
public ScoreContributionsSoringTaskDRF(SharedTreeModel model, ContributionsOptions options) {
super(model, options);
}
@Override
public void doModelSpecificComputation(float[] contribs) {
for (int i = 0; i < contribs.length; i++) {
// Prediction of DRF tree ensemble is an average prediction of all trees. So, divide contribs by ntrees
if (_output.nclasses() == 1) { //Regression
contribs[i] = contribs[i] / _output._ntrees;
} else { //Binomial
float featurePlusBiasRatio = (float)1 / (_output.nfeatures() + 1); // + 1 for bias term
contribs[i] = featurePlusBiasRatio - (contribs[i] / _output._ntrees);
}
}
}
}
@Override
public DrfMojoWriter getMojo() {
return new DrfMojoWriter(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/drf/DrfMojoWriter.java
|
package hex.tree.drf;
import hex.tree.SharedTreeMojoWriter;
import java.io.IOException;
/**
* Mojo definition for DRF model.
*/
public class DrfMojoWriter extends SharedTreeMojoWriter<DRFModel, DRFModel.DRFParameters, DRFModel.DRFOutput> {
@SuppressWarnings("unused") // Called through reflection in ModelBuildersHandler
public DrfMojoWriter() {}
public DrfMojoWriter(DRFModel model) { super(model); }
@Override public String mojoVersion() {
return "1.40";
}
@Override
protected void writeModelData() throws IOException {
super.writeModelData();
writekv("binomial_double_trees", model._parms._binomial_double_trees);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/drf/DrfPojoWriter.java
|
package hex.tree.drf;
import hex.Model;
import hex.genmodel.CategoricalEncoding;
import hex.tree.CompressedTree;
import hex.tree.SharedTreePojoWriter;
import water.util.SBPrintStream;
class DrfPojoWriter extends SharedTreePojoWriter {
private final boolean _balance_classes;
DrfPojoWriter(DRFModel model, CompressedTree[][] trees) {
super(model._key, model._output, model.getGenModelEncoding(), model.binomialOpt(),
trees, model._output._treeStats);
_balance_classes = model._parms._balance_classes;
}
DrfPojoWriter(Model<?, ?, ?> model, CategoricalEncoding encoding,
boolean binomialOpt, CompressedTree[][] trees,
boolean balanceClasses) {
super(model._key, model._output, encoding, binomialOpt, trees, null);
_balance_classes = balanceClasses;
}
@Override
protected void toJavaUnifyPreds(SBPrintStream body) {
if (_output.nclasses() == 1) { // Regression
body.ip("preds[0] /= " + _trees.length + ";").nl();
} else { // Classification
if (_output.nclasses() == 2 && _binomialOpt) { // Kept the initial prediction for binomial
body.ip("preds[1] /= " + _trees.length + ";").nl();
body.ip("preds[2] = 1.0 - preds[1];").nl();
} else {
body.ip("double sum = 0;").nl();
body.ip("for(int i=1; i<preds.length; i++) { sum += preds[i]; }").nl();
body.ip("if (sum>0) for(int i=1; i<preds.length; i++) { preds[i] /= sum; }").nl();
}
if (_balance_classes)
body.ip("hex.genmodel.GenModel.correctProbabilities(preds, PRIOR_CLASS_DISTRIB, MODEL_CLASS_DISTRIB);").nl();
body.ip("preds[0] = hex.genmodel.GenModel.getPrediction(preds, PRIOR_CLASS_DISTRIB, data, " + _output.defaultThreshold() + ");").nl();
}
}
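/* Sketch of the POJO code this method emits (editor's addition; 50 trees, the binomial
 * optimization, balance_classes enabled and a 0.5 threshold are hypothetical values):
 *
 *   preds[1] /= 50;
 *   preds[2] = 1.0 - preds[1];
 *   hex.genmodel.GenModel.correctProbabilities(preds, PRIOR_CLASS_DISTRIB, MODEL_CLASS_DISTRIB);
 *   preds[0] = hex.genmodel.GenModel.getPrediction(preds, PRIOR_CLASS_DISTRIB, data, 0.5);
 *
 * where 0.5 stands in for _output.defaultThreshold().
 */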
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/drf/TreeMeasuresCollector.java
|
package hex.tree.drf;
import java.util.Arrays;
import java.util.Random;
import static hex.genmodel.GenModel.getPrediction;
import hex.tree.CompressedForest;
import hex.tree.CompressedTree;
import hex.tree.SharedTree;
import water.Iced;
import water.MRTask;
import water.fvec.C0DChunk;
import water.fvec.Chunk;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.ModelUtils;
import static water.util.RandomUtils.getRNG;
/** Score a given tree model and preserve errors per tree in the form of votes (for classification)
 * or MSE (for regression).
 *
 * This is different from the Model.score() function since the MR task
 * uses an inverted loop: first over all trees and then over all rows in a chunk.
*/
public class TreeMeasuresCollector extends MRTask<TreeMeasuresCollector> {
/* @IN */ final private CompressedForest _cforest;
/* @IN */ final private float _rate;
/* @IN */ final private int _var;
/* @IN */ final private boolean _oob;
/* @IN */ final private int _ncols;
/* @IN */ final private int _nclasses;
/* @IN */ final private boolean _classification;
/* @IN */ final private double _threshold;
final private SharedTree _st;
/* @INOUT */ private final int _ntrees;
/* @OUT */ private double [/*ntrees*/] _votes; // Number of correct votes per tree (for classification only)
/* @OUT */ private double [/*ntrees*/] _nrows; // Number of scored row per tree (for classification/regression)
/* @OUT */ private float[/*ntrees*/] _sse; // Sum of squared errors per tree (for regression only)
/* Intermediate */
private transient CompressedForest.LocalCompressedForest _forest;
private TreeMeasuresCollector(CompressedForest cforest, int nclasses, int ncols, float rate, int variable, double threshold, SharedTree st) {
assert cforest._treeKeys.length > 0;
assert nclasses == cforest._treeKeys[0].length;
_cforest = cforest;
_ncols = ncols;
_rate = rate; _var = variable;
_oob = true; _ntrees = cforest._treeKeys.length;
_nclasses = nclasses;
_classification = (nclasses>1);
_threshold = threshold;
_st = st;
}
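/* Intended use (editor's sketch inferred from the fields above; the frame layout expected
 * by doAll and the surrounding wiring are assumptions, actual callers live in this package):
 *
 *   // baseline: score every tree on its OOB rows without permuting any column
 *   TreeVotes base = new TreeMeasuresCollector(forest, nclasses, ncols, sampleRate, -1, thr, st)
 *                        .doAll(fr).resultVotes();
 *   // permuted: the same scoring, but column j is shuffled among the OOB rows
 *   TreeVotes perm = new TreeMeasuresCollector(forest, nclasses, ncols, sampleRate, j, thr, st)
 *                        .doAll(fr).resultVotes();
 *   double[] impAndSd = perm.imp(base); // {mean accuracy drop, its standard deviation}
 */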
@Override
protected void setupLocal() {
_forest = _cforest.fetch();
}
public static class ShuffleTask extends MRTask<ShuffleTask> {
@Override public void map(Chunk ic, Chunk oc) {
if (ic._len==0) return;
// Each vector is shuffled in the same way
Random rng = getRNG(seed(ic.cidx()));
oc.set(0,ic.atd(0));
for (int row=1; row<ic._len; row++) {
int j = rng.nextInt(row+1); // inclusive upper bound <0,row>
// Arghhh: expand the vector into double
if (j!=row) oc.set(row, oc.atd(j));
oc.set(j, ic.atd(row));
}
}
public static long seed(int cidx) { return (0xe031e74f321f7e29L + ((long)cidx << 32L)); }
public static Vec shuffle(Vec ivec) {
Vec ovec = ivec.makeZero();
new ShuffleTask().doAll(ivec, ovec);
return ovec;
}
}
@Override public void map(Chunk[] chks) {
double[] data = new double[_ncols];
double[] preds = new double[_nclasses+1];
Chunk cresp = _st.chk_resp(chks);
Chunk weights = _st.hasWeightCol() ? _st.chk_weight(chks) : new C0DChunk(1, chks[0]._len);
int nrows = cresp._len;
int [] oob = new int[2+Math.round((1f-_rate)*nrows*1.2f+0.5f)]; // preallocate
int [] soob = null;
// Prepare output data
_nrows = new double[_ntrees];
_votes = _classification ? new double[_ntrees] : null;
_sse = _classification ? null : new float[_ntrees];
long seedForOob = ShuffleTask.seed(cresp.cidx()); // seed for shuffling oob samples
// Start iteration
for( int tidx=0; tidx<_ntrees; tidx++) { // tree
// OOB RNG for this tree
Random rng = rngForTree(_forest._trees[tidx], cresp.cidx());
// Collect OOB rows and permute them
oob = ModelUtils.sampleOOBRows(nrows, _rate, rng, oob); // reuse the same array for sampling
int oobcnt = oob[0]; // Get number of sample rows
if (_var>=0) {
if (soob==null || soob.length < oobcnt) soob = new int[oobcnt];
ArrayUtils.shuffleArray(oob, oobcnt, soob, seedForOob, 1); // Shuffle array and copy results into <code>soob</code>
}
for(int j = 1; j < 1+oobcnt; j++) {
int row = oob[j];
double w = weights.atd(row);
if (cresp.isNA(row)) continue; // we cannot deal with this row anyhow
if (w==0) continue;
// Do scoring:
// - prepare a row data
for (int i=0;i<_ncols;i++) data[i] = chks[i].atd(row); // copy the predictor values for this row
// - permute variable
if (_var>=0) data[_var] = chks[_var].atd(soob[j-1]);
else assert soob==null;
// - score data
Arrays.fill(preds, 0);
// - score only the tree
_forest.scoreTree(data, preds, tidx);
// - derive a prediction
if (_classification) {
int pred = getPrediction(preds, null /*FIXME: should use model's _priorClassDistribution*/, data, _threshold);
int actu = (int) cresp.at8(row);
// assert preds[pred] > 0 : "There should be a vote for at least one class.";
// - collect only correct votes
if (pred == actu) _votes[tidx]+=w;
} else { /* regression */
double pred = preds[0]; // Important!
double actu = cresp.atd(row);
_sse[tidx] += (actu-pred)*(actu-pred);
}
// - collect rows which were used for voting
_nrows[tidx]+=w;
//if (_var<0) System.err.println("VARIMP OOB row: " + (cresp._start+row) + " : " + Arrays.toString(data) + " tree/actu: " + pred + "/" + actu);
}
}
}
@Override public void reduce( TreeMeasuresCollector t ) { ArrayUtils.add(_votes,t._votes); ArrayUtils.add(_nrows, t._nrows); ArrayUtils.add(_sse, t._sse); }
public TreeVotes resultVotes() { return new TreeVotes(_votes, _nrows, _ntrees); }
public TreeSSE resultSSE () { return new TreeSSE (_sse, _nrows, _ntrees); }
private Random rngForTree(CompressedTree[] ts, int cidx) {
return _oob ? ts[0].rngForChunk(cidx) : new DummyRandom(); // the k-class set of trees shares the same random number generator
}
/* For bulk scoring
public static TreeVotes collect(TreeModel tmodel, Frame f, int ncols, float rate, int variable) {
CompressedTree[][] trees = new CompressedTree[tmodel.ntrees()][];
for (int tidx = 0; tidx < tmodel.ntrees(); tidx++) trees[tidx] = tmodel.ctree(tidx);
return new TreeVotesCollector(trees, tmodel.nclasses(), ncols, rate, variable).doAll(f).result();
}*/
private static final class DummyRandom extends Random {
@Override public final float nextFloat() { return 1.0f; }
}
/** A simple holder for a set of different tree measurements. */
public static abstract class TreeMeasures<T extends TreeMeasures> extends Iced {
/** Actual number of trees whose votes are stored in this object */
protected int _ntrees;
/** Number of processed row per tree. */
protected double[/*ntrees*/] _nrows;
public TreeMeasures(int initialCapacity) { _nrows = new double[initialCapacity]; }
public TreeMeasures(double[] nrows, int ntrees) { _nrows = nrows; _ntrees = ntrees;}
/** Returns number of rows which were used during voting per individual tree. */
public final double[] nrows() { return _nrows; }
/** Returns number of voting predictors */
public final int npredictors() { return _ntrees; }
/** Returns the accuracy of the tree with the given index. */
public abstract double accuracy(int tidx);
public final double[] accuracy() {
double[] r = new double[_ntrees];
// Average of all trees
for (int tidx=0; tidx<_ntrees; tidx++) r[tidx] = accuracy(tidx);
return r;
}
/** Compute variable importance with respect to given votes.
* The given {@link T} object represents correct votes.
* This object represents votes over shuffled data.
*
* @param right individual tree measurements performed over non-shuffled data.
* @return computed importance and standard deviation
*/
public abstract double[/*2*/] imp(T right);
public abstract T append(T t);
}
/** A class holding tree votes. */
public static class TreeVotes extends TreeMeasures<TreeVotes> {
/** Number of correct votes per tree */
private double[/*ntrees*/] _votes;
public TreeVotes(int initialCapacity) {
super(initialCapacity);
_votes = new double[initialCapacity];
}
public TreeVotes(double[] votes, double[] nrows, int ntrees) {
super(nrows, ntrees);
_votes = votes;
}
/** Returns the number of correct votes per tree. */
public final double[] votes() { return _votes; }
/** Returns accuracy per individual trees. */
@Override public final double accuracy(int tidx) {
assert tidx < _nrows.length && tidx < _votes.length;
return (_votes[tidx]) / _nrows[tidx];
}
/** Compute variable importance with respect to given votes.
* The given {@link TreeVotes} object represents correct votes.
* This object represents votes over shuffled data.
*
* @param right individual tree votes collected over non-shuffled data.
* @return computed importance and standard deviation
*/
@Override public final double[/*2*/] imp(TreeVotes right) {
assert npredictors() == right.npredictors();
int ntrees = npredictors();
double imp = 0;
double sd = 0;
// Over all trees
for (int tidx = 0; tidx < ntrees; tidx++) {
assert right.nrows()[tidx] == nrows()[tidx];
double delta = ((double) (right.votes()[tidx] - votes()[tidx])) / nrows()[tidx];
imp += delta;
sd += delta * delta;
}
double av = imp / ntrees;
double csd = Math.sqrt( (sd/ntrees - av*av) / ntrees );
return new double[] { av, csd};
}
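/* Worked example (editor's addition, hypothetical numbers): two trees with 100 OOB rows
 * each, correct votes without shuffling {90, 80}, with the column shuffled {70, 76}.
 * The deltas are (90-70)/100 = 0.20 and (80-76)/100 = 0.04, so av = 0.12 and
 * csd = sqrt(((0.20^2 + 0.04^2)/2 - 0.12^2) / 2) ~= 0.057.
 */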
/** Append a tree votes to a list of trees. */
public TreeVotes append(double rightVotes, double allRows) {
assert _votes.length > _ntrees && _votes.length == _nrows.length : "TreeVotes inconsistency!";
_votes[_ntrees] = rightVotes;
_nrows[_ntrees] = allRows;
_ntrees++;
return this;
}
@Override public TreeVotes append(final TreeVotes tv) {
for (int i=0; i<tv.npredictors(); i++)
append(tv._votes[i], tv._nrows[i]);
return this;
}
}
/** A simple holder serving SSE per tree. */
public static class TreeSSE extends TreeMeasures<TreeSSE> {
/** SSE per tree */
private float[/*ntrees*/] _sse;
public TreeSSE(int initialCapacity) {
super(initialCapacity);
_sse = new float[initialCapacity];
}
public TreeSSE(float[] sse, double[] nrows, int ntrees) {
super(nrows, ntrees);
_sse = sse;
}
@Override public double accuracy(int tidx) {
return _sse[tidx] / _nrows[tidx];
}
@Override public double[] imp(TreeSSE right) {
assert npredictors() == right.npredictors();
int ntrees = npredictors();
double imp = 0;
double sd = 0;
// Over all trees
for (int tidx = 0; tidx < ntrees; tidx++) {
assert right.nrows()[tidx] == nrows()[tidx]; // check that we iterate over same OOB rows
double delta = ((double) (_sse[tidx] - right._sse[tidx])) / nrows()[tidx];
imp += delta;
sd += delta * delta;
}
double av = imp / ntrees;
double csd = Math.sqrt( (sd/ntrees - av*av) / ntrees );
return new double[] { av, csd };
}
@Override public TreeSSE append(TreeSSE t) {
for (int i=0; i<t.npredictors(); i++)
append(t._sse[i], t._nrows[i]);
return this;
}
/** Append a tree sse to a list of trees. */
public TreeSSE append(float sse, double allRows) {
assert _sse.length > _ntrees && _sse.length == _nrows.length : "TreeSSE inconsistency!";
_sse [_ntrees] = sse;
_nrows[_ntrees] = allRows;
_ntrees++;
return this;
}
}
public static TreeVotes asVotes(TreeMeasures tm) { return (TreeVotes) tm; }
public static TreeSSE asSSE (TreeMeasures tm) { return (TreeSSE) tm; }
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/AbstractCompressedNode.java
|
package hex.tree.dt;
import water.Iced;
public abstract class AbstractCompressedNode extends Iced<AbstractCompressedNode> {
public abstract String toString();
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/AbstractFeatureLimits.java
|
package hex.tree.dt;
/**
* Limits for one feature.
*/
public abstract class AbstractFeatureLimits {
public abstract AbstractFeatureLimits clone();
public abstract double[] toDoubles();
public abstract boolean equals(AbstractFeatureLimits other);
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/AbstractSplittingRule.java
|
package hex.tree.dt;
import water.Iced;
public abstract class AbstractSplittingRule extends Iced<AbstractSplittingRule> {
protected int _featureIndex = -1; // valid default value before the actual one is defined
protected double _criterionValue = -1; // valid default value before the actual one is defined
protected AbstractSplittingRule() {
}
public double getCriterionValue() {
return _criterionValue;
}
public int getFeatureIndex() {
return _featureIndex;
}
// true for left, false for right
public abstract boolean routeSample(double[] sample);
public abstract String toString();
public void setCriterionValue(double criterionOfSplit) {
_criterionValue = criterionOfSplit;
}
public void setFeatureIndex(int featureIndex) {
_featureIndex = featureIndex;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/CategoricalFeatureLimits.java
|
package hex.tree.dt;
import java.util.Arrays;
import java.util.stream.IntStream;
/**
* Limits for one feature.
*/
public class CategoricalFeatureLimits extends AbstractFeatureLimits {
public boolean[] _mask;
public CategoricalFeatureLimits(final boolean[] mask) {
_mask = Arrays.copyOf(mask, mask.length);
}
public CategoricalFeatureLimits(final double[] doubleMask) {
_mask = new boolean[doubleMask.length];
for (int i = 0; i < doubleMask.length; i++) {
if (doubleMask[i] == 1.0) {
_mask[i] = true;
}
}
}
public CategoricalFeatureLimits(final int cardinality) {
_mask = new boolean[cardinality];
// fill with true as it is used for the initial features limits where all categories are present
Arrays.fill(_mask, true);
}
public void setNewMask(final boolean[] mask) {
_mask = Arrays.copyOf(mask, mask.length);
}
public void setNewMaskExcluded(final boolean[] maskToExclude) {
_mask = Arrays.copyOf(_mask, _mask.length);
// the length of the mask is the number of categories in the initial dataset and has to stay the same throughout the whole build
assert _mask.length == maskToExclude.length;
for (int i = 0; i < maskToExclude.length; i++) {
// if the category is defined in the given mask, it should be excluded from the actual mask
if(maskToExclude[i]) {
_mask[i] = false;
}
}
}
public CategoricalFeatureLimits clone() {
return new CategoricalFeatureLimits(Arrays.copyOf(_mask, _mask.length));
}
@Override
public double[] toDoubles() {
return IntStream.range(0, _mask.length).mapToDouble(idx -> _mask[idx] ? 1.0 : 0.0).toArray();
}
@Override
public boolean equals(AbstractFeatureLimits other) {
return Arrays.equals(_mask, ((CategoricalFeatureLimits) other)._mask);
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/CategoricalSplittingRule.java
|
package hex.tree.dt;
import java.util.Arrays;
public class CategoricalSplittingRule extends AbstractSplittingRule {
public CategoricalSplittingRule(int featureIndex, boolean[] mask, double criterionValue) {
_featureIndex = featureIndex;
_mask = mask;
_criterionValue = criterionValue;
}
public CategoricalSplittingRule(boolean[] mask) {
_mask = mask;
}
// categories for the left split - bitmask
private final boolean[] _mask;
public boolean[] getMask() {
return _mask;
}
@Override
public String toString() {
return "x" + _featureIndex + " in [" + Arrays.toString(_mask) + "]";
}
// true for left, false for right
public boolean routeSample(double[] sample) {
int category = (int) sample[_featureIndex];
assert category < _mask.length; // todo: new values in the train set are not supported yet - will be treated as missing values
return _mask[category];
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/CompressedDT.java
|
package hex.tree.dt;
import water.Key;
import water.Keyed;
import java.util.Arrays;
import java.util.stream.Collectors;
/**
* Compressed DT class containing tree as array.
*/
public class CompressedDT extends Keyed<CompressedDT> {
/**
* List of nodes; each node holds either the split feature index and threshold, or just the decision value if it is a leaf.
*/
private final AbstractCompressedNode[] _nodes;
private final String[] _listOfRules;
public CompressedDT(AbstractCompressedNode[] nodes, int leavesCount) {
_key = Key.make("CompressedDT" + Key.rand());
_nodes = nodes;
_listOfRules = new String[leavesCount];
extractRulesStartingWithNode(0, "", 0);
}
/**
* Makes prediction by recursively evaluating the data through the tree.
*
* @param rowValues - data row to find prediction for
* @param actualNodeIndex - actual node to evaluate and then go to selected child
* @return class label
*/
public DTPrediction predictRowStartingFromNode(final double[] rowValues, final int actualNodeIndex, String ruleExplanation) {
boolean isALeaf = _nodes[actualNodeIndex] instanceof CompressedLeaf;
// if the node is a leaf, return the prediction stored in the leaf
if (isALeaf) {
double decisionValue = ((CompressedLeaf) _nodes[actualNodeIndex]).getDecisionValue();
double probability = ((CompressedLeaf) _nodes[actualNodeIndex]).getProbabilities();
return new DTPrediction((int) decisionValue, probability, ruleExplanation + " -> ("
+ decisionValue + ", probabilities: " + probability + ", " + (1 - probability) + ")");
}
if (!ruleExplanation.isEmpty()) {
ruleExplanation += " and ";
}
AbstractSplittingRule splittingRule = ((CompressedNode) _nodes[actualNodeIndex]).getSplittingRule();
// splitting rule is true - left, false - right
if(splittingRule.routeSample(rowValues)) {
return predictRowStartingFromNode(rowValues, 2 * actualNodeIndex + 1,
ruleExplanation + splittingRule.toString());
} else {
return predictRowStartingFromNode(rowValues, 2 * actualNodeIndex + 2,
ruleExplanation + "not " + splittingRule.toString());
}
}
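/* Layout note with a small example (editor's addition): the tree is stored as an implicit
 * binary heap, so the children of the node at index i sit at 2*i+1 and 2*i+2. For a
 * depth-2 tree the array reads [0]=root split, [1]/[2]=second-level splits, [3..6]=leaves,
 * and a row routed left at the root and then right ends up at index 2*(2*0+1)+2 = 4.
 */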
@Override
public String toString() {
return Arrays.stream(_nodes).map(AbstractCompressedNode::toString).collect(Collectors.joining(";"));
}
public int extractRulesStartingWithNode(int nodeIndex, String actualRule, int nextFreeSpot) {
if (_nodes[nodeIndex] instanceof CompressedLeaf) {
// if node is a leaf, add the rule to the list of rules at index given by the nextFreeSpot parameter
_listOfRules[nextFreeSpot] = actualRule + " -> (" + ((CompressedLeaf) _nodes[nodeIndex]).getDecisionValue()
+ ", " + ((CompressedLeaf) _nodes[nodeIndex]).getProbabilities() + ")";
// move nextFreeSpot to the next index and return it to be used for other branches
nextFreeSpot++;
return nextFreeSpot;
}
actualRule = actualRule.isEmpty() ? actualRule : actualRule + " and ";
// proceed to the left branch
nextFreeSpot = extractRulesStartingWithNode(2 * nodeIndex + 1,
actualRule + ((CompressedNode) _nodes[nodeIndex]).getSplittingRule().toString(), nextFreeSpot);
// proceed to the right branch
nextFreeSpot = extractRulesStartingWithNode(2 * nodeIndex + 2,
actualRule + " not (" + ((CompressedNode) _nodes[nodeIndex]).getSplittingRule().toString() + ")",
nextFreeSpot);
// return current index of the next free spot in the array
return nextFreeSpot;
}
public String[] getListOfRules() {
return _listOfRules;
}
public AbstractCompressedNode[] getNodes() {
return _nodes;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/CompressedLeaf.java
|
package hex.tree.dt;
public class CompressedLeaf extends AbstractCompressedNode {
private final double _decisionValue;
private final double _probability;
public CompressedLeaf(double decisionValue, double probabilities) {
super();
_decisionValue = decisionValue;
_probability = probabilities;
}
public double getDecisionValue() {
return _decisionValue;
}
public double getProbabilities() {
return _probability;
}
@Override
public String toString() {
return "(leaf: " + _decisionValue + ", " + _probability + ", " + (1- _probability) + ")";
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/CompressedNode.java
|
package hex.tree.dt;
public class CompressedNode extends AbstractCompressedNode {
private final AbstractSplittingRule _splittingRule;
public CompressedNode(final AbstractSplittingRule splittingRule) {
super();
this._splittingRule = splittingRule;
}
public AbstractSplittingRule getSplittingRule() {
return _splittingRule;
}
@Override
public String toString() {
return "[node: " + _splittingRule.toString() + "]";
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/DT.java
|
package hex.tree.dt;
import hex.ModelBuilder;
import hex.ModelCategory;
import hex.ModelMetrics;
import hex.tree.dt.binning.SplitStatistics;
import hex.tree.dt.binning.BinningStrategy;
import hex.tree.dt.binning.Histogram;
import hex.tree.dt.mrtasks.GetClassCountsMRTask;
import hex.tree.dt.mrtasks.ScoreDTTask;
import org.apache.commons.math3.util.Precision;
import org.apache.log4j.Logger;
import water.DKV;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.Frame;
import water.util.*;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static hex.tree.dt.binning.SplitStatistics.entropyBinarySplit;
/**
* Decision Tree
*/
public class DT extends ModelBuilder<DTModel, DTModel.DTParameters, DTModel.DTOutput> {
/**
* Minimum number of samples to split the set.
*/
private int _min_rows;
/**
* Current number of built nodes.
*/
int _nodesCount;
/**
* Current number of built leaves.
*/
int _leavesCount;
/**
* List of nodes; each node holds either the split feature index and threshold, or just the decision value if it is a leaf.
* While building the tree, nodes are filled iteratively starting from index 0.
*/
private AbstractCompressedNode[] _tree;
private DTModel _model;
transient Random _rand;
// private final static int LIMIT_NUM_ROWS_FOR_SPLIT = 2; // todo - make a parameter with default value
public final static double EPSILON = 1e-6;
public final static double MIN_IMPROVEMENT = 1e-6;
private static final Logger LOG = Logger.getLogger(DT.class);
public DT(DTModel.DTParameters parameters) {
super(parameters);
_min_rows = parameters._min_rows;
_nodesCount = 0;
_leavesCount = 0;
_tree = null;
init(false);
}
public DT(boolean startup_once) {
super(new DTModel.DTParameters(), startup_once);
}
/**
* Find best split for current node based on the histogram.
*
* @param histogram - histogram for relevant data
* @return split info - holds the best split for current node, null if the split could not be found.
*/
private AbstractSplittingRule findBestSplit(Histogram histogram) {
int featuresNumber = histogram.featuresCount();
AbstractSplittingRule currentMinCriterionSplittingRule = null;
AbstractSplittingRule minCriterionSplittingRuleForFeature;
int bestFeatureIndex = -1;
for (int featureIndex = 0; featureIndex < featuresNumber; featureIndex++) {
// skip constant features
if (histogram.isConstant(featureIndex)) {
continue;
}
// find best split for current feature based on the criterion value
minCriterionSplittingRuleForFeature = findBestSplitForFeature(histogram, featureIndex);
if (minCriterionSplittingRuleForFeature == null) {
continue; // split could not be found for this feature
}
// update current minimum criteria pair
if (currentMinCriterionSplittingRule == null
|| minCriterionSplittingRuleForFeature._criterionValue < currentMinCriterionSplittingRule._criterionValue) {
currentMinCriterionSplittingRule = minCriterionSplittingRuleForFeature;
bestFeatureIndex = featureIndex;
}
}
if (bestFeatureIndex == -1) {
return null; // no split could be found
}
return currentMinCriterionSplittingRule;
}
private AbstractSplittingRule findBestSplitForFeature(Histogram histogram, int featureIndex) {
return (_train.vec(featureIndex).isNumeric()
? histogram.calculateSplitStatisticsForNumericFeature(featureIndex)
: histogram.calculateSplitStatisticsForCategoricalFeature(featureIndex))
.stream()
// todo - consider setting min count of samples in bin instead of filtering splits
.filter(binStatistics -> ((binStatistics._leftCount >= _min_rows)
&& (binStatistics._rightCount >= _min_rows)))
.peek(binStatistics -> Log.debug("split: " + binStatistics._splittingRule + ", counts: "
+ binStatistics._leftCount + " " + binStatistics._rightCount))
// calculate criterion value for the splitting rule and fill the splitting rule with the rest of info
.peek(binStatistics -> binStatistics.setCriterionValue(calculateCriterionOfSplit(binStatistics))
.setFeatureIndex(featureIndex))
.map(binStatistics -> binStatistics._splittingRule)
// get splitting rule with the lowest criterion value
.min(Comparator.comparing(AbstractSplittingRule::getCriterionValue))
.orElse(null);
}
private static double calculateCriterionOfSplit(SplitStatistics binStatistics) {
return binStatistics.binaryEntropy();
}
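/* Worked example (editor's addition, assuming entropyBinarySplit is the standard binary
 * entropy H(p) = -(p*log2(p) + (1-p)*log2(1-p)); the log base only rescales the numbers):
 * a parent node with class counts {60, 40} has p = 0.6 and H(0.6) ~= 0.971 bits. If the
 * best candidate split has a criterion value of 0.90, the improvement 0.071 clears
 * MIN_IMPROVEMENT (1e-6) and buildNextNode turns the node into a CompressedNode instead
 * of a leaf (assuming the min-rows and max-depth conditions are also satisfied).
 */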
/**
* Select decision value for leaf. Decision value is argmax of the array with counts of samples by class.
*
* @param countsByClass counts of samples of each class
* @return decision value (in current case - 0 or 1)
*/
private int selectDecisionValue(int[] countsByClass) {
if (_nclass == 1) {
return countsByClass[0];
}
int currentMaxClass = 0;
int currentMax = countsByClass[currentMaxClass];
for (int c = 1; c < _nclass; c++) {
if (countsByClass[c] > currentMax) {
currentMaxClass = c;
currentMax = countsByClass[c];
}
}
return currentMaxClass;
}
/**
* Calculates probabilities of each class for a leaf.
*
* @param countsByClass counts of 0 and 1 in a leaf
* @return probabilities of 0 or 1
*/
private double[] calculateProbability(int[] countsByClass) {
int samplesCount = Arrays.stream(countsByClass).sum();
return Arrays.stream(countsByClass).asDoubleStream().map(n -> n / samplesCount).toArray();
}
/**
* Set decision value to the node.
*
* @param countsByClass counts of samples of each class
* @param nodeIndex node index
*/
public void makeLeafFromNode(int[] countsByClass, int nodeIndex) {
_tree[nodeIndex] = new CompressedLeaf(selectDecisionValue(countsByClass), calculateProbability(countsByClass)[0]);
_leavesCount++;
// nothing to return, node is modified inplace
}
/**
* Build next node from the first limits in queue. The queue is updated with children here.
*
* @param limitsQueue queue with feature limits for nodes
* @param nodeIndex index of node in the tree array
*/
public void buildNextNode(Queue<DataFeaturesLimits> limitsQueue, int nodeIndex) {
// take limits for actual node
DataFeaturesLimits actualLimits = limitsQueue.poll();
// if the element is null, then the node should not be built. Nulls exist to keep the array building straightforward
if (actualLimits == null) {
// don't save anything to tree (no node is created)
// add imaginary left and right children to imitate a valid tree structure
// left child
limitsQueue.add(null);
// right child
limitsQueue.add(null);
return;
}
// [count0, count1, ...]
int[] countsByClass = countClasses(actualLimits);
if (nodeIndex == 0) {
Log.info("Classes counts in dataset: 0 - " + countsByClass[0] + ", 1 - " + countsByClass[1]);
}
// compute node depth
int nodeDepth = (int) Math.floor(MathUtils.log2(nodeIndex + 1));
// stop building from this node, the node will be a leaf
if ((nodeDepth >= _parms._max_depth)
|| (countsByClass[0] <= _min_rows)
|| (countsByClass[1] <= _min_rows)
// || zeroRatio > 0.999 || zeroRatio < 0.001
) {
// add imaginary left and right children to imitate valid tree structure
// left child
limitsQueue.add(null);
// right child
limitsQueue.add(null);
makeLeafFromNode(countsByClass, nodeIndex);
return;
}
Histogram histogram = new Histogram(_train, actualLimits, BinningStrategy.EQUAL_WIDTH/*, minNumSamplesInBin - todo consider*/);
AbstractSplittingRule bestSplittingRule = findBestSplit(histogram);
double criterionForTheParentNode = entropyBinarySplit(1.0 * countsByClass[0] / (countsByClass[0] + countsByClass[1]));
// if no split could be found, make a leaf from the current node
// if the information gain is too low, make a leaf from the current node
if (bestSplittingRule == null
|| Math.abs(criterionForTheParentNode - bestSplittingRule._criterionValue) < MIN_IMPROVEMENT) {
// add imaginary left and right children to imitate a valid tree structure
// left child
limitsQueue.add(null);
// right child
limitsQueue.add(null);
makeLeafFromNode(countsByClass, nodeIndex);
return;
}
_tree[nodeIndex] = new CompressedNode(bestSplittingRule);
int splitFeatureIndex = bestSplittingRule.getFeatureIndex();
DataFeaturesLimits limitsLeft, limitsRight;
if(_train.vec(splitFeatureIndex).isNumeric()) {
// create left and right limits separated by threshold
double threshold = ((NumericSplittingRule) bestSplittingRule).getThreshold();
limitsLeft = actualLimits.updateMax(splitFeatureIndex, threshold);
limitsRight = actualLimits.updateMin(splitFeatureIndex, threshold);
} else {
boolean[] mask = ((CategoricalSplittingRule) bestSplittingRule).getMask();
limitsLeft = actualLimits.updateMask(splitFeatureIndex, mask);
limitsRight = actualLimits.updateMaskExcluded(splitFeatureIndex, mask);
}
// store limits for left child
limitsQueue.add(limitsLeft);
// store limits for right child
limitsQueue.add(limitsRight);
}
/**
* Compute initial features limits.
*
* @return features limits
*/
public static DataFeaturesLimits getInitialFeaturesLimits(Frame data) {
return new DataFeaturesLimits(
IntStream.range(0, data.numCols() - 1 /*exclude the last prediction column*/)
.mapToObj(data::vec)
// decrease min as the minimum border is always excluded and real min value could be lost
.map(v -> v.isNumeric()
? new NumericFeatureLimits(v.min() - EPSILON, v.max())
: new CategoricalFeatureLimits(v.cardinality()))
.collect(Collectors.toList()));
}
private class DTDriver extends Driver {
private void dtChecks() {
if (_parms._max_depth < 1) {
error("_parms._max_depth", "Max depth has to be at least 1");
}
if (_train.hasNAs()) {
error("_train", "NaNs are not supported yet");
}
if (_train.hasInfs()) {
error("_train", "Infs are not supported");
}
if (!_response.isCategorical()) {
error("_response", "Only categorical response is supported");
}
if (!_response.isBinary()) {
error("_response", "Only binary response is supported");
}
}
@Override
public void computeImpl() {
_model = null;
try {
init(true);
dtChecks();
if (error_count() > 0) {
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(DT.this);
}
_rand = RandomUtils.getRNG(_parms._seed);
_model = new DTModel(dest(), _parms,
new DTModel.DTOutput(DT.this));
_model.delete_and_lock(_job);
buildDT();
LOG.info(_model.toString());
} finally {
if (_model != null)
_model.unlock(_job);
}
}
/**
* Build the DT and update infrastructure.
*/
private void buildDT() {
buildDTIteratively();
Log.debug("depth: " + _parms._max_depth + ", nodes count: " + _nodesCount);
CompressedDT compressedDT = new CompressedDT(_tree, _leavesCount);
_model._output._treeKey = compressedDT._key;
DKV.put(compressedDT);
_job.update(1);
_model.update(_job);
}
/**
* Build the tree iteratively starting from the root node.
*/
private void buildDTIteratively() {
int treeLength = (int) Math.pow(2, _parms._max_depth + 1) - 1;
_tree = new AbstractCompressedNode[treeLength];
Queue<DataFeaturesLimits> limitsQueue = new LinkedList<>();
limitsQueue.add(getInitialFeaturesLimits(_train));
// build iteratively each node of the tree (each cell of the array) by picking limits from the queue
// and storing children's limits to the queue.
// Tree will not be perfect. Missing nodes are empty elements and their limits in queue are null.
for (int nodeIndex = 0; nodeIndex < treeLength; nodeIndex++) {
buildNextNode(limitsQueue, nodeIndex);
}
}
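/* Sizing example (editor's addition): with _max_depth = 3 the array holds
 * 2^(3+1) - 1 = 15 slots; node 0 is the root and node i has children 2*i+1 and 2*i+2
 * at depth floor(log2(i+1)). Branches that are never built leave their slots null, and
 * their positions in the limits queue are the null placeholders enqueued by buildNextNode.
 */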
}
@Override
protected Driver trainModelImpl() {
return new DTDriver();
}
@Override
public BuilderVisibility builderVisibility() {
return BuilderVisibility.Experimental;
}
@Override
public ModelCategory[] can_build() {
return new ModelCategory[]{
ModelCategory.Binomial,
// ModelCategory.Multinomial,
// ModelCategory.Ordinal,
// ModelCategory.Regression
};
}
@Override
public boolean isSupervised() {
return true;
}
protected final void makeModelMetrics() {
ModelMetrics.MetricBuilder metricsBuilder = new ScoreDTTask(_model).doAll(_train).getMetricsBuilder();
ModelMetrics modelMetrics = metricsBuilder.makeModelMetrics(_model, _parms.train(), null, null);
_model._output._training_metrics = modelMetrics;
// Score again on validation data
if( _parms._valid != null) {
Frame v = new Frame(valid());
metricsBuilder = new ScoreDTTask(_model).doAll(v).getMetricsBuilder();
_model._output._validation_metrics = metricsBuilder.makeModelMetrics(_model, v, null, null);
}
// out._model_summary = createModelSummaryTable(out._ntrees, out._treeStats);
// out._scoring_history = createScoringHistoryTable();
}
/**
* Count classes within samples satisfying given limits.
*
* @param featuresLimits limits
* @return pair (count0, count1)
*/
private int[] countClasses(final DataFeaturesLimits featuresLimits) {
GetClassCountsMRTask task = new GetClassCountsMRTask(featuresLimits == null
// create limits that are always fulfilled
? getInitialFeaturesLimits(_train).toDoubles()
: featuresLimits.toDoubles(), _nclass);
task.doAll(_train);
return task._countsByClass;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/DTModel.java
|
package hex.tree.dt;
import hex.*;
import org.apache.log4j.Logger;
import water.*;
import java.util.Arrays;
public class DTModel extends Model<DTModel, DTModel.DTParameters, DTModel.DTOutput> {
private static final Logger LOG = Logger.getLogger(DTModel.class);
public DTModel(Key<DTModel> selfKey, DTParameters parms,
DTOutput output) {
super(selfKey, parms, output);
}
@Override
public ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain) {
switch (_output.getModelCategory()) {
case Binomial:
return new ModelMetricsBinomial.MetricBuilderBinomial(domain);
case Multinomial:
return new ModelMetricsMultinomial.MetricBuilderMultinomial(_output.nclasses(), domain, _parms._auc_type);
case Regression:
return new ModelMetricsRegression.MetricBuilderRegression();
default:
throw H2O.unimpl();
}
}
@Override
protected double[] score0(double[] data, double[] preds) {
assert _output._treeKey != null : "Output has no tree, check if tree is properly set to the output.";
// compute score for given point
CompressedDT tree = DKV.getGet(_output._treeKey);
DTPrediction prediction = tree.predictRowStartingFromNode(data, 0, "");
// for now, only pred. for class 0 is stored, will be improved later
preds[0] = prediction.classPrediction;
preds[1] = prediction.probability;
preds[2] = 1 - prediction.probability;
return preds;
}
public static class DTOutput extends Model.Output {
public int _max_depth;
public int _limitNumSamplesForSplit;
public Key<CompressedDT> _treeKey;
public DTOutput(DT dt) {
super(dt);
_max_depth = dt._parms._max_depth;
_limitNumSamplesForSplit = dt._parms._min_rows;
}
}
@Override
protected Futures remove_impl(Futures fs, boolean cascade) {
Keyed.remove(_output._treeKey, fs, true);
return super.remove_impl(fs, cascade);
}
@Override
protected AutoBuffer writeAll_impl(AutoBuffer ab) {
ab.putKey(_output._treeKey);
return super.writeAll_impl(ab);
}
@Override
protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
ab.getKey(_output._treeKey, fs);
return super.readAll_impl(ab, fs);
}
public static class DTParameters extends Model.Parameters {
long seed = -1; //ignored
/**
* Depth (max depth) of the tree
*/
public int _max_depth;
public int _min_rows;
public DTParameters() {
super();
_max_depth = 20;
_min_rows = 10;
}
@Override
public String algoName() {
return "DT";
}
@Override
public String fullName() {
return "Decision Tree";
}
@Override
public String javaName() {
return DTModel.class.getName();
}
@Override
public long progressUnits() {
return 1;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/DTPrediction.java
|
package hex.tree.dt;
public class DTPrediction {
public int classPrediction;
public double probability;
public String ruleExplanation;
public DTPrediction(int classPrediction, double probability, String ruleExplanation) {
this.classPrediction = classPrediction;
this.probability = probability;
this.ruleExplanation = ruleExplanation;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/DataFeaturesLimits.java
|
package hex.tree.dt;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import static hex.tree.dt.NumericFeatureLimits.*;
/**
* Features limits for the whole dataset.
*/
public class DataFeaturesLimits {
// limits for each feature
private final List<AbstractFeatureLimits> _featuresLimits;
public DataFeaturesLimits(final List<AbstractFeatureLimits> featureLimits) {
this._featuresLimits = featureLimits;
}
public DataFeaturesLimits(final double[][] featureLimits) {
this._featuresLimits = Arrays.stream(featureLimits)
.map(dd -> dd[NUMERICAL_FLAG] == -1.0
? new NumericFeatureLimits(dd[LIMIT_MIN], dd[LIMIT_MAX])
: new CategoricalFeatureLimits(dd))
.collect(Collectors.toList());
}
public DataFeaturesLimits clone() {
return new DataFeaturesLimits(_featuresLimits.stream().map(AbstractFeatureLimits::clone).collect(Collectors.toList()));
}
/**
* Creates new instance of limits with updated min.
*
* @param selectedFeature feature index to update min
* @param newMin new min value for feature
* @return clone with updated min
*/
public DataFeaturesLimits updateMin(final int selectedFeature, final double newMin) {
DataFeaturesLimits clone = new DataFeaturesLimits(
_featuresLimits.stream().map(AbstractFeatureLimits::clone).collect(Collectors.toList()));
((NumericFeatureLimits) clone._featuresLimits.get(selectedFeature)).setNewMin(newMin);
return clone;
}
/**
* Creates new instance of limits with updated max.
*
* @param selectedFeature feature index to update max
* @param newMax new max value for feature
* @return clone with updated max
*/
public DataFeaturesLimits updateMax(final int selectedFeature, final double newMax) {
DataFeaturesLimits clone = new DataFeaturesLimits(
_featuresLimits.stream().map(AbstractFeatureLimits::clone).collect(Collectors.toList()));
((NumericFeatureLimits) clone._featuresLimits.get(selectedFeature)).setNewMax(newMax);
return clone;
}
/**
* Creates new instance of limits with updated mask - replaces old mask with new more precise one.
*
* @param selectedFeature feature index to update mask
* @param newMask new mask for the feature
* @return clone with updated mask
*/
public DataFeaturesLimits updateMask(final int selectedFeature, final boolean[] newMask) {
DataFeaturesLimits clone = new DataFeaturesLimits(
_featuresLimits.stream().map(AbstractFeatureLimits::clone).collect(Collectors.toList()));
((CategoricalFeatureLimits) clone._featuresLimits.get(selectedFeature)).setNewMask(newMask);
return clone;
}
/**
* Creates new instance of limits with updated mask - excludes from the current mask categories defined by the new one.
*
* @param selectedFeature feature index to update mask
* @param maskToExclude new mask for the feature
* @return clone with updated mask
*/
public DataFeaturesLimits updateMaskExcluded(int selectedFeature, boolean[] maskToExclude) {
DataFeaturesLimits clone = new DataFeaturesLimits(
_featuresLimits.stream().map(AbstractFeatureLimits::clone).collect(Collectors.toList()));
((CategoricalFeatureLimits) clone._featuresLimits.get(selectedFeature)).setNewMaskExcluded(maskToExclude);
return clone;
}
public AbstractFeatureLimits getFeatureLimits(int featureIndex) {
return _featuresLimits.get(featureIndex);
}
/**
* Serializes the limits to a 2D double array depending on the feature types, so they can be passed to an MR task.
*
* @return serialized feature limits, one row per feature
*/
public double[][] toDoubles() {
return _featuresLimits.stream()
.map(AbstractFeatureLimits::toDoubles)
.toArray(double[][]::new);
}
/**
* Get count of features.
* @return count of features
*/
public int featuresCount() {
return _featuresLimits.size();
}
public boolean equals(DataFeaturesLimits other) {
if (this == other) {
return true;
}
if (other == null || other.featuresCount() != featuresCount()) {
return false;
}
for (int i = 0; i < _featuresLimits.size(); i++) {
if (!_featuresLimits.get(i).equals(other._featuresLimits.get(i))) {
return false;
}
}
return true;
}
}
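// Hedged usage sketch, not part of the original source: shows how the double[][] encoding
// used by toDoubles()/the constructor round-trips and that updateMax() returns a modified
// clone while leaving the original limits untouched. Only numeric features are used here,
// encoded as {numeric flag -1.0, min, max}; the values are illustrative.
class DataFeaturesLimitsUsageSketch {
static DataFeaturesLimits narrowFirstFeature() {
double[][] serialized = {
{-1.0, 0.5, 9.5},
{-1.0, 0.0, 1.0}
};
DataFeaturesLimits limits = new DataFeaturesLimits(serialized);
// clone with the max of feature 0 lowered; 'limits' itself is unchanged
return limits.updateMax(0, 5.0);
}
}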
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/NumericFeatureLimits.java
|
package hex.tree.dt;
import static hex.tree.dt.DT.EPSILON;
/**
* Limits for one feature.
*/
public class NumericFeatureLimits extends AbstractFeatureLimits {
public double _min;
public double _max;
// indices for the serialized array
public static final int NUMERICAL_FLAG = 0;
public static final int LIMIT_MIN = 1;
public static final int LIMIT_MAX = 2;
// the min of the first bin is shifted by precision EPSILON; one additional decimal place of difference is allowed
private static final double EQUALS_PRECISION = EPSILON * 10;
public NumericFeatureLimits(final double min, final double max) {
_min = min;
_max = max;
}
public void setNewMax(final double newMax) {
_max = newMax;
}
public void setNewMin(final double newMin) {
_min = newMin;
}
public NumericFeatureLimits clone() {
return new NumericFeatureLimits(_min, _max);
}
@Override
public double[] toDoubles() {
// -1.0 at index 0 identifies numeric feature limits
return new double[]{-1.0, _min, _max};
}
@Override
public boolean equals(AbstractFeatureLimits other) {
return Math.abs(_min - ((NumericFeatureLimits) other)._min) < EQUALS_PRECISION
&& Math.abs(_max - ((NumericFeatureLimits) other)._max) < EQUALS_PRECISION;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/NumericSplittingRule.java
|
package hex.tree.dt;
import org.apache.commons.math3.util.Precision;
public class NumericSplittingRule extends AbstractSplittingRule {
public NumericSplittingRule(int featureIndex, double threshold, double criterionValue) {
_featureIndex = featureIndex;
_threshold = threshold;
_criterionValue = criterionValue;
}
public NumericSplittingRule(double threshold) {
_threshold = threshold;
}
private final double _threshold;
public double getThreshold() {
return _threshold;
}
@Override
public String toString() {
return "x" + _featureIndex + " <= " + _threshold + "";
}
// true for left, false for right
public boolean routeSample(double[] sample) {
return sample[_featureIndex] < _threshold
|| Precision.equals(sample[_featureIndex], _threshold, Precision.EPSILON);
}
}
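// Hedged sketch, not part of the original source: demonstrates that routeSample sends
// values at or below the threshold to the left branch (true) and larger values to the
// right (false). The feature index, threshold and sample values are illustrative.
class NumericSplittingRuleSketch {
static boolean[] demo() {
NumericSplittingRule rule = new NumericSplittingRule(1, 3.5, 0.0);
boolean goesLeft = rule.routeSample(new double[]{0.0, 3.5});       // equal to threshold
boolean goesRight = !rule.routeSample(new double[]{0.0, 3.5001});  // above threshold
return new boolean[]{goesLeft, goesRight};
}
}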
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/binning/AbstractBin.java
|
package hex.tree.dt.binning;
/**
* Base class for a single bin holding the count of samples and the count of class 0; subclasses add the limits (min excluded) or the category.
*/
public abstract class AbstractBin {
public int _count0;
public int _count;
public int getCount0() {
return _count0;
}
public abstract AbstractBin clone();
public abstract double[] toDoubles();
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/binning/BinningStrategy.java
|
package hex.tree.dt.binning;
import hex.tree.dt.CategoricalFeatureLimits;
import hex.tree.dt.DataFeaturesLimits;
import hex.tree.dt.NumericFeatureLimits;
import hex.tree.dt.mrtasks.CountBinsSamplesCountsMRTask;
import water.fvec.Frame;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.util.ArrayList;
import java.util.List;
import static hex.tree.dt.mrtasks.CountBinsSamplesCountsMRTask.COUNT;
import static hex.tree.dt.mrtasks.CountBinsSamplesCountsMRTask.COUNT_0;
/**
* Strategy for binning. Creates bins for single feature.
*/
public enum BinningStrategy {
/**
* Equal width: (max - min) / num_bins, optimized. Min is always excluded.
*/
EQUAL_WIDTH {
public final int NUM_BINS = 10;
public final int DECIMALS_TO_CONSIDER = 2;
public final double MIN_REL_COEFF = 0.0001;
double roundToNDecimalPoints(double number, int decimals) {
BigDecimal bigDecimal = new BigDecimal(number);
return bigDecimal.setScale(decimals, RoundingMode.HALF_UP).doubleValue();
}
double roundToNDecimalPoints(double number) {
return roundToNDecimalPoints(number, DECIMALS_TO_CONSIDER);
}
private List<AbstractBin> createEmptyBinsFromBinningValues(List<Double> binningValues, double realMin, double realMax) {
List<AbstractBin> emptyBins = new ArrayList<>();
// create bins between neighboring binning values; don't create a bin starting with the last value (at index size - 1)
for (int i = 0; i < binningValues.size() - 1; i++) {
emptyBins.add(
new NumericBin(roundToNDecimalPoints(binningValues.get(i)),
roundToNDecimalPoints(binningValues.get(i + 1))));
}
// set the first min to a slightly lower value (relative to step) so the actual value equal to min is not lost
((NumericBin) emptyBins.get(0)).setMin(realMin - MIN_REL_COEFF * (binningValues.get(1) - binningValues.get(0)));
// set the last max to the real max value to avoid precision troubles
((NumericBin) emptyBins.get(emptyBins.size() - 1)).setMax(realMax);
return emptyBins;
}
@Override
List<AbstractBin> createFeatureBins(Frame originData, DataFeaturesLimits featuresLimits, int feature) {
if (originData.vec(feature).isNumeric()) {
NumericFeatureLimits featureLimits = (NumericFeatureLimits) featuresLimits.getFeatureLimits(feature);
double step = (featureLimits._max - featureLimits._min) / NUM_BINS;
// constant feature - don't use it for a split
if (step == 0) {
return null;
}
// get thresholds which are the minimums and maximums of bins (including min and max)
List<Double> binningValues = new ArrayList<>();
for (double value = featureLimits._min; value <= featureLimits._max; value += step) {
binningValues.add(value);
}
List<AbstractBin> emptyBins = createEmptyBinsFromBinningValues(
binningValues, featureLimits._min, featureLimits._max);
return calculateNumericBinSamplesCount(originData, emptyBins, featuresLimits.toDoubles(), feature);
} else {
CategoricalFeatureLimits featureLimits = (CategoricalFeatureLimits) featuresLimits.getFeatureLimits(feature);
List<AbstractBin> emptyBins = new ArrayList<>();
for (int category = 0; category < featureLimits._mask.length; category++) {
// if the category is present in feature values, add new bin for this category
if (featureLimits._mask[category]) {
emptyBins.add(new CategoricalBin(category));
}
}
return calculateCategoricalBinSamplesCount(originData, emptyBins, featuresLimits.toDoubles(), feature);
}
}
},
/**
* Equal height: bins have approximately the same size - todo
* - probably too costly to do with an MR task; better to keep equal-width
*/
EQUAL_HEIGHT {
@Override
List<AbstractBin> createFeatureBins(Frame originData, DataFeaturesLimits featuresLimits, int feature) {
return null;
}
},
/**
* Custom bins: works with provided bins limits - todo
*/
CUSTOM_BINS {
@Override
List<AbstractBin> createFeatureBins(Frame originData, DataFeaturesLimits featuresLimits, int feature) {
return null;
}
};
/**
* Creates bins for selected feature.
*
* @param originData data - not modified
* @param featuresLimits limits for features
* @param feature selected feature index
* @return list of created bins
*/
abstract List<AbstractBin> createFeatureBins(Frame originData, DataFeaturesLimits featuresLimits, int feature);
/**
* Calculates samples count for given bins for categorical feature.
*
* @param data data - not modified
* @param bins empty bins to calculate samples
* @param featuresLimits limits for features
* @param feature selected feature index
* @return list of created bins
*/
private static List<AbstractBin> calculateCategoricalBinSamplesCount(Frame data, List<AbstractBin> bins,
double[][] featuresLimits, int feature) {
// run MR task to compute accumulated statistic for bins - one task for one feature, calculates all bins at once
double[][] binsArray = bins.stream().map(AbstractBin::toDoubles).toArray(double[][]::new);
CountBinsSamplesCountsMRTask task = new CountBinsSamplesCountsMRTask(feature, featuresLimits, binsArray);
task.doAll(data);
for(int i = 0; i < binsArray.length; i ++) {
bins.get(i)._count = (int) task._bins[i][COUNT];
bins.get(i)._count0 = (int) task._bins[i][COUNT_0];
}
return bins;
}
/**
* Calculates samples count for given bins for numeric feature.
*
* @param data data - not modified
* @param bins empty bins to calculate samples
* @param featuresLimits limits for features
* @param feature selected feature index
* @return list of created bins
*/
private static List<AbstractBin> calculateNumericBinSamplesCount(Frame data, List<AbstractBin> bins,
double[][] featuresLimits, int feature) {
// run MR task to compute accumulated statistic for bins - one task for one feature, calculates all bins at once
double[][] binsArray = bins.stream().map(AbstractBin::toDoubles).toArray(double[][]::new);
CountBinsSamplesCountsMRTask task = new CountBinsSamplesCountsMRTask(feature, featuresLimits, binsArray);
task.doAll(data);
for(int i = 0; i < binsArray.length; i ++) {
bins.get(i)._count = (int) task._bins[i][COUNT];
bins.get(i)._count0 = (int) task._bins[i][COUNT_0];
}
return bins;
}
}
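// Hedged sketch, not part of the original source: reproduces the equal-width boundary
// arithmetic used by EQUAL_WIDTH (step = (max - min) / NUM_BINS) without needing a Frame
// or an MR task. The helper and its names are illustrative only.
class EqualWidthBoundarySketch {
static double[] boundaries(double min, double max, int numBins) {
double step = (max - min) / numBins;
double[] edges = new double[numBins + 1];
for (int i = 0; i <= numBins; i++) {
// bin i covers (edges[i], edges[i + 1]]; the real code later lowers the first
// edge slightly and pins the last edge to the true max
edges[i] = min + i * step;
}
return edges;
}
}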
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/binning/CategoricalBin.java
|
package hex.tree.dt.binning;
/**
* For categorical features, values are already binned into categories - each bin corresponds to one value (category)
*/
public class CategoricalBin extends AbstractBin {
public int _category;
public CategoricalBin(int category, int count, int count0) {
_category = category;
_count = count;
_count0 = count0;
}
public CategoricalBin(int category) {
_category = category;
_count = 0;
_count0 = 0;
}
public int getCategory() {
return _category;
}
public CategoricalBin clone() {
return new CategoricalBin(_category, _count, _count0);
}
public double[] toDoubles() {
return new double[]{_category, _count, _count0};
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/binning/FeatureBins.java
|
package hex.tree.dt.binning;
import hex.tree.dt.CategoricalSplittingRule;
import hex.tree.dt.NumericSplittingRule;
import java.util.*;
import java.util.stream.Collectors;
public class FeatureBins {
private List<AbstractBin> _bins;
private final boolean _isConstant; // todo - test this
private int _numOfCategories;
public FeatureBins(List<AbstractBin> bins) {
// default value of numOfCategories is -1 (used for the numeric features)
this(bins, -1);
}
public FeatureBins(List<AbstractBin> bins, int numOfCategories) {
if (bins == null) {
_isConstant = true;
} else {
_isConstant = false;
_bins = bins;
_numOfCategories = numOfCategories;
}
}
/**
* Calculates statistics for bins depending on all other bins - see BinAccumulatedStatistics.
*
* @return list of accumulated statistics, matches original bins
*/
public List<SplitStatistics> calculateSplitStatisticsForNumericFeature() {
// init list with empty instances
List<SplitStatistics> statistics = _bins.stream()
.map(b -> new SplitStatistics()).collect(Collectors.toList());
// calculate accumulative statistics for each split:
// left split - bins to the left + current;
// right split - bins to the right.
SplitStatistics tmpAccumulatorLeft = new SplitStatistics();
SplitStatistics tmpAccumulatorRight = new SplitStatistics();
int rightIndex;
for (int leftIndex = 0; leftIndex < statistics.size(); leftIndex++) {
tmpAccumulatorLeft.accumulateLeftStatistics(_bins.get(leftIndex)._count, _bins.get(leftIndex)._count0);
statistics.get(leftIndex).copyLeftValues(tmpAccumulatorLeft);
statistics.get(leftIndex)._splittingRule = new NumericSplittingRule(((NumericBin) _bins.get(leftIndex))._max);
// accumulate from the right (from the end of bins array)
rightIndex = _bins.size() - leftIndex - 1;
// first copy the previously accumulated right-split values, then add the current bin to the accumulator for later iterations,
// as the current bin's values are not included in its own right-split statistics
statistics.get(rightIndex).copyRightValues(tmpAccumulatorRight);
tmpAccumulatorRight.accumulateRightStatistics(_bins.get(rightIndex)._count, _bins.get(rightIndex)._count0);
}
return statistics;
}
public boolean isConstant() {
return _isConstant;
}
List<AbstractBin> getFeatureBins() {
return _bins.stream().map(AbstractBin::clone).collect(Collectors.toList());
}
public List<SplitStatistics> calculateSplitStatisticsForCategoricalFeature() {
// for binomial classification sort bins by the frequency of one class and split similarly to the sequential feature
return calculateStatisticsForCategoricalFeatureBinomialClassification();
// full approach for binomial/multinomial/regression, works for up to 10 categories
// return calculateStatisticsForCategoricalFeatureFullApproach();
}
private List<SplitStatistics> calculateStatisticsForCategoricalFeatureFullApproach() {
// calculate accumulative statistics for each subset of categories:
// left split - categories included in the subset;
// right split - categories not included in subset.
assert _numOfCategories <= 10; // for numOfCategories > 10 another implementation will be created,
// as the maximum supported category index is currently 9; for larger numbers the faster sequential approach should be used
// init list with empty instances
String categories = _bins.stream().map(b -> String.valueOf(((CategoricalBin) b)._category))
.collect(Collectors.joining("")); // is it always 0 to _bins.size()?
Set<boolean[]> splits = findAllCategoricalSplits(categories);
List<SplitStatistics> statistics = new ArrayList<>();
for (boolean[] splitMask : splits) {
SplitStatistics splitStatistics = new SplitStatistics();
for (AbstractBin bin : _bins) {
// if bin category is in the mask, it belongs to the left split, otherwise it belongs to the right split
if (splitMask[((CategoricalBin) bin)._category]) {
splitStatistics.accumulateLeftStatistics(bin._count, bin._count0);
} else {
splitStatistics.accumulateRightStatistics(bin._count, bin._count0);
}
}
splitStatistics._splittingRule = new CategoricalSplittingRule(splitMask);
statistics.add(splitStatistics);
}
return statistics;
}
private Set<boolean[]> findAllCategoricalSplits(String categories) {
int recMaxDepth = categories.length() / 2; // floor. Generate only one half of the subsets as the rest is the complement
Set<boolean[]> masks = new HashSet<>();
// for 1 to recMaxDepth - 1 depth generate all options
for (int depth = 1; depth < recMaxDepth; depth++) {
for (String s : categories.split("")) {
// use substring method to clone (deep copy) the string
rec(masks, s, categories.substring(0).replaceAll(s, ""), depth - 1);
}
}
// for the highest depth (recMaxDepth - 1) generate only half of the options for even lengths and all options for odd lengths
if (categories.length() == recMaxDepth * 2) {
// try only one first category
rec(masks, categories.substring(0, 1), categories.substring(1), recMaxDepth - 1);
} // nope: should be covered if generate only half of the depth and half of the combination (include first value / complementary will exclude it)
else for (String s : categories.split("")) {
// use substring method to clone (deep copy) the string
rec(masks, s, categories.substring(0).replaceAll(s, ""), recMaxDepth - 1);
}
return masks;
}
private void rec(Set<boolean[]> masks, String current, String categories, int stepsToGo) {
if (stepsToGo == 0) {
masks.add(createMaskFromString(current));
return;
}
for (String s : categories.split("")) {
if (s.charAt(0) > current.charAt(current.length() - 1))
// use substring method to clone (deep copy) the string
rec(masks, current + s, categories.substring(0).replaceAll(s, ""), stepsToGo - 1);
}
}
private boolean[] createMaskFromString(String categories) {
boolean[] mask = new boolean[_numOfCategories]; // array of false
for (String c : categories.split("")) {
mask[Integer.parseInt(c)] = true;
}
return mask;
}
private boolean[] createMaskFromBins(List<CategoricalBin> bins) {
boolean[] mask = new boolean[_numOfCategories]; // array of false
bins.stream().map(CategoricalBin::getCategory).forEach(c -> mask[c] = true);
return mask;
}
public List<SplitStatistics> calculateStatisticsForCategoricalFeatureBinomialClassification() {
List<CategoricalBin> sortedBins = _bins.stream()
.map(b -> (CategoricalBin) b)
.sorted(Comparator.comparingInt(CategoricalBin::getCount0))
.collect(Collectors.toList());
// init list with empty instances
List<SplitStatistics> statistics = sortedBins.stream()
.map(b -> new SplitStatistics()).collect(Collectors.toList());
// calculate accumulative statistics for each split:
// left split - bins to the left + current;
// right split - bins to the right.
SplitStatistics tmpAccumulatorLeft = new SplitStatistics();
SplitStatistics tmpAccumulatorRight = new SplitStatistics();
int rightIndex;
for (int leftIndex = 0; leftIndex < statistics.size(); leftIndex++) {
tmpAccumulatorLeft.accumulateLeftStatistics(sortedBins.get(leftIndex)._count, sortedBins.get(leftIndex)._count0);
statistics.get(leftIndex).copyLeftValues(tmpAccumulatorLeft);
statistics.get(leftIndex)._splittingRule = new CategoricalSplittingRule(
createMaskFromBins(sortedBins.subList(0, leftIndex + 1))); // subList takes toIndex exclusive, so +1
// accumulate from the right (from the end of bins array)
rightIndex = sortedBins.size() - leftIndex - 1;
// first copy the previously accumulated right-split values, then add the current bin to the accumulator for later iterations,
// as the current bin's values are not included in its own right-split statistics
statistics.get(rightIndex).copyRightValues(tmpAccumulatorRight);
tmpAccumulatorRight.accumulateRightStatistics(sortedBins.get(rightIndex)._count, sortedBins.get(rightIndex)._count0);
}
return statistics;
}
}
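// Hedged sketch, not part of the original source: quantifies why findAllCategoricalSplits
// only needs half of the subsets - each left subset and its complement describe the same
// split, so k categories yield 2^(k-1) - 1 non-trivial splits. The helper is illustrative.
class CategoricalSplitCountSketch {
static long nonTrivialSplitCount(int numCategories) {
return (1L << (numCategories - 1)) - 1;
}
}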
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/binning/Histogram.java
|
package hex.tree.dt.binning;
import hex.tree.dt.DataFeaturesLimits;
import hex.tree.dt.mrtasks.FeaturesLimitsMRTask;
import water.fvec.Frame;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static hex.tree.dt.DT.getInitialFeaturesLimits;
public class Histogram {
private final List<FeatureBins> _featuresBins;
private final BinningStrategy _binningStrategy;
public Histogram(Frame originData, DataFeaturesLimits conditionLimits, BinningStrategy binningStrategy) {
_binningStrategy = binningStrategy;
// get real features limits where the conditions are fulfilled
DataFeaturesLimits featuresLimitsForConditions = getFeaturesLimitsForConditions(originData, conditionLimits);
// call strategy to create bins for each feature separately
_featuresBins = IntStream
.range(0, originData.numCols() - 1/*exclude the last prediction column*/)
.mapToObj(i -> new FeatureBins(
_binningStrategy.createFeatureBins(originData, featuresLimitsForConditions, i),
originData.vec(i).cardinality()))
.collect(Collectors.toList());
}
/**
* Get list of feature bins (copy) - for testing.
*
* @param featureIndex feature index
* @return list of feature bins
*/
public List<AbstractBin> getFeatureBins(int featureIndex) {
return _featuresBins.get(featureIndex).getFeatureBins();
}
public int featuresCount() {
return _featuresBins.size();
}
/**
* Computes features limits considering known condition limits of ancestors.
* For example: what are real limits of all features considering that feature x is limited by values x_i and x_j.
*
* @return new features limits
*/
public static DataFeaturesLimits getFeaturesLimitsForConditions(Frame originData, DataFeaturesLimits conditionLimits) {
FeaturesLimitsMRTask task = new FeaturesLimitsMRTask(conditionLimits == null
? getInitialFeaturesLimits(originData).toDoubles()
: conditionLimits.toDoubles());
task.doAll(originData);
return new DataFeaturesLimits(task._realFeatureLimits);
}
public List<SplitStatistics> calculateSplitStatisticsForNumericFeature(int feature) {
return _featuresBins.get(feature).calculateSplitStatisticsForNumericFeature();
}
public List<SplitStatistics> calculateSplitStatisticsForCategoricalFeature(int feature) {
return _featuresBins.get(feature).calculateSplitStatisticsForCategoricalFeature();
}
public boolean isConstant(int featureIndex) {
return _featuresBins.get(featureIndex).isConstant();
}
}
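// Hedged usage sketch, not part of the original source: shows the intended construction of
// a Histogram over a training Frame with equal-width binning. The Frame is assumed to be
// supplied by the caller (e.g. parsed training data with the response in the last column).
class HistogramUsageSketch {
static Histogram build(Frame train) {
// null condition limits -> limits are computed from the whole Frame
return new Histogram(train, null, BinningStrategy.EQUAL_WIDTH);
}
}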
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/binning/NumericBin.java
|
package hex.tree.dt.binning;
import water.util.Pair;
/**
* Single bin holding limits (min excluded), count of samples and count of class 0.
*/
public class NumericBin extends AbstractBin {
public double _min;
public double _max;
public static final int MIN_INDEX = 3;
public static final int MAX_INDEX = 4;
public NumericBin(double min, double max, int count, int count0) {
_min = min;
_max = max;
_count = count;
_count0 = count0;
}
public NumericBin(double min, double max) {
_min = min;
_max = max;
_count = 0;
_count0 = 0;
}
public NumericBin(Pair<Double, Double> binLimits) {
_min = binLimits._1();
_max = binLimits._2();
_count = 0;
_count0 = 0;
}
public NumericBin clone() {
return new NumericBin(_min, _max, _count, _count0);
}
public double[] toDoubles() {
// Place the numeric flag -1.0 at index 0 to mark that the feature is numeric
return new double[]{-1.0, _count, _count0, _min, _max};
}
public void setMin(double min) {
_min = min;
}
public void setMax(double max) {
_max = max;
}
}
|
0
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt
|
java-sources/ai/h2o/h2o-algos/3.46.0.7/hex/tree/dt/binning/SplitStatistics.java
|
package hex.tree.dt.binning;
import hex.tree.dt.AbstractSplittingRule;
import org.apache.commons.math3.util.Precision;
/**
* Potential split including the splitting rule and statistics on the sample counts and the distribution of the target variable.
* Holds how many samples (and class-0 samples) are in the left and right splits.
*/
public class SplitStatistics {
public AbstractSplittingRule _splittingRule;
public int _leftCount;
public int _leftCount0;
public int _rightCount;
public int _rightCount0;
public SplitStatistics() {
_leftCount = 0;
_leftCount0 = 0;
_rightCount = 0;
_rightCount0 = 0;
}
public void accumulateLeftStatistics(int leftCount, int leftCount0) {
_leftCount += leftCount;
_leftCount0 += leftCount0;
}
public void accumulateRightStatistics(int rightCount, int rightCount0) {
_rightCount += rightCount;
_rightCount0 += rightCount0;
}
public void copyLeftValues(SplitStatistics toCopy) {
_leftCount = toCopy._leftCount;
_leftCount0 = toCopy._leftCount0;
}
public void copyRightValues(SplitStatistics toCopy) {
_rightCount = toCopy._rightCount;
_rightCount0 = toCopy._rightCount0;
}
public SplitStatistics setCriterionValue(double criterionOfSplit) {
_splittingRule.setCriterionValue(criterionOfSplit);
return this;
}
public SplitStatistics setFeatureIndex(int featureIndex) {
_splittingRule.setFeatureIndex(featureIndex);
return this;
}
public static double entropyBinarySplit(final double oneClassFrequency) {
return -1 * ((oneClassFrequency < Precision.EPSILON ? 0 : (oneClassFrequency * Math.log(oneClassFrequency)))
+ ((1 - oneClassFrequency) < Precision.EPSILON ? 0 : ((1 - oneClassFrequency) * Math.log(1 - oneClassFrequency))));
}
public Double binaryEntropy() {
double a1 = (entropyBinarySplit(_leftCount0 * 1.0 / _leftCount)
* _leftCount / (_leftCount + _rightCount));
double a2 = (entropyBinarySplit(_rightCount0 * 1.0 / _rightCount)
* _rightCount / (_leftCount + _rightCount));
return a1 + a2;
}
}
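// Hedged sketch, not part of the original source: a worked example of the weighted binary
// entropy returned by binaryEntropy(). With 40 left samples (30 of class 0) and 60 right
// samples (10 of class 0) it computes 0.4 * H(0.75) + 0.6 * H(1/6), with H in nats.
// The counts are illustrative only.
class SplitEntropySketch {
static double demo() {
SplitStatistics stats = new SplitStatistics();
stats.accumulateLeftStatistics(40, 30);
stats.accumulateRightStatistics(60, 10);
return stats.binaryEntropy();
}
}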
|