idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
22,300 | @ SuppressWarnings ( "unused" ) public FramesListV3 list ( int version , FramesListV3 s ) { Frames f = s . createAndFillImpl ( ) ; f . frames = Frame . fetchAll ( ) ; s . fillFromImplWithSynopsis ( f ) ; return s ; } | Return all the frames . The Frames list will be instances of FrameSynopsisV3 which only contains a few fields for performance reasons . |
22,301 | @ SuppressWarnings ( "unused" ) public FramesV3 column ( int version , FramesV3 s ) { Frame frame = getFromDKV ( "key" , s . frame_id . key ( ) ) ; Vec vec = frame . vec ( s . column ) ; if ( null == vec ) throw new H2OColumnNotFoundArgumentException ( "column" , s . frame_id . toString ( ) , s . column ) ; Vec [ ] vecs = { vec } ; String [ ] names = { s . column } ; Frame new_frame = new Frame ( names , vecs ) ; s . frames = new FrameV3 [ 1 ] ; s . frames [ 0 ] = new FrameV3 ( new_frame ) ; ( ( FrameV3 ) s . frames [ 0 ] ) . clearBinsField ( ) ; return s ; } | Return a single column from the frame . |
22,302 | @ SuppressWarnings ( "unused" ) public FramesV3 fetch ( int version , FramesV3 s ) { FramesV3 frames = doFetch ( version , s ) ; for ( FrameBaseV3 a_frame : frames . frames ) { ( ( FrameV3 ) a_frame ) . clearBinsField ( ) ; } return frames ; } | Return a single frame . |
22,303 | public FramesV3 export ( int version , FramesV3 s ) { Frame fr = getFromDKV ( "key" , s . frame_id . key ( ) ) ; Log . info ( "ExportFiles processing (" + s . path + ")" ) ; s . job = new JobV3 ( Frame . export ( fr , s . path , s . frame_id . key ( ) . toString ( ) , s . force , s . num_parts ) ) ; return s ; } | Export a single frame to the specified path . |
22,304 | @ SuppressWarnings ( "unused" ) public FramesV3 delete ( int version , FramesV3 frames ) { Frame frame = getFromDKV ( "key" , frames . frame_id . key ( ) ) ; frame . delete ( ) ; return frames ; } | Remove an unlocked frame . Fails if frame is in - use . |
22,305 | public boolean proxyLoginHandler ( String target , HttpServletRequest request , HttpServletResponse response ) throws IOException { if ( ! isLoginTarget ( target ) ) { return false ; } if ( isPageRequest ( request ) ) { proxySendLoginForm ( response ) ; } else { response . sendError ( HttpServletResponse . SC_UNAUTHORIZED , "Access denied. Please login." ) ; } return true ; } | proxy login handler part |
22,306 | private double distance ( final String a , final String b ) { if ( a . isEmpty ( ) ) return b . length ( ) ; if ( b . isEmpty ( ) ) return a . length ( ) ; if ( a . equals ( b ) ) return 0 ; final int aLength = b . length ( ) ; final int bLength = a . length ( ) ; double [ ] v0 = new double [ aLength + 1 ] ; double [ ] v1 = new double [ aLength + 1 ] ; for ( int i = 0 ; i < v0 . length ; i ++ ) { v0 [ i ] = i * 1D ; } for ( int i = 0 ; i < bLength ; i ++ ) { v1 [ 0 ] = ( i + 1 ) * 1D ; for ( int j = 0 ; j < aLength ; j ++ ) { v1 [ j + 1 ] = min ( v1 [ j ] + 1D , v0 [ j + 1 ] + 1D , v0 [ j ] + ( a . charAt ( i ) == b . charAt ( j ) ? 0D : 1D ) ) ; } final double [ ] swap = v0 ; v0 = v1 ; v1 = swap ; } return v0 [ aLength ] ; } | Computes a case - sensitive Levenshtein distance of two strings |
22,307 | public static List < FieldMetadata > getFieldMetadata ( Schema schema ) { List < Field > superclassFields = Arrays . asList ( Weaver . getWovenFields ( schema . getClass ( ) . getSuperclass ( ) ) ) ; List < FieldMetadata > fields = new ArrayList < > ( ) ; for ( Field field : Weaver . getWovenFields ( schema . getClass ( ) ) ) { FieldMetadata fmd = FieldMetadata . createIfApiAnnotation ( schema , field , superclassFields ) ; if ( null != fmd ) fields . add ( fmd ) ; } return fields ; } | Returns metadata of all annotated fields . |
22,308 | public static void main ( String [ ] args ) { Credentials cred = new Credentials ( System . getProperty ( "user.name" ) , "Heslo123" ) ; String url = start ( args , cred , "https://localhost:54321/" , false ) ; System . out . println ( "Proxy started on " + url + " " + cred . toDebugString ( ) ) ; } | just for local testing |
22,309 | static Frame uniqueValuesBy ( Frame fr , int columnIndex ) { Vec vec0 = fr . vec ( columnIndex ) ; Vec v ; if ( vec0 . isCategorical ( ) ) { v = Vec . makeSeq ( 0 , ( long ) vec0 . domain ( ) . length , true ) ; v . setDomain ( vec0 . domain ( ) ) ; DKV . put ( v ) ; } else { UniqTask t = new UniqTask ( ) . doAll ( vec0 ) ; int nUniq = t . _uniq . size ( ) ; final AstGroup . G [ ] uniq = t . _uniq . keySet ( ) . toArray ( new AstGroup . G [ nUniq ] ) ; v = Vec . makeZero ( nUniq , vec0 . get_type ( ) ) ; new MRTask ( ) { public void map ( Chunk c ) { int start = ( int ) c . start ( ) ; for ( int i = 0 ; i < c . _len ; ++ i ) c . set ( i , uniq [ i + start ] . _gs [ 0 ] ) ; } } . doAll ( v ) ; } return new Frame ( v ) ; } | return a frame with unique values from the specified column |
22,310 | public StringBuffer markdown ( Schema sinput , Schema soutput ) { MarkdownBuilder builder = new MarkdownBuilder ( ) ; builder . comment ( "Preview with http://jbt.github.io/markdown-editor" ) ; builder . heading1 ( _http_method , _url ) ; builder . hline ( ) ; builder . paragraph ( _summary ) ; builder . heading1 ( "Input schema: " ) ; builder . append ( sinput . markdown ( true , false ) ) ; builder . heading1 ( "Output schema: " ) ; builder . append ( soutput . markdown ( false , true ) ) ; return builder . stringBuffer ( ) ; } | Generate Markdown documentation for this Route . |
22,311 | public void processMiniBatch ( long seed , double [ ] responses , double [ ] offsets , int n ) { assert ( _training ) ; if ( _localmodel . get_params ( ) . _reproducible ) { seed += _localmodel . get_processed_global ( ) ; } else { seed = _dropout_rng . nextLong ( ) ; } fpropMiniBatch ( seed , _neurons , _localmodel , _localmodel . get_params ( ) . _elastic_averaging ? _sharedmodel : null , _training , responses , offsets , n ) ; bpropMiniBatch ( _neurons , n ) ; } | Apply the gradient to update the weights |
22,312 | static public void bpropMiniBatch ( Neurons [ ] neurons , int n ) { neurons [ neurons . length - 1 ] . bpropOutputLayer ( n ) ; for ( int i = neurons . length - 2 ; i > 0 ; -- i ) neurons [ i ] . bprop ( n ) ; for ( int mb = 0 ; mb < n ; ++ mb ) { for ( int i = 0 ; i < neurons . length ; ++ i ) { Storage . DenseVector e = neurons [ i ] . _e == null ? null : neurons [ i ] . _e [ mb ] ; if ( e == null ) continue ; Arrays . fill ( e . raw ( ) , 0 ) ; } } } | Helper to apply back - propagation without clearing out the gradients afterwards Used for gradient checking |
22,313 | final public P createParametersSchema ( ) { if ( ModelBuilderSchema . class == this . getClass ( ) ) { return ( P ) new ModelParametersSchemaV3 ( ) ; } try { Class < ? extends ModelParametersSchemaV3 > parameters_class = ReflectionUtils . findActualClassParameter ( this . getClass ( ) , 2 ) ; return ( P ) parameters_class . newInstance ( ) ; } catch ( Exception e ) { throw H2O . fail ( "Caught exception trying to instantiate a builder instance for ModelBuilderSchema: " + this + ": " + e , e ) ; } } | Factory method to create the model - specific parameters schema . |
22,314 | public S fillFromImpl ( B builder ) { this . algo = builder . _parms . algoName ( ) . toLowerCase ( ) ; this . algo_full_name = builder . _parms . fullName ( ) ; this . supervised = builder . isSupervised ( ) ; this . can_build = builder . can_build ( ) ; this . visibility = builder . builderVisibility ( ) ; job = builder . _job == null ? null : new JobV3 ( builder . _job ) ; final ModelBuilder . ValidationMessage [ ] msgs = builder . _messages ; if ( msgs != null ) { this . messages = new ValidationMessageV3 [ msgs . length ] ; int i = 0 ; for ( ModelBuilder . ValidationMessage vm : msgs ) { if ( vm != null ) this . messages [ i ++ ] = new ValidationMessageV3 ( ) . fillFromImpl ( vm ) ; } ValidationMessageV3 . mapValidationMessageFieldNames ( this . messages , new String [ ] { "_train" , "_valid" } , new String [ ] { "training_frame" , "validation_frame" } ) ; } this . error_count = builder . error_count ( ) ; parameters = createParametersSchema ( ) ; parameters . fillFromImpl ( builder . _parms ) ; parameters . model_id = builder . dest ( ) == null ? null : new KeyV3 . ModelKeyV3 ( builder . dest ( ) ) ; return ( S ) this ; } | Generic filling from the impl |
22,315 | private int cheapRandInt ( int max ) { _seed ^= ( _seed << 21 ) ; _seed ^= ( _seed >>> 35 ) ; _seed ^= ( _seed << 4 ) ; int r = ( int ) _seed % max ; return r > 0 ? r : - r ; } | This is cheap and moderate in quality . |
22,316 | static Key getKey ( boolean isLeft , int col , H2ONode node ) { return Key . make ( "__radix_order__MSBNodeCounts_col" + col + "_node" + node . index ( ) + ( isLeft ? "_LEFT" : "_RIGHT" ) ) ; } | make it homed to the owning node |
22,317 | protected void fillMissingParameters ( ) { if ( dest == null ) { dest = Key . make ( ) ; } if ( seed == - 1 ) { seed = new Random ( ) . nextLong ( ) ; Log . info ( "Generated seed: " + seed ) ; } } | Resolve parameter values that cannot be initialized to static defaults . If you re overriding this method please make sure to invoke the super implementation as well . |
22,318 | public static double exp ( double x ) { double val = Math . min ( MAX , Math . exp ( x ) ) ; return val ; } | helper - sanitized exponential function |
22,319 | public static double log ( double x ) { x = Math . max ( 0 , x ) ; double val = x == 0 ? MIN_LOG : Math . max ( MIN_LOG , Math . log ( x ) ) ; return val ; } | helper - sanitized log function |
22,320 | public double deviance ( double w , double y , double f ) { switch ( distribution ) { case AUTO : case gaussian : return w * ( y - f ) * ( y - f ) ; case huber : if ( Math . abs ( y - f ) <= huberDelta ) { return w * ( y - f ) * ( y - f ) ; } else { return 2 * w * ( Math . abs ( y - f ) - huberDelta ) * huberDelta ; } case laplace : return w * Math . abs ( y - f ) ; case quantile : return y > f ? w * quantileAlpha * ( y - f ) : w * ( 1 - quantileAlpha ) * ( f - y ) ; case bernoulli : return - 2 * w * ( y * log ( f ) + ( 1 - y ) * log ( 1 - f ) ) ; case quasibinomial : if ( y == f ) return 0 ; if ( f > 1 ) return - 2 * w * y * log ( f ) ; else if ( f < 0 ) return - 2 * w * ( 1 - y ) * log ( 1 - f ) ; else return - 2 * w * ( y * log ( f ) + ( 1 - y ) * log ( 1 - f ) ) ; case poisson : f = link ( f ) ; return - 2 * w * ( y * f - exp ( f ) ) ; case gamma : f = link ( f ) ; return 2 * w * ( y * exp ( - f ) + f ) ; case tweedie : f = link ( f ) ; assert ( tweediePower > 1 && tweediePower < 2 ) ; return 2 * w * ( Math . pow ( y , 2 - tweediePower ) / ( ( 1 - tweediePower ) * ( 2 - tweediePower ) ) - y * exp ( f * ( 1 - tweediePower ) ) / ( 1 - tweediePower ) + exp ( f * ( 2 - tweediePower ) ) / ( 2 - tweediePower ) ) ; case modified_huber : double yf = ( 2 * y - 1 ) * f ; if ( yf < - 1 ) return - w * 4 * yf ; else if ( yf > 1 ) return 0 ; else return w * yf * yf ; default : throw H2O . unimpl ( ) ; } } | Deviance of given distribution function at predicted value f |
22,321 | public double initFNum ( double w , double o , double y ) { switch ( distribution ) { case AUTO : case gaussian : case bernoulli : case quasibinomial : case multinomial : return w * ( y - o ) ; case poisson : return w * y ; case gamma : return w * y * linkInv ( - o ) ; case tweedie : return w * y * exp ( o * ( 1 - tweediePower ) ) ; case modified_huber : return y == 1 ? w : 0 ; default : throw H2O . unimpl ( ) ; } } | Contribution to numerator for initial value computation |
22,322 | public double gammaNum ( double w , double y , double z , double f ) { switch ( distribution ) { case gaussian : case bernoulli : case quasibinomial : case multinomial : return w * z ; case poisson : return w * y ; case gamma : return w * ( z + 1 ) ; case tweedie : return w * y * exp ( f * ( 1 - tweediePower ) ) ; case modified_huber : double yf = ( 2 * y - 1 ) * f ; if ( yf < - 1 ) return w * 4 * ( 2 * y - 1 ) ; else if ( yf > 1 ) return 0 ; else return w * 2 * ( 2 * y - 1 ) * ( 1 - yf ) ; default : throw H2O . unimpl ( ) ; } } | Contribution to numerator for GBM s leaf node prediction |
22,323 | public double gammaDenom ( double w , double y , double z , double f ) { switch ( distribution ) { case gaussian : case gamma : return w ; case bernoulli : case quasibinomial : double ff = y - z ; return w * ff * ( 1 - ff ) ; case multinomial : double absz = Math . abs ( z ) ; return w * ( absz * ( 1 - absz ) ) ; case poisson : return w * ( y - z ) ; case tweedie : return w * exp ( f * ( 2 - tweediePower ) ) ; case modified_huber : double yf = ( 2 * y - 1 ) * f ; if ( yf < - 1 ) return - w * 4 * yf ; else if ( yf > 1 ) return 0 ; else return w * ( 1 - yf ) * ( 1 - yf ) ; default : throw H2O . unimpl ( ) ; } } | Contribution to denominator for GBM s leaf node prediction |
22,324 | public void stop ( ) throws IOException { if ( jettyServer != null ) { try { jettyServer . stop ( ) ; } catch ( IOException e ) { throw e ; } catch ( Exception e ) { throw new IOException ( e ) ; } } } | Stop Jetty server after it has been started . This is unlikely to ever be called by H2O until H2O supports graceful shutdown . |
22,325 | public static boolean isUUID ( BufferedString str ) { boolean res ; int old = str . getOffset ( ) ; attemptUUIDParseLow ( str ) ; attemptUUIDParseHigh ( str ) ; res = str . getOffset ( ) != - 1 ; str . setOff ( old ) ; return res ; } | Confirms whether the provided UUID is considered valid . |
22,326 | protected void closure ( SB sb ) throws RuntimeException { sb . p ( ";" ) . nl ( ) ; sb . ip ( "return pred;" ) . nl ( ) . di ( 1 ) ; sb . ip ( "}" ) . p ( " // constant pool size = " ) . p ( _constantPoolSize ) . p ( "B, number of visited nodes = " ) . p ( _nodes ) . p ( ", static init size = " ) . p ( _staticInitSize ) . p ( "B" ) ; sb . nl ( ) ; _sb . p ( _grpsplit ) ; sb . di ( 1 ) . ip ( "}" ) . nl ( ) . nl ( ) ; } | close the code |
22,327 | public boolean onExceptionalCompletion ( Throwable ex , jsr166y . CountedCompleter caller ) { System . err . println ( "onExCompletion for " + this ) ; ex . printStackTrace ( ) ; water . util . Log . err ( ex ) ; return true ; } | Exceptional completion path ; mostly does printing if the exception was not handled earlier in the stack . |
22,328 | static RollupStats getOrNull ( Vec vec , final Key rskey ) { Value val = DKV . get ( rskey ) ; if ( val == null ) return vec . length ( ) > 0 ? null : new RollupStats ( 0 ) ; RollupStats rs = val . get ( RollupStats . class ) ; return rs . isReady ( ) ? rs : null ; } | Fetch if present but do not compute |
22,329 | public void debug ( UserFeedbackEvent . Stage stage , String message ) { Log . debug ( stage + ": " + message ) ; addEvent ( new UserFeedbackEvent ( autoML , UserFeedbackEvent . Level . Debug , stage , message ) ) ; } | Add a Debug UserFeedbackEvent and log . |
22,330 | public void info ( UserFeedbackEvent . Stage stage , String message ) { Log . info ( stage + ": " + message ) ; addEvent ( new UserFeedbackEvent ( autoML , UserFeedbackEvent . Level . Info , stage , message ) ) ; } | Add a Info UserFeedbackEvent and log . |
22,331 | public void warn ( UserFeedbackEvent . Stage stage , String message ) { Log . warn ( stage + ": " + message ) ; addEvent ( new UserFeedbackEvent ( autoML , UserFeedbackEvent . Level . Warn , stage , message ) ) ; } | Add a Warn UserFeedbackEvent and log . |
22,332 | protected final ParseWriter parseChunk ( int chunkId , ParseReader din , ParseWriter dout ) { _cidx = chunkId ; List < StripeInformation > stripesInfo = ( ( OrcParseSetup ) this . _setup ) . getStripes ( ) ; if ( stripesInfo . size ( ) == 0 ) { dout . addError ( new ParseWriter . ParseErr ( "Orc Parser: Empty file." , chunkId , 0L , - 2L ) ) ; return dout ; } OrcParseSetup setup = ( OrcParseSetup ) this . _setup ; StripeInformation thisStripe = stripesInfo . get ( chunkId ) ; String [ ] orcTypes = setup . getColumnTypesString ( ) ; boolean [ ] toInclude = setup . getToInclude ( ) ; try { RecordReader perStripe = orcFileReader . rows ( thisStripe . getOffset ( ) , thisStripe . getDataLength ( ) , setup . getToInclude ( ) , null , setup . getColumnNames ( ) ) ; VectorizedRowBatch batch = null ; long rows = 0 ; long rowCount = thisStripe . getNumberOfRows ( ) ; while ( rows != rowCount ) { batch = perStripe . nextBatch ( batch ) ; long currentBatchRow = batch . count ( ) ; int nrows = ( int ) currentBatchRow ; if ( currentBatchRow != nrows ) throw new IllegalArgumentException ( "got batch with too many records, does not fit in int" ) ; ColumnVector [ ] dataVectors = batch . cols ; int colIndex = 0 ; for ( int col = 0 ; col < batch . numCols ; ++ col ) { if ( toInclude [ col + 1 ] ) { if ( _setup . getColumnTypes ( ) [ colIndex ] != Vec . T_BAD ) write1column ( dataVectors [ col ] , orcTypes [ colIndex ] , colIndex , nrows , dout ) ; else dout . addNAs ( col , nrows ) ; colIndex ++ ; } } rows += currentBatchRow ; } byte [ ] col_types = _setup . getColumnTypes ( ) ; for ( int i = 0 ; i < col_types . length ; ++ i ) { if ( col_types [ i ] == Vec . T_BAD ) dout . addNAs ( i , ( int ) rowCount ) ; } perStripe . close ( ) ; } catch ( IOException ioe ) { throw new RuntimeException ( ioe ) ; } return dout ; } | This method calculates the number of stripes that will be read for each chunk . 
Since only single threading is supported in reading each stripe we will never split one stripe over different chunks . |
22,333 | private void write1column ( ColumnVector oneColumn , String columnType , int cIdx , int rowNumber , ParseWriter dout ) { if ( oneColumn . isRepeating && ! oneColumn . noNulls ) { for ( int i = 0 ; i < rowNumber ; ++ i ) dout . addInvalidCol ( cIdx ) ; } else switch ( columnType . toLowerCase ( ) ) { case "bigint" : case "boolean" : case "int" : case "smallint" : case "tinyint" : writeLongcolumn ( ( LongColumnVector ) oneColumn , cIdx , rowNumber , dout ) ; break ; case "float" : case "double" : writeDoublecolumn ( ( DoubleColumnVector ) oneColumn , cIdx , rowNumber , dout ) ; break ; case "numeric" : case "real" : if ( oneColumn instanceof LongColumnVector ) writeLongcolumn ( ( LongColumnVector ) oneColumn , cIdx , rowNumber , dout ) ; else writeDoublecolumn ( ( DoubleColumnVector ) oneColumn , cIdx , rowNumber , dout ) ; break ; case "string" : case "varchar" : case "char" : writeStringcolumn ( ( BytesColumnVector ) oneColumn , cIdx , rowNumber , dout ) ; break ; case "date" : case "timestamp" : writeTimecolumn ( ( LongColumnVector ) oneColumn , columnType , cIdx , rowNumber , dout ) ; break ; case "decimal" : writeDecimalcolumn ( ( DecimalColumnVector ) oneColumn , cIdx , rowNumber , dout ) ; break ; default : throw new IllegalArgumentException ( "Unsupported Orc schema type: " + columnType ) ; } } | This method writes one column of H2O data frame at a time . |
22,334 | private void writeTimecolumn ( LongColumnVector col , String columnType , int cIdx , int rowNumber , ParseWriter dout ) { boolean timestamp = columnType . equals ( "timestamp" ) ; long [ ] oneColumn = col . vector ; if ( col . isRepeating ) { long val = timestamp ? oneColumn [ 0 ] / 1000000 : correctTimeStamp ( oneColumn [ 0 ] ) ; for ( int rowIndex = 0 ; rowIndex < rowNumber ; rowIndex ++ ) dout . addNumCol ( cIdx , val , 0 ) ; } else if ( col . noNulls ) { for ( int rowIndex = 0 ; rowIndex < rowNumber ; rowIndex ++ ) dout . addNumCol ( cIdx , timestamp ? oneColumn [ rowIndex ] / 1000000 : correctTimeStamp ( oneColumn [ rowIndex ] ) , 0 ) ; } else { boolean [ ] isNull = col . isNull ; for ( int rowIndex = 0 ; rowIndex < rowNumber ; rowIndex ++ ) { if ( isNull [ rowIndex ] ) dout . addInvalidCol ( cIdx ) ; else dout . addNumCol ( cIdx , timestamp ? oneColumn [ rowIndex ] / 1000000 : correctTimeStamp ( oneColumn [ rowIndex ] ) , 0 ) ; } } } | This method writes one column of H2O frame for column type timestamp . This is just a long that records the number of seconds since Jan 1 2015 . |
22,335 | private void writeDecimalcolumn ( DecimalColumnVector col , int cIdx , int rowNumber , ParseWriter dout ) { HiveDecimalWritable [ ] oneColumn = col . vector ; if ( col . isRepeating ) { HiveDecimal hd = oneColumn [ 0 ] . getHiveDecimal ( ) ; for ( int rowIndex = 0 ; rowIndex < rowNumber ; rowIndex ++ ) dout . addNumCol ( cIdx , hd . unscaledValue ( ) . longValue ( ) , - hd . scale ( ) ) ; } else if ( col . noNulls ) { for ( int rowIndex = 0 ; rowIndex < rowNumber ; rowIndex ++ ) { HiveDecimal hd = oneColumn [ rowIndex ] . getHiveDecimal ( ) ; dout . addNumCol ( cIdx , hd . unscaledValue ( ) . longValue ( ) , - hd . scale ( ) ) ; } } else { boolean [ ] isNull = col . isNull ; for ( int rowIndex = 0 ; rowIndex < rowNumber ; rowIndex ++ ) { if ( isNull [ rowIndex ] ) dout . addInvalidCol ( cIdx ) ; else { HiveDecimal hd = oneColumn [ rowIndex ] . getHiveDecimal ( ) ; dout . addNumCol ( cIdx , hd . unscaledValue ( ) . longValue ( ) , - hd . scale ( ) ) ; } } } } | This method writes a column to H2O frame for column type Decimal . It is just written as some integer without using the scale field . Need to make sure this is what the customer wants . |
22,336 | private void writeStringcolumn ( BytesColumnVector col , int cIdx , int rowNumber , ParseWriter dout ) { BufferedString bs = new BufferedString ( ) ; if ( col . isRepeating ) { assert col . length [ 0 ] >= 0 : getClass ( ) . getSimpleName ( ) + ".writeStringcolumn/1: col.length[0]=" + col . length [ 0 ] + ", col.start[0]=" + col . start [ 0 ] ; dout . addStrCol ( cIdx , bs . set ( col . vector [ 0 ] , col . start [ 0 ] , col . length [ 0 ] ) ) ; for ( int rowIndex = 1 ; rowIndex < rowNumber ; ++ rowIndex ) dout . addStrCol ( cIdx , bs ) ; } else if ( col . noNulls ) { for ( int rowIndex = 0 ; rowIndex < rowNumber ; rowIndex ++ ) { int l = col . length [ rowIndex ] ; assert l >= 0 : getClass ( ) . getSimpleName ( ) + ".writeStringcolumn/2: col.col.length[rowIndex]=" + l + ", rowIndex=" + rowIndex ; dout . addStrCol ( cIdx , bs . set ( col . vector [ rowIndex ] , col . start [ rowIndex ] , l ) ) ; } } else { boolean [ ] isNull = col . isNull ; for ( int rowIndex = 0 ; rowIndex < rowNumber ; rowIndex ++ ) { if ( isNull [ rowIndex ] ) dout . addInvalidCol ( cIdx ) ; else { int l = col . length [ rowIndex ] ; assert l >= 0 : getClass ( ) . getSimpleName ( ) + ".writeStringcolumn/3: col.col.length[rowIndex]=" + l + ", rowIndex=" + rowIndex ; dout . addStrCol ( cIdx , bs . set ( col . vector [ rowIndex ] , col . start [ rowIndex ] , col . length [ rowIndex ] ) ) ; } } } } | This method writes a column of H2O frame for Orc File column types of string varchar char and binary at some point . |
22,337 | private void writeDoublecolumn ( DoubleColumnVector vec , int colId , int rowNumber , ParseWriter dout ) { double [ ] oneColumn = vec . vector ; byte t = _setup . getColumnTypes ( ) [ colId ] ; switch ( t ) { case Vec . T_CAT : if ( _toStringMaps . get ( colId ) == null ) _toStringMaps . put ( colId , new HashMap < Number , byte [ ] > ( ) ) ; HashMap < Number , byte [ ] > map = _toStringMaps . get ( colId ) ; BufferedString bs = new BufferedString ( ) ; if ( vec . isRepeating ) { bs . set ( StringUtils . toBytes ( oneColumn [ 0 ] ) ) ; for ( int i = 0 ; i < rowNumber ; ++ i ) dout . addStrCol ( colId , bs ) ; } else if ( vec . noNulls ) { for ( int i = 0 ; i < rowNumber ; i ++ ) { double d = oneColumn [ i ] ; if ( map . get ( d ) == null ) map . put ( d , StringUtils . toBytes ( d ) ) ; dout . addStrCol ( colId , bs . set ( map . get ( d ) ) ) ; } } else { for ( int i = 0 ; i < rowNumber ; i ++ ) { boolean [ ] isNull = vec . isNull ; if ( isNull [ i ] ) dout . addInvalidCol ( colId ) ; else { double d = oneColumn [ i ] ; if ( map . get ( d ) == null ) map . put ( d , StringUtils . toBytes ( d ) ) ; dout . addStrCol ( colId , bs . set ( map . get ( d ) ) ) ; } } } break ; default : if ( vec . isRepeating ) { for ( int i = 0 ; i < rowNumber ; ++ i ) dout . addNumCol ( colId , oneColumn [ 0 ] ) ; } else if ( vec . noNulls ) { for ( int rowIndex = 0 ; rowIndex < rowNumber ; rowIndex ++ ) dout . addNumCol ( colId , oneColumn [ rowIndex ] ) ; } else { boolean [ ] isNull = vec . isNull ; for ( int rowIndex = 0 ; rowIndex < rowNumber ; rowIndex ++ ) { if ( isNull [ rowIndex ] ) dout . addInvalidCol ( colId ) ; else dout . addNumCol ( colId , oneColumn [ rowIndex ] ) ; } } break ; } } | This method writes a column of H2O frame for Orc File column type of float or double . |
22,338 | private void check_Min_Value ( long l , int cIdx , int rowNumber , ParseWriter dout ) { if ( l <= Long . MIN_VALUE ) { String warning = "Orc Parser: Long.MIN_VALUE: " + l + " is found in column " + cIdx + " row " + rowNumber + " of stripe " + _cidx + ". This value is used for sentinel and will not be parsed correctly." ; dout . addError ( new ParseWriter . ParseErr ( warning , _cidx , rowNumber , - 2L ) ) ; } } | This method is written to check and make sure any value written to a column of type long is more than Long . MIN_VALUE . If this is not true a warning will be passed to the user . |
22,339 | static private int countStructFields ( ObjectInspector x , ArrayList < String > allColumnNames ) { int res = 1 ; switch ( x . getCategory ( ) ) { case STRUCT : StructObjectInspector structObjectInspector = ( StructObjectInspector ) x ; List < StructField > allColumns = ( List < StructField > ) structObjectInspector . getAllStructFieldRefs ( ) ; for ( StructField oneField : allColumns ) { allColumnNames . add ( oneField . getFieldName ( ) ) ; res += countStructFields ( oneField . getFieldObjectInspector ( ) , allColumnNames ) ; } break ; case LIST : ListObjectInspector listObjectInspector = ( ListObjectInspector ) x ; allColumnNames . add ( "list" ) ; res += countStructFields ( listObjectInspector . getListElementObjectInspector ( ) , allColumnNames ) ; break ; case MAP : MapObjectInspector mapObjectInspector = ( MapObjectInspector ) x ; allColumnNames . add ( "mapKey" ) ; res += countStructFields ( mapObjectInspector . getMapKeyObjectInspector ( ) , allColumnNames ) ; allColumnNames . add ( "mapValue" ) ; res += countStructFields ( mapObjectInspector . getMapValueObjectInspector ( ) , allColumnNames ) ; break ; case UNION : UnionObjectInspector unionObjectInspector = ( UnionObjectInspector ) x ; allColumnNames . add ( "union" ) ; for ( ObjectInspector xx : unionObjectInspector . getObjectInspectors ( ) ) res += countStructFields ( xx , allColumnNames ) ; break ; case PRIMITIVE : break ; default : throw H2O . unimpl ( ) ; } return res ; } | which are ignored for now |
22,340 | public Key syncDirectory ( ArrayList < String > files , ArrayList < String > keys , ArrayList < String > fails , ArrayList < String > dels ) { Futures fs = new Futures ( ) ; Key k = null ; for ( int i = 0 ; i < _files . length ; ++ i ) { if ( _ok [ i ] < H2O . CLOUD . size ( ) ) { if ( fails != null ) fails . add ( _files [ i ] ) ; } else { File f = new File ( _files [ i ] ) ; k = PersistNFS . decodeFile ( f ) ; if ( files != null ) files . add ( _files [ i ] ) ; if ( keys != null ) keys . add ( k . toString ( ) ) ; if ( DKV . get ( k ) != null ) dels . add ( k . toString ( ) ) ; Key lockOwner = Key . make ( ) ; new Frame ( k ) . delete_and_lock ( lockOwner ) ; NFSFileVec nfs = NFSFileVec . make ( f , fs ) ; new Frame ( k , new String [ ] { "C1" } , new Vec [ ] { nfs } ) . update ( lockOwner ) . unlock ( lockOwner ) ; } } fs . blockForPending ( ) ; return k ; } | which match the directory name but are not on disk . |
22,341 | StringBuilder printLine ( StringBuilder sb ) { if ( _pid == NO_PARENT ) return sb . append ( "[root]" ) ; DecidedNode parent = _tree . decided ( _pid ) ; parent . printLine ( sb ) . append ( " to " ) ; return parent . printChild ( sb , _nid ) ; } | Recursively print the decision - line from tree root to this child . |
22,342 | public int [ ] scoreCols ( ) { DTree tree = _tree ; if ( tree . actual_mtries ( ) == _hs . length && tree . _mtrys_per_tree == _hs . length ) return null ; int [ ] activeCols = tree . _cols ; int [ ] cols = new int [ activeCols . length ] ; int len = 0 ; for ( int i = 0 ; i < activeCols . length ; i ++ ) { int idx = activeCols [ i ] ; assert ( idx == i || tree . _mtrys_per_tree < _hs . length ) ; if ( _hs [ idx ] == null ) continue ; assert _hs [ idx ] . _min < _hs [ idx ] . _maxEx && _hs [ idx ] . nbins ( ) > 1 : "broken histo range " + _hs [ idx ] ; cols [ len ++ ] = idx ; } int choices = len ; int mtries = tree . actual_mtries ( ) ; if ( choices > 0 ) { for ( int i = 0 ; i < mtries ; i ++ ) { if ( len == 0 ) break ; int idx2 = tree . _rand . nextInt ( len ) ; int col = cols [ idx2 ] ; cols [ idx2 ] = cols [ -- len ] ; cols [ len ] = col ; } assert len < choices ; } return Arrays . copyOfRange ( cols , len , choices ) ; } | Can return null for all columns . |
22,343 | public void do_not_split ( ) { if ( _pid == NO_PARENT ) return ; DecidedNode dn = _tree . decided ( _pid ) ; for ( int i = 0 ; i < dn . _nids . length ; i ++ ) if ( dn . _nids [ i ] == _nid ) { dn . _nids [ i ] = ScoreBuildHistogram . UNDECIDED_CHILD_NODE_ID ; return ; } throw H2O . fail ( ) ; } | perfect prediction here and we want to turn into a leaf . |
22,344 | public final int size ( ) { if ( _size != 0 ) return _size ; assert _nodeType == 0 : "unexpected node type: " + _nodeType ; if ( _split . _equal != 0 ) _nodeType |= _split . _equal == 1 ? 4 : ( _split . _equal == 2 ? 8 : 12 ) ; int res = _split . _equal == 3 ? 9 + _split . _bs . numBytes ( ) : 7 ; res ++ ; if ( _split . _nasplit == DHistogram . NASplitDir . NAvsREST ) res -= _split . _equal == 3 ? 6 + _split . _bs . numBytes ( ) : 4 ; Node left = _tree . node ( _nids [ 0 ] ) ; int lsz = left . size ( ) ; res += lsz ; if ( left instanceof LeafNode ) _nodeType |= ( byte ) 48 ; else { int slen = lsz < 256 ? 0 : ( lsz < 65535 ? 1 : ( lsz < ( 1 << 24 ) ? 2 : 3 ) ) ; _nodeType |= slen ; res += ( slen + 1 ) ; } Node right = _tree . node ( _nids [ 1 ] ) ; if ( right instanceof LeafNode ) _nodeType |= ( byte ) ( 48 << 2 ) ; res += right . size ( ) ; assert ( _nodeType & 0x33 ) != 51 ; assert res != 0 ; return ( _size = res ) ; } | Size of this subtree ; sets _nodeType also |
22,345 | public AutoBuffer compress ( AutoBuffer ab , AutoBuffer abAux ) { int pos = ab . position ( ) ; if ( _nodeType == 0 ) size ( ) ; ab . put1 ( _nodeType ) ; assert _split != null ; assert _split . _col >= 0 ; ab . put2 ( ( short ) _split . _col ) ; ab . put1 ( ( byte ) _split . _nasplit . value ( ) ) ; if ( _split . _nasplit != DHistogram . NASplitDir . NAvsREST ) { if ( _split . _equal == 0 || _split . _equal == 1 ) ab . put4f ( _splat ) ; else if ( _split . _equal == 2 ) _split . _bs . compress2 ( ab ) ; else _split . _bs . compress3 ( ab ) ; } if ( abAux != null ) { abAux . put4 ( _nid ) ; abAux . put4 ( _tree . node ( _nids [ 0 ] ) . numNodes ( ) ) ; abAux . put4f ( ( float ) _split . _n0 ) ; abAux . put4f ( ( float ) _split . _n1 ) ; abAux . put4f ( ( float ) _split . _p0 ) ; abAux . put4f ( ( float ) _split . _p1 ) ; abAux . put4f ( ( float ) _split . _se0 ) ; abAux . put4f ( ( float ) _split . _se1 ) ; abAux . put4 ( _nids [ 0 ] ) ; abAux . put4 ( _nids [ 1 ] ) ; } Node left = _tree . node ( _nids [ 0 ] ) ; if ( ( _nodeType & 48 ) == 0 ) { int sz = left . size ( ) ; if ( sz < 256 ) ab . put1 ( sz ) ; else if ( sz < 65535 ) ab . put2 ( ( short ) sz ) ; else if ( sz < ( 1 << 24 ) ) ab . put3 ( sz ) ; else ab . put4 ( sz ) ; } left . compress ( ab , abAux ) ; Node rite = _tree . node ( _nids [ 1 ] ) ; rite . compress ( ab , abAux ) ; assert _size == ab . position ( ) - pos : "reported size = " + _size + " , real size = " + ( ab . position ( ) - pos ) ; return ab ; } | Compress this tree into the AutoBuffer |
22,346 | public void addNumCol ( int colIdx , double value ) { if ( Double . isNaN ( value ) || Double . isInfinite ( value ) ) { addInvalidCol ( colIdx ) ; } else { if ( colIdx < _nCols ) { _nvs [ _col = colIdx ] . addNumDecompose ( value ) ; if ( _ctypes != null && _ctypes [ colIdx ] == Vec . T_BAD ) _ctypes [ colIdx ] = Vec . T_NUM ; } } } | Adds double value to the column . |
22,347 | private void startRegular ( ) throws IOException { String pwd = DeepwaterCaffeBackend . CAFFE_H2O_DIR ; ProcessBuilder pb = new ProcessBuilder ( "python3 backend.py" . split ( " " ) ) ; pb . environment ( ) . put ( "PYTHONPATH" , DeepwaterCaffeBackend . CAFFE_DIR + "python" ) ; pb . redirectError ( ProcessBuilder . Redirect . INHERIT ) ; pb . directory ( new File ( pwd ) ) ; _process = pb . start ( ) ; } | Debug or if we find a way to package Caffe without Docker
22,348 | public static NFSFileVec make ( File f ) { Futures fs = new Futures ( ) ; NFSFileVec nfs = make ( f , fs ) ; fs . blockForPending ( ) ; return nfs ; } | Make a new NFSFileVec key which holds the filename implicitly . This name is used by the Chunks to load data on - demand . Blocking |
22,349 | public static NFSFileVec make ( File f , Futures fs ) { if ( ! f . exists ( ) ) throw new IllegalArgumentException ( "File not found: " + f . toString ( ) ) ; long size = f . length ( ) ; Key k = Vec . newKey ( PersistNFS . decodeFile ( f ) ) ; NFSFileVec nfs = new NFSFileVec ( k , size ) ; DKV . put ( k , nfs , fs ) ; return nfs ; } | Make a new NFSFileVec key which holds the filename implicitly . This name is used by the Chunks to load data on - demand . |
22,350 | public static int [ ] sampleOOBRows ( int nrows , float rate , Random sampler ) { return sampleOOBRows ( nrows , rate , sampler , new int [ 2 + Math . round ( ( 1f - rate ) * nrows * 1.2f + 0.5f ) ] ) ; } | Sample out - of - bag rows with given rate with help of given sampler . It returns array of sampled rows . The first element of array contains a number of sampled rows . The returned array can be larger than number of returned sampled elements . |
22,351 | private static int next_idx ( long [ ] tl ) { while ( true ) { int oldidx = ( int ) tl [ 0 ] ; int newidx = ( oldidx + 1 ) & ( MAX_EVENTS - 1 ) ; if ( CAS ( tl , 0 , oldidx , newidx ) ) return oldidx ; } } | Return the next index into the TIMELINE array |
22,352 | static void tcp_call ( final AutoBuffer ab ) { ab . getPort ( ) ; long [ ] snap = ab . getA8 ( ) ; int idx = CLOUD . nidx ( ab . _h2o ) ; if ( idx >= 0 && idx < SNAPSHOT . length ) SNAPSHOT [ idx ] = snap ; ab . close ( ) ; synchronized ( TimeLine . class ) { TimeLine . class . notify ( ) ; } } | Receive a remote timeline |
22,353 | static void printMyTimeLine ( ) { long [ ] s = TimeLine . snapshot ( ) ; System . err . println ( "===================================<TIMELINE>==============================================" ) ; for ( int i = 0 ; i < TimeLine . length ( ) ; ++ i ) { long lo = TimeLine . l0 ( s , i ) , hi = TimeLine . l8 ( s , i ) ; int port = ( int ) ( ( lo >> 8 ) & 0xFFFF ) ; String op = TimeLine . send_recv ( s , i ) == 0 ? "SEND" : "RECV" ; if ( ! TimeLine . isEmpty ( s , i ) && ( lo & 0xFF ) == UDP . udp . exec . ordinal ( ) ) System . err . println ( TimeLine . ms ( s , i ) + ": " + op + " " + ( ( ( TimeLine . ns ( s , i ) & 4 ) != 0 ) ? "TCP" : "UDP" ) + TimeLine . inet ( s , i ) + ":" + port + " | " + UDP . printx16 ( lo , hi ) ) ; } System . err . println ( "===========================================================================================" ) ; } | Only for debugging . Prints local timeline to stdout . |
22,354 | public Frame toFrame ( ) { Vec zeroVec = null ; try { zeroVec = Vec . makeZero ( _output . _words . length ) ; byte [ ] types = new byte [ 1 + _output . _vecSize ] ; Arrays . fill ( types , Vec . T_NUM ) ; types [ 0 ] = Vec . T_STR ; String [ ] colNames = new String [ types . length ] ; colNames [ 0 ] = "Word" ; for ( int i = 1 ; i < colNames . length ; i ++ ) colNames [ i ] = "V" + i ; return new ConvertToFrameTask ( this ) . doAll ( types , zeroVec ) . outputFrame ( colNames , null ) ; } finally { if ( zeroVec != null ) zeroVec . remove ( ) ; } } | Converts this word2vec model to a Frame . |
22,355 | private float cosineSimilarity ( float [ ] target , int pos , float [ ] vecs ) { float dotProd = 0 , tsqr = 0 , csqr = 0 ; for ( int i = 0 ; i < target . length ; i ++ ) { dotProd += target [ i ] * vecs [ pos + i ] ; tsqr += Math . pow ( target [ i ] , 2 ) ; csqr += Math . pow ( vecs [ pos + i ] , 2 ) ; } return ( float ) ( dotProd / ( Math . sqrt ( tsqr ) * Math . sqrt ( csqr ) ) ) ; } | Basic calculation of cosine similarity |
22,356 | static boolean isZipDirectory ( Key key ) { Iced ice = DKV . getGet ( key ) ; if ( ice == null ) throw new H2OIllegalArgumentException ( "Missing data" , "Did not find any data under " + "key " + key ) ; ByteVec bv = ( ByteVec ) ( ice instanceof ByteVec ? ice : ( ( Frame ) ice ) . vecs ( ) [ 0 ] ) ; return isZipDirectory ( bv ) ; } | This method checks if the input argument is a zip directory containing files .
22,357 | static float getDecompressionRatio ( ByteVec bv ) { long totalSize = 0L ; long totalCompSize = 0L ; if ( bv instanceof FileVec ) { String strPath = getPathForKey ( ( ( FileVec ) bv ) . _key ) ; try { ZipFile zipFile = new ZipFile ( strPath ) ; Enumeration < ? extends ZipEntry > entries = zipFile . entries ( ) ; while ( entries . hasMoreElements ( ) ) { ZipEntry entry = entries . nextElement ( ) ; if ( ! entry . isDirectory ( ) ) { totalSize = totalSize + entry . getSize ( ) ; totalCompSize = totalCompSize + entry . getCompressedSize ( ) ; } } zipFile . close ( ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; } } if ( totalCompSize == 0 ) return 1 ; else return totalSize / totalCompSize ; } | When a file is a zip file that contains multiple files this method will return the decompression ratio . |
22,358 | static byte [ ] unzipForHeader ( byte [ ] bs , int chkSize ) { ByteArrayInputStream bais = new ByteArrayInputStream ( bs ) ; ZipInputStream zis = new ZipInputStream ( bais ) ; InputStream is = zis ; int off = 0 ; try { while ( off < bs . length ) { int len = 0 ; len = is . read ( bs , off , bs . length - off ) ; if ( len < 0 ) break ; off += len ; if ( off == bs . length ) { if ( bs . length >= chkSize ) break ; bs = Arrays . copyOf ( bs , bs . length * 2 ) ; } } } catch ( IOException e ) { e . printStackTrace ( ) ; } try { is . close ( ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; } return bs ; } | This method will read a compressed zip file and return the uncompressed bits so that we can check the beginning of the file and make sure it does not contain the column names . |
22,359 | public SharedTreeSubgraph makeSubgraph ( String name ) { SharedTreeSubgraph sg = new SharedTreeSubgraph ( subgraphArray . size ( ) , name ) ; subgraphArray . add ( sg ) ; return sg ; } | Make a new tree . |
22,360 | public void print ( ) { System . out . println ( "------------------------------------------------------------" ) ; System . out . println ( "Graph" ) ; for ( SharedTreeSubgraph sg : subgraphArray ) { sg . print ( ) ; } } | Debug printout of graph structure . For developer use only . |
22,361 | public void setupLocal ( ) { _tree . init_tree ( ) ; for ( int l = _leaf ; l < _tree . _len ; l ++ ) { DTree . UndecidedNode udn = _tree . undecided ( l ) ; DHistogram hs [ ] = _hcs [ l - _leaf ] ; int sCols [ ] = udn . _scoreCols ; if ( sCols != null ) { for ( int col : sCols ) hs [ col ] . init ( ) ; } else { for ( int j = 0 ; j < _ncols ; j ++ ) if ( hs [ j ] != null ) hs [ j ] . init ( ) ; } } } | Once - per - node shared init |
22,362 | public Model getModel ( MP params ) { Key < Model > mKey = getModelKey ( params ) ; return mKey != null ? mKey . get ( ) : null ; } | Returns model for given combination of model parameters or null if the model does not exist . |
22,363 | public Model [ ] getModels ( ) { Collection < Key < Model > > modelKeys = _models . values ( ) ; Model [ ] models = new Model [ modelKeys . size ( ) ] ; int i = 0 ; for ( Key < Model > mKey : modelKeys ) { models [ i ] = mKey != null ? mKey . get ( ) : null ; i ++ ; } return models ; } | Return all models included in this grid object . |
22,364 | public Object [ ] getHyperValues ( MP parms ) { Object [ ] result = new Object [ _hyper_names . length ] ; for ( int i = 0 ; i < _hyper_names . length ; i ++ ) { result [ i ] = PojoUtils . getFieldValue ( parms , _hyper_names [ i ] , _field_naming_strategy ) ; } return result ; } | Return value of hyper parameters used for this grid search . |
22,365 | protected Futures remove_impl ( final Futures fs ) { for ( Key < Model > k : _models . values ( ) ) k . remove ( fs ) ; _models . clear ( ) ; return fs ; } | Cleanup models and grid |
22,366 | static void handleWriteToChunk ( ByteChannel sock , AutoBuffer ab ) throws IOException { String frameKey = ab . getStr ( ) ; byte [ ] expectedTypes = ab . getA1 ( ) ; if ( expectedTypes == null ) { throw new RuntimeException ( "Expected types can't be null." ) ; } int [ ] maxVecSizes = ab . getA4 ( ) ; int [ ] elemSizes = ExternalFrameUtils . getElemSizes ( expectedTypes , maxVecSizes != null ? maxVecSizes : EMPTY_ARI ) ; int [ ] startPos = ExternalFrameUtils . getStartPositions ( elemSizes ) ; byte [ ] vecTypes = vecTypesFromExpectedTypes ( expectedTypes , maxVecSizes != null ? maxVecSizes : EMPTY_ARI ) ; int expectedNumRows = ab . getInt ( ) ; int currentRowIdx = 0 ; int chunk_id = ab . getInt ( ) ; NewChunk [ ] nchnk = ChunkUtils . createNewChunks ( frameKey , vecTypes , chunk_id ) ; assert nchnk != null ; while ( currentRowIdx < expectedNumRows ) { for ( int typeIdx = 0 ; typeIdx < expectedTypes . length ; typeIdx ++ ) { switch ( expectedTypes [ typeIdx ] ) { case EXPECTED_BOOL : case EXPECTED_BYTE : store ( ab , nchnk [ startPos [ typeIdx ] ] , ab . get1 ( ) ) ; break ; case EXPECTED_CHAR : store ( ab , nchnk [ startPos [ typeIdx ] ] , ab . get2 ( ) ) ; break ; case EXPECTED_SHORT : store ( ab , nchnk [ startPos [ typeIdx ] ] , ab . get2s ( ) ) ; break ; case EXPECTED_INT : store ( ab , nchnk [ startPos [ typeIdx ] ] , ab . getInt ( ) ) ; break ; case EXPECTED_TIMESTAMP : case EXPECTED_LONG : store ( ab , nchnk [ startPos [ typeIdx ] ] , ab . get8 ( ) ) ; break ; case EXPECTED_FLOAT : store ( nchnk [ startPos [ typeIdx ] ] , ab . get4f ( ) ) ; break ; case EXPECTED_DOUBLE : store ( nchnk [ startPos [ typeIdx ] ] , ab . get8d ( ) ) ; break ; case EXPECTED_STRING : store ( ab , nchnk [ startPos [ typeIdx ] ] , ab . 
getStr ( ) ) ; break ; case EXPECTED_VECTOR : storeVector ( ab , nchnk , elemSizes [ typeIdx ] , startPos [ typeIdx ] ) ; break ; default : throw new IllegalArgumentException ( "Unknown expected type: " + expectedTypes [ typeIdx ] ) ; } } currentRowIdx ++ ; } ChunkUtils . closeNewChunks ( nchnk ) ; AutoBuffer outputAb = new AutoBuffer ( ) ; outputAb . put1 ( ExternalFrameHandler . CONFIRM_WRITING_DONE ) ; writeToChannel ( outputAb , sock ) ; } | Internal method use on the h2o backend side to handle writing to the chunk from non - h2o environment |
22,367 | private void getNextFile ( final InputStream is ) throws IOException { if ( is instanceof java . util . zip . ZipInputStream ) { ZipEntry ze = ( ( ZipInputStream ) is ) . getNextEntry ( ) ; while ( ze != null && ze . isDirectory ( ) ) ze = ( ( ZipInputStream ) is ) . getNextEntry ( ) ; } } | This method will try to get the next file to be parsed . It will skip over directories if encountered . |
22,368 | private StreamInfo readOneFile ( final InputStream is , final StreamParseWriter dout , InputStream bvs , StreamParseWriter nextChunk , int zidx , int fileIndex ) throws IOException { int cidx = 0 ; StreamData din = new StreamData ( is ) ; if ( ( fileIndex > 0 ) && ( ! checkFileNHeader ( is , dout , din , cidx ) ) ) return new StreamInfo ( zidx , nextChunk ) ; int streamAvailable = is . available ( ) ; while ( streamAvailable > 0 ) { parseChunk ( cidx ++ , din , nextChunk ) ; streamAvailable = is . available ( ) ; int xidx = bvs . read ( null , 0 , 0 ) ; if ( xidx > zidx ) { zidx = xidx ; nextChunk . close ( ) ; if ( dout != nextChunk ) { dout . reduce ( nextChunk ) ; if ( _jobKey != null && _jobKey . get ( ) . stop_requested ( ) ) break ; } nextChunk = nextChunk . nextChunk ( ) ; } } parseChunk ( cidx , din , nextChunk ) ; return new StreamInfo ( zidx , nextChunk ) ; } | This method reads in one zip file . Before reading the file it will check if the current file has the same number of columns and separator type as the previous files it has parsed . If they do not match no file will be parsed in this case .
22,369 | public AutoBuffer writeUnwrappedJSON ( AutoBuffer ab ) { if ( is_array ) { if ( t . equals ( "B" ) ) return ab . putJSONA4 ( i_ar ) ; else if ( t . equals ( "I" ) ) return ab . putJSONA4 ( i_ar ) ; else if ( t . equals ( "L" ) ) return ab . putJSONA8 ( l_ar ) ; else if ( t . equals ( "F" ) ) return ab . putJSONA4f ( f_ar ) ; else if ( t . equals ( "D" ) ) return ab . putJSONA8d ( d_ar ) ; else if ( t . equals ( "Bo" ) ) return ab . putJSONAStr ( null ) ; else if ( t . equals ( "S" ) ) return ab . putJSONAStr ( s_ar ) ; else if ( t . equals ( "E" ) ) return ab . putJSONAStr ( e_ar ) ; else if ( t . equals ( "K" ) ) return ab . putJSONA ( k_ar ) ; else if ( t . equals ( "Iced" ) ) return ab . putJSONA ( iced_ar ) ; } else { if ( t . equals ( "B" ) ) return ab . putJSON1 ( ( byte ) i ) ; else if ( t . equals ( "I" ) ) return ab . putJSON4 ( i ) ; else if ( t . equals ( "L" ) ) return ab . putJSON8 ( l ) ; else if ( t . equals ( "F" ) ) return ab . putJSON4f ( f ) ; else if ( t . equals ( "D" ) ) return ab . putJSON8d ( d ) ; else if ( t . equals ( "Bo" ) ) return ab . putJSONStrUnquoted ( b ? "true" : "false" ) ; else if ( t . equals ( "S" ) ) return ab . putJSONName ( s ) ; else if ( t . equals ( "E" ) ) return ab . putJSONName ( e ) ; else if ( t . equals ( "K" ) ) return ab . putJSON ( k ) ; } throw H2O . fail ( "Unhandled type: " + t ) ; } | Write JSON for the wrapped value without putting it inside a JSON object . |
22,370 | private String pythonify ( String n ) { if ( n == null || name . toLowerCase ( ) . contains ( "confusion" ) ) return n ; StringBuilder sb = new StringBuilder ( ) ; String [ ] modified = n . split ( "[\\s_]+" ) ; for ( int i = 0 ; i < modified . length ; ++ i ) { if ( i != 0 ) sb . append ( "_" ) ; String s = modified [ i ] ; sb . append ( s . toLowerCase ( ) ) ; } String newString = sb . toString ( ) . replaceAll ( "[^\\w]" , "" ) ; return newString ; } | Turn a description such as Avg . Training MSE into a JSON - usable field name avg_training_mse |
22,371 | synchronized static public void registerAllSchemasIfNecessary ( Schema ... schemas ) { if ( schemas_registered ) return ; long startTime = System . currentTimeMillis ( ) ; for ( Schema schema : schemas ) { register ( schema ) ; } Log . info ( "Registered: " + schemas ( ) . size ( ) + " schemas in " + ( System . currentTimeMillis ( ) - startTime ) + "ms" ) ; schemas_registered = true ; } | Find all schemas using reflection and register them . |
22,372 | public static Class < ? extends Schema > getSchema ( String name ) { Class < ? extends Schema > clz = schemas . get ( name ) ; if ( clz == null ) throw new H2ONotFoundArgumentException ( "Failed to find schema for schema_name: " + name , "Failed to find schema for schema_name: " + name + "\n" + "Did you forget to add an entry into META-INF/services/water.api.Schema?" ) ; return clz ; } | Lookup schema by name . |
22,373 | public static Schema schema ( int version , Iced impl ) { if ( version == - 1 ) version = getLatestVersion ( ) ; return schema ( version , impl . getClass ( ) . getSimpleName ( ) ) ; } | For a given version and Iced object return an appropriate Schema instance if any . |
22,374 | public static Schema schema ( int version , Class < ? extends Iced > impl_class ) { if ( version == - 1 ) version = getLatestVersion ( ) ; return schema ( version , impl_class . getSimpleName ( ) ) ; } | For a given version and Iced class return an appropriate Schema instance if any . |
22,375 | protected final void writeblob ( String filename , byte [ ] blob ) throws IOException { ZipEntry archiveEntry = new ZipEntry ( targetdir + filename ) ; archiveEntry . setSize ( blob . length ) ; zos . putNextEntry ( archiveEntry ) ; zos . write ( blob ) ; zos . closeEntry ( ) ; } | Write a binary file to the MOJO archive . |
22,376 | protected final void writeln ( String s , boolean escapeNewlines ) { assert tmpfile != null : "No text file is currently being written" ; tmpfile . append ( escapeNewlines ? StringEscapeUtils . escapeNewlines ( s ) : s ) ; tmpfile . append ( '\n' ) ; } | Write a single line of text to a previously opened text file escape new line characters if enabled . |
22,377 | protected final void finishWritingTextFile ( ) throws IOException { assert tmpfile != null : "No text file is currently being written" ; writeblob ( tmpname , toBytes ( tmpfile ) ) ; tmpfile = null ; } | Finish writing a text file . |
22,378 | private void writeDomains ( ) throws IOException { int domIndex = 0 ; for ( String [ ] domain : model . scoringDomains ( ) ) { if ( domain == null ) continue ; startWritingTextFile ( String . format ( "domains/d%03d.txt" , domIndex ++ ) ) ; for ( String category : domain ) { writeln ( category . replaceAll ( "\n" , "\\n" ) ) ; } finishWritingTextFile ( ) ; } } | Create files containing domain definitions for each categorical column . |
22,379 | static String link ( Key key , String column , long row , String match ) { return "/2/Find?key=" + key + ( column == null ? "" : "&column=" + column ) + "&row=" + row + ( match == null ? "" : "&match=" + match ) ; } | Helper so InspectV2 can link to FindV2 |
22,380 | static RPC < TaskGetKey > start ( H2ONode target , Key key ) { RPC < TaskGetKey > old = TGKS . get ( key ) ; if ( old != null ) return old ; RPC < TaskGetKey > rpc = new RPC ( target , new TaskGetKey ( key ) , 1.0f ) ; if ( ( old = TGKS . putIfMatchUnlocked ( key , rpc , null ) ) != null ) return old ; rpc . setTaskNum ( ) . call ( ) ; return rpc ; } | Start an RPC to fetch a Value handling short - cutting dup - fetches |
22,381 | static void handleClientDisconnect ( H2ONode client ) { if ( client != H2O . SELF ) { if ( H2O . isFlatfileEnabled ( ) ) { H2O . removeNodeFromFlatfile ( client ) ; } H2O . removeClient ( client ) ; } } | This method checks whether the client is disconnected from this node due to some problem such as the client or network being unreachable .
22,382 | protected void validateHyperParams ( P params , Map < String , Object [ ] > hyperParams ) { List < SchemaMetadata . FieldMetadata > fsMeta = SchemaMetadata . getFieldMetadata ( params ) ; for ( Map . Entry < String , Object [ ] > hparam : hyperParams . entrySet ( ) ) { SchemaMetadata . FieldMetadata fieldMetadata = null ; for ( SchemaMetadata . FieldMetadata fm : fsMeta ) { if ( fm . name . equals ( hparam . getKey ( ) ) ) { fieldMetadata = fm ; break ; } } if ( fieldMetadata == null ) { throw new H2OIllegalArgumentException ( hparam . getKey ( ) , "grid" , "Unknown hyper parameter for grid search!" ) ; } if ( ! fieldMetadata . is_gridable ) { throw new H2OIllegalArgumentException ( hparam . getKey ( ) , "grid" , "Illegal hyper parameter for grid search! The parameter '" + fieldMetadata . name + " is not gridable!" ) ; } } } | Validate given hyper parameters with respect to type parameter P . |
22,383 | private Model buildModel ( final MP params , Grid < MP > grid , int paramsIdx , String protoModelKey ) { final long checksum = params . checksum ( ) ; Key < Model > key = grid . getModelKey ( checksum ) ; if ( key != null ) { if ( DKV . get ( key ) == null ) { Log . info ( "GridSearch.buildModel(): model with these parameters was built but removed, rebuilding; checksum: " + checksum ) ; } else { Log . info ( "GridSearch.buildModel(): model with these parameters already exists, skipping; checksum: " + checksum ) ; return key . get ( ) ; } } @ SuppressWarnings ( "unchecked" ) final Key < Model > [ ] modelKeys = KeySnapshot . globalSnapshot ( ) . filter ( new KeySnapshot . KVFilter ( ) { public boolean filter ( KeySnapshot . KeyInfo k ) { if ( ! Value . isSubclassOf ( k . _type , Model . class ) ) return false ; Model m = ( ( Model ) k . _key . get ( ) ) ; if ( ( m == null ) || ( m . _parms == null ) ) return false ; try { return m . _parms . checksum ( ) == checksum ; } catch ( H2OConcurrentModificationException e ) { Log . warn ( "GridSearch encountered concurrent modification while searching DKV" , e ) ; return false ; } catch ( final RuntimeException e ) { Throwable ex = e ; boolean concurrentModification = false ; while ( ex . getCause ( ) != null ) { ex = ex . getCause ( ) ; if ( ex instanceof H2OConcurrentModificationException ) { concurrentModification = true ; break ; } } if ( ! concurrentModification ) throw e ; Log . warn ( "GridSearch encountered concurrent modification while searching DKV" , e ) ; return false ; } } } ) . keys ( ) ; if ( modelKeys . length > 0 ) { grid . putModel ( checksum , modelKeys [ 0 ] ) ; return modelKeys [ 0 ] . get ( ) ; } Key < Model > result = Key . make ( protoModelKey + paramsIdx ) ; assert grid . getModel ( params ) == null ; Model m = ModelBuilder . trainModelNested ( _job , result , params , null ) ; grid . 
putModel ( checksum , result ) ; return m ; } | Build a model based on specified parameters and save it to resulting Grid object . |
22,384 | protected static Key < Grid > gridKeyName ( String modelName , Frame fr ) { if ( fr == null || fr . _key == null ) { throw new IllegalArgumentException ( "The frame being grid-searched over must have a Key" ) ; } return Key . make ( "Grid_" + modelName + "_" + fr . _key . toString ( ) + H2O . calcNextUniqueModelId ( "" ) ) ; } | Defines a key for a new Grid object holding results of grid search . |
22,385 | public static < MP extends Model . Parameters > Job < Grid > startGridSearch ( final Key < Grid > destKey , final MP params , final Map < String , Object [ ] > hyperParams ) { return startGridSearch ( destKey , params , hyperParams , new SimpleParametersBuilderFactory < MP > ( ) , new HyperSpaceSearchCriteria . CartesianSearchCriteria ( ) ) ; } | Start a new grid search job . |
22,386 | private int doArrive ( int adjust ) { final Phaser root = this . root ; for ( ; ; ) { long s = ( root == this ) ? state : reconcileState ( ) ; int phase = ( int ) ( s >>> PHASE_SHIFT ) ; if ( phase < 0 ) return phase ; int counts = ( int ) s ; int unarrived = ( counts == EMPTY ) ? 0 : ( counts & UNARRIVED_MASK ) ; if ( unarrived <= 0 ) throw new IllegalStateException ( badArrive ( s ) ) ; if ( UNSAFE . compareAndSwapLong ( this , stateOffset , s , s -= adjust ) ) { if ( unarrived == 1 ) { long n = s & PARTIES_MASK ; int nextUnarrived = ( int ) n >>> PARTIES_SHIFT ; if ( root == this ) { if ( onAdvance ( phase , nextUnarrived ) ) n |= TERMINATION_BIT ; else if ( nextUnarrived == 0 ) n |= EMPTY ; else n |= nextUnarrived ; int nextPhase = ( phase + 1 ) & MAX_PHASE ; n |= ( long ) nextPhase << PHASE_SHIFT ; UNSAFE . compareAndSwapLong ( this , stateOffset , s , n ) ; releaseWaiters ( phase ) ; } else if ( nextUnarrived == 0 ) { phase = parent . doArrive ( ONE_DEREGISTER ) ; UNSAFE . compareAndSwapLong ( this , stateOffset , s , s | EMPTY ) ; } else phase = parent . doArrive ( ONE_ARRIVAL ) ; } return phase ; } } } | Main implementation for methods arrive and arriveAndDeregister . Manually tuned to speed up and minimize race windows for the common case of just decrementing unarrived field . |
22,387 | private int doRegister ( int registrations ) { long adjust = ( ( long ) registrations << PARTIES_SHIFT ) | registrations ; final Phaser parent = this . parent ; int phase ; for ( ; ; ) { long s = ( parent == null ) ? state : reconcileState ( ) ; int counts = ( int ) s ; int parties = counts >>> PARTIES_SHIFT ; int unarrived = counts & UNARRIVED_MASK ; if ( registrations > MAX_PARTIES - parties ) throw new IllegalStateException ( badRegister ( s ) ) ; phase = ( int ) ( s >>> PHASE_SHIFT ) ; if ( phase < 0 ) break ; if ( counts != EMPTY ) { if ( parent == null || reconcileState ( ) == s ) { if ( unarrived == 0 ) root . internalAwaitAdvance ( phase , null ) ; else if ( UNSAFE . compareAndSwapLong ( this , stateOffset , s , s + adjust ) ) break ; } } else if ( parent == null ) { long next = ( ( long ) phase << PHASE_SHIFT ) | adjust ; if ( UNSAFE . compareAndSwapLong ( this , stateOffset , s , next ) ) break ; } else { synchronized ( this ) { if ( state == s ) { phase = parent . doRegister ( 1 ) ; if ( phase < 0 ) break ; while ( ! UNSAFE . compareAndSwapLong ( this , stateOffset , s , ( ( long ) phase << PHASE_SHIFT ) | adjust ) ) { s = state ; phase = ( int ) ( root . state >>> PHASE_SHIFT ) ; } break ; } } } } return phase ; } | Implementation of register bulkRegister |
22,388 | public int awaitAdvance ( int phase ) { final Phaser root = this . root ; long s = ( root == this ) ? state : reconcileState ( ) ; int p = ( int ) ( s >>> PHASE_SHIFT ) ; if ( phase < 0 ) return phase ; if ( p == phase ) return root . internalAwaitAdvance ( phase , null ) ; return p ; } | Awaits the phase of this phaser to advance from the given phase value returning immediately if the current phase is not equal to the given phase value or this phaser is terminated . |
22,389 | private String stateToString ( long s ) { return super . toString ( ) + "[phase = " + phaseOf ( s ) + " parties = " + partiesOf ( s ) + " arrived = " + arrivedOf ( s ) + "]" ; } | Implementation of toString and string - based error messages |
22,390 | private void releaseWaiters ( int phase ) { QNode q ; Thread t ; AtomicReference < QNode > head = ( phase & 1 ) == 0 ? evenQ : oddQ ; while ( ( q = head . get ( ) ) != null && q . phase != ( int ) ( root . state >>> PHASE_SHIFT ) ) { if ( head . compareAndSet ( q , q . next ) && ( t = q . thread ) != null ) { q . thread = null ; LockSupport . unpark ( t ) ; } } } | Removes and signals threads from queue for phase . |
22,391 | private int [ ] alignCategoricals ( String [ ] longerDomain , String [ ] shorterDomain ) { String [ ] sortedLongerDomain = Arrays . copyOf ( longerDomain , longerDomain . length ) ; Arrays . sort ( sortedLongerDomain ) ; int [ ] transformedIndices = MemoryManager . malloc4 ( shorterDomain . length ) ; for ( int i = 0 ; i < shorterDomain . length ; i ++ ) { transformedIndices [ i ] = Arrays . binarySearch ( sortedLongerDomain , shorterDomain [ i ] ) ; } return transformedIndices ; } | Produces a mapping array with indexes of the smaller pointing to the larger domain . |
22,392 | public SBPrintStream pj ( double s ) { if ( Double . isInfinite ( s ) ) { append ( "Double." ) . append ( s > 0 ? "POSITIVE_INFINITY" : "NEGATIVE_INFINITY" ) ; } else if ( Double . isNaN ( s ) ) { append ( "Double.NaN" ) ; } else { append ( s ) ; } return this ; } | Java specific append of double |
22,393 | public SBPrintStream pj ( float s ) { if ( Float . isInfinite ( s ) ) { append ( "Float." ) . append ( s > 0 ? "POSITIVE_INFINITY" : "NEGATIVE_INFINITY" ) ; } else if ( Float . isNaN ( s ) ) { append ( "Float.NaN" ) ; } else { append ( s ) . append ( 'f' ) ; } return this ; } | Java specific append of float |
22,394 | public final Date compiledOnDate ( ) { SimpleDateFormat dateFormat = new SimpleDateFormat ( DATE_FORMAT ) ; try { return dateFormat . parse ( compiledOn ( ) ) ; } catch ( ParseException e ) { return null ; } } | Returns compile date for this H2O version or null . |
22,395 | public static ParseSetup guessSetup ( byte [ ] bits ) { int lastNewline = bits . length - 1 ; while ( lastNewline > 0 && ! CsvParser . isEOL ( bits [ lastNewline ] ) ) lastNewline -- ; if ( lastNewline > 0 ) bits = Arrays . copyOf ( bits , lastNewline + 1 ) ; SVMLightParser p = new SVMLightParser ( new ParseSetup ( SVMLight_INFO , ParseSetup . GUESS_SEP , false , ParseSetup . GUESS_HEADER , ParseSetup . GUESS_COL_CNT , null , null , null , null , null ) , null ) ; SVMLightInspectParseWriter dout = new SVMLightInspectParseWriter ( ) ; p . parseChunk ( 0 , new ByteAryData ( bits , 0 ) , dout ) ; if ( dout . _ncols > 0 && dout . _nlines > 0 && dout . _nlines > dout . _invalidLines ) return new ParseSetup ( SVMLight_INFO , ParseSetup . GUESS_SEP , false , ParseSetup . NO_HEADER , dout . _ncols , null , dout . guessTypes ( ) , null , null , dout . _data , dout . removeErrors ( ) ) ; else throw new ParseDataset . H2OParseException ( "Could not parse file as an SVMLight file." ) ; } | Try to parse the bytes as svm light format return a ParseSetupHandler with type SVMLight if the input is in svm light format throw an exception otherwise . |
22,396 | private static void printArgs ( String [ ] arr ) { Log . info ( "" ) ; Log . info ( "----- printArgs -----" ) ; for ( int i = 0 ; i < arr . length ; i ++ ) { String s = arr [ i ] ; Log . info ( i ) ; if ( s == null ) { Log . info ( "null" ) ; } else { Log . info ( s ) ; } } Log . info ( "----------" ) ; } | Under unusual debugging circumstances it can be helpful to print out the command line arguments in this format . |
22,397 | private static void repairNullArgsAndWarnIfNecessary ( String [ ] args ) { boolean haveANullArg = false ; for ( String s : args ) { if ( s == null ) { haveANullArg = true ; break ; } } if ( haveANullArg ) { Log . warn ( "Found a null command-line argument; printing all command-line arguments out now" ) ; printArgs ( args ) ; } for ( int i = 0 ; i < args . length ; i ++ ) { String s = args [ i ] ; args [ i ] = ( s == null ) ? "" : s ; } } | This shouldn t be necessary but is . In one really weird Hadoop environment we saw an argument coming across from the driver as null . This shouldn t be possible but it happened . So repair it here by forcing a null to really be the empty string . |
22,398 | public void fill3 ( byte [ ] bits , AutoBuffer ab ) { int bitoff = ab . get2 ( ) ; int nbits = ab . get4 ( ) ; fill ( bits , ab . position ( ) , nbits , bitoff ) ; ab . skip ( bytes ( nbits ) ) ; } | Reload IcedBitSet from AutoBuffer |
22,399 | public static boolean isSupportedSchema ( Schema s ) { Schema . Type typ = s . getType ( ) ; switch ( typ ) { case BOOLEAN : case INT : case LONG : case FLOAT : case DOUBLE : case ENUM : case STRING : case NULL : case BYTES : return true ; case UNION : List < Schema > unionSchemas = s . getTypes ( ) ; if ( unionSchemas . size ( ) == 1 ) { return isSupportedSchema ( unionSchemas . get ( 0 ) ) ; } else if ( unionSchemas . size ( ) == 2 ) { Schema s1 = unionSchemas . get ( 0 ) ; Schema s2 = unionSchemas . get ( 1 ) ; return s1 . getType ( ) . equals ( Schema . Type . NULL ) && isSupportedSchema ( s2 ) || s2 . getType ( ) . equals ( Schema . Type . NULL ) && isSupportedSchema ( s1 ) ; } default : return false ; } } | Return true if the given schema can be transformed into h2o type . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.