idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
22,200 | protected Parser parser ( Key jobKey ) { ParserProvider pp = ParserService . INSTANCE . getByInfo ( _parse_type ) ; if ( pp != null ) { return pp . createParser ( this , jobKey ) ; } throw new H2OIllegalArgumentException ( "Unknown file type. Parse cannot be completed." , "Attempted to invoke a parser for ParseType:" + _parse_type + ", which doesn't exist." ) ; } | This is a single entry - point to create a parser . |
22,201 | public final ParseSetup getFinalSetup ( Key [ ] inputKeys , ParseSetup demandedSetup ) { ParserProvider pp = ParserService . INSTANCE . getByInfo ( _parse_type ) ; if ( pp != null ) { ParseSetup ps = pp . createParserSetup ( inputKeys , demandedSetup ) ; if ( demandedSetup . _decrypt_tool != null ) ps . _decrypt_tool = demandedSetup . _decrypt_tool ; ps . setSkippedColumns ( demandedSetup . getSkippedColumns ( ) ) ; ps . setParseColumnIndices ( demandedSetup . getNumberColumns ( ) , demandedSetup . getSkippedColumns ( ) ) ; return ps ; } throw new H2OIllegalArgumentException ( "Unknown parser configuration! Configuration=" + this ) ; } | Create a final parser - specific setup for this configuration . |
22,202 | HashSet < String > checkDupColumnNames ( ) { HashSet < String > conflictingNames = new HashSet < > ( ) ; if ( null == _column_names ) return conflictingNames ; HashSet < String > uniqueNames = new HashSet < > ( ) ; for ( String n : _column_names ) if ( ! uniqueNames . add ( n ) ) conflictingNames . add ( n ) ; return conflictingNames ; } | Set of duplicated column names |
22,203 | public static ParseSetup guessSetup ( Key [ ] fkeys , boolean singleQuote , int checkHeader ) { return guessSetup ( fkeys , new ParseSetup ( GUESS_INFO , GUESS_SEP , singleQuote , checkHeader , GUESS_COL_CNT , null , new ParseWriter . ParseErr [ 0 ] ) ) ; } | Used by test harnesses for simple parsing of test data . Presumes auto - detection for file and separator types . |
22,204 | public static ParseSetup guessSetup ( Key [ ] fkeys , ParseSetup userSetup ) { GuessSetupTsk t = new GuessSetupTsk ( userSetup ) ; t . doAll ( fkeys ) . getResult ( ) ; Iced ice = DKV . getGet ( fkeys [ 0 ] ) ; if ( ice instanceof Frame && ( ( Frame ) ice ) . vec ( 0 ) instanceof UploadFileVec ) { t . _gblSetup . _chunk_size = FileVec . DFLT_CHUNK_SIZE ; } else { t . _gblSetup . _chunk_size = FileVec . calcOptimalChunkSize ( t . _totalParseSize , t . _gblSetup . _number_columns , t . _maxLineLength , Runtime . getRuntime ( ) . availableProcessors ( ) , H2O . getCloudSize ( ) , false , true ) ; } return t . _gblSetup ; } | Discover the parse setup needed to correctly parse all files . This takes a ParseSetup as guidance . Each file is examined individually and then results merged . If a conflict exists between any results all files are re - examined using the best guess from the first examination . |
22,205 | private static int [ ] createMapping ( final MojoModel model , final String [ ] reference , final String modelName ) { int [ ] mapping = new int [ reference . length ] ; if ( model . _names . length != reference . length ) { throw new IllegalStateException ( String . format ( "Model '%s' is expected to have has non-standard number of columns." , modelName ) ) ; } boolean foundDifference = false ; for ( int i = 0 ; i < reference . length ; i ++ ) { final int pos = findColumnIndex ( model . _names , reference [ i ] ) ; if ( pos == - 1 ) { throw new IllegalStateException ( String . format ( "Model '%s' does not have input column '%s'" , modelName , reference [ i ] ) ) ; } if ( pos != i ) foundDifference = true ; mapping [ i ] = pos ; } if ( foundDifference ) { return mapping ; } else return null ; } | Creates an array of integers with mapping of referential column name space into model - specific column name space . |
22,206 | public long size ( ) { long res = 0 ; if ( _network != null ) res += _network . length ; if ( _modelparams != null ) res += _modelparams . length ; return res ; } | momenta are not counted here but they are needed for model building |
22,207 | private void javaToNative ( byte [ ] network , byte [ ] parameters ) { long now = System . currentTimeMillis ( ) ; if ( _backend != null && ( network == null || Arrays . equals ( network , _network ) ) && ( parameters == null || Arrays . equals ( parameters , _modelparams ) ) ) { Log . warn ( "No need to move the state from Java to native." ) ; return ; } if ( _backend == null ) { _backend = createDeepWaterBackend ( get_params ( ) . _backend . toString ( ) ) ; if ( _backend == null ) throw new IllegalArgumentException ( "No backend found. Cannot build a Deep Water model." ) ; } if ( network == null ) network = _network ; if ( parameters == null ) parameters = _modelparams ; if ( network == null || parameters == null ) return ; Log . info ( "Java state -> native backend." ) ; initModel ( network , parameters ) ; long time = System . currentTimeMillis ( ) - now ; Log . info ( "Took: " + PrettyPrint . msecs ( time , true ) ) ; } | Internal helper to create a native backend and fill its state |
22,208 | TwoDimTable createSummaryTable ( ) { TwoDimTable table = new TwoDimTable ( "Status of Deep Learning Model" , ( get_params ( ) . _network == null ? ( "MLP: " + Arrays . toString ( get_params ( ) . _hidden ) ) : get_params ( ) . _network . toString ( ) ) + ", " + PrettyPrint . bytes ( size ( ) ) + ", " + ( ! get_params ( ) . _autoencoder ? ( "predicting " + get_params ( ) . _response_column + ", " ) : "" ) + ( get_params ( ) . _autoencoder ? "auto-encoder" : _classification ? ( _classes + "-class classification" ) : "regression" ) + ", " + String . format ( "%,d" , get_processed_global ( ) ) + " training samples, " + "mini-batch size " + String . format ( "%,d" , get_params ( ) . _mini_batch_size ) , new String [ 1 ] , new String [ ] { "Input Neurons" , "Rate" , "Momentum" } , new String [ ] { "int" , "double" , "double" } , new String [ ] { "%d" , "%5f" , "%5f" } , "" ) ; table . set ( 0 , 0 , _dataInfo != null ? _dataInfo . fullN ( ) : _width * _height * _channels ) ; table . set ( 0 , 1 , get_params ( ) . learningRate ( get_processed_global ( ) ) ) ; table . set ( 0 , 2 , get_params ( ) . momentum ( get_processed_global ( ) ) ) ; summaryTable = table ; return summaryTable ; } | Create a summary table |
22,209 | private final void print ( Object [ ] kvs ) { for ( int i = 0 ; i < len ( kvs ) ; i ++ ) { Object K = key ( kvs , i ) ; if ( K != null ) { String KS = ( K == TOMBSTONE ) ? "XXX" : K . toString ( ) ; Object V = val ( kvs , i ) ; Object U = Prime . unbox ( V ) ; String p = ( V == U ) ? "" : "prime_" ; String US = ( U == TOMBSTONE ) ? "tombstone" : U . toString ( ) ; System . out . println ( "" + i + " (" + KS + "," + p + US + ")" ) ; } } Object [ ] newkvs = chm ( kvs ) . _newkvs ; if ( newkvs != null ) { System . out . println ( "----" ) ; print ( newkvs ) ; } } | print the entire state of the table |
22,210 | private final void print2 ( Object [ ] kvs ) { for ( int i = 0 ; i < len ( kvs ) ; i ++ ) { Object key = key ( kvs , i ) ; Object val = val ( kvs , i ) ; Object U = Prime . unbox ( val ) ; if ( key != null && key != TOMBSTONE && val != null && U != TOMBSTONE ) { String p = ( val == U ) ? "" : "prime_" ; System . out . println ( "" + i + " (" + key + "," + p + val + ")" ) ; } } Object [ ] newkvs = chm ( kvs ) . _newkvs ; if ( newkvs != null ) { System . out . println ( "----" ) ; print2 ( newkvs ) ; } } | print only the live values broken down by the table they are in |
22,211 | private int setCompletion ( int completion ) { for ( int s ; ; ) { if ( ( s = status ) < 0 ) return s ; if ( U . compareAndSwapInt ( this , STATUS , s , s | completion ) ) { if ( ( s >>> 16 ) != 0 ) synchronized ( this ) { notifyAll ( ) ; } return completion ; } } } | Marks completion and wakes up threads waiting to join this task . |
22,212 | final int doExec ( ) { int s ; boolean completed ; if ( ( s = status ) >= 0 ) { try { completed = exec ( ) ; } catch ( Throwable rex ) { return setExceptionalCompletion ( rex ) ; } if ( completed ) s = setCompletion ( NORMAL ) ; } return s ; } | Primary execution method for stolen tasks . Unless done calls exec and records status if completed but doesn t wait for completion otherwise . |
22,213 | final boolean trySetSignal ( ) { int s = status ; return s >= 0 && U . compareAndSwapInt ( this , STATUS , s , s | SIGNAL ) ; } | Tries to set SIGNAL status unless already completed . Used by ForkJoinPool . Other variants are directly incorporated into externalAwaitDone etc . |
22,214 | private int doJoin ( ) { int s ; Thread t ; ForkJoinWorkerThread wt ; ForkJoinPool . WorkQueue w ; if ( ( s = status ) >= 0 ) { if ( ( ( t = Thread . currentThread ( ) ) instanceof ForkJoinWorkerThread ) ) { if ( ! ( w = ( wt = ( ForkJoinWorkerThread ) t ) . workQueue ) . tryUnpush ( this ) || ( s = doExec ( ) ) >= 0 ) s = wt . pool . awaitJoin ( w , this ) ; } else s = externalAwaitDone ( ) ; } return s ; } | Implementation for join get quietlyJoin . Directly handles only cases of already - completed external wait and unfork + exec . Others are relayed to ForkJoinPool . awaitJoin . |
22,215 | private int doInvoke ( ) { int s ; Thread t ; ForkJoinWorkerThread wt ; if ( ( s = doExec ( ) ) >= 0 ) { if ( ( t = Thread . currentThread ( ) ) instanceof ForkJoinWorkerThread ) s = ( wt = ( ForkJoinWorkerThread ) t ) . pool . awaitJoin ( wt . workQueue , this ) ; else s = externalAwaitDone ( ) ; } return s ; } | Implementation for invoke quietlyInvoke . |
22,216 | final int recordExceptionalCompletion ( Throwable ex ) { int s ; if ( ( s = status ) >= 0 ) { int h = System . identityHashCode ( this ) ; final ReentrantLock lock = exceptionTableLock ; lock . lock ( ) ; try { expungeStaleExceptions ( ) ; ExceptionNode [ ] t = exceptionTable ; int i = h & ( t . length - 1 ) ; for ( ExceptionNode e = t [ i ] ; ; e = e . next ) { if ( e == null ) { t [ i ] = new ExceptionNode ( this , ex , t [ i ] ) ; break ; } if ( e . get ( ) == this ) break ; } } finally { lock . unlock ( ) ; } s = setCompletion ( EXCEPTIONAL ) ; } return s ; } | Records exception and sets status . |
22,217 | private int setExceptionalCompletion ( Throwable ex ) { int s = recordExceptionalCompletion ( ex ) ; if ( ( s & DONE_MASK ) == EXCEPTIONAL ) internalPropagateException ( ex ) ; return s ; } | Records exception and possibly propagates |
22,218 | static final void cancelIgnoringExceptions ( ForkJoinTask < ? > t ) { if ( t != null && t . status >= 0 ) { try { t . cancel ( false ) ; } catch ( Throwable ignore ) { } } } | Cancels ignoring any exceptions thrown by cancel . Used during worker and pool shutdown . Cancel is spec ed not to throw any exceptions but if it does anyway we have no recourse during shutdown so guard against this case . |
22,219 | private void clearExceptionalCompletion ( ) { int h = System . identityHashCode ( this ) ; final ReentrantLock lock = exceptionTableLock ; lock . lock ( ) ; try { ExceptionNode [ ] t = exceptionTable ; int i = h & ( t . length - 1 ) ; ExceptionNode e = t [ i ] ; ExceptionNode pred = null ; while ( e != null ) { ExceptionNode next = e . next ; if ( e . get ( ) == this ) { if ( pred == null ) t [ i ] = next ; else pred . next = next ; break ; } pred = e ; e = next ; } expungeStaleExceptions ( ) ; status = 0 ; } finally { lock . unlock ( ) ; } } | Removes exception node and clears status |
22,220 | private static void expungeStaleExceptions ( ) { for ( Object x ; ( x = exceptionTableRefQueue . poll ( ) ) != null ; ) { if ( x instanceof ExceptionNode ) { ForkJoinTask < ? > key = ( ( ExceptionNode ) x ) . get ( ) ; ExceptionNode [ ] t = exceptionTable ; int i = System . identityHashCode ( key ) & ( t . length - 1 ) ; ExceptionNode e = t [ i ] ; ExceptionNode pred = null ; while ( e != null ) { ExceptionNode next = e . next ; if ( e == x ) { if ( pred == null ) t [ i ] = next ; else pred . next = next ; break ; } pred = e ; e = next ; } } } } | Poll stale refs and remove them . Call only while holding lock . |
22,221 | private void reportException ( int s ) { Throwable ex = ( ( s == CANCELLED ) ? new CancellationException ( ) : ( s == EXCEPTIONAL ) ? getThrowableException ( ) : null ) ; if ( ex != null ) U . throwException ( ex ) ; } | Throws exception if any associated with the given status . |
22,222 | public static ForkJoinPool getPool ( ) { Thread t = Thread . currentThread ( ) ; return ( t instanceof ForkJoinWorkerThread ) ? ( ( ForkJoinWorkerThread ) t ) . pool : null ; } | Returns the pool hosting the current task execution or null if this task is executing outside of any ForkJoinPool . |
22,223 | public final short setForkJoinTaskTag ( short tag ) { for ( int s ; ; ) { if ( U . compareAndSwapInt ( this , STATUS , s = status , ( s & ~ SMASK ) | ( tag & SMASK ) ) ) return ( short ) s ; } } | Atomically sets the tag value for this task . |
22,224 | protected final boolean isNA2 ( int idx ) { if ( isString ( ) ) return _is [ idx ] == - 1 ; if ( isUUID ( ) || _ds == null ) return _missing != null && _missing . get ( idx ) ; return Double . isNaN ( _ds [ idx ] ) ; } | what about sparse reps? |
22,225 | private void append2slowstr ( ) { if ( _xs != null ) { _xs = null ; _ms = null ; alloc_str_indices ( _sparseLen ) ; Arrays . fill ( _is , - 1 ) ; } if ( _is != null && _is . length > 0 ) { if ( _id == null ) { int nzs = 0 ; for ( int i : _is ) if ( i != - 1 ) ++ nzs ; if ( ( nzs + 1 ) * _sparseRatio < _len ) set_sparse ( nzs , Compress . NA ) ; } else { if ( ( _sparseRatio * ( _sparseLen ) >> 2 ) > _len ) cancel_sparse ( ) ; else _id = MemoryManager . arrayCopyOf ( _id , _sparseLen << 1 ) ; } _is = MemoryManager . arrayCopyOf ( _is , _sparseLen << 1 ) ; for ( int i = _sparseLen ; i < _is . length ; i ++ ) _is [ i ] = - 1 ; } else { _is = MemoryManager . malloc4 ( 4 ) ; for ( int i = 0 ; i < _is . length ; i ++ ) _is [ i ] = - 1 ; if ( sparseZero ( ) || sparseNA ( ) ) alloc_indices ( 4 ) ; } assert _sparseLen == 0 || _is . length > _sparseLen : "_ls.length = " + _is . length + ", _len = " + _sparseLen ; } | Slow - path append string |
22,226 | public Chunk compress ( ) { Chunk res = compress2 ( ) ; byte type = type ( ) ; assert _vec == null || type == _vec . _type || type == Vec . T_BAD || ( type == Vec . T_NUM && _vec . _type == Vec . T_CAT ) || ( type == Vec . T_NUM && _vec . _type == Vec . T_TIME && ! res . hasFloat ( ) ) : "NewChunk has type " + Vec . TYPE_STR [ type ] + ", but the Vec is of type " + _vec . get_type_str ( ) ; assert _len == res . _len : "NewChunk has length " + _len + ", compressed Chunk has " + res . _len ; _id = null ; _xs = null ; _ds = null ; _ms = null ; _is = null ; _ss = null ; return res ; } | Return the data so compressed . |
22,227 | public static String escapeNewlines ( String str ) { final int len = str . length ( ) ; StringWriter out = new StringWriter ( len * 2 ) ; for ( int i = 0 ; i < len ; i ++ ) { char c = str . charAt ( i ) ; switch ( c ) { case '\\' : out . write ( '\\' ) ; out . write ( '\\' ) ; break ; case '\n' : out . write ( '\\' ) ; out . write ( 'n' ) ; break ; default : out . write ( c ) ; } } return out . toString ( ) ; } | Escapes new line characters of a given string . It also escapes the forward slash characters . |
22,228 | public Val exec ( AstRoot ast , AstFunction scope ) { sanity_check_refs ( null ) ; Env env = new Env ( this ) ; env . _scope = scope ; Val val = ast . exec ( env ) ; assert env . sp ( ) == 0 ; sanity_check_refs ( val ) ; return val ; } | Execute an AstRoot in the current Session with much assertion - checking |
22,229 | public RuntimeException endQuietly ( Throwable ex ) { try { GLOBALS . clear ( ) ; Futures fs = new Futures ( ) ; for ( Frame fr : FRAMES . values ( ) ) { for ( Key < Vec > vec : fr . keys ( ) ) { Integer I = REFCNTS . get ( vec ) ; int i = ( I == null ? 0 : I ) - 1 ; if ( i > 0 ) REFCNTS . put ( vec , i ) ; else { REFCNTS . remove ( vec ) ; vec . remove ( fs ) ; } } DKV . remove ( fr . _key , fs ) ; } fs . blockForPending ( ) ; FRAMES . clear ( ) ; REFCNTS . clear ( ) ; } catch ( Exception ex2 ) { Log . warn ( "Exception " + ex2 + " suppressed while cleaning up Rapids Session after already throwing " + ex ) ; } return ex instanceof RuntimeException ? ( RuntimeException ) ex : new RuntimeException ( ex ) ; } | The Rapids call threw an exception . Best - effort cleanup no more exceptions |
22,230 | private int _addRefCnt ( Key < Vec > vec , int i ) { return _putRefCnt ( vec , _getRefCnt ( vec ) + i ) ; } | Bump internal count not counting globals |
22,231 | private int addRefCnt ( Key < Vec > vec , int i ) { return _addRefCnt ( vec , i ) + ( GLOBALS . contains ( vec ) ? 1 : 0 ) ; } | RefCnt + i this Vec ; Global Refs can be alive with zero internal counts |
22,232 | public void exec ( Frame fr , Random rng ) { if ( p > 0 ) new InsertNAs ( p , rng ) . doAll ( fr ) ; } | Execute this post - processing step . |
22,233 | private static InetAddress getInetAddress ( String ip ) { if ( ip == null ) return null ; InetAddress addr = null ; try { addr = InetAddress . getByName ( ip ) ; } catch ( UnknownHostException e ) { Log . err ( e ) ; H2O . exit ( - 1 ) ; } return addr ; } | Get address for given IP . |
22,234 | protected Frame predictScoreImpl ( Frame fr , Frame adaptFrm , String destination_key , Job j , boolean computeMetrics , CFuncRef customMetricFunc ) { Frame levelOneFrame = new Frame ( Key . < Frame > make ( "preds_levelone_" + this . _key . toString ( ) + fr . _key ) ) ; for ( Key < Model > baseKey : this . _parms . _base_models ) { Model base = baseKey . get ( ) ; Frame basePreds = base . score ( fr , "preds_base_" + this . _key . toString ( ) + fr . _key , j , false ) ; StackedEnsemble . addModelPredictionsToLevelOneFrame ( base , basePreds , levelOneFrame ) ; DKV . remove ( basePreds . _key ) ; Frame . deleteTempFrameAndItsNonSharedVecs ( basePreds , levelOneFrame ) ; } levelOneFrame . add ( this . responseColumn , adaptFrm . vec ( this . responseColumn ) ) ; Log . info ( "Finished creating \"level one\" frame for scoring: " + levelOneFrame . toString ( ) ) ; Model metalearner = this . _output . _metalearner ; Frame predictFr = metalearner . score ( levelOneFrame , destination_key , j , computeMetrics , CFuncRef . from ( _parms . _custom_metric_func ) ) ; if ( computeMetrics ) { Key < ModelMetrics > [ ] mms = metalearner . _output . getModelMetrics ( ) ; ModelMetrics lastComputedMetric = mms [ mms . length - 1 ] . get ( ) ; ModelMetrics mmStackedEnsemble = lastComputedMetric . deepCloneWithDifferentModelAndFrame ( this , fr ) ; this . addModelMetrics ( mmStackedEnsemble ) ; } Frame . deleteTempFrameAndItsNonSharedVecs ( levelOneFrame , adaptFrm ) ; return predictFr ; } | For StackedEnsemble we call score on all the base_models and then combine the results with the metalearner to create the final predictions frame . |
22,235 | private static void write2frame ( GenericRecord gr , String [ ] columnNames , Schema . Field [ ] inSchema , byte [ ] columnTypes , ParseWriter dout ) { assert inSchema . length == columnTypes . length : "AVRO field flatenized schema has to match to parser setup" ; BufferedString bs = new BufferedString ( ) ; for ( int cIdx = 0 ; cIdx < columnNames . length ; cIdx ++ ) { int inputFieldIdx = inSchema [ cIdx ] . pos ( ) ; Schema . Type inputType = toPrimitiveType ( inSchema [ cIdx ] . schema ( ) ) ; byte targetType = columnTypes [ cIdx ] ; Object value = gr . get ( inputFieldIdx ) ; if ( value == null ) { dout . addInvalidCol ( cIdx ) ; } else { switch ( inputType ) { case BOOLEAN : dout . addNumCol ( cIdx , ( ( Boolean ) value ) ? 1 : 0 ) ; break ; case INT : dout . addNumCol ( cIdx , ( ( Integer ) value ) , 0 ) ; break ; case LONG : dout . addNumCol ( cIdx , ( ( Long ) value ) , 0 ) ; break ; case FLOAT : dout . addNumCol ( cIdx , ( Float ) value ) ; break ; case DOUBLE : dout . addNumCol ( cIdx , ( Double ) value ) ; break ; case ENUM : GenericData . EnumSymbol es = ( GenericData . EnumSymbol ) value ; dout . addNumCol ( cIdx , es . getSchema ( ) . getEnumOrdinal ( es . toString ( ) ) ) ; break ; case BYTES : dout . addStrCol ( cIdx , bs . set ( ( ( ByteBuffer ) value ) . array ( ) ) ) ; break ; case STRING : dout . addStrCol ( cIdx , bs . set ( ( ( Utf8 ) value ) . getBytes ( ) ) ) ; break ; case NULL : dout . addInvalidCol ( cIdx ) ; break ; } } } } | The main method transforming Avro record into a row in H2O frame . |
22,236 | @ SuppressWarnings ( "unused" ) public water . automl . api . schemas3 . LeaderboardsV99 list ( int version , water . automl . api . schemas3 . LeaderboardsV99 s ) { Leaderboards m = s . createAndFillImpl ( ) ; m . leaderboards = Leaderboards . fetchAll ( ) ; return s . fillFromImpl ( m ) ; } | Return all the Leaderboards . |
22,237 | public float [ ] train ( BackendModel m , float [ ] data , float [ ] label ) { ( ( DeepwaterCaffeModel ) m ) . train ( data , label ) ; return null ; } | given a mini - batch worth of data and labels train |
22,238 | public final void print ( ) { System . out . println ( "=========" ) ; print_impl ( - 99 , NO_KEY , _val_1 ) ; _chm . print ( ) ; System . out . println ( "=========" ) ; } | Verbose printout of table internals useful for debugging . |
22,239 | public long [ ] keySetLong ( ) { long [ ] dom = new long [ size ( ) ] ; IteratorLong i = ( IteratorLong ) keySet ( ) . iterator ( ) ; int j = 0 ; while ( j < dom . length && i . hasNext ( ) ) dom [ j ++ ] = i . nextLong ( ) ; return dom ; } | Keys as a long array . Array may be zero - padded if keys are concurrently deleted . |
22,240 | public void set ( final int row , final int col , final Object o ) { if ( o == null ) cellValues [ row ] [ col ] = new IcedWrapper ( null ) ; else if ( o instanceof Double && Double . isNaN ( ( double ) o ) ) cellValues [ row ] [ col ] = new IcedWrapper ( Double . NaN ) ; else if ( o instanceof int [ ] ) cellValues [ row ] [ col ] = new IcedWrapper ( Arrays . toString ( ( int [ ] ) o ) ) ; else if ( o instanceof long [ ] ) cellValues [ row ] [ col ] = new IcedWrapper ( Arrays . toString ( ( long [ ] ) o ) ) ; else if ( o instanceof float [ ] ) cellValues [ row ] [ col ] = new IcedWrapper ( Arrays . toString ( ( float [ ] ) o ) ) ; else if ( o instanceof double [ ] ) cellValues [ row ] [ col ] = new IcedWrapper ( Arrays . toString ( ( double [ ] ) o ) ) ; else if ( colTypes [ col ] == "string" ) cellValues [ row ] [ col ] = new IcedWrapper ( o . toString ( ) ) ; else cellValues [ row ] [ col ] = new IcedWrapper ( o ) ; } | Setter for table cells |
22,241 | static void parseArguments ( String [ ] args ) { for ( AbstractH2OExtension e : extManager . getCoreExtensions ( ) ) { args = e . parseArguments ( args ) ; } parseH2OArgumentsTo ( args , ARGS ) ; } | Dead stupid argument parser . |
22,242 | public static void notifyAboutCloudSize ( InetAddress ip , int port , InetAddress leaderIp , int leaderPort , int size ) { if ( ARGS . notify_local != null && ! ARGS . notify_local . trim ( ) . isEmpty ( ) ) { final File notifyFile = new File ( ARGS . notify_local ) ; final File parentDir = notifyFile . getParentFile ( ) ; if ( parentDir != null && ! parentDir . isDirectory ( ) ) { if ( ! parentDir . mkdirs ( ) ) { Log . err ( "Cannot make parent dir for notify file." ) ; H2O . exit ( - 1 ) ; } } try ( BufferedWriter output = new BufferedWriter ( new FileWriter ( notifyFile ) ) ) { output . write ( SELF_ADDRESS . getHostAddress ( ) ) ; output . write ( ':' ) ; output . write ( Integer . toString ( API_PORT ) ) ; output . flush ( ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; } } if ( embeddedH2OConfig == null ) { return ; } embeddedH2OConfig . notifyAboutCloudSize ( ip , port , leaderIp , leaderPort , size ) ; } | Tell the embedding software that this H2O instance belongs to a cloud of a certain size . This may be non - blocking . |
22,243 | public static void exit ( int status ) { if ( embeddedH2OConfig != null ) embeddedH2OConfig . exit ( status ) ; Log . flushStdout ( ) ; System . exit ( status ) ; } | Notify embedding software instance H2O wants to exit . Shuts down a single Node . |
22,244 | public static void shutdown ( int status ) { if ( status == 0 ) H2O . orderlyShutdown ( ) ; UDPRebooted . T . error . send ( H2O . SELF ) ; H2O . exit ( status ) ; } | Cluster shutdown itself by sending a shutdown UDP packet . |
22,245 | public static long getIdleTimeMillis ( ) { long latestEndTimeMillis = - 1 ; if ( activeRapidsExecs . get ( ) > 0 ) { updateNotIdle ( ) ; } else { Job [ ] jobs = Job . jobs ( ) ; for ( int i = jobs . length - 1 ; i >= 0 ; i -- ) { Job j = jobs [ i ] ; if ( j . isRunning ( ) ) { updateNotIdle ( ) ; break ; } if ( j . end_time ( ) > latestEndTimeMillis ) { latestEndTimeMillis = j . end_time ( ) ; } } } long latestTimeMillis = Math . max ( latestEndTimeMillis , lastTimeSomethingHappenedMillis ) ; long now = System . currentTimeMillis ( ) ; long deltaMillis = now - latestTimeMillis ; if ( deltaMillis < 0 ) { deltaMillis = 0 ; } return deltaMillis ; } | Get the number of milliseconds the H2O cluster has been idle . |
22,246 | public static String getSysProperty ( String name , String def ) { return System . getProperty ( H2O . OptArgs . SYSTEM_PROP_PREFIX + name , def ) ; } | Retrieves a value of an H2O system property |
22,247 | public static String technote ( int [ ] numbers , String message ) { StringBuilder sb = new StringBuilder ( ) . append ( message ) . append ( "\n" ) . append ( "\n" ) . append ( "For more information visit:\n" ) ; for ( int number : numbers ) { sb . append ( " http://jira.h2o.ai/browse/TN-" ) . append ( Integer . toString ( number ) ) . append ( "\n" ) ; } return sb . toString ( ) ; } | Return an error message with an accompanying list of URLs to help the user get more detailed information . |
22,248 | public static < T extends H2OCountedCompleter > T submitTask ( T task ) { int priority = task . priority ( ) ; if ( priority < LOW_PRIORITY_API_WORK ) LOW_PRIORITY_API_WORK_CLASS = task . getClass ( ) . toString ( ) ; assert MIN_PRIORITY <= priority && priority <= MAX_PRIORITY : "priority " + priority + " is out of range, expected range is < " + MIN_PRIORITY + "," + MAX_PRIORITY + ">" ; if ( FJPS [ priority ] == null ) synchronized ( H2O . class ) { if ( FJPS [ priority ] == null ) FJPS [ priority ] = new PrioritizedForkJoinPool ( priority , - 1 ) ; } FJPS [ priority ] . submit ( task ) ; return task ; } | Submit to the correct priority queue |
22,249 | static < T extends RemoteRunnable > T runOnH2ONode ( H2ONode node , T runnable ) { if ( node == H2O . SELF ) { runnable . run ( ) ; return runnable ; } else { RunnableWrapperTask < T > task = new RunnableWrapperTask < > ( runnable ) ; try { return new RPC < > ( node , task ) . call ( ) . get ( ) . _runnable ; } catch ( DistributedException e ) { Log . trace ( "Exception in calling runnable on a remote node" , e ) ; Throwable cause = e . getCause ( ) ; throw cause instanceof RuntimeException ? ( RuntimeException ) cause : e ; } } } | package - private for unit tests |
22,250 | public static String DEFAULT_FLOW_DIR ( ) { String flow_dir = null ; try { if ( ARGS . ga_hadoop_ver != null ) { PersistManager pm = getPM ( ) ; if ( pm != null ) { String s = pm . getHdfsHomeDirectory ( ) ; if ( pm . exists ( s ) ) { flow_dir = s ; } } if ( flow_dir != null ) { flow_dir = flow_dir + "/h2oflows" ; } } else { flow_dir = System . getProperty ( "user.home" ) + File . separator + "h2oflows" ; } } catch ( Exception ignore ) { } return flow_dir ; } | Place to store flows |
22,251 | private static void printAndLogVersion ( String [ ] arguments ) { Log . init ( ARGS . log_level , ARGS . quiet ) ; Log . info ( "----- H2O started " + ( ARGS . client ? "(client)" : "" ) + " -----" ) ; Log . info ( "Build git branch: " + ABV . branchName ( ) ) ; Log . info ( "Build git hash: " + ABV . lastCommitHash ( ) ) ; Log . info ( "Build git describe: " + ABV . describe ( ) ) ; Log . info ( "Build project version: " + ABV . projectVersion ( ) ) ; Log . info ( "Build age: " + PrettyPrint . toAge ( ABV . compiledOnDate ( ) , new Date ( ) ) ) ; Log . info ( "Built by: '" + ABV . compiledBy ( ) + "'" ) ; Log . info ( "Built on: '" + ABV . compiledOn ( ) + "'" ) ; if ( ABV . isTooOld ( ) ) { Log . warn ( "\n*** Your H2O version is too old! Please download the latest version from http://h2o.ai/download/ ***" ) ; Log . warn ( "" ) ; } Log . info ( "Found H2O Core extensions: " + extManager . getCoreExtensions ( ) ) ; Log . info ( "Processed H2O arguments: " , Arrays . toString ( arguments ) ) ; Runtime runtime = Runtime . getRuntime ( ) ; Log . info ( "Java availableProcessors: " + runtime . availableProcessors ( ) ) ; Log . info ( "Java heap totalMemory: " + PrettyPrint . bytes ( runtime . totalMemory ( ) ) ) ; Log . info ( "Java heap maxMemory: " + PrettyPrint . bytes ( runtime . maxMemory ( ) ) ) ; Log . info ( "Java version: Java " + System . getProperty ( "java.version" ) + " (from " + System . getProperty ( "java.vendor" ) + ")" ) ; List < String > launchStrings = ManagementFactory . getRuntimeMXBean ( ) . getInputArguments ( ) ; Log . info ( "JVM launch parameters: " + launchStrings ) ; Log . info ( "OS version: " + System . getProperty ( "os.name" ) + " " + System . getProperty ( "os.version" ) + " (" + System . getProperty ( "os.arch" ) + ")" ) ; long totalMemory = OSUtils . getTotalPhysicalMemory ( ) ; Log . info ( "Machine physical memory: " + ( totalMemory == - 1 ? "NA" : PrettyPrint . bytes ( totalMemory ) ) ) ; } | If logging has not been setup yet then Log . info will only print to stdout . This allows for early processing of the - version option without unpacking the jar file and other startup stuff . |
22,252 | private static void startLocalNode ( ) { NetworkInit . initializeNetworkSockets ( ) ; if ( ! ARGS . client && H2O . isFlatfileEnabled ( ) && ! H2O . isNodeInFlatfile ( SELF ) ) { Log . warn ( "Flatfile configuration does not include self: " + SELF + ", but contains " + H2O . getFlatfile ( ) ) ; H2O . addNodeToFlatfile ( SELF ) ; } Log . info ( "H2O cloud name: '" + ARGS . name + "' on " + SELF + ( H2O . isFlatfileEnabled ( ) ? ( ", discovery address " + CLOUD_MULTICAST_GROUP + ":" + CLOUD_MULTICAST_PORT ) : ", static configuration based on -flatfile " + ARGS . flatfile ) ) ; if ( ! H2O . ARGS . disable_web ) { Log . info ( "If you have trouble connecting, try SSH tunneling from your local machine (e.g., via port 55555):\n" + " 1. Open a terminal and run 'ssh -L 55555:localhost:" + API_PORT + " " + System . getProperty ( "user.name" ) + "@" + SELF_ADDRESS . getHostAddress ( ) + "'\n" + " 2. Point your browser to " + NetworkInit . h2oHttpView . getScheme ( ) + "://localhost:55555" ) ; } SELF . _heartbeat . _jar_md5 = JarHash . JARHASH ; SELF . _heartbeat . _client = ARGS . client ; SELF . _heartbeat . _cloud_name_hash = ARGS . name . hashCode ( ) ; } | Initializes the local node and the local cloud with itself as the only member . |
22,253 | private static void startNetworkServices ( ) { UDPRebooted . T . reboot . broadcast ( ) ; new MultiReceiverThread ( ) . start ( ) ; Cleaner . THE_CLEANER . start ( ) ; new TCPReceiverThread ( NetworkInit . _tcpSocket ) . start ( ) ; } | Starts the worker threads receiver threads heartbeats and all other network related services . |
22,254 | void set_next_Cloud ( H2ONode [ ] h2os , int hash ) { synchronized ( this ) { int idx = _idx + 1 ; if ( idx == 256 ) idx = 1 ; CLOUDS [ idx ] = CLOUD = new H2O ( h2os , hash , idx ) ; } SELF . _heartbeat . _cloud_size = ( char ) CLOUD . size ( ) ; H2O . CLOUD . _node_ip_to_index = new HashMap < > ( ) ; for ( H2ONode node : H2O . CLOUD . _memary ) { H2O . CLOUD . _node_ip_to_index . put ( node . getIpPortString ( ) , node . index ( ) ) ; } } | member list . |
22,255 | public long free_mem ( ) { long memsz = 0 ; for ( H2ONode h2o : CLOUD . _memary ) memsz += h2o . _heartbeat . get_free_mem ( ) ; return memsz ; } | Cluster free memory |
22,256 | public boolean healthy ( ) { long now = System . currentTimeMillis ( ) ; for ( H2ONode node : H2O . CLOUD . members ( ) ) if ( ! node . isHealthy ( now ) ) return false ; return true ; } | Quick health check ; no reason given for bad health |
22,257 | public static void joinOthers ( ) { long start = System . currentTimeMillis ( ) ; while ( System . currentTimeMillis ( ) - start < 2000 ) { if ( CLOUD . size ( ) > 1 && Paxos . _commonKnowledge ) break ; try { Thread . sleep ( 100 ) ; } catch ( InterruptedException ignore ) { } } } | - If nobody else is found not an error . |
22,258 | public static void raw_remove ( Key key ) { Value v = STORE . remove ( key ) ; if ( v != null ) v . removePersist ( ) ; } | Get the value from the store |
22,259 | public static String STOREtoString ( ) { int [ ] cnts = new int [ 1 ] ; Object [ ] kvs = H2O . STORE . raw_array ( ) ; for ( int i = 2 ; i < kvs . length ; i += 2 ) { Object ov = kvs [ i + 1 ] ; if ( ! ( ov instanceof Value ) ) continue ; Value val = ( Value ) ov ; if ( val . isNull ( ) ) { Value . STORE_get ( val . _key ) ; continue ; } int t = val . type ( ) ; while ( t >= cnts . length ) cnts = Arrays . copyOf ( cnts , cnts . length << 1 ) ; cnts [ t ] ++ ; } StringBuilder sb = new StringBuilder ( ) ; for ( int t = 0 ; t < cnts . length ; t ++ ) if ( cnts [ t ] != 0 ) sb . append ( String . format ( "-%30s %5d\n" , TypeMap . CLAZZES [ t ] , cnts [ t ] ) ) ; return sb . toString ( ) ; } | Nice local - STORE only debugging summary |
22,260 | public static boolean checkUnsupportedJava ( ) { if ( Boolean . getBoolean ( H2O . OptArgs . SYSTEM_PROP_PREFIX + "debug.noJavaVersionCheck" ) ) { return false ; } if ( JAVA_VERSION . isKnown ( ) && ! isUserEnabledJavaVersion ( ) && ( JAVA_VERSION . getMajor ( ) < 7 || JAVA_VERSION . getMajor ( ) > 12 ) ) { System . err . println ( "Only Java 7, 8, 9, 10, 11 and 12 are supported, system version is " + System . getProperty ( "java.version" ) ) ; return true ; } String vmName = System . getProperty ( "java.vm.name" ) ; if ( vmName != null && vmName . equals ( "GNU libgcj" ) ) { System . err . println ( "GNU gcj is not supported" ) ; return true ; } return false ; } | Check if the Java version is not supported |
22,261 | private static long getCurrentPID ( ) { try { String n = ManagementFactory . getRuntimeMXBean ( ) . getName ( ) ; int i = n . indexOf ( '@' ) ; if ( i != - 1 ) { return Long . parseLong ( n . substring ( 0 , i ) ) ; } else { return - 1L ; } } catch ( Throwable ignore ) { return - 1L ; } } | Find PID of the current process use - 1 if we can t find the value . |
22,262 | public static void setFlatfile ( Set < H2ONode > nodes ) { if ( nodes == null ) { STATIC_H2OS = null ; } else { STATIC_H2OS = Collections . newSetFromMap ( new ConcurrentHashMap < H2ONode , Boolean > ( ) ) ; STATIC_H2OS . addAll ( nodes ) ; } } | Setup a set of nodes which should be contacted during manual multicast |
22,263 | public void onAck ( ) { if ( _dontCache && ! _xval . isPersisted ( ) ) H2O . putIfMatch ( _xkey , null , _xval ) ; if ( _xval != null ) _xval . completeRemotePut ( ) ; } | Received an ACK |
22,264 | public static String UUID ( long lo , long hi ) { long lo0 = ( lo >> 32 ) & 0xFFFFFFFFL ; long lo1 = ( lo >> 16 ) & 0xFFFFL ; long lo2 = ( lo >> 0 ) & 0xFFFFL ; long hi0 = ( hi >> 48 ) & 0xFFFFL ; long hi1 = ( hi >> 0 ) & 0xFFFFFFFFFFFFL ; return String . format ( "%08X-%04X-%04X-%04X-%012X" , lo0 , lo1 , lo2 , hi0 , hi1 ) ; } | About as clumsy and random as a blaster ... |
22,265 | public KeySnapshot filter ( KVFilter kvf ) { ArrayList < KeyInfo > res = new ArrayList < > ( ) ; for ( KeyInfo kinfo : _keyInfos ) if ( kvf . filter ( kinfo ) ) res . add ( kinfo ) ; return new KeySnapshot ( res . toArray ( new KeyInfo [ res . size ( ) ] ) ) ; } | Filter the snapshot providing custom filter . Only the keys for which filter returns true will be present in the new snapshot . |
22,266 | public static Key [ ] globalKeysOfClass ( final Class clz ) { return KeySnapshot . globalSnapshot ( ) . filter ( new KeySnapshot . KVFilter ( ) { public boolean filter ( KeySnapshot . KeyInfo k ) { return Value . isSubclassOf ( k . _type , clz ) ; } } ) . keys ( ) ; } | Return all the keys of the given class . |
22,267 | public static KeySnapshot localSnapshot ( boolean homeOnly ) { Object [ ] kvs = H2O . STORE . raw_array ( ) ; ArrayList < KeyInfo > res = new ArrayList < > ( ) ; for ( int i = 2 ; i < kvs . length ; i += 2 ) { Object ok = kvs [ i ] ; if ( ! ( ok instanceof Key ) ) continue ; Key key = ( Key ) ok ; if ( ! key . user_allowed ( ) ) continue ; if ( homeOnly && ! key . home ( ) ) continue ; Value val = Value . STORE_get ( key ) ; if ( val == null ) continue ; res . add ( new KeyInfo ( key , val ) ) ; } final KeyInfo [ ] arr = res . toArray ( new KeyInfo [ res . size ( ) ] ) ; Arrays . sort ( arr ) ; return new KeySnapshot ( arr ) ; } | Get the user keys from this node only . |
22,268 | public static KeySnapshot globalSnapshot ( long timeTolerance ) { KeySnapshot res = _cache ; final long t = System . currentTimeMillis ( ) ; if ( res == null || ( t - res . timestamp ) > timeTolerance ) res = new KeySnapshot ( ( new GlobalUKeySetTask ( ) . doAllNodes ( ) . _res ) ) ; else if ( t - res . timestamp > _updateInterval ) H2O . submitTask ( new H2O . H2OCountedCompleter ( ) { public void compute2 ( ) { new GlobalUKeySetTask ( ) . doAllNodes ( ) ; } } ) ; return res ; } | Cache - enabled call to get global key snapshot . User can provide time tolerance to indicate a how old the snapshot can be . |
22,269 | public static void start ( String [ ] args , String relativeResourcePath , boolean finalizeRestRegistration ) { long time0 = System . currentTimeMillis ( ) ; H2O . main ( args ) ; H2O . registerResourceRoot ( new File ( relativeResourcePath + File . separator + "h2o-web/src/main/resources/www" ) ) ; H2O . registerResourceRoot ( new File ( relativeResourcePath + File . separator + "h2o-core/src/main/resources/www" ) ) ; ExtensionManager . getInstance ( ) . registerRestApiExtensions ( ) ; if ( ! H2O . ARGS . disable_web ) { if ( finalizeRestRegistration ) { H2O . startServingRestApi ( ) ; } } long timeF = System . currentTimeMillis ( ) ; Log . info ( "H2O started in " + ( timeF - time0 ) + "ms" ) ; if ( ! H2O . ARGS . disable_web ) { Log . info ( "" ) ; Log . info ( "Open H2O Flow in your web browser: " + H2O . getURL ( NetworkInit . h2oHttpView . getScheme ( ) ) ) ; Log . info ( "" ) ; } } | Start H2O node . |
22,270 | private Icer < D > icer ( ) { int id = _ice_id ; int tyid ; if ( id != 0 ) assert id == ( tyid = TypeMap . onIce ( this ) ) : "incorrectly cashed id " + id + ", typemap has " + tyid + ", type = " + getClass ( ) . getName ( ) ; return TypeMap . getIcer ( id != 0 ? id : ( _ice_id = ( short ) TypeMap . onIce ( this ) ) , this ) ; } | Return the icer for this instance + class . Will set on 1st use . |
22,271 | public void readExternal ( ObjectInput ois ) throws IOException , ClassNotFoundException { int x = ois . readInt ( ) ; byte [ ] buf = MemoryManager . malloc1 ( x ) ; ois . readFully ( buf ) ; read ( new AutoBuffer ( buf ) ) ; } | Java serializers use H2Os Icing |
22,272 | static String buildSelectSingleRowSql ( String databaseType , String table , String columns ) { switch ( databaseType ) { case SQL_SERVER_DB_TYPE : return "SELECT TOP(1) " + columns + " FROM " + table ; case ORACLE_DB_TYPE : return "SELECT " + columns + " FROM " + table + " FETCH NEXT 1 ROWS ONLY" ; case TERADATA_DB_TYPE : return "SELECT TOP 1 " + columns + " FROM " + table ; default : return "SELECT " + columns + " FROM " + table + " LIMIT 1" ; } } | Builds SQL SELECT to retrieve single row from a table based on type of database |
22,273 | static String buildSelectChunkSql ( String databaseType , String table , long start , int length , String columns , String [ ] columnNames ) { String sqlText = "SELECT " + columns + " FROM " + table ; switch ( databaseType ) { case SQL_SERVER_DB_TYPE : sqlText += " ORDER BY ROW_NUMBER() OVER (ORDER BY (SELECT 0))" ; sqlText += " OFFSET " + start + " ROWS FETCH NEXT " + length + " ROWS ONLY" ; break ; case ORACLE_DB_TYPE : sqlText += " OFFSET " + start + " ROWS FETCH NEXT " + length + " ROWS ONLY" ; break ; case TERADATA_DB_TYPE : sqlText += " QUALIFY ROW_NUMBER() OVER (ORDER BY " + columnNames [ 0 ] + ") BETWEEN " + ( start + 1 ) + " AND " + ( start + length ) ; break ; default : sqlText += " LIMIT " + length + " OFFSET " + start ; } return sqlText ; } | Builds SQL SELECT to retrieve chunk of rows from a table based on row offset and number of rows in a chunk . |
22,274 | static void initializeDatabaseDriver ( String databaseType ) { String driverClass = System . getProperty ( JDBC_DRIVER_CLASS_KEY_PREFIX + databaseType ) ; if ( driverClass != null ) { Log . debug ( "Loading " + driverClass + " to initialize database of type " + databaseType ) ; try { Class . forName ( driverClass ) ; } catch ( ClassNotFoundException e ) { throw new RuntimeException ( "Connection to '" + databaseType + "' database is not possible due to missing JDBC driver. " + "User specified driver class: " + driverClass , e ) ; } return ; } switch ( databaseType ) { case HIVE_DB_TYPE : try { Class . forName ( HIVE_JDBC_DRIVER_CLASS ) ; } catch ( ClassNotFoundException e ) { throw new RuntimeException ( "Connection to HIVE database is not possible due to missing JDBC driver." , e ) ; } break ; case NETEZZA_DB_TYPE : try { Class . forName ( NETEZZA_JDBC_DRIVER_CLASS ) ; } catch ( ClassNotFoundException e ) { throw new RuntimeException ( "Connection to Netezza database is not possible due to missing JDBC driver." , e ) ; } break ; default : } } | Initializes database driver for databases with JDBC driver version lower than 4 . 0 |
22,275 | synchronized public List < ParserProvider > getAllProviders ( boolean sort ) { List < ParserProvider > providers = new ArrayList < > ( ) ; for ( ParserProvider pp : loader ) { providers . add ( pp ) ; } if ( sort ) { Collections . sort ( providers , PARSER_PROVIDER_COMPARATOR ) ; } return providers ; } | Returns all parser providers sorted based on priority if required . |
22,276 | public static long getTotalPhysicalMemory ( ) { long memory = - 1 ; try { MBeanServer mBeanServer = ManagementFactory . getPlatformMBeanServer ( ) ; Object attribute = mBeanServer . getAttribute ( new ObjectName ( "java.lang" , "type" , "OperatingSystem" ) , "TotalPhysicalMemorySize" ) ; return ( Long ) attribute ; } catch ( Throwable e ) { e . printStackTrace ( ) ; } return memory ; } | Safe call to obtain size of total physical memory . |
22,277 | private ValFrame rowwise ( Env env , Frame fr , final AstPrimitive fun ) { final String [ ] names = fr . _names ; final AstFunction scope = env . _scope ; double [ ] ds = new double [ fr . numCols ( ) ] ; for ( int col = 0 ; col < fr . numCols ( ) ; ++ col ) ds [ col ] = fr . vec ( col ) . at ( 0 ) ; int noutputs = fun . apply ( env , env . stk ( ) , new AstRoot [ ] { fun , new AstRow ( ds , fr . names ( ) ) } ) . getRow ( ) . length ; Frame res = new MRTask ( ) { public void map ( Chunk chks [ ] , NewChunk [ ] nc ) { double ds [ ] = new double [ chks . length ] ; AstRoot [ ] asts = new AstRoot [ ] { fun , new AstRow ( ds , names ) } ; Session ses = new Session ( ) ; Env env = new Env ( ses ) ; env . _scope = scope ; for ( int row = 0 ; row < chks [ 0 ] . _len ; row ++ ) { for ( int col = 0 ; col < chks . length ; col ++ ) ds [ col ] = chks [ col ] . atd ( row ) ; try ( Env . StackHelp stk_inner = env . stk ( ) ) { double [ ] valRow = fun . apply ( env , stk_inner , asts ) . getRow ( ) ; for ( int newCol = 0 ; newCol < nc . length ; ++ newCol ) nc [ newCol ] . addNum ( valRow [ newCol ] ) ; } } ses . end ( null ) ; } } . doAll ( noutputs , Vec . T_NUM , fr ) . outputFrame ( ) ; return new ValFrame ( res ) ; } | 1 argument . All rows are independent and run in parallel |
22,278 | public final AutoBuffer write_impl ( AutoBuffer ab ) { _write_lock = true ; try { if ( map ( ) . size ( ) == 0 ) return ab . put1 ( 0 ) ; Entry < K , V > entry = map ( ) . entrySet ( ) . iterator ( ) . next ( ) ; K key = entry . getKey ( ) ; V val = entry . getValue ( ) ; assert key != null && val != null ; int mode ; if ( key instanceof String ) { if ( val instanceof String ) { mode = 1 ; } else { assert ( val instanceof Freezable || val instanceof Freezable [ ] ) : "incompatible class " + val . getClass ( ) ; mode = val instanceof Freezable ? 2 : 5 ; } } else { assert key instanceof Iced ; if ( val instanceof String ) { mode = 3 ; } else { assert ( val instanceof Freezable || val instanceof Freezable [ ] ) ; mode = val instanceof Freezable ? 4 : 6 ; } } ab . put1 ( mode ) ; writeMap ( ab , mode ) ; return isStringKey ( mode ) ? ab . putStr ( null ) : ab . put ( null ) ; } catch ( Throwable t ) { System . err . println ( "Iced hash map serialization failed! " + t . toString ( ) + ", msg = " + t . getMessage ( ) ) ; t . printStackTrace ( ) ; throw H2O . fail ( "Iced hash map serialization failed!" + t . toString ( ) + ", msg = " + t . getMessage ( ) ) ; } finally { _write_lock = false ; } } | random Value is written . |
22,279 | static double computeQuantile ( double lo , double hi , double row , double nrows , double prob , QuantileModel . CombineMethod method ) { if ( lo == hi ) return lo ; if ( method == null ) method = QuantileModel . CombineMethod . INTERPOLATE ; switch ( method ) { case INTERPOLATE : return linearInterpolate ( lo , hi , row , nrows , prob ) ; case AVERAGE : return 0.5 * ( hi + lo ) ; case LOW : return lo ; case HIGH : return hi ; default : Log . info ( "Unknown even sample size quantile combination type: " + method + ". Doing linear interpolation." ) ; return linearInterpolate ( lo , hi , row , nrows , prob ) ; } } | Compute the correct final quantile from these 4 values . If the lo and hi elements are equal use them . However if they differ then there is no single value which exactly matches the desired quantile . There are several well - accepted definitions in this case - including picking either the lo or the hi or averaging them or doing a linear interpolation . |
22,280 | public static void diagnose ( String applicationId , String queueName , int numNodes , int nodeMemoryMb , int numNodesStarted ) throws Exception { H2OYarnDiagnostic client = new H2OYarnDiagnostic ( ) ; client . applicationId = applicationId ; client . queueName = queueName ; client . numNodes = numNodes ; client . nodeMemoryMb = nodeMemoryMb ; client . nodeVirtualCores = 1 ; client . numNodesStarted = numNodesStarted ; client . run ( ) ; } | The assumption is this method doesn t get called unless a problem occurred . |
22,281 | private int calcCountSubstringsWords ( String str , HashSet < String > words ) { int wordCount = 0 ; int N = str . length ( ) ; for ( int i = 0 ; i < N - 1 ; i ++ ) for ( int j = i + 2 ; j < N + 1 ; j ++ ) { if ( words . contains ( str . substring ( i , j ) ) ) wordCount += 1 ; } return wordCount ; } | count all substrings > = 2 chars that are in words |
22,282 | public double str_op ( BufferedString l , BufferedString r ) { if ( StringUtils . isNullOrEmpty ( l ) ) return StringUtils . isNullOrEmpty ( r ) ? 0 : 1 ; else return l . equals ( r ) ? 0 : 1 ; } | Make sense to run this OP on an enm? |
22,283 | public ParseSetup guessInitSetup ( ByteVec v , byte [ ] bits , ParseSetup userSetup ) { return guessSetup ( v , bits , userSetup . _separator , userSetup . _number_columns , userSetup . _single_quotes , userSetup . _check_header , userSetup . _column_names , userSetup . _column_types , userSetup . _domains , userSetup . _na_strings ) ; } | Constructs initial ParseSetup from a given user setup |
22,284 | public ParseSetup guessSetup ( ByteVec v , byte [ ] bits , byte sep , int ncols , boolean singleQuotes , int checkHeader , String [ ] columnNames , byte [ ] columnTypes , String [ ] [ ] domains , String [ ] [ ] naStrings ) { throw new UnsupportedOperationException ( "Not implemented. This method is kept only for backwards compatibility. " + "Override methods guessInitSetup & guessFinalSetup if you are implementing a new parser." ) ; } | Returns parser setup of throws exception if input is not recognized |
22,285 | private static RowData parseDataRow ( String line , GenMunger munger ) { if ( line . isEmpty ( ) || line . equals ( "" ) ) return null ; String [ ] inputData = line . split ( ",(?=([^\"]*\"[^\"]*\")*[^\"]*$)|(,)" , - 1 ) ; for ( int i = 0 ; i < inputData . length ; ++ i ) inputData [ i ] = inputData [ i ] == null ? "" : inputData [ i ] ; if ( inputData . length != munger . inNames ( ) . length ) return null ; return munger . fillDefault ( inputData ) ; } | This CSV parser is as bare bones as it gets . Our test data doesn t have funny quoting spacing or other issues . Can t handle cases where the number of data columns is less than the number of header columns . |
22,286 | public static void main ( String [ ] args ) throws Exception { parseArgs ( args ) ; GenMunger rawMunger ; rawMunger = ( hex . genmodel . GenMunger ) Class . forName ( assemblyClassName ) . newInstance ( ) ; BufferedReader input = new BufferedReader ( new FileReader ( inputCSVFileName ) ) ; BufferedWriter output = new BufferedWriter ( new FileWriter ( outputCSVFileName ) ) ; String [ ] rawHeader = rawMunger . outNames ( ) ; StringBuilder header = new StringBuilder ( ) ; for ( int i = 0 ; i < rawHeader . length ; ++ i ) { header . append ( "\"" ) . append ( rawHeader [ i ] ) . append ( "\"" ) ; if ( i < rawHeader . length - 1 ) header . append ( "," ) ; } output . write ( header . toString ( ) ) ; output . write ( "\n" ) ; int lineNum = 0 ; String line ; try { while ( ( line = input . readLine ( ) ) != null ) { lineNum ++ ; if ( lineNum == 1 ) continue ; RowData row ; try { row = parseDataRow ( line , rawMunger ) ; } catch ( NumberFormatException nfe ) { nfe . printStackTrace ( ) ; System . out . println ( "Failed to parse row: " + lineNum ) ; throw new RuntimeException ( ) ; } RowData mungedRow = rawMunger . fit ( row ) ; for ( int i = 0 ; i < rawMunger . outNames ( ) . length ; ++ i ) { Object val = mungedRow == null ? Double . NaN : mungedRow . get ( rawMunger . outNames ( ) [ i ] ) ; if ( val instanceof Double ) output . write ( String . valueOf ( val ) ) ; else output . write ( "\"" + val + "\"" ) ; if ( i < rawMunger . outNames ( ) . length - 1 ) output . write ( "," ) ; } output . write ( "\n" ) ; } } catch ( Exception e ) { System . out . println ( "Caught exception on line " + lineNum ) ; System . out . println ( "" ) ; e . printStackTrace ( ) ; System . exit ( 1 ) ; } finally { output . close ( ) ; input . close ( ) ; } System . exit ( 0 ) ; } | CSV reader and predictor test program . |
22,287 | public S fillWithDefaults ( ) { HyperSpaceSearchCriteria defaults = null ; if ( HyperSpaceSearchCriteria . Strategy . Cartesian == strategy ) { defaults = new HyperSpaceSearchCriteria . CartesianSearchCriteria ( ) ; } else if ( HyperSpaceSearchCriteria . Strategy . RandomDiscrete == strategy ) { defaults = new HyperSpaceSearchCriteria . RandomDiscreteValueSearchCriteria ( ) ; } else { throw new H2OIllegalArgumentException ( "search_criteria.strategy" , strategy . toString ( ) ) ; } fillFromImpl ( ( I ) defaults ) ; return ( S ) this ; } | Fill with the default values from the corresponding Iced object . |
22,288 | protected final void registerModelBuilder ( RestApiContext context , ModelBuilder mbProto , int version ) { Class < ? extends water . api . Handler > handlerClass = water . api . ModelBuilderHandler . class ; if ( H2O . ARGS . features_level . compareTo ( mbProto . builderVisibility ( ) ) > 0 ) { return ; } String base = mbProto . getClass ( ) . getSimpleName ( ) ; String lbase = base . toLowerCase ( ) ; context . registerEndpoint ( "train_" + lbase , "POST /" + version + "/ModelBuilders/" + lbase , handlerClass , "train" , "Train a " + base + " model." ) ; context . registerEndpoint ( "validate_" + lbase , "POST /" + version + "/ModelBuilders/" + lbase + "/parameters" , handlerClass , "validate_parameters" , "Validate a set of " + base + " model builder parameters." ) ; context . registerEndpoint ( "grid_search_" + lbase , "POST /99/Grid/" + lbase , GridSearchHandler . class , "train" , "Run grid search for " + base + " model." ) ; } | Register algorithm common REST interface . |
22,289 | long checkRangeSupport ( URI uri ) throws IOException { HttpRequestBase req = createReq ( uri , true ) ; try ( CloseableHttpClient client = HttpClientBuilder . create ( ) . build ( ) ; CloseableHttpResponse response = client . execute ( req ) ) { Header acceptRangesHeader = response . getFirstHeader ( HttpHeaders . ACCEPT_RANGES ) ; Header contentLengthHeader = response . getFirstHeader ( HttpHeaders . CONTENT_LENGTH ) ; boolean acceptByteRange = ( acceptRangesHeader != null ) && "bytes" . equalsIgnoreCase ( acceptRangesHeader . getValue ( ) ) ; if ( ! acceptByteRange || contentLengthHeader == null ) { return - 1L ; } return Long . valueOf ( contentLengthHeader . getValue ( ) ) ; } } | Tests whether a given URI can be accessed using range - requests . |
22,290 | private Chunk [ ] getScoringChunks ( Chunk [ ] allChunks ) { if ( _preds == null ) return allChunks ; Chunk [ ] chks = new Chunk [ allChunks . length - _preds . numCols ( ) ] ; System . arraycopy ( allChunks , 0 , chks , 0 , chks . length ) ; return chks ; } | scoring chunks are those chunks that make the input to one of the scoring functions |
22,291 | private ModelMetrics makeModelMetrics ( SharedTreeModel model , Frame fr , Frame adaptedFr , Frame preds ) { ModelMetrics mm ; if ( model . _output . nclasses ( ) == 2 && _computeGainsLift ) { assert preds != null : "Predictions were pre-created" ; mm = _mb . makeModelMetrics ( model , fr , adaptedFr , preds ) ; } else { boolean calculatePreds = preds == null && model . _parms . _distribution == DistributionFamily . huber ; if ( calculatePreds ) { Log . warn ( "Going to calculate predictions from scratch. This can be expensive for large models! See PUBDEV-4992" ) ; preds = model . score ( fr ) ; } mm = _mb . makeModelMetrics ( model , fr , null , preds ) ; if ( calculatePreds && ( preds != null ) ) preds . remove ( ) ; } return mm ; } | Run after the doAll scoring to convert the MetricsBuilder to a ModelMetrics |
22,292 | public void createChunks ( String frameKey , byte [ ] expectedTypes , int chunkId , int totalNumRows , int [ ] maxVecSizes ) throws IOException { ab . put1 ( ExternalFrameHandler . INIT_BYTE ) ; ab . put1 ( ExternalFrameHandler . CREATE_FRAME ) ; ab . putStr ( frameKey ) ; this . expectedTypes = expectedTypes ; ab . putA1 ( expectedTypes ) ; ab . putA4 ( maxVecSizes ) ; ab . putInt ( totalNumRows ) ; ab . putInt ( chunkId ) ; writeToChannel ( ab , channel ) ; } | Create chunks on the h2o backend . This method creates chunk in en empty frame . |
22,293 | public void waitUntilAllWritten ( int timeout ) throws ExternalFrameConfirmationException { final AutoBuffer confirmAb = new AutoBuffer ( channel ) ; try { byte flag = ExternalFrameConfirmationCheck . getConfirmation ( confirmAb , timeout ) ; assert ( flag == ExternalFrameHandler . CONFIRM_WRITING_DONE ) ; } catch ( TimeoutException ex ) { throw new ExternalFrameConfirmationException ( "Timeout for confirmation exceeded!" ) ; } catch ( InterruptedException e ) { throw new ExternalFrameConfirmationException ( "Confirmation thread interrupted!" ) ; } catch ( ExecutionException e ) { throw new ExternalFrameConfirmationException ( "Confirmation failed!" ) ; } } | This method ensures the application waits for all bytes to be written before continuing in the control flow . |
22,294 | public static ValueSetter createValueSetter ( Vec v , Object value ) { if ( value == null ) { return new NAValueSetter ( ) ; } switch ( v . get_type ( ) ) { case Vec . T_CAT : return new CatValueSetter ( v . domain ( ) , value ) ; case Vec . T_NUM : case Vec . T_TIME : return new NumValueSetter ( value ) ; case Vec . T_STR : return new StrValueSetter ( value ) ; case Vec . T_UUID : return new UUIDValueSetter ( value ) ; default : throw new IllegalArgumentException ( "Cannot create ValueSetter for a Vec of type = " + v . get_type_str ( ) ) ; } } | Create an instance of ValueSetter for a given scalar value . It creates setter of the appropriate type based on the type of the underlying Vec . |
22,295 | Map < Integer , Pair < Integer , Integer > > constructRingMap ( Map < Integer , List < Integer > > treeMap , Map < Integer , Integer > parentMap ) { assert parentMap . get ( 0 ) == - 1 ; List < Integer > sharedRing = constructShareRing ( treeMap , parentMap , 0 ) ; assert sharedRing . size ( ) == treeMap . size ( ) ; Map < Integer , Pair < Integer , Integer > > ringMap = new LinkedHashMap < > ( numWorkers ) ; for ( int r = 0 ; r < numWorkers ; r ++ ) { int rPrev = ( r + numWorkers - 1 ) % numWorkers ; int rNext = ( r + 1 ) % numWorkers ; ringMap . put ( sharedRing . get ( r ) , new Pair < > ( sharedRing . get ( rPrev ) , sharedRing . get ( rNext ) ) ) ; } return ringMap ; } | Returns for each node with rank the previous and next node in DFS order . For the root the previous entry will be the last element which will create a ring type structure . |
22,296 | private final ByteVec getUploadedMojo ( final Key < Frame > key ) throws IllegalArgumentException { Objects . requireNonNull ( key ) ; Frame mojoFrame = key . get ( ) ; if ( mojoFrame . numCols ( ) > 1 ) throw new IllegalArgumentException ( String . format ( "Given MOJO frame with key '%s' should contain only 1 column with MOJO bytes. More columns found. Incorrect key provided ?" , key ) ) ; ByteVec mojoData = ( ByteVec ) mojoFrame . anyVec ( ) ; if ( mojoData . length ( ) < 1 ) throw new IllegalArgumentException ( String . format ( "Given MOJO frame with key '%s' is empty (0 bytes). Please provide a non-empty MOJO file." , key ) ) ; return mojoData ; } | Retrieves pre - uploaded MOJO archive and performs basic verifications if present . |
22,297 | public static void closeSilently ( Closeable ... closeable ) { for ( Closeable c : closeable ) try { if ( c != null ) c . close ( ) ; } catch ( IOException xe ) { } } | Silently close given files . |
22,298 | public static void close ( Closeable ... closeable ) { for ( Closeable c : closeable ) try { if ( c != null ) c . close ( ) ; } catch ( IOException ex ) { Log . err ( ex ) ; } } | Closes given files logging exceptions thrown during the process of closing . |
22,299 | public static File locateFile ( String fname ) { File file = new File ( fname ) ; if ( ! file . exists ( ) ) file = new File ( "target/" + fname ) ; if ( ! file . exists ( ) ) file = new File ( "../" + fname ) ; if ( ! file . exists ( ) ) file = new File ( "../../" + fname ) ; if ( ! file . exists ( ) ) file = new File ( "../../../" + fname ) ; if ( ! file . exists ( ) ) file = new File ( "../target/" + fname ) ; if ( ! file . exists ( ) ) file = new File ( StringUtils . expandPath ( fname ) ) ; if ( ! file . exists ( ) ) file = null ; return file ; } | Hunt for files in likely places . Null if cannot find . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.