idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
19,200
/**
 * Fit the model on the given examples, first expanding the integer class
 * labels into a one-hot outcome matrix sized by {@code numLabels()}.
 *
 * @param examples feature matrix, one example per row
 * @param labels   integer class label for each example row
 */
public void fit(INDArray examples, int[] labels) {
    INDArray outcomes = FeatureUtil.toOutcomeMatrix(labels, numLabels());
    fit(examples, outcomes);
}
Fit the model
19,201
/**
 * Returns the scheme, address and port for this UiConnectionInfo as
 * {@code "<scheme>://<address>:<port>"}.
 */
public String getFirstPart() {
    StringBuilder builder = new StringBuilder();
    builder.append(useHttps ? "https" : "http")
           .append("://")
           .append(address)
           .append(":")
           .append(port); // removed a trailing no-op .append("")
    return builder.toString();
}
This method returns scheme address and port for this UiConnectionInfo
19,202
// Lists every object key in the given S3 bucket. listObjects returns at most
// one page of results, so the request marker is advanced to the listing's
// next marker and the loop continues while the listing reports isTruncated().
// NOTE(review): assumes getClient() returns a fully configured AmazonS3 client — confirm.
public List < String > keysForBucket ( String bucket ) { AmazonS3 s3 = getClient ( ) ; List < String > ret = new ArrayList < > ( ) ; ListObjectsRequest listObjectsRequest = new ListObjectsRequest ( ) . withBucketName ( bucket ) ; ObjectListing objectListing ; do { objectListing = s3 . listObjects ( listObjectsRequest ) ; for ( S3ObjectSummary objectSummary : objectListing . getObjectSummaries ( ) ) { ret . add ( objectSummary . getKey ( ) ) ; } listObjectsRequest . setMarker ( objectListing . getNextMarker ( ) ) ; } while ( objectListing . isTruncated ( ) ) ; return ret ; }
Return the keys for a bucket
19,203
/**
 * Hands the INDArray message to the flow consumed by the parameter server,
 * wrapping any checked failure in a RuntimeException.
 */
private void forwardToParameterServer(INDArrayMessage message) {
    try {
        incomingFlow.accept(message);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
This method puts INDArray to the flow read by parameter server
19,204
// Downloads the remote archive for the given dataset split into the system temp
// dir and extracts it into the local cache dir. An existing-but-empty cache dir
// is deleted first; if the per-dataset destination dir already exists the method
// returns early and reuses the cache. When an expected checksum is configured,
// the download is verified with Adler32 and deleted on mismatch. Extraction
// failures delete the (possibly partial) cache dir before rethrowing.
public void downloadAndExtract ( DataSetType set ) throws IOException { String localFilename = new File ( remoteDataUrl ( set ) ) . getName ( ) ; File tmpFile = new File ( System . getProperty ( "java.io.tmpdir" ) , localFilename ) ; File localCacheDir = getLocalCacheDir ( ) ; if ( localCacheDir . exists ( ) ) { File [ ] list = localCacheDir . listFiles ( ) ; if ( list == null || list . length == 0 ) localCacheDir . delete ( ) ; } File localDestinationDir = new File ( localCacheDir , dataSetName ( set ) ) ; if ( ! localDestinationDir . exists ( ) ) { localCacheDir . mkdirs ( ) ; tmpFile . delete ( ) ; log . info ( "Downloading dataset to " + tmpFile . getAbsolutePath ( ) ) ; FileUtils . copyURLToFile ( new URL ( remoteDataUrl ( set ) ) , tmpFile ) ; } else { log . info ( "Using cached dataset at " + localCacheDir . getAbsolutePath ( ) ) ; return ; } if ( expectedChecksum ( set ) != 0L ) { log . info ( "Verifying download..." ) ; Checksum adler = new Adler32 ( ) ; FileUtils . checksum ( tmpFile , adler ) ; long localChecksum = adler . getValue ( ) ; log . info ( "Checksum local is " + localChecksum + ", expecting " + expectedChecksum ( set ) ) ; if ( expectedChecksum ( set ) != localChecksum ) { log . error ( "Checksums do not match. Cleaning up files and failing..." ) ; tmpFile . delete ( ) ; throw new IllegalStateException ( "Dataset file failed checksum: " + tmpFile + " - expected checksum " + expectedChecksum ( set ) + " vs. actual checksum " + localChecksum + ". If this error persists, please open an issue at https://github.com/deeplearning4j/deeplearning4j." ) ; } } try { ArchiveUtils . unzipFileTo ( tmpFile . getAbsolutePath ( ) , localCacheDir . getAbsolutePath ( ) ) ; } catch ( Throwable t ) { if ( localCacheDir . exists ( ) ) FileUtils . deleteDirectory ( localCacheDir ) ; throw t ; } }
Downloads and extracts the local dataset .
19,205
// Builds the list of "new ID" events (session / type / worker) that the given
// Persistable introduces relative to what this storage has already seen.
// Returns null if there are no listeners, or if nothing is new. Type and worker
// IDs are searched across storageMetaData, staticInfo and updates key sets,
// short-circuiting as soon as both have been found.
protected List < StatsStorageEvent > checkStorageEvents ( Persistable p ) { if ( listeners . isEmpty ( ) ) return null ; int count = 0 ; StatsStorageEvent newSID = null ; StatsStorageEvent newTID = null ; StatsStorageEvent newWID = null ; if ( ! sessionIDs . contains ( p . getSessionID ( ) ) ) { newSID = new StatsStorageEvent ( this , StatsStorageListener . EventType . NewSessionID , p . getSessionID ( ) , p . getTypeID ( ) , p . getWorkerID ( ) , p . getTimeStamp ( ) ) ; count ++ ; } boolean foundTypeId = false ; boolean foundWorkerId = false ; String typeId = p . getTypeID ( ) ; String wid = p . getWorkerID ( ) ; for ( SessionTypeId ts : storageMetaData . keySet ( ) ) { if ( typeId . equals ( ts . getTypeID ( ) ) ) { foundTypeId = true ; break ; } } for ( SessionTypeWorkerId stw : staticInfo . keySet ( ) ) { if ( ! foundTypeId && typeId . equals ( stw . getTypeID ( ) ) ) { foundTypeId = true ; } if ( ! foundWorkerId && wid . equals ( stw . getWorkerID ( ) ) ) { foundWorkerId = true ; } if ( foundTypeId && foundWorkerId ) break ; } if ( ! foundTypeId || ! foundWorkerId ) { for ( SessionTypeWorkerId stw : updates . keySet ( ) ) { if ( ! foundTypeId && typeId . equals ( stw . getTypeID ( ) ) ) { foundTypeId = true ; } if ( ! foundWorkerId && wid . equals ( stw . getWorkerID ( ) ) ) { foundWorkerId = true ; } if ( foundTypeId && foundWorkerId ) break ; } } if ( ! foundTypeId ) { newTID = new StatsStorageEvent ( this , StatsStorageListener . EventType . NewTypeID , p . getSessionID ( ) , p . getTypeID ( ) , p . getWorkerID ( ) , p . getTimeStamp ( ) ) ; count ++ ; } if ( ! foundWorkerId ) { newWID = new StatsStorageEvent ( this , StatsStorageListener . EventType . NewWorkerID , p . getSessionID ( ) , p . getTypeID ( ) , p . getWorkerID ( ) , p . getTimeStamp ( ) ) ; count ++ ; } if ( count == 0 ) return null ; List < StatsStorageEvent > sses = new ArrayList < > ( count ) ; if ( newSID != null ) sses . add ( newSID ) ; if ( newTID != null ) sses . 
add ( newTID ) ; if ( newWID != null ) sses . add ( newWID ) ; return sses ; }
available in the DB
19,206
// Transfers every word from this holder into the supplied VocabCache (only
// InMemoryLookupCache is supported), restoring historical gradients and
// Huffman coding data (index, code length, points, codes) where present, and
// optionally clears this holder afterwards. Empty-string words are skipped.
// NOTE(review): counts are incremented by (count - 1), presumably because
// VocabWord is constructed with an initial frequency of 1 — confirm.
public void transferBackToVocabCache ( VocabCache cache , boolean emptyHolder ) { if ( ! ( cache instanceof InMemoryLookupCache ) ) throw new IllegalStateException ( "Sorry, only InMemoryLookupCache use implemented." ) ; List < VocabularyWord > words = words ( ) ; for ( VocabularyWord word : words ) { if ( word . getWord ( ) . isEmpty ( ) ) continue ; VocabWord vocabWord = new VocabWord ( 1 , word . getWord ( ) ) ; if ( word . getHistoricalGradient ( ) != null ) { INDArray gradient = Nd4j . create ( word . getHistoricalGradient ( ) ) ; vocabWord . setHistoricalGradient ( gradient ) ; } ( ( InMemoryLookupCache ) cache ) . getVocabs ( ) . put ( word . getWord ( ) , vocabWord ) ; ( ( InMemoryLookupCache ) cache ) . getTokens ( ) . put ( word . getWord ( ) , vocabWord ) ; if ( word . getHuffmanNode ( ) != null ) { vocabWord . setIndex ( word . getHuffmanNode ( ) . getIdx ( ) ) ; vocabWord . setCodeLength ( word . getHuffmanNode ( ) . getLength ( ) ) ; vocabWord . setPoints ( arrayToList ( word . getHuffmanNode ( ) . getPoint ( ) , word . getHuffmanNode ( ) . getLength ( ) ) ) ; vocabWord . setCodes ( arrayToList ( word . getHuffmanNode ( ) . getCode ( ) , word . getHuffmanNode ( ) . getLength ( ) ) ) ; cache . addWordToIndex ( word . getHuffmanNode ( ) . getIdx ( ) , word . getWord ( ) ) ; } if ( word . getCount ( ) > 1 ) cache . incrementWordCount ( word . getWord ( ) , word . getCount ( ) - 1 ) ; } if ( emptyHolder ) { idxMap . clear ( ) ; vocabulary . clear ( ) ; } }
This method is required for compatibility purposes . It just transfers vocabulary from VocabHolder into VocabCache
19,207
/**
 * Copies the first {@code codeLen} elements of the array into a boxed list.
 * Kept only for VocabCache compatibility purposes.
 *
 * @param array   source array
 * @param codeLen number of leading elements to copy
 * @return list containing array[0..codeLen-1]
 */
public static List<Integer> arrayToList(int[] array, int codeLen) {
    List<Integer> boxed = new ArrayList<>();
    int i = 0;
    while (i < codeLen) {
        boxed.add(array[i]);
        i++;
    }
    return boxed;
}
This method is used only for VocabCache compatibility purposes
19,208
/**
 * Increments by one the number of occurrences of the word in the corpus.
 * Unknown words are silently ignored.
 *
 * @param word word whose counter should be incremented
 */
public void incrementWordCounter(String word) {
    // Single get() instead of containsKey()+get(): one hash lookup instead of
    // two, and no check-then-act gap if the map is modified concurrently.
    VocabularyWord w = vocabulary.get(word);
    if (w != null) {
        w.incrementCount();
    }
}
Increments by one number of occurrences of the word in corpus
19,209
// Removes low-frequency, non-special words whose count has not grown between
// scavenger activations. Each word tracks a sliding window (frequencyShift,
// length retentionDelay) of count snapshots; a word at/below the activation
// threshold (max(minWordFrequency/5, 2)) whose oldest snapshot is positive,
// at/below the threshold and equal to the newest snapshot is evicted. Words
// that are special or already frequent enough get their window cleared.
// NOTE(review): words whose frequencyShift is null are skipped via the same
// branch (setFrequencyShift(null) there is a no-op) — confirm intended.
protected synchronized void activateScavenger ( ) { int initialSize = vocabulary . size ( ) ; List < VocabularyWord > words = new ArrayList < > ( vocabulary . values ( ) ) ; for ( VocabularyWord word : words ) { if ( word . isSpecial ( ) || word . getCount ( ) >= minWordFrequency || word . getFrequencyShift ( ) == null ) { word . setFrequencyShift ( null ) ; continue ; } word . getFrequencyShift ( ) [ word . getRetentionStep ( ) ] = ( byte ) word . getCount ( ) ; int activation = Math . max ( minWordFrequency / 5 , 2 ) ; logger . debug ( "Current state> Activation: [" + activation + "], retention info: " + Arrays . toString ( word . getFrequencyShift ( ) ) ) ; if ( word . getCount ( ) <= activation && word . getFrequencyShift ( ) [ this . retentionDelay - 1 ] > 0 ) { if ( word . getFrequencyShift ( ) [ this . retentionDelay - 1 ] <= activation && word . getFrequencyShift ( ) [ this . retentionDelay - 1 ] == word . getFrequencyShift ( ) [ 0 ] ) { vocabulary . remove ( word . getWord ( ) ) ; } } if ( word . getRetentionStep ( ) < retentionDelay - 1 ) { word . incrementRetentionStep ( ) ; } else { for ( int x = 1 ; x < retentionDelay ; x ++ ) { word . getFrequencyShift ( ) [ x - 1 ] = word . getFrequencyShift ( ) [ x ] ; } } } logger . info ( "Scavenger was activated. Vocab size before: [" + initialSize + "], after: [" + vocabulary . size ( ) + "]" ) ; }
This method removes low-frequency words based on their frequency change between activations. I.e. if a word has appeared only once and has retained the same frequency over consecutive activations, we can assume it can be removed freely
19,210
/**
 * Resets the counters of every word in the vocabulary: the Huffman node and
 * frequency-shift window are cleared and the occurrence count is zeroed.
 */
public void resetWordCounters() {
    for (VocabularyWord vw : getVocabulary()) {
        vw.setHuffmanNode(null);
        vw.setFrequencyShift(null);
        vw.setCount(0);
    }
}
This method resets counters for all words in the vocabulary
19,211
/**
 * Removes all non-special words whose frequency is below the threshold, along
 * with their Huffman index-map entries.
 *
 * @param threshold minimum word frequency to be retained
 */
public void truncateVocabulary(int threshold) {
    logger.debug("Truncating vocabulary to minWordFrequency: [" + threshold + "]");
    // Snapshot the keys before removing: the original removed entries while
    // for-eaching the live keySet() view, which throws
    // ConcurrentModificationException on non-concurrent Map implementations.
    String[] words = vocabulary.keySet().toArray(new String[0]);
    for (String word : words) {
        VocabularyWord vw = vocabulary.get(word);
        if (vw != null && !vw.isSpecial() && vw.getCount() < threshold) {
            vocabulary.remove(word);
            if (vw.getHuffmanNode() != null)
                idxMap.remove(vw.getHuffmanNode().getIdx());
        }
    }
}
All words with frequency below the threshold will be removed
19,212
/**
 * Returns the Huffman index of the word in the sorted list, or -1 when the
 * word is unknown or has not yet been assigned a Huffman node.
 *
 * @param word word to look up
 * @return Huffman index, or -1 if unavailable
 */
public int indexOf(String word) {
    // Single lookup; also guards against a word that exists but has no
    // Huffman node yet (the original would NPE on getHuffmanNode().getIdx()).
    VocabularyWord vw = vocabulary.get(word);
    if (vw == null || vw.getHuffmanNode() == null)
        return -1;
    return vw.getHuffmanNode().getIdx();
}
This method returns index of word in sorted list .
19,213
/**
 * Returns the vocabulary as a list sorted by occurrence count, DESCENDING.
 */
public List<VocabularyWord> words() {
    List<VocabularyWord> sorted = new ArrayList<>(vocabulary.values());
    Collections.sort(sorted, new Comparator<VocabularyWord>() {
        @Override
        public int compare(VocabularyWord a, VocabularyWord b) {
            // reversed comparison => highest counts first
            return Integer.compare(b.getCount(), a.getCount());
        }
    });
    return sorted;
}
Returns sorted list of words in vocabulary . Sort is DESCENDING .
19,214
// Collapses the per-training-thread ThresholdAlgorithm instances into a single
// combined instance via a ThresholdAlgorithmReducer. Side effects: replaces the
// thread-local holder and clears the per-thread map, so this should ONLY be
// called once all training threads have completed. Returns null when no thread
// ever stored an instance, and the single instance directly when there is one.
public ThresholdAlgorithm getAverageThresholdAlgorithm ( ) { Collection < ThresholdAlgorithm > c = this . allThreadThresholdAlgorithms . values ( ) ; if ( c . isEmpty ( ) ) { return null ; } if ( c . size ( ) == 1 ) { return c . iterator ( ) . next ( ) ; } Iterator < ThresholdAlgorithm > iter = c . iterator ( ) ; ThresholdAlgorithmReducer r = null ; while ( iter . hasNext ( ) ) { ThresholdAlgorithm ta = iter . next ( ) ; if ( r == null ) { r = ta . newReducer ( ) ; } r . add ( ta ) ; } ThresholdAlgorithm ta = r . getFinalResult ( ) ; thresholdAlgorithm = new ThreadLocal < > ( ) ; allThreadThresholdAlgorithms . clear ( ) ; return ta ; }
This should ONLY be called once all training threads have completed
19,215
/**
 * Computes softmax along the given dimension by swapping that axis with axis 0,
 * applying softmax, and then inverting the permutation.
 *
 * @param SD        SameDiff instance
 * @param x         input variable
 * @param dimension dimension to apply softmax along
 * @param rank      rank of the input
 * @return softmax of x along the requested dimension
 */
public static SDVariable softmax(SameDiff SD, SDVariable x, int dimension, int rank) {
    int[] perm = ArrayUtil.range(0, rank);
    perm[0] = dimension;
    perm[dimension] = 0;
    SDVariable permuted = x.permute(perm);
    SDVariable sm = SD.nn.softmax(permuted);
    return sm.permute(ArrayUtil.invertPermutation(perm));
}
Compute softmax along a given dimension
19,216
// Creates the next numbered training-data subdirectory under dataRoot (1, 2, ...),
// plus its video and model subdirectories and empty stat/info files, and returns
// its absolute path. Returns "" immediately when saveData is disabled.
// FIXME (pre-existing): the exists-check and creation are not atomic, so two
// concurrent callers can race to claim the same index.
public String createSubdir ( ) throws IOException { if ( ! saveData ) return "" ; File dr = new File ( dataRoot ) ; dr . mkdirs ( ) ; File [ ] rootChildren = dr . listFiles ( ) ; int i = 1 ; while ( childrenExist ( rootChildren , i + "" ) ) i ++ ; File f = new File ( dataRoot + "/" + i ) ; f . mkdirs ( ) ; currentDir = f . getAbsolutePath ( ) ; log . info ( "Created training data directory: " + currentDir ) ; File mov = new File ( getVideoDir ( ) ) ; mov . mkdirs ( ) ; File model = new File ( getModelDir ( ) ) ; model . mkdirs ( ) ; File stat = new File ( getStat ( ) ) ; File info = new File ( getInfo ( ) ) ; stat . createNewFile ( ) ; info . createNewFile ( ) ; return f . getAbsolutePath ( ) ; }
FIXME race condition if you create them at the same time where checking if dir exists is not atomic with the creation
19,217
// Gets a single record from the map file for the given global index: linearly
// scans the per-reader [first, last] index ranges to find the owning reader,
// converts the index to a key via indexToKey, and reads the value through that
// reader. Throws IllegalStateException when no reader's range covers the index.
public V getRecord ( long index ) throws IOException { int readerIdx = - 1 ; for ( int i = 0 ; i < recordIndexesEachReader . size ( ) ; i ++ ) { Pair < Long , Long > p = recordIndexesEachReader . get ( i ) ; if ( index >= p . getFirst ( ) && index <= p . getSecond ( ) ) { readerIdx = i ; break ; } } if ( readerIdx == - 1 ) { throw new IllegalStateException ( "Index not found in any reader: " + index ) ; } WritableComparable key = indexToKey . getKeyForIndex ( index ) ; Writable value = ReflectionUtils . newInstance ( recordClass , null ) ; V v = ( V ) readers [ readerIdx ] . get ( key , value ) ; return v ; }
Gets a single record from the map file for the given index
19,218
// Executes a RandomOp against the supplied RNG using the native loop backend.
// The RNG must be a CpuNativeRandom and op.z() must be a floating point array.
// Dispatches one of three native execRandom signatures depending on which of
// the x / y / z operands are present, bracketed by profiling hooks, and
// returns op.z().
public INDArray exec ( RandomOp op , Random rng ) { if ( ! ( rng instanceof CpuNativeRandom ) ) throw new IllegalStateException ( "You should use one of NativeRandom classes for NativeOperations execution. Op class: " + op . getClass ( ) . getName ( ) ) ; long st = profilingConfigurableHookIn ( op ) ; Preconditions . checkArgument ( op . z ( ) . isR ( ) , "Op.Z must have one of floating point types" ) ; if ( op . x ( ) != null && op . y ( ) != null && op . z ( ) != null ) { loop . execRandom ( null , op . opNum ( ) , rng . getStatePointer ( ) , op . x ( ) . data ( ) . addressPointer ( ) , ( LongPointer ) op . x ( ) . shapeInfoDataBuffer ( ) . addressPointer ( ) , null , null , op . y ( ) . data ( ) . addressPointer ( ) , ( LongPointer ) op . y ( ) . shapeInfoDataBuffer ( ) . addressPointer ( ) , null , null , op . z ( ) . data ( ) . addressPointer ( ) , ( LongPointer ) op . z ( ) . shapeInfoDataBuffer ( ) . addressPointer ( ) , null , null , op . extraArgsDataBuff ( op . z ( ) . dataType ( ) ) . addressPointer ( ) ) ; } else if ( op . x ( ) != null && op . z ( ) != null ) { loop . execRandom ( null , op . opNum ( ) , rng . getStatePointer ( ) , op . x ( ) . data ( ) . addressPointer ( ) , ( LongPointer ) op . x ( ) . shapeInfoDataBuffer ( ) . addressPointer ( ) , null , null , op . z ( ) . data ( ) . addressPointer ( ) , ( LongPointer ) op . z ( ) . shapeInfoDataBuffer ( ) . addressPointer ( ) , null , null , op . extraArgsDataBuff ( op . z ( ) . dataType ( ) ) . addressPointer ( ) ) ; } else { loop . execRandom ( null , op . opNum ( ) , rng . getStatePointer ( ) , op . z ( ) . data ( ) . addressPointer ( ) , ( LongPointer ) op . z ( ) . shapeInfoDataBuffer ( ) . addressPointer ( ) , null , null , op . extraArgsDataBuff ( op . z ( ) . dataType ( ) ) . addressPointer ( ) ) ; } profilingConfigurableHookOut ( op , st ) ; return op . z ( ) ; }
This method executes specific RandomOp against specified RNG
19,219
// Lazily initializes this shard's word-vector storage on the first message:
// if SYN_0 is absent, seeds the RNG per shard, adjusts columnsPerShard (full
// vectorLength under AVERAGING; under SHARDED the last shard absorbs the
// remainder when vectorLength does not divide evenly), allocates syn0 (random,
// centered, scaled by vectorLength), optional syn1/syn1Neg, and the exp table,
// stores them, and broadcasts an InitializationAggregation to the other shards.
public void processMessage ( ) { INDArray syn0 = storage . getArray ( WordVectorStorage . SYN_0 ) ; INDArray syn1 = storage . getArray ( WordVectorStorage . SYN_1 ) ; INDArray syn1Neg = storage . getArray ( WordVectorStorage . SYN_1_NEGATIVE ) ; INDArray expTable = storage . getArray ( WordVectorStorage . EXP_TABLE ) ; if ( syn0 == null ) { log . info ( "sI_{} is starting initialization..." , transport . getShardIndex ( ) ) ; Nd4j . getRandom ( ) . setSeed ( seed * ( shardIndex + 1 ) ) ; if ( voidConfiguration . getExecutionMode ( ) == ExecutionMode . AVERAGING ) { columnsPerShard = vectorLength ; } else if ( voidConfiguration . getExecutionMode ( ) == ExecutionMode . SHARDED ) { if ( voidConfiguration . getNumberOfShards ( ) - 1 == shardIndex ) { int modulo = vectorLength % voidConfiguration . getNumberOfShards ( ) ; if ( modulo != 0 ) { columnsPerShard += modulo ; log . info ( "Got inequal split. using higher number of elements: {}" , columnsPerShard ) ; } } } int [ ] shardShape = new int [ ] { numWords , columnsPerShard } ; syn0 = Nd4j . rand ( shardShape , 'c' ) . subi ( 0.5 ) . divi ( vectorLength ) ; if ( useHs ) syn1 = Nd4j . create ( shardShape , 'c' ) ; if ( useNeg ) syn1Neg = Nd4j . create ( shardShape , 'c' ) ; expTable = initExpTable ( 100000 ) ; storage . setArray ( WordVectorStorage . SYN_0 , syn0 ) ; if ( useHs ) storage . setArray ( WordVectorStorage . SYN_1 , syn1 ) ; if ( useNeg ) storage . setArray ( WordVectorStorage . SYN_1_NEGATIVE , syn1Neg ) ; storage . setArray ( WordVectorStorage . EXP_TABLE , expTable ) ; InitializationAggregation ia = new InitializationAggregation ( ( short ) voidConfiguration . getNumberOfShards ( ) , transport . getShardIndex ( ) ) ; ia . setOriginatorId ( this . originatorId ) ; transport . sendMessage ( ia ) ; } }
This method initializes shard storage with given data
19,220
/**
 * Returns a new LongShapeDescriptor with the same shape, strides, order etc.
 * but carrying the specified data type (and preserving the EMPTY flag).
 *
 * @param dataType data type for the new descriptor
 */
public LongShapeDescriptor asDataType(DataType dataType) {
    long opts = ArrayOptionsHelper.setOptionBit(0L, dataType);
    if (isEmpty()) {
        opts = ArrayOptionsHelper.setOptionBit(opts, ArrayType.EMPTY);
    }
    return new LongShapeDescriptor(shape, stride, offset, ews, order, opts);
}
Return a new LongShapeDescriptor with the same shape strides order etc but with the specified datatype instead
19,221
/**
 * Increments the frequency of the specified label by the given value and
 * advances the total word count accordingly. Unknown labels are ignored.
 *
 * @param word      label to update
 * @param increment amount to add to the label's frequency
 */
public void incrementWordCount(String word, int increment) {
    T entry = extendedVocabulary.get(word);
    if (entry == null) {
        return; // unknown label: nothing to update
    }
    entry.increaseElementFrequency(increment);
    totalWordCount.addAndGet(increment);
}
Increment frequency for specified label by specified value
19,222
/**
 * Returns the label of the element at the specified Huffman index, or null
 * when no element occupies that index.
 */
public String wordAtIndex(int index) {
    T element = idxMap.get(index);
    return element == null ? null : element.getLabel();
}
Returns the label of the element at specified Huffman index
19,223
/**
 * Returns the Huffman index for the specified label, or -2 when the label is
 * not present in the vocabulary.
 */
public int indexOf(String label) {
    T token = tokenFor(label);
    return token == null ? -2 : token.getIndex();
}
Returns Huffman index for specified label
19,224
// Increments the number of documents/sequences the word was observed in.
// NOTE(review): the howMuch parameter is ignored — incrementSequencesCount()
// always bumps by exactly one. Confirm whether an increment-by-n variant of
// the element API exists and should be used here.
public void incrementDocCount ( String word , long howMuch ) { T element = extendedVocabulary . get ( word ) ; if ( element != null ) { element . incrementSequencesCount ( ) ; } }
Increment number of documents the label was observed in
19,225
/**
 * Sets the exact number of observed documents that contain the specified word.
 * Unknown words are ignored.
 *
 * @param word  label to update
 * @param count new sequences/documents count
 */
public void setCountForDoc(String word, long count) {
    T element = extendedVocabulary.get(word);
    if (element == null) {
        return;
    }
    element.setSequencesCount(count);
}
Set exact number of observed documents that contain specified word
19,226
// Adds the SequenceElement to the vocabulary keyed by storage id (also indexing
// it by label when present). If an element with the same storage id already
// exists, its sequence and frequency counts are merged into the existing one
// instead. Returns true only when a brand-new element was inserted.
// NOTE(review): on the merge path, the FULL merged frequency of the existing
// element is added to totalWordCount, not just the newly-added delta — confirm
// this double counting is intended.
public boolean addToken ( T element ) { boolean ret = false ; T oldElement = vocabulary . putIfAbsent ( element . getStorageId ( ) , element ) ; if ( oldElement == null ) { if ( element . getLabel ( ) != null ) { extendedVocabulary . put ( element . getLabel ( ) , element ) ; } oldElement = element ; ret = true ; } else { oldElement . incrementSequencesCount ( element . getSequencesCount ( ) ) ; oldElement . increaseElementFrequency ( ( int ) element . getElementFrequency ( ) ) ; } totalWordCount . addAndGet ( ( long ) oldElement . getElementFrequency ( ) ) ; return ret ; }
This method adds specified SequenceElement to vocabulary
19,227
/**
 * Creates a shapeInformation buffer for the given shape, delegating to the
 * full overload with the globally-configured default ordering.
 *
 * @param shape    array shape
 * @param dataType data type for the shape buffer
 */
public Pair<DataBuffer, long[]> createShapeInformation(long[] shape, DataType dataType) {
    return createShapeInformation(shape, Nd4j.order(), dataType);
}
This method creates shapeInformation buffer based on shape being passed in
19,228
/**
 * Creates a Tokenizer instance for the given sentence, attaching the
 * configured pre-processor when one is set.
 *
 * @param toTokenize sentence to tokenize; must be non-empty
 * @throws IllegalArgumentException when the sentence is empty
 */
public Tokenizer create(String toTokenize) {
    if (toTokenize.isEmpty()) {
        throw new IllegalArgumentException("Unable to proceed; no sentence to tokenize");
    }
    Tokenizer tokenizer = new JapaneseTokenizer(kuromoji, toTokenize, useBaseForm);
    if (preProcessor != null) {
        tokenizer.setTokenPreProcessor(preProcessor);
    }
    return tokenizer;
}
Create a Tokenizer instance for the given sentence .
19,229
/**
 * Gets the LSTM gate activation function from a Keras layer configuration.
 *
 * @param layerConfig dictionary containing the Keras layer configuration
 * @return mapped DL4J activation function
 * @throws InvalidKerasConfigurationException     when the inner-activation field is missing
 * @throws UnsupportedKerasConfigurationException when the configuration cannot be mapped
 */
public IActivation getGateActivationFromConfig(Map<String, Object> layerConfig)
        throws InvalidKerasConfigurationException, UnsupportedKerasConfigurationException {
    Map<String, Object> innerConfig = KerasLayerUtils.getInnerLayerConfigFromConfig(layerConfig, conf);
    String fieldName = conf.getLAYER_FIELD_INNER_ACTIVATION();
    if (!innerConfig.containsKey(fieldName)) {
        throw new InvalidKerasConfigurationException(
                "Keras LSTM layer config missing " + fieldName + " field");
    }
    return mapToIActivation((String) innerConfig.get(fieldName), conf);
}
Get LSTM gate activation function from Keras layer configuration .
19,230
// Resolves the Keras LSTM forget-gate bias initialization to a double. When
// the unit_forget_bias field is present the init is forced to "one"; otherwise
// the forget-bias-init field is required. Maps the ZERO constant to 0.0 and
// ONE to 1.0; any other value throws during training, or falls back to 1.0
// with a warning otherwise.
public double getForgetBiasInitFromConfig ( Map < String , Object > layerConfig , boolean train ) throws InvalidKerasConfigurationException , UnsupportedKerasConfigurationException { Map < String , Object > innerConfig = KerasLayerUtils . getInnerLayerConfigFromConfig ( layerConfig , conf ) ; String kerasForgetBiasInit ; if ( innerConfig . containsKey ( conf . getLAYER_FIELD_UNIT_FORGET_BIAS ( ) ) ) { kerasForgetBiasInit = LSTM_FORGET_BIAS_INIT_ONE ; } else if ( ! innerConfig . containsKey ( conf . getLAYER_FIELD_FORGET_BIAS_INIT ( ) ) ) { throw new InvalidKerasConfigurationException ( "Keras LSTM layer config missing " + conf . getLAYER_FIELD_FORGET_BIAS_INIT ( ) + " field" ) ; } else { kerasForgetBiasInit = ( String ) innerConfig . get ( conf . getLAYER_FIELD_FORGET_BIAS_INIT ( ) ) ; } double init ; switch ( kerasForgetBiasInit ) { case LSTM_FORGET_BIAS_INIT_ZERO : init = 0.0 ; break ; case LSTM_FORGET_BIAS_INIT_ONE : init = 1.0 ; break ; default : if ( train ) throw new UnsupportedKerasConfigurationException ( "Unsupported LSTM forget gate bias initialization: " + kerasForgetBiasInit ) ; else { init = 1.0 ; log . warn ( "Unsupported LSTM forget gate bias initialization: " + kerasForgetBiasInit + " (using 1 instead)" ) ; } break ; } return init ; }
Get LSTM forget gate bias initialization from Keras layer configuration .
19,231
/**
 * Saves the model as a CSV file, one row per labeled embedding, with the
 * label appended as the last column.
 *
 * @param labels row labels; rows beyond labels.size() are skipped
 * @param path   destination file path
 * @throws IOException on write failure
 */
public void saveAsFile(List<String> labels, String path) throws IOException {
    // try-with-resources closes the writer exactly once, including on error;
    // the original closed it both in the try body and again in finally.
    try (BufferedWriter write = new BufferedWriter(new FileWriter(new File(path)))) {
        for (int i = 0; i < Y.rows(); i++) {
            if (i >= labels.size())
                break;
            String word = labels.get(i);
            if (word == null)
                continue; // rows without a label are omitted
            StringBuilder sb = new StringBuilder();
            INDArray wordVector = Y.getRow(i);
            for (int j = 0; j < wordVector.length(); j++) {
                sb.append(wordVector.getDouble(j));
                if (j < wordVector.length() - 1)
                    sb.append(",");
            }
            sb.append(",").append(word).append("\n");
            write.write(sb.toString());
        }
        write.flush();
    }
}
Save the model as a file with a csv format adding the label as the last column .
19,232
/**
 * Fits the model to the given data using the specified number of output
 * dimensions, then delegates to the no-arg fit().
 *
 * @param data  input data to fit
 * @param nDims target dimensionality
 */
public void fit(INDArray data, int nDims) {
    this.x = data;
    this.numDimensions = nDims;
    fit();
}
Fit the model to the given data, changing the number of output dimensions to nDims
19,233
/**
 * Returns the most recent checkpoint under the root directory, or null when
 * none exists.
 */
public static Checkpoint lastCheckpoint(File rootDir) {
    List<Checkpoint> checkpoints = availableCheckpoints(rootDir);
    return checkpoints.isEmpty() ? null : checkpoints.get(checkpoints.size() - 1);
}
Return the most recent checkpoint if one exists - otherwise returns null
19,234
/**
 * Loads the MultiLayerNetwork for the given checkpoint from the specified
 * root directory, delegating by checkpoint number.
 */
public static MultiLayerNetwork loadCheckpointMLN(File rootDir, Checkpoint checkpoint) {
    int num = checkpoint.getCheckpointNum();
    return loadCheckpointMLN(rootDir, num);
}
Load a MultiLayerNetwork for the given checkpoint that resides in the specified root directory
19,235
/**
 * Loads the MultiLayerNetwork for the given checkpoint number from the
 * specified root directory (updater state included).
 */
public static MultiLayerNetwork loadCheckpointMLN(File rootDir, int checkpointNum) {
    File checkpointFile = getFileForCheckpoint(rootDir, checkpointNum);
    try {
        return ModelSerializer.restoreMultiLayerNetwork(checkpointFile, true);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Load a MultiLayerNetwork for the given checkpoint number
19,236
/**
 * Loads the ComputationGraph for the given checkpoint from the specified
 * root directory, delegating by checkpoint number.
 */
public static ComputationGraph loadCheckpointCG(File rootDir, Checkpoint checkpoint) {
    int num = checkpoint.getCheckpointNum();
    return loadCheckpointCG(rootDir, num);
}
Load a ComputationGraph for the given checkpoint from the specified root directory
19,237
/**
 * Loads the ComputationGraph for the given checkpoint number from the
 * specified root directory (updater state included).
 */
public static ComputationGraph loadCheckpointCG(File rootDir, int checkpointNum) {
    File checkpointFile = getFileForCheckpoint(rootDir, checkpointNum);
    try {
        return ModelSerializer.restoreComputationGraph(checkpointFile, true);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Load a ComputationGraph for the given checkpoint that resides in the specified root directory
19,238
/**
 * Sets up the initial search state from the supplied (gradient, score) pair:
 * stores the gradient view, the score, and a defensive copy of the current
 * model parameters.
 *
 * @param pair gradient and score of the current model state
 */
public void setupSearchState(Pair<Gradient, Double> pair) {
    INDArray grad = pair.getFirst().gradient(conf.variables());
    searchState.put(GRADIENT_KEY, grad);
    searchState.put(SCORE_KEY, pair.getSecond());
    searchState.put(PARAMS_KEY, model.params().dup());
}
Setup the initial search state
19,239
// Converts a MultiLayerNetwork into an equivalent linear ComputationGraph:
// one graph vertex per layer (named by layer index) chained from input "in",
// carrying over any input pre-processors, then copies the network parameters
// and, when present, the updater state view into the new graph.
public static ComputationGraph toComputationGraph ( MultiLayerNetwork net ) { ComputationGraphConfiguration . GraphBuilder b = new NeuralNetConfiguration . Builder ( ) . dataType ( net . getLayerWiseConfigurations ( ) . getDataType ( ) ) . graphBuilder ( ) ; MultiLayerConfiguration origConf = net . getLayerWiseConfigurations ( ) . clone ( ) ; int layerIdx = 0 ; String lastLayer = "in" ; b . addInputs ( "in" ) ; for ( NeuralNetConfiguration c : origConf . getConfs ( ) ) { String currLayer = String . valueOf ( layerIdx ) ; InputPreProcessor preproc = origConf . getInputPreProcess ( layerIdx ) ; b . addLayer ( currLayer , c . getLayer ( ) , preproc , lastLayer ) ; lastLayer = currLayer ; layerIdx ++ ; } b . setOutputs ( lastLayer ) ; ComputationGraphConfiguration conf = b . build ( ) ; ComputationGraph cg = new ComputationGraph ( conf ) ; cg . init ( ) ; cg . setParams ( net . params ( ) ) ; INDArray updaterState = net . getUpdater ( ) . getStateViewArray ( ) ; if ( updaterState != null ) { cg . getUpdater ( ) . getUpdaterStateViewArray ( ) . assign ( updaterState ) ; } return cg ; }
Convert a MultiLayerNetwork to a ComputationGraph
19,240
// Runs clustering iterations (remove points, classify them, apply the
// clustering strategy) until the strategy's termination condition is satisfied;
// it also keeps looping while the most recent iteration actually applied the
// strategy. currentIteration tracks global progress; iterationCount is local
// and only used for the trace log.
private void iterations ( ) { int iterationCount = 0 ; while ( ( clusteringStrategy . getTerminationCondition ( ) != null && ! clusteringStrategy . getTerminationCondition ( ) . isSatisfied ( iterationHistory ) ) || iterationHistory . getMostRecentIterationInfo ( ) . isStrategyApplied ( ) ) { currentIteration ++ ; removePoints ( ) ; classifyPoints ( ) ; applyClusteringStrategy ( ) ; log . trace ( "Completed clustering iteration {}" , ++ iterationCount ) ; } }
Run clustering iterations until a termination condition is hit . This is done by first classifying all points and then updating cluster centers based on those classified points
19,241
// k-means++-style seeding of the initial cluster centers: the first center is
// drawn uniformly at random from the points; then, repeatedly, a threshold r
// is drawn in [0, max distance) and the first remaining point whose squared
// distance from its nearest cluster is >= r becomes the next center, until
// initialClusterCount centers exist or the points are exhausted. The initial
// cluster-set info is recorded into the iteration history.
protected void initClusters ( ) { log . info ( "Generating initial clusters" ) ; List < Point > points = new ArrayList < > ( initialPoints ) ; val random = Nd4j . getRandom ( ) ; Distance distanceFn = clusteringStrategy . getDistanceFunction ( ) ; int initialClusterCount = clusteringStrategy . getInitialClusterCount ( ) ; clusterSet = new ClusterSet ( distanceFn , clusteringStrategy . inverseDistanceCalculation ( ) , new long [ ] { initialClusterCount , points . get ( 0 ) . getArray ( ) . length ( ) } ) ; clusterSet . addNewClusterWithCenter ( points . remove ( random . nextInt ( points . size ( ) ) ) ) ; INDArray dxs = Nd4j . create ( points . size ( ) ) ; dxs . addi ( clusteringStrategy . inverseDistanceCalculation ( ) ? - Double . MAX_VALUE : Double . MAX_VALUE ) ; while ( clusterSet . getClusterCount ( ) < initialClusterCount && ! points . isEmpty ( ) ) { dxs = ClusterUtils . computeSquareDistancesFromNearestCluster ( clusterSet , points , dxs , exec ) ; double r = random . nextFloat ( ) * dxs . maxNumber ( ) . doubleValue ( ) ; for ( int i = 0 ; i < dxs . length ( ) ; i ++ ) { double distance = dxs . getDouble ( i ) ; Preconditions . checkState ( distance >= 0 , "Encountered negative distance: distance function is not valid? Distance " + "function must return values >= 0, got distance %s for function s" , distance , distanceFn ) ; if ( dxs . getDouble ( i ) >= r ) { clusterSet . addNewClusterWithCenter ( points . remove ( i ) ) ; dxs = Nd4j . create ( ArrayUtils . remove ( dxs . data ( ) . asDouble ( ) , i ) ) ; break ; } } } ClusterSetInfo initialClusterSetInfo = ClusterUtils . computeClusterSetInfo ( clusterSet ) ; iterationHistory . getIterationsInfos ( ) . put ( currentIteration , new IterationInfo ( currentIteration , initialClusterSetInfo ) ) ; }
Initialize the cluster centers at random
19,242
/**
 * Returns the index position of the maximum value in the given array
 * (0 for an empty array, matching the original behavior).
 *
 * @param array values to scan
 * @return index of the largest element
 */
public int getMaxValueIndex(double[] array) {
    if (array.length == 0)
        return 0;
    // Seed from the first element instead of the old Integer.MIN_VALUE
    // sentinel, which silently returned index 0 whenever every value was
    // below Integer.MIN_VALUE.
    int index = 0;
    double max = array[0];
    for (int i = 1; i < array.length; i++) {
        if (array[i] > max) {
            max = array[i];
            index = i;
        }
    }
    return index;
}
Get the index position of the maximum value in the given array
19,243
/**
 * Returns the index position of the minimum value in the given array
 * (0 for an empty array, matching the original behavior).
 *
 * @param array values to scan
 * @return index of the smallest element
 */
public int getMinValueIndex(double[] array) {
    if (array.length == 0)
        return 0;
    // Seed from the first element instead of the old Integer.MAX_VALUE
    // sentinel, which silently returned index 0 whenever every value was
    // above Integer.MAX_VALUE.
    int index = 0;
    double min = array[0];
    for (int i = 1; i < array.length; i++) {
        if (array[i] < min) {
            min = array[i];
            index = i;
        }
    }
    return index;
}
Get the index position of minimum value in the given array
19,244
// Returns the n-th value of the array in sorted order; n is clamped to the
// array length. For ascending order the target index is n itself, otherwise
// it is mirrored from the end (array.length - n).
// NOTE(review): the (array.length - n) mirror suggests n is 1-based — confirm
// against getOrderedValue's indexing convention.
public double getNthOrderedValue ( double [ ] array , int n , boolean ascending ) { if ( n > array . length ) { n = array . length ; } int targetindex ; if ( ascending ) { targetindex = n ; } else { targetindex = array . length - n ; } return getOrderedValue ( array , targetindex ) ; }
Get the n - th value in the array after sorted
19,245
/**
 * Concatenates an observation history into a single INDArray along
 * dimension 0, yielding one channel per history element.
 *
 * @param history arrays to concatenate
 * @return single concatenated INDArray
 */
public static INDArray concat(INDArray[] history) {
    return Nd4j.concat(0, history);
}
Concatenate an array history into a single INDArray with as many channels as there are elements in the history array
19,246
/**
 * Returns a deep copy of this transition: the observation arrays and the
 * next observation are duplicated; action, reward and terminal flag are
 * carried over as-is.
 */
public Transition<A> dup() {
    INDArray[] observationCopy = dup(observation);
    INDArray nextObservationCopy = nextObservation.dup();
    return new Transition<>(observationCopy, action, reward, isTerminal, nextObservationCopy);
}
Duplicate this transition
19,247
/**
 * Returns a deep copy of an observation history: every INDArray in the
 * input is duplicated into a new array of the same length.
 */
public static INDArray[] dup(INDArray[] history) {
    INDArray[] copy = new INDArray[history.length];
    int i = 0;
    for (INDArray arr : history) {
        copy[i++] = arr.dup();
    }
    return copy;
}
Duplicate an history
19,248
/**
 * Executes the given graph with default settings (implicit outputs,
 * sequential execution, profiling disabled) and returns the results.
 *
 * @param sd graph to execute
 */
public INDArray[] executeGraph(SameDiff sd) {
    ExecutorConfiguration config = ExecutorConfiguration.builder()
            .outputMode(OutputMode.IMPLICIT)
            .executionMode(ExecutionMode.SEQUENTIAL)
            .profilingMode(OpExecutioner.ProfilingMode.DISABLED)
            .build();
    return executeGraph(sd, config);
}
This method executes given graph and returns results
19,249
/**
 * Gets a previously calculated output for the given variable/frame/iteration;
 * throws if the output does not exist (existence is enforced).
 */
public T get(String variable, String frame, int iteration, FrameIter parentFrameIter) {
    boolean enforceExistence = true;
    return get(variable, frame, iteration, parentFrameIter, enforceExistence);
}
Get a previously calculated output ; throws an exception if the output does not exist
19,250
/**
 * Gets a previously calculated output for the given variable/frame/iteration.
 *
 * @param enforceExistence when true, a missing output raises an exception
 *                         instead of returning null
 */
public T get(String variable, String frame, int iteration, FrameIter parentFrameIter, boolean enforceExistence) {
    VarId id = newVarId(variable, frame, iteration, parentFrameIter);
    T output = nodeOutputs.get(id);
    if (enforceExistence) {
        Preconditions.checkNotNull(output, "No output found for variable %s (frame %s, iteration %s)",
                variable, frame, iteration);
    }
    return output;
}
Get a previously calculated output
19,251
/**
 * Crops the input image to the bounding box of its largest blob (contour).
 * The image is converted to grayscale, optionally blurred, binarized via Canny
 * or simple thresholding, and then the largest contour's bounding rect is applied.
 *
 * @param image  input image; null is passed through as null
 * @param random unused; present for the transform interface
 * @return the cropped image wrapped in an ImageWritable
 */
protected ImageWritable doTransform(ImageWritable image, Random random) {
    if (image == null) {
        return null;
    }
    Mat original = converter.convert(image.getFrame());

    // Grayscale + optional blur to suppress noise before edge/threshold detection.
    Mat grayed = new Mat();
    cvtColor(original, grayed, CV_BGR2GRAY);
    if (blurWidth > 0 && blurHeight > 0)
        blur(grayed, grayed, new Size(blurWidth, blurHeight));

    // Binarize: Canny edges or a plain threshold, per configuration.
    Mat edgeOut = new Mat();
    if (isCanny)
        Canny(grayed, edgeOut, lowerThresh, upperThresh);
    else
        threshold(grayed, edgeOut, lowerThresh, upperThresh, 0);

    // Find the contour with the largest area and remember its bounding box.
    double largestArea = 0;
    Rect boundingRect = new Rect();
    MatVector contours = new MatVector();
    Mat hierarchy = new Mat();
    findContours(edgeOut, contours, hierarchy, this.mode, this.method);
    for (int i = 0; i < contours.size(); i++) {
        double area = contourArea(contours.get(i), false);
        if (area > largestArea) {
            // BUGFIX: largestArea was never updated, so the crop previously used the
            // *last* contour with positive area rather than the largest one.
            largestArea = area;
            boundingRect = boundingRect(contours.get(i));
        }
    }
    x = boundingRect.x();
    y = boundingRect.y();

    Mat result = original.apply(boundingRect);
    return new ImageWritable(converter.convert(result));
}
Takes an image and returns a cropped image based on its largest blob.
19,252
/**
 * Sets identity-mapping weights for a convolution layer. Viewed as an NxM matrix of
 * kernel tensors, identity mapping is a diagonal matrix of identity kernels: all
 * parameters are zeroed, then a 1 is written at the spatial center of kernel (i, i)
 * for each output channel i. Requires odd-sized kernels so an exact center exists.
 *
 * @param shape     full parameter shape: [out, in, k1, k2, ...]; all spatial dims must be odd
 * @param order     ordering ('c' or 'f') used to reshape the flat parameter view
 * @param paramView flat parameter view to initialize in place
 * @return the reshaped parameter array with identity weights written in
 */
private INDArray setIdentityConv ( long [ ] shape , char order , INDArray paramView ) { final INDArrayIndex [ ] indArrayIndices = new INDArrayIndex [ shape . length ] ; for ( int i = 2 ; i < shape . length ; i ++ ) { if ( shape [ i ] % 2 == 0 ) { throw new IllegalStateException ( "Cannot use IDENTITY init with parameters of shape " + Arrays . toString ( shape ) + "! Must have odd sized kernels!" ) ; } indArrayIndices [ i ] = NDArrayIndex . point ( shape [ i ] / 2 ) ; } paramView . assign ( Nd4j . zeros ( paramView . shape ( ) ) ) ; final INDArray params = paramView . reshape ( order , shape ) ; for ( int i = 0 ; i < shape [ 0 ] ; i ++ ) { indArrayIndices [ 0 ] = NDArrayIndex . point ( i ) ; indArrayIndices [ 1 ] = NDArrayIndex . point ( i ) ; params . put ( indArrayIndices , Nd4j . ones ( 1 ) ) ; } return params ; }
Set identity mapping for convolution layers . When viewed as an NxM matrix of kernel tensors identity mapping is when parameters is a diagonal matrix of identity kernels .
19,253
/**
 * Counts the total number of parameters across the unique leaf nodes in the list.
 *
 * @param allLeaves candidate leaf parameter spaces (duplicates allowed)
 * @return sum of numParameters() over the distinct leaves
 * @throws IllegalStateException if any supplied node is not a leaf
 */
public static int countUniqueParameters(List<ParameterSpace> allLeaves) {
    int total = 0;
    for (ParameterSpace space : getUniqueObjects(allLeaves)) {
        if (!space.isLeaf()) {
            throw new IllegalStateException("Method should only be used with leaf nodes");
        }
        total += space.numParameters();
    }
    return total;
}
Count the number of unique parameters in the specified leaf nodes
19,254
/**
 * Static factory: builds an AeronConnectionInformation from a host, port, and stream id.
 *
 * @param connectionHost host name or address
 * @param connectionPort port number
 * @param streamId       Aeron stream id
 * @return a new, fully populated AeronConnectionInformation
 */
public static AeronConnectionInformation of(String connectionHost, int connectionPort, int streamId) {
    return AeronConnectionInformation.builder()
            .connectionHost(connectionHost)
            .connectionPort(connectionPort)
            .streamId(streamId)
            .build();
}
Traditional static generator method
19,255
/**
 * Merges another ROC instance into this one (this instance is modified; the other is not).
 * Both instances must use the same number of threshold steps. Cached AUC/AUPRC/curve
 * values are invalidated. In exact mode the other instance's probability/label rows are
 * appended to this instance's buffer, growing it in blocks when needed; in thresholded
 * mode the per-threshold TP/FP counts are summed instead.
 *
 * @param other ROC instance to merge in; must have matching thresholdSteps
 * @throws UnsupportedOperationException if the threshold step counts differ
 */
public void merge ( ROC other ) { if ( this . thresholdSteps != other . thresholdSteps ) { throw new UnsupportedOperationException ( "Cannot merge ROC instances with different numbers of threshold steps (" + this . thresholdSteps + " vs. " + other . thresholdSteps + ")" ) ; } this . countActualPositive += other . countActualPositive ; this . countActualNegative += other . countActualNegative ; this . auc = null ; this . auprc = null ; this . rocCurve = null ; this . prCurve = null ; if ( isExact ) { if ( other . exampleCount == 0 ) { return ; } if ( this . exampleCount == 0 ) { this . exampleCount = other . exampleCount ; this . probAndLabel = other . probAndLabel ; return ; } if ( this . exampleCount + other . exampleCount > this . probAndLabel . size ( 0 ) ) { val newSize = this . probAndLabel . size ( 0 ) + Math . max ( other . probAndLabel . size ( 0 ) , exactAllocBlockSize ) ; INDArray newProbAndLabel = Nd4j . create ( DataType . DOUBLE , newSize , 2 ) ; newProbAndLabel . put ( new INDArrayIndex [ ] { interval ( 0 , exampleCount ) , all ( ) } , probAndLabel . get ( interval ( 0 , exampleCount ) , all ( ) ) ) ; probAndLabel = newProbAndLabel ; } INDArray toPut = other . probAndLabel . get ( interval ( 0 , other . exampleCount ) , all ( ) ) ; probAndLabel . put ( new INDArrayIndex [ ] { interval ( exampleCount , exampleCount + other . exampleCount ) , all ( ) } , toPut ) ; } else { for ( Double d : this . counts . keySet ( ) ) { CountsForThreshold cft = this . counts . get ( d ) ; CountsForThreshold otherCft = other . counts . get ( d ) ; cft . countTruePositive += otherCft . countTruePositive ; cft . countFalsePositive += otherCft . countFalsePositive ; } } this . exampleCount += other . exampleCount ; }
Merge this ROC instance with another . This ROC instance is modified by adding the stats from the other instance .
19,256
/**
 * Extracts the parent JAR URL from an original classpath entry URL.
 * For a "jar:file:/path/to.jar!/entry" style URL this returns the URL of the
 * enclosing JAR file; URLs without a "!/" separator are returned unchanged.
 *
 * @param jarUrl classpath entry URL, possibly of jar form
 * @return the enclosing JAR's URL, or the input when it is not a jar-style URL
 * @throws MalformedURLException if a fallback file URL cannot be constructed
 */
private URL extractActualUrl(URL jarUrl) throws MalformedURLException {
    String urlFile = jarUrl.getFile();
    int separatorIndex = urlFile.indexOf("!/");
    if (separatorIndex == -1) {
        return jarUrl; // not a "jar:...!/..." URL; nothing to strip
    }
    String jarFile = urlFile.substring(0, separatorIndex);
    try {
        return new URL(jarFile);
    } catch (MalformedURLException ignored) {
        // Probably no protocol prefix; treat as an absolute file-system path.
        String path = jarFile.startsWith("/") ? jarFile : "/" + jarFile;
        return new URL("file:" + path);
    }
}
Extracts parent Jar URL from original ClassPath entry URL .
19,257
/**
 * Records the given subscriber state in storage, keyed by its stream id,
 * and stamps the stream as updated at the current wall-clock time.
 *
 * @param subscriberState state to store
 */
public void updateState(SubscriberState subscriberState) {
    long now = System.currentTimeMillis();
    updated.put(subscriberState.getStreamId(), now);
    statusStorageMap.put(subscriberState.getStreamId(), subscriberState);
}
Update the state for storage
19,258
/**
 * Initializes the parameter-server context: launches an embedded Aeron media driver,
 * creates the parameter-server node, and (if not already supplied) builds the default
 * command-line arguments for the parameter server, sized from the model's parameter count.
 *
 * @param model model whose parameter count sizes the server's shape argument
 * @param args  extra arguments (currently unused by this method)
 */
// NOTE(review): the default args hard-code port 40323, host "localhost", and id 11 —
// presumably placeholders for a local/single-node setup; confirm before production use.
public void init ( Model model , Object ... args ) { mediaDriverContext = new MediaDriver . Context ( ) ; mediaDriver = MediaDriver . launchEmbedded ( mediaDriverContext ) ; parameterServerNode = new ParameterServerNode ( mediaDriver , statusServerPort , numWorkers ) ; if ( parameterServerArgs == null ) parameterServerArgs = new String [ ] { "-m" , "true" , "-s" , "1," + String . valueOf ( model . numParams ( ) ) , "-p" , "40323" , "-h" , "localhost" , "-id" , "11" , "-md" , mediaDriver . aeronDirectoryName ( ) , "-sh" , "localhost" , "-sp" , String . valueOf ( statusServerPort ) , "-u" , String . valueOf ( numUpdatesPerEpoch ) } ; }
Initialize the context
19,259
/**
 * Steps the remote environment by one action: POSTs the encoded action (and render
 * flag) to the step endpoint and unpacks observation, reward, done flag, and info.
 *
 * @param action action to apply
 * @return a StepReply holding the resulting observation, reward, terminal flag, and info
 */
public StepReply<O> step(A action) {
    JSONObject request = new JSONObject()
            .put("action", getActionSpace().encode(action))
            .put("render", render);
    JSONObject reply = ClientUtils.post(url + ENVS_ROOT + instanceId + STEP, request).getObject();

    O observation = observationSpace.getValue(reply, "observation");
    return new StepReply<O>(observation,
            reply.getDouble("reward"),
            reply.getBoolean("done"),
            reply.getJSONObject("info"));
}
Step the environment by one action
19,260
/**
 * Resets the remote environment and returns its initial observation,
 * by POSTing an empty body to the reset endpoint.
 *
 * @return the initial observation after reset
 */
public O reset() {
    JsonNode response = ClientUtils.post(url + ENVS_ROOT + instanceId + RESET, new JSONObject());
    return observationSpace.getValue(response.getObject(), "observation");
}
Reset the state of the environment and return an initial observation .
19,261
/**
 * Uploads monitoring data to OpenAI servers by POSTing the training directory,
 * API key, and algorithm id as a JSON payload.
 *
 * @param trainingDir local directory holding the monitoring data
 * @param apiKey      OpenAI API key
 * @param algorithmId identifier of the algorithm being evaluated
 */
public void upload(String trainingDir, String apiKey, String algorithmId) {
    JSONObject payload = new JSONObject();
    payload.put("training_dir", trainingDir);
    payload.put("api_key", apiKey);
    payload.put("algorithm_id", algorithmId);
    uploadPost(payload);
}
Upload monitoring data to OpenAI servers .
19,262
/**
 * Copies weights from imported Keras layers into the matching layers of an existing
 * DL4J model. Matching is strictly by layer name, so layer names must correspond
 * exactly between the Keras configuration and the DL4J model.
 *
 * @param model       DL4J model (MultiLayerNetwork or ComputationGraph) to receive weights
 * @param kerasLayers map from layer name to the imported KerasLayer holding its weights
 * @return the same model instance, with weights copied in
 * @throws InvalidKerasConfigurationException if a model layer has no matching Keras layer,
 *         or a Keras layer that carries parameters has no matching model layer
 */
public static Model copyWeightsToModel(Model model, Map<String, KerasLayer> kerasLayers)
        throws InvalidKerasConfigurationException {
    Layer[] layersFromModel;
    if (model instanceof MultiLayerNetwork)
        layersFromModel = ((MultiLayerNetwork) model).getLayers();
    else
        layersFromModel = ((ComputationGraph) model).getLayers();

    // Track which Keras layers are consumed so leftovers can be detected afterwards.
    Set<String> layerNames = new HashSet<>(kerasLayers.keySet());
    for (org.deeplearning4j.nn.api.Layer layer : layersFromModel) {
        String layerName = layer.conf().getLayer().getLayerName();
        if (!kerasLayers.containsKey(layerName))
            throw new InvalidKerasConfigurationException(
                    "No weights found for layer in model (named " + layerName + ")");
        kerasLayers.get(layerName).copyWeightsToLayer(layer);
        layerNames.remove(layerName);
    }

    // Any remaining Keras layer that actually has parameters indicates a mismatch.
    for (String layerName : layerNames) {
        if (kerasLayers.get(layerName).getNumParams() > 0)
            // BUGFIX: corrected "Attemping" typo in the error message.
            throw new InvalidKerasConfigurationException(
                    "Attempting to copy weights for layer not in model (named " + layerName + ")");
    }
    return model;
}
Helper function to import weights from nested Map into existing model . Depends critically on matched layer and parameter names . In general this seems to be straightforward for most Keras models and layersOrdered but there may be edge cases .
19,263
/**
 * Determines the Keras major version from a parsed model configuration.
 * Falls back to 1 (with a warning) when the version field is absent.
 *
 * @param modelConfig parsed model configuration map
 * @param config      field-name configuration for this Keras version
 * @return the Keras major version (e.g. 1 or 2)
 * @throws InvalidKerasConfigurationException if the version string does not start with a digit
 */
public static int determineKerasMajorVersion(Map<String, Object> modelConfig, KerasModelConfiguration config)
        throws InvalidKerasConfigurationException {
    if (!modelConfig.containsKey(config.getFieldKerasVersion())) {
        log.warn("Could not read keras version used (no " + config.getFieldKerasVersion() + " field found) \n"
                + "assuming keras version is 1.0.7 or earlier.");
        return 1;
    }
    String kerasVersionString = (String) modelConfig.get(config.getFieldKerasVersion());
    char leading = kerasVersionString.charAt(0);
    if (!Character.isDigit(leading)) {
        throw new InvalidKerasConfigurationException(
                "Keras version was not readable (" + config.getFieldKerasVersion() + " provided)");
    }
    return Character.getNumericValue(leading);
}
Determine Keras major version
19,264
/**
 * Determines the Keras backend recorded in a parsed model configuration.
 *
 * @param modelConfig parsed model configuration map
 * @param config      field-name configuration for this Keras version
 * @return the backend name, or null (after a warning) if the field is missing
 */
public static String determineKerasBackend(Map<String, Object> modelConfig, KerasModelConfiguration config) {
    if (modelConfig.containsKey(config.getFieldBackend())) {
        return (String) modelConfig.get(config.getFieldBackend());
    }
    log.warn("Could not read keras backend used (no " + config.getFieldBackend() + " field found) \n");
    return null;
}
Determine Keras backend
19,265
/**
 * Parses a Keras model configuration from either a JSON or a YAML string.
 * JSON takes precedence when both are supplied.
 *
 * @param modelJson JSON representation, or null
 * @param modelYaml YAML representation, or null
 * @return the parsed configuration as a nested map
 * @throws IOException                        if parsing fails
 * @throws InvalidKerasConfigurationException if both inputs are null
 */
public static Map<String, Object> parseModelConfig(String modelJson, String modelYaml)
        throws IOException, InvalidKerasConfigurationException {
    if (modelJson != null)
        return parseJsonString(modelJson);
    if (modelYaml != null)
        return parseYamlString(modelYaml);
    throw new InvalidKerasConfigurationException("Requires model configuration as either JSON or YAML string.");
}
Parse Keras model configuration from JSON or YAML string representation
19,266
/**
 * Parses a JSON string into a nested {@code Map<String, Object>}.
 *
 * @param json JSON text to parse
 * @return the parsed map
 * @throws IOException if the input is not valid JSON
 */
public static Map<String, Object> parseJsonString(String json) throws IOException {
    TypeReference<HashMap<String, Object>> typeRef = new TypeReference<HashMap<String, Object>>() {};
    return new ObjectMapper().readValue(json, typeRef);
}
Convenience function for parsing JSON strings .
19,267
/**
 * Parses a YAML string into a nested {@code Map<String, Object>}.
 *
 * @param yaml YAML text to parse
 * @return the parsed map
 * @throws IOException if the input is not valid YAML
 */
public static Map<String, Object> parseYamlString(String yaml) throws IOException {
    TypeReference<HashMap<String, Object>> typeRef = new TypeReference<HashMap<String, Object>>() {};
    return new ObjectMapper(new YAMLFactory()).readValue(yaml, typeRef);
}
Convenience function for parsing YAML strings .
19,268
/**
 * Vectorizes text coming from an input stream, treating the entire stream as one
 * document. Lines are concatenated without separators before vectorization.
 *
 * @param is    UTF-8 text stream to read fully
 * @param label label to attach to the resulting example
 * @return a DataSet holding the vectorized document and its label
 * @throws RuntimeException wrapping any I/O or parsing failure
 */
public DataSet vectorize(InputStream is, String label) {
    // BUGFIX: the reader was previously never closed; try-with-resources fixes the leak.
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(is, "UTF-8"))) {
        StringBuilder builder = new StringBuilder();
        String line;
        while ((line = reader.readLine()) != null) {
            builder.append(line);
        }
        return vectorize(builder.toString(), label);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Text coming from an input stream considered as one document
19,269
/**
 * Vectorizes the given text, treating it as a single document, and pairs it
 * with a one-hot label vector derived from the label source.
 *
 * @param text  document text to vectorize
 * @param label label name; must be known to the label source
 * @return a DataSet of (features, one-hot label)
 */
public DataSet vectorize(String text, String label) {
    INDArray features = transform(text);
    INDArray labelVector = FeatureUtil.toOutcomeVector(labelsSource.indexOf(label), labelsSource.size());
    return new DataSet(features, labelVector);
}
Vectorizes the passed in text treating it as one document
19,270
/**
 * Transforms raw text into a feature matrix by tokenizing it and delegating
 * to the token-list overload.
 *
 * @param text text to tokenize and transform
 * @return feature matrix for the tokenized text
 */
public INDArray transform(String text) {
    return transform(tokenizerFactory.create(text).getTokens());
}
Transforms the matrix
19,271
/**
 * Gets the dimension associated with the array's ordering for a BLAS call.
 * Vectors always report their natural rows/columns; otherwise C (row-major)
 * order swaps which axis is returned relative to F order.
 *
 * @param arr         array to inspect
 * @param defaultRows whether the caller wants the "rows" dimension by default
 * @return the selected dimension as an int
 */
public static int getDimension(INDArray arr, boolean defaultRows) {
    if (arr.isVector()) {
        return (int) (defaultRows ? arr.rows() : arr.columns());
    }
    if (arr.ordering() == NDArrayFactory.C) {
        // Row-major: the roles of rows and columns are swapped for BLAS.
        return (int) (defaultRows ? arr.columns() : arr.rows());
    }
    return (int) (defaultRows ? arr.rows() : arr.columns());
}
Get the dimension associated with the given ordering .
19,272
/**
 * Gets the leading dimension for a BLAS invocation: the full length for a vector,
 * otherwise size(1) for C (row-major) order or size(0) for F order.
 *
 * @param arr array to inspect
 * @return the leading dimension as an int
 */
public static int getLd(INDArray arr) {
    if (arr.isVector()) {
        return (int) arr.length();
    }
    if (arr.ordering() == NDArrayFactory.C) {
        return (int) arr.size(1);
    }
    return (int) arr.size(0);
}
Get the leading dimension for a blas invocation .
19,273
/**
 * Builds an object of the target class from the provided JSON string.
 *
 * @param json JSON representation of the element
 * @return the deserialized instance
 * @throws RuntimeException wrapping any I/O or mapping failure
 */
public T deserialize(String json) {
    try {
        return (T) SequenceElement.mapper().readValue(json, targetClass);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
This method builds object from provided JSON
19,274
/**
 * Serializes the element to a JSON string. Tries the element's own toJSON() first;
 * if that fails or yields an empty result, falls back to Jackson serialization.
 *
 * @param element element to serialize
 * @return JSON representation of the element
 * @throws RuntimeException if the Jackson fallback also fails
 */
public String serialize(T element) {
    String json = null;
    try {
        json = element.toJSON();
    } catch (Exception e) {
        // Deliberate best-effort: fall through to the Jackson path below.
        log.error("Direct serialization failed, falling back to jackson");
    }
    if (json != null && !json.isEmpty()) {
        return json;
    }
    try {
        return SequenceElement.mapper().writeValueAsString(element);
    } catch (org.nd4j.shade.jackson.core.JsonProcessingException e) {
        throw new RuntimeException(e);
    }
}
This method serializes an object into a JSON string
19,275
/**
 * Converts a record (a collection of Writables) to a single row-vector INDArray.
 * The record may mix scalar Writables with NDArrayWritables; scalar entries contribute
 * one column each and NDArrayWritable entries (which must be row vectors, unless the
 * record is a single NDArrayWritable) contribute their full length, concatenated in order.
 *
 * @param record writables to convert; a lone NDArrayWritable is returned as-is
 * @return a 1 x totalLength row vector holding all values in record order
 * @throws UnsupportedOperationException if a non-row-vector NDArrayWritable appears
 *         alongside other writables
 */
public static INDArray toArray ( Collection < ? extends Writable > record ) { List < Writable > l ; if ( record instanceof List ) { l = ( List < Writable > ) record ; } else { l = new ArrayList < > ( record ) ; } if ( l . size ( ) == 1 && l . get ( 0 ) instanceof NDArrayWritable ) { return ( ( NDArrayWritable ) l . get ( 0 ) ) . get ( ) ; } int length = 0 ; for ( Writable w : record ) { if ( w instanceof NDArrayWritable ) { INDArray a = ( ( NDArrayWritable ) w ) . get ( ) ; if ( ! a . isRowVector ( ) ) { throw new UnsupportedOperationException ( "Multiple writables present but NDArrayWritable is " + "not a row vector. Can only concat row vectors with other writables. Shape: " + Arrays . toString ( a . shape ( ) ) ) ; } length += a . length ( ) ; } else { length ++ ; } } INDArray arr = Nd4j . create ( 1 , length ) ; int k = 0 ; for ( Writable w : record ) { if ( w instanceof NDArrayWritable ) { INDArray toPut = ( ( NDArrayWritable ) w ) . get ( ) ; arr . put ( new INDArrayIndex [ ] { NDArrayIndex . point ( 0 ) , NDArrayIndex . interval ( k , k + toPut . length ( ) ) } , toPut ) ; k += toPut . length ( ) ; } else { arr . putScalar ( 0 , k , w . toDouble ( ) ) ; k ++ ; } } return arr ; }
Convert a record to an INDArray . May contain a mix of Writables and row vector NDArrayWritables .
19,276
/**
 * Converts an ndarray to a one-element record wrapping it in an NDArrayWritable.
 *
 * @param array array to wrap
 * @return a singleton mutable list containing the wrapped array
 */
public static List<Writable> toRecord(INDArray array) {
    List<Writable> record = new ArrayList<>(1);
    record.add(new NDArrayWritable(array));
    return record;
}
Convert an ndarray to a record
19,277
/**
 * Converts a DataSet to a matrix of Writables, dispatching to the classification
 * or regression conversion depending on the data set's label structure.
 *
 * @param dataSet data set to convert
 * @return one list of Writables per example
 */
public static List<List<Writable>> toRecords(DataSet dataSet) {
    return isClassificationDataSet(dataSet)
            ? getClassificationWritableMatrix(dataSet)
            : getRegressionWritableMatrix(dataSet);
}
Convert a DataSet to a matrix
19,278
/**
 * Inserts a point into the k-d tree. The point must be a vector of length {@code dims}.
 * An empty tree makes the point the root and initializes the bounding rectangle.
 * Otherwise the tree is walked, cycling the discriminating dimension at each level;
 * a point already present (all components equal) is ignored, and a new point is
 * attached as the left or right child per the successor test, enlarging the
 * bounding rectangle and incrementing the size.
 *
 * @param point vector of length {@code dims} to insert
 * @throws IllegalArgumentException if the point is not a vector of the expected length
 */
public void insert ( INDArray point ) { if ( ! point . isVector ( ) || point . length ( ) != dims ) throw new IllegalArgumentException ( "Point must be a vector of length " + dims ) ; if ( root == null ) { root = new KDNode ( point ) ; rect = new HyperRect ( HyperRect . point ( point ) ) ; } else { int disc = 0 ; KDNode node = root ; KDNode insert = new KDNode ( point ) ; int successor ; while ( true ) { INDArray pt = node . getPoint ( ) ; INDArray countEq = Nd4j . getExecutioner ( ) . execAndReturn ( new Any ( pt . neq ( point ) ) ) . z ( ) ; if ( countEq . getInt ( 0 ) == 0 ) { return ; } else { successor = successor ( node , point , disc ) ; KDNode child ; if ( successor < 1 ) child = node . getLeft ( ) ; else child = node . getRight ( ) ; if ( child == null ) break ; disc = ( disc + 1 ) % dims ; node = child ; } } if ( successor < 1 ) node . setLeft ( insert ) ; else node . setRight ( insert ) ; rect . enlargeTo ( point ) ; insert . setParent ( node ) ; } size ++ ; }
Insert a point into the tree
19,279
/**
 * Queries for the nearest neighbor of the given point, searching the whole tree.
 *
 * @param point query point
 * @return a (distance, point) pair for the closest stored point
 */
public Pair<Double, INDArray> nn(INDArray point) {
    // Start at the root over the full bounding rectangle with an unbounded best distance.
    return nn(root, point, rect, Double.POSITIVE_INFINITY, null, 0);
}
Query for nearest neighbor . Returns the distance and point
19,280
/**
 * Performs a multi-part upload of a (potentially large) file to S3,
 * creating the target bucket first if it does not already exist.
 *
 * @param file       file to upload
 * @param bucketName desired bucket name (normalized before use)
 */
public void multiPartUpload(File file, String bucketName) {
    AmazonS3 client = new AmazonS3Client(creds);
    String bucket = ensureValidBucketName(bucketName);
    // Reuse an existing bucket when present; otherwise create it below.
    for (Bucket b : client.listBuckets()) {
        if (b.getName().equals(bucket)) {
            doMultiPart(client, bucket, file);
            return;
        }
    }
    client.createBucket(bucket);
    doMultiPart(client, bucket, file);
}
Multi part upload for big files
19,281
/**
 * Sets the given CacheMode on every layer in the network.
 * A null mode is treated as {@code CacheMode.NONE}.
 *
 * @param mode cache mode to apply, or null for NONE
 */
public void setCacheMode(CacheMode mode) {
    CacheMode effective = (mode == null) ? CacheMode.NONE : mode;
    for (Layer layer : layers) {
        layer.setCacheMode(effective);
    }
}
This method sets specified CacheMode for all layers within network
19,282
/**
 * Gets a layer by name.
 *
 * @param name layer name
 * @return the layer with that name
 * @throws IllegalStateException if no layer with the given name exists
 */
public Layer getLayer(String name) {
    boolean exists = verticesMap.containsKey(name);
    Preconditions.checkState(exists, "Layer with name %s does not exist in the network", name);
    return verticesMap.get(name).getLayer();
}
Get a given layer by name .
19,283
/**
 * Sets the specified input array for the ComputationGraph,
 * lazily allocating the input holder array on first use.
 *
 * @param inputNum index of the input to set
 * @param input    array to assign to that input slot
 */
public void setInput(int inputNum, INDArray input) {
    if (inputs == null) {
        inputs = new INDArray[numInputArrays];
    }
    inputs[inputNum] = input;
}
Set the specified input for the ComputationGraph
19,284
/**
 * Sets all inputs for the ComputationGraph network at once.
 * A null argument clears the inputs.
 *
 * @param inputs one array per network input, or null
 * @throws IllegalArgumentException if a non-null array's length does not match the input count
 */
public void setInputs(INDArray... inputs) {
    boolean lengthMismatch = inputs != null && inputs.length != this.numInputArrays;
    if (lengthMismatch) {
        throw new IllegalArgumentException("Invalid input array: network has " + numInputArrays
                + " inputs, but array is of length " + inputs.length);
    }
    this.inputs = inputs;
}
Set all inputs for the ComputationGraph network
19,285
/**
 * Sets all labels for the ComputationGraph network at once.
 * A null argument clears the labels.
 *
 * @param labels one array per network output, or null
 * @throws IllegalArgumentException if a non-null array's length does not match the output count
 */
public void setLabels(INDArray... labels) {
    boolean lengthMismatch = labels != null && labels.length != this.numOutputArrays;
    if (lengthMismatch) {
        throw new IllegalArgumentException("Invalid output array: network has " + numOutputArrays
                + " outputs, but array is of length " + labels.length);
    }
    this.labels = labels;
}
Set all labels for the ComputationGraph network
19,286
/**
 * Pretrains the named layer with the given DataSetIterator.
 * Only valid for single-input networks; multi-input networks must use a MultiDataSetIterator.
 *
 * @param layerName       layer to pretrain
 * @param dataSetIterator source of training data
 * @throws UnsupportedOperationException if the network has more than one input
 */
public void pretrainLayer(String layerName, DataSetIterator dataSetIterator) {
    if (numInputArrays != 1) {
        throw new UnsupportedOperationException(
                "Cannot train ComputationGraph network with multiple inputs using a DataSetIterator");
    }
    MultiDataSetIterator wrapped = ComputationGraphUtil.toMultiDataSetIterator(dataSetIterator);
    pretrainLayer(layerName, wrapped);
}
Pretrain a specified layer with the given DataSetIterator
19,287
/**
 * Pretrains the named layer with the given MultiDataSetIterator (a single epoch).
 * On OutOfMemoryError, a memory crash dump is written before the error is rethrown.
 *
 * @param layerName layer to pretrain
 * @param iter      source of training data
 */
public void pretrainLayer(String layerName, MultiDataSetIterator iter) {
    try {
        pretrainLayerHelper(layerName, iter, 1);
    } catch (OutOfMemoryError e) {
        // Capture memory diagnostics before propagating; OOM state may be unrecoverable.
        CrashReportingUtil.writeMemoryCrashDump(this, e);
        throw e;
    }
}
Pretrain a specified layer with the given MultiDataSetIterator
19,288
/**
 * Fits the ComputationGraph using a MultiDataSet, forwarding its features,
 * labels, and (optional) mask arrays; mask state is cleared afterwards if used.
 *
 * @param multiDataSet training data
 */
public void fit(MultiDataSet multiDataSet) {
    fit(multiDataSet.getFeatures(), multiDataSet.getLabels(),
            multiDataSet.getFeaturesMaskArrays(), multiDataSet.getLabelsMaskArrays());
    if (multiDataSet.hasMaskArrays()) {
        clearLayerMaskArrays();
    }
}
Fit the ComputationGraph using a MultiDataSet
19,289
/**
 * Fits the ComputationGraph given arrays of inputs and labels,
 * with no feature or label masks.
 *
 * @param inputs one array per network input
 * @param labels one array per network output
 */
public void fit(INDArray[] inputs, INDArray[] labels) {
    fit(inputs, labels, null, null);
}
Fit the ComputationGraph given arrays of inputs and labels .
19,290
/**
 * Conducts a forward pass from a single input array. Only valid for
 * ComputationGraphs with exactly one input.
 *
 * @param input input array
 * @param train whether to run in training mode
 * @return map of layer name to activations
 * @throws UnsupportedOperationException if the network expects more than one input
 */
public Map<String, INDArray> feedForward(INDArray input, boolean train) {
    if (numInputArrays != 1) {
        throw new UnsupportedOperationException("Cannot feedForward with single input for graph network with "
                + numInputArrays + " expected inputs");
    }
    setInput(0, input);
    return feedForward(train);
}
Conduct forward pass using a single input array . Note that this method can only be used with ComputationGraphs with a single input array .
19,291
/**
 * Gets the activations for the specified layers only, resolving each layer name
 * to its vertex index and performing a detached forward pass.
 *
 * @param layers       names of the layers whose activations are wanted; must be non-empty
 * @param train        whether to run in training mode
 * @param features     network input arrays
 * @param featureMasks feature mask arrays, or null
 * @return one activation array per requested layer, in request order
 * @throws IllegalStateException if the list is empty or a name is not in the network
 */
public INDArray[] output(List<String> layers, boolean train, INDArray[] features, INDArray[] featureMasks) {
    // BUGFIX: corrected "later names" typo in the error message below.
    Preconditions.checkState(layers != null && layers.size() > 0,
            "Layers must not be null: got layer names %s", layers);
    int[] layerNums = new int[layers.size()];
    for (int i = 0; i < layers.size(); i++) {
        String n = layers.get(i);
        Preconditions.checkState(verticesMap.containsKey(n), "Layer with name %s not found in network", n);
        layerNums[i] = verticesMap.get(n).getVertexIndex();
    }
    INDArray[] out = outputOfLayersDetached(train, FwdPassType.STANDARD, layerNums, features, featureMasks,
            null, true, false, null);
    return out;
}
Get the activations for the specific layers only
19,292
/**
 * Calculates the gradient of the network with respect to externally supplied errors
 * (epsilons), e.g. for reinforcement learning setups without an OutputLayer.
 * On OutOfMemoryError, a memory crash dump is written before rethrowing.
 *
 * @param epsilons one error array per network output
 * @return the computed gradient
 * @throws IllegalArgumentException if epsilons is null or of the wrong length
 */
public Gradient backpropGradient(INDArray... epsilons) {
    if (epsilons == null || epsilons.length != numOutputArrays) {
        throw new IllegalArgumentException(
                "Invalid input: must have epsilons length equal to number of output arrays");
    }
    try {
        boolean truncatedBptt = configuration.getBackpropType() == BackpropType.TruncatedBPTT;
        calcBackpropGradients(true, truncatedBptt, epsilons);
        return gradient;
    } catch (OutOfMemoryError e) {
        CrashReportingUtil.writeMemoryCrashDump(this, e);
        throw e;
    }
}
Calculate the gradient of the network with respect to some external errors . Note that this is typically used for things like reinforcement learning not typical networks that include an OutputLayer or RnnOutputLayer
19,293
/**
 * Gets the ComputationGraphUpdater for this network, optionally initializing
 * the solver (and a fresh updater) when none exists yet.
 *
 * @param initializeIfAbsent if true, create the solver/updater when absent
 * @return the updater, or null when absent and initialization was not requested
 */
public ComputationGraphUpdater getUpdater(boolean initializeIfAbsent) {
    if (solver == null) {
        if (!initializeIfAbsent) {
            return null;
        }
        solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this).build();
        solver.getOptimizer().setUpdaterComputationGraph(new ComputationGraphUpdater(this));
    }
    return solver.getOptimizer().getComputationGraphUpdater();
}
Get the ComputationGraphUpdater for this network
19,294
/**
 * Sets the ComputationGraphUpdater for the network, building the solver first
 * if it has not been initialized yet.
 *
 * @param updater updater to install on the optimizer
 */
public void setUpdater(ComputationGraphUpdater updater) {
    if (solver == null) {
        // Lazily create the solver so the optimizer exists to receive the updater.
        solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this).build();
    }
    solver.getOptimizer().setUpdaterComputationGraph(updater);
}
Set the computationGraphUpdater for the network
19,295
/**
 * Gets the parameters for the ComputationGraph. When backwardOnly is true the
 * pre-flattened view is returned directly; otherwise per-layer parameter views
 * are collected in topological order and flattened in 'f' order.
 *
 * @param backwardOnly if true, return the flattened parameter view as-is
 * @return flattened network parameters
 */
public INDArray params(boolean backwardOnly) {
    if (backwardOnly)
        return flattenedParams;

    List<INDArray> paramViews = new ArrayList<>(layers.length);
    for (int vertexIdx : topologicalOrder) {
        if (!vertices[vertexIdx].hasLayer())
            continue;
        INDArray layerParams = vertices[vertexIdx].getLayer().params();
        if (layerParams != null)
            paramViews.add(layerParams); // ignore layers without parameters
    }
    return Nd4j.toFlattened('f', paramViews);
}
Get the parameters for the ComputationGraph
19,296
/**
 * Fits the network using truncated backpropagation through time (TBPTT).
 * The common time-series length is taken from the rank-3 inputs/labels (aborting with
 * a warning if lengths disagree), the series is split into forward-length subsets,
 * and each subset is optimized in turn while carrying RNN state forward between
 * subsets. Masks and (optionally) TBPTT state are cleared at the end.
 *
 * @param inputs       network inputs (rank-3 arrays participate in TBPTT splitting)
 * @param labels       network labels
 * @param featureMasks feature masks, or null
 * @param labelMasks   label masks, or null
 * @param workspaceMgr workspace manager for the optimization passes
 */
protected void doTruncatedBPTT ( INDArray [ ] inputs , INDArray [ ] labels , INDArray [ ] featureMasks , INDArray [ ] labelMasks , LayerWorkspaceMgr workspaceMgr ) { if ( flattenedGradients == null ) { initGradientsView ( ) ; } long timeSeriesLength = - 1 ; for ( INDArray in : inputs ) { if ( in . rank ( ) != 3 ) continue ; if ( timeSeriesLength == - 1 ) timeSeriesLength = in . size ( 2 ) ; else if ( timeSeriesLength != in . size ( 2 ) ) { log . warn ( "Cannot do TBPTT with time series of different lengths" ) ; return ; } } for ( INDArray out : labels ) { if ( out . rank ( ) != 3 ) continue ; if ( timeSeriesLength == - 1 ) timeSeriesLength = out . size ( 2 ) ; else if ( timeSeriesLength != out . size ( 2 ) ) { log . warn ( "Cannot do TBPTT with time series of different lengths" ) ; return ; } } long fwdLen = configuration . getTbpttFwdLength ( ) ; long nSubsets = timeSeriesLength / fwdLen ; if ( timeSeriesLength % fwdLen != 0 ) nSubsets ++ ; rnnClearPreviousState ( ) ; for ( int i = 0 ; i < nSubsets ; i ++ ) { long startTimeIdx = i * fwdLen ; long endTimeIdx = startTimeIdx + fwdLen ; if ( endTimeIdx > timeSeriesLength ) endTimeIdx = timeSeriesLength ; List < INDArray [ ] > list = getSubsetsForTbptt ( ( int ) startTimeIdx , endTimeIdx , inputs , labels , featureMasks , labelMasks ) ; setInputs ( list . get ( 0 ) ) ; setLabels ( list . get ( 1 ) ) ; setLayerMaskArrays ( list . get ( 2 ) , list . get ( 3 ) ) ; if ( solver == null ) { try ( MemoryWorkspace wsO = Nd4j . getMemoryManager ( ) . scopeOutOfWorkspaces ( ) ) { solver = new Solver . Builder ( ) . configure ( conf ( ) ) . listeners ( getListeners ( ) ) . model ( this ) . build ( ) ; } } solver . optimize ( workspaceMgr ) ; rnnUpdateStateWithTBPTTState ( ) ; } if ( clearTbpttState ) { rnnClearPreviousState ( ) ; } clearLayerMaskArrays ( ) ; }
Fit the network using truncated BPTT
19,297
/**
 * Updates the internal state of RNN layers after a truncated-BPTT fit call:
 * recurrent layers copy their TBPTT state into their previous-state slot,
 * and nested MultiLayerNetworks propagate the update recursively.
 */
protected void rnnUpdateStateWithTBPTTState() {
    for (Layer layer : layers) {
        if (layer instanceof RecurrentLayer) {
            RecurrentLayer recurrent = (RecurrentLayer) layer;
            recurrent.rnnSetPreviousState(recurrent.rnnGetTBPTTState());
        } else if (layer instanceof MultiLayerNetwork) {
            ((MultiLayerNetwork) layer).updateRnnStateWithTBPTTState();
        }
    }
}
Update the internal state of RNN layers after a truncated BPTT fit call
19,298
/**
 * Clears all state preserved within layers and graph vertices:
 * per-layer activation state, noise weight parameters, and vertex state.
 */
public void clearLayersStates() {
    for (Layer layer : layers) {
        layer.clear();
        layer.clearNoiseWeightParams();
    }
    for (GraphVertex vertex : vertices) {
        vertex.clearVertex();
    }
}
This method just makes sure there s no state preserved within layers
19,299
/**
 * Counts the number of distinct values in a collection.
 *
 * @param collection values to examine (equality per equals/hashCode)
 * @return the number of unique values
 */
public static int countUnique(Collection<?> collection) {
    // A HashSet keeps exactly one instance of each distinct value.
    return new HashSet<>(collection).size();
}
Count the number of unique values in a collection