idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
19,900
public int iamin ( INDArray arr ) { if ( arr . isSparse ( ) ) { return Nd4j . getSparseBlasWrapper ( ) . level1 ( ) . iamin ( arr ) ; } else { throw new UnsupportedOperationException ( ) ; } }
finds the element of a vector that has the minimum absolute value .
19,901
public void swap ( INDArray x , INDArray y ) { if ( Nd4j . getExecutioner ( ) . getProfilingMode ( ) == OpExecutioner . ProfilingMode . ALL ) OpProfiler . getInstance ( ) . processBlasCall ( false , x , y ) ; if ( x . isSparse ( ) || y . isSparse ( ) ) { Nd4j . getSparseBlasWrapper ( ) . level1 ( ) . swap ( x , y ) ; return ; } if ( x . data ( ) . dataType ( ) == DataType . DOUBLE ) { DefaultOpExecutioner . validateDataType ( DataType . DOUBLE , x , y ) ; dswap ( x . length ( ) , x , BlasBufferUtil . getBlasStride ( x ) , y , BlasBufferUtil . getBlasStride ( y ) ) ; } else { DefaultOpExecutioner . validateDataType ( DataType . FLOAT , x , y ) ; sswap ( x . length ( ) , x , BlasBufferUtil . getBlasStride ( x ) , y , BlasBufferUtil . getBlasStride ( y ) ) ; } }
swaps a vector with another vector .
19,902
public void copy ( long n , DataBuffer x , int offsetX , int incrX , DataBuffer y , int offsetY , int incrY ) { if ( supportsDataBufferL1Ops ( ) ) { if ( x . dataType ( ) == DataType . DOUBLE ) { dcopy ( n , x , offsetX , incrX , y , offsetY , incrY ) ; } else { scopy ( n , x , offsetX , incrX , y , offsetY , incrY ) ; } } else { long [ ] shapex = { 1 , n } ; long [ ] shapey = { 1 , n } ; long [ ] stridex = { incrX , incrX } ; long [ ] stridey = { incrY , incrY } ; INDArray arrX = Nd4j . create ( x , shapex , stridex , offsetX , 'c' ) ; INDArray arrY = Nd4j . create ( x , shapey , stridey , offsetY , 'c' ) ; copy ( arrX , arrY ) ; } }
copy a vector to another vector .
19,903
public void rotg ( INDArray a , INDArray b , INDArray c , INDArray s ) { throw new UnsupportedOperationException ( ) ; }
computes parameters for a Givens rotation .
19,904
public void rot ( long N , INDArray X , INDArray Y , double c , double s ) { if ( Nd4j . getExecutioner ( ) . getProfilingMode ( ) == OpExecutioner . ProfilingMode . ALL ) OpProfiler . getInstance ( ) . processBlasCall ( false , X , Y ) ; if ( X . isSparse ( ) && ! Y . isSparse ( ) ) { Nd4j . getSparseBlasWrapper ( ) . level1 ( ) . rot ( N , X , Y , c , s ) ; } else if ( X . data ( ) . dataType ( ) == DataType . DOUBLE ) { DefaultOpExecutioner . validateDataType ( DataType . DOUBLE , X , Y ) ; drot ( N , X , BlasBufferUtil . getBlasStride ( X ) , Y , BlasBufferUtil . getBlasStride ( X ) , c , s ) ; } else { DefaultOpExecutioner . validateDataType ( DataType . FLOAT , X , Y ) ; srot ( N , X , BlasBufferUtil . getBlasStride ( X ) , Y , BlasBufferUtil . getBlasStride ( X ) , ( float ) c , ( float ) s ) ; } }
performs rotation of points in the plane .
19,905
public void rotmg ( INDArray d1 , INDArray d2 , INDArray b1 , double b2 , INDArray P ) { throw new UnsupportedOperationException ( ) ; }
computes the modified parameters for a Givens rotation .
19,906
public void scal ( long N , double alpha , INDArray X ) { if ( Nd4j . getExecutioner ( ) . getProfilingMode ( ) == OpExecutioner . ProfilingMode . ALL ) OpProfiler . getInstance ( ) . processBlasCall ( false , X ) ; if ( X . isSparse ( ) ) { Nd4j . getSparseBlasWrapper ( ) . level1 ( ) . scal ( N , alpha , X ) ; } else if ( X . data ( ) . dataType ( ) == DataType . DOUBLE ) dscal ( N , alpha , X , BlasBufferUtil . getBlasStride ( X ) ) ; else if ( X . data ( ) . dataType ( ) == DataType . FLOAT ) sscal ( N , ( float ) alpha , X , BlasBufferUtil . getBlasStride ( X ) ) ; else if ( X . data ( ) . dataType ( ) == DataType . HALF ) Nd4j . getExecutioner ( ) . exec ( new ScalarMultiplication ( X , alpha ) ) ; }
computes the product of a vector by a scalar .
19,907
public INDArray asRowVector ( BufferedImage image ) { if ( centerCropIfNeeded ) { image = centerCropIfNeeded ( image ) ; } image = scalingIfNeed ( image , true ) ; if ( channels == 3 ) { return toINDArrayBGR ( image ) . ravel ( ) ; } int [ ] [ ] ret = toIntArrayArray ( image ) ; return NDArrayUtil . toNDArray ( ArrayUtil . flatten ( ret ) ) ; }
Convert an image into a row vector
19,908
public INDArray toRaveledTensor ( BufferedImage image ) { try { image = scalingIfNeed ( image , false ) ; return toINDArrayBGR ( image ) . ravel ( ) ; } catch ( Exception e ) { throw new RuntimeException ( "Unable to load image" , e ) ; } }
Convert an image into a raveled tensor of the bgr values of the image
19,909
public INDArray toBgr ( BufferedImage image ) { if ( image == null ) throw new IllegalStateException ( "Unable to load image" ) ; image = scalingIfNeed ( image , false ) ; return toINDArrayBGR ( image ) ; }
Convert a BufferedImage to a bgr spectrum image
19,910
public INDArray asMatrix ( InputStream inputStream ) throws IOException { if ( channels == 3 ) return toBgr ( inputStream ) ; try { BufferedImage image = ImageIO . read ( inputStream ) ; return asMatrix ( image ) ; } catch ( IOException e ) { throw new IOException ( "Unable to load image" , e ) ; } }
Convert an input stream to a matrix
19,911
public INDArray asMatrix ( BufferedImage image ) { if ( channels == 3 ) { return toBgr ( image ) ; } else { image = scalingIfNeed ( image , true ) ; int w = image . getWidth ( ) ; int h = image . getHeight ( ) ; INDArray ret = Nd4j . create ( h , w ) ; for ( int i = 0 ; i < h ; i ++ ) { for ( int j = 0 ; j < w ; j ++ ) { ret . putScalar ( new int [ ] { i , j } , image . getRGB ( j , i ) ) ; } } return ret ; } }
Convert a BufferedImage to a matrix
19,912
public INDArray asImageMiniBatches ( File f , int numMiniBatches , int numRowsPerSlice ) { try { INDArray d = asMatrix ( f ) ; return Nd4j . create ( numMiniBatches , numRowsPerSlice , d . columns ( ) ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } }
Slices up an image into a mini batch .
19,913
public static BufferedImage toImage ( INDArray matrix ) { BufferedImage img = new BufferedImage ( matrix . rows ( ) , matrix . columns ( ) , BufferedImage . TYPE_INT_ARGB ) ; WritableRaster r = img . getRaster ( ) ; int [ ] equiv = new int [ ( int ) matrix . length ( ) ] ; for ( int i = 0 ; i < equiv . length ; i ++ ) { equiv [ i ] = ( int ) matrix . getDouble ( i ) ; } r . setDataElements ( 0 , 0 , matrix . rows ( ) , matrix . columns ( ) , equiv ) ; return img ; }
Convert a matrix into a buffered image
19,914
public static Column mean ( DataRowsFacade dataFrame , String columnName ) { return dataFrame . get ( ) . groupBy ( columnName ) . agg ( avg ( columnName ) ) . col ( columnName ) ; }
Mean for a column
19,915
public static StructType fromSchema ( Schema schema ) { StructField [ ] structFields = new StructField [ schema . numColumns ( ) ] ; for ( int i = 0 ; i < structFields . length ; i ++ ) { switch ( schema . getColumnTypes ( ) . get ( i ) ) { case Double : structFields [ i ] = new StructField ( schema . getName ( i ) , DataTypes . DoubleType , false , Metadata . empty ( ) ) ; break ; case Integer : structFields [ i ] = new StructField ( schema . getName ( i ) , DataTypes . IntegerType , false , Metadata . empty ( ) ) ; break ; case Long : structFields [ i ] = new StructField ( schema . getName ( i ) , DataTypes . LongType , false , Metadata . empty ( ) ) ; break ; case Float : structFields [ i ] = new StructField ( schema . getName ( i ) , DataTypes . FloatType , false , Metadata . empty ( ) ) ; break ; default : throw new IllegalStateException ( "This api should not be used with strings , binary data or ndarrays. This is only for columnar data" ) ; } } return new StructType ( structFields ) ; }
Convert a datavec schema to a struct type in spark
19,916
public static Schema fromStructType ( StructType structType ) { Schema . Builder builder = new Schema . Builder ( ) ; StructField [ ] fields = structType . fields ( ) ; String [ ] fieldNames = structType . fieldNames ( ) ; for ( int i = 0 ; i < fields . length ; i ++ ) { String name = fields [ i ] . dataType ( ) . typeName ( ) . toLowerCase ( ) ; switch ( name ) { case "double" : builder . addColumnDouble ( fieldNames [ i ] ) ; break ; case "float" : builder . addColumnFloat ( fieldNames [ i ] ) ; break ; case "long" : builder . addColumnLong ( fieldNames [ i ] ) ; break ; case "int" : case "integer" : builder . addColumnInteger ( fieldNames [ i ] ) ; break ; case "string" : builder . addColumnString ( fieldNames [ i ] ) ; break ; default : throw new RuntimeException ( "Unknown type: " + name ) ; } } return builder . build ( ) ; }
Create a datavec schema from a struct type
19,917
public static Pair < Schema , JavaRDD < List < Writable > > > toRecords ( DataRowsFacade dataFrame ) { Schema schema = fromStructType ( dataFrame . get ( ) . schema ( ) ) ; return new Pair < > ( schema , dataFrame . get ( ) . javaRDD ( ) . map ( new ToRecord ( schema ) ) ) ; }
Create a compatible schema and rdd for datavec
19,918
public static DataRowsFacade toDataFrame ( Schema schema , JavaRDD < List < Writable > > data ) { JavaSparkContext sc = new JavaSparkContext ( data . context ( ) ) ; SQLContext sqlContext = new SQLContext ( sc ) ; JavaRDD < Row > rows = data . map ( new ToRow ( schema ) ) ; return dataRows ( sqlContext . createDataFrame ( rows , fromSchema ( schema ) ) ) ; }
Creates a data frame from a collection of writables rdd given a schema
19,919
public static List < Writable > rowToWritables ( Schema schema , Row row ) { List < Writable > ret = new ArrayList < > ( ) ; for ( int i = 0 ; i < row . size ( ) ; i ++ ) { switch ( schema . getType ( i ) ) { case Double : ret . add ( new DoubleWritable ( row . getDouble ( i ) ) ) ; break ; case Float : ret . add ( new FloatWritable ( row . getFloat ( i ) ) ) ; break ; case Integer : ret . add ( new IntWritable ( row . getInt ( i ) ) ) ; break ; case Long : ret . add ( new LongWritable ( row . getLong ( i ) ) ) ; break ; case String : ret . add ( new Text ( row . getString ( i ) ) ) ; break ; default : throw new IllegalStateException ( "Illegal type" ) ; } } return ret ; }
Convert a given Row to a list of writables given the specified Schema
19,920
public static List < String > toList ( String [ ] input ) { List < String > ret = new ArrayList < > ( ) ; for ( int i = 0 ; i < input . length ; i ++ ) ret . add ( input [ i ] ) ; return ret ; }
Convert a string array into a list
19,921
public static String [ ] toArray ( List < String > list ) { String [ ] ret = new String [ list . size ( ) ] ; for ( int i = 0 ; i < ret . length ; i ++ ) ret [ i ] = list . get ( i ) ; return ret ; }
Convert a string list into an array
19,922
public static INDArray toMatrix ( List < Row > rows ) { INDArray ret = Nd4j . create ( rows . size ( ) , rows . get ( 0 ) . size ( ) ) ; for ( int i = 0 ; i < ret . rows ( ) ; i ++ ) { for ( int j = 0 ; j < ret . columns ( ) ; j ++ ) { ret . putScalar ( i , j , rows . get ( i ) . getDouble ( j ) ) ; } } return ret ; }
Convert a list of rows to a matrix
19,923
public static List < Column > toColumn ( List < String > columns ) { List < Column > ret = new ArrayList < > ( ) ; for ( String s : columns ) ret . add ( col ( s ) ) ; return ret ; }
Convert a list of string names to columns
19,924
public static Column [ ] toColumns ( String ... columns ) { Column [ ] ret = new Column [ columns . length ] ; for ( int i = 0 ; i < columns . length ; i ++ ) ret [ i ] = col ( columns [ i ] ) ; return ret ; }
Convert an array of strings to column names
19,925
public int compareTo ( Object o ) { float thisValue = this . value ; float thatValue = ( ( FloatWritable ) o ) . value ; return ( thisValue < thatValue ? - 1 : ( thisValue == thatValue ? 0 : 1 ) ) ; }
Compares two FloatWritables .
19,926
private static long getUpdateFrequencyConfiguration ( ) { String property = System . getProperty ( DL4JSystemProperties . NTP_SOURCE_UPDATE_FREQUENCY_MS_PROPERTY ) ; Long parseAttempt = null ; long updateFreq ; if ( property != null ) { try { parseAttempt = Long . parseLong ( property ) ; } catch ( Exception e ) { log . info ( "Error parsing system property \"{}\" with value \"{}\"" , DL4JSystemProperties . NTP_SOURCE_UPDATE_FREQUENCY_MS_PROPERTY , property ) ; } if ( parseAttempt != null ) { if ( parseAttempt < MIN_UPDATE_FREQUENCY ) { log . info ( "Invalid update frequency (milliseconds): {} is less than minimum {}. Using default update frequency: {} ms" , parseAttempt , MIN_UPDATE_FREQUENCY , DEFAULT_UPDATE_FREQUENCY ) ; updateFreq = DEFAULT_UPDATE_FREQUENCY ; } else { updateFreq = parseAttempt ; } } else { updateFreq = DEFAULT_UPDATE_FREQUENCY ; } } else { updateFreq = DEFAULT_UPDATE_FREQUENCY ; } return updateFreq ; }
Query and parse the system property
19,927
private INDArray gaussianRandomMatrix ( long [ ] shape , Random rng ) { Nd4j . checkShapeValues ( shape ) ; INDArray res = Nd4j . create ( shape ) ; GaussianDistribution op1 = new GaussianDistribution ( res , 0.0 , 1.0 / Math . sqrt ( shape [ 0 ] ) ) ; Nd4j . getExecutioner ( ) . exec ( op1 , rng ) ; return res ; }
Generate a dense Gaussian random matrix .
19,928
private double getEpsFromConfig ( Map < String , Object > layerConfig ) throws InvalidKerasConfigurationException { Map < String , Object > innerConfig = KerasLayerUtils . getInnerLayerConfigFromConfig ( layerConfig , conf ) ; if ( ! innerConfig . containsKey ( LAYER_FIELD_EPSILON ) ) throw new InvalidKerasConfigurationException ( "Keras BatchNorm layer config missing " + LAYER_FIELD_EPSILON + " field" ) ; return ( double ) innerConfig . get ( LAYER_FIELD_EPSILON ) ; }
Get BatchNormalization epsilon parameter from Keras layer configuration .
19,929
private double getMomentumFromConfig ( Map < String , Object > layerConfig ) throws InvalidKerasConfigurationException { Map < String , Object > innerConfig = KerasLayerUtils . getInnerLayerConfigFromConfig ( layerConfig , conf ) ; if ( ! innerConfig . containsKey ( LAYER_FIELD_MOMENTUM ) ) throw new InvalidKerasConfigurationException ( "Keras BatchNorm layer config missing " + LAYER_FIELD_MOMENTUM + " field" ) ; return ( double ) innerConfig . get ( LAYER_FIELD_MOMENTUM ) ; }
Get BatchNormalization momentum parameter from Keras layer configuration .
19,930
private void getGammaRegularizerFromConfig ( Map < String , Object > layerConfig , boolean enforceTrainingConfig ) throws UnsupportedKerasConfigurationException , InvalidKerasConfigurationException { Map < String , Object > innerConfig = KerasLayerUtils . getInnerLayerConfigFromConfig ( layerConfig , conf ) ; if ( innerConfig . get ( LAYER_FIELD_GAMMA_REGULARIZER ) != null ) { if ( enforceTrainingConfig ) throw new UnsupportedKerasConfigurationException ( "Regularization for BatchNormalization gamma parameter not supported" ) ; else log . warn ( "Regularization for BatchNormalization gamma parameter not supported...ignoring." ) ; } }
Get BatchNormalization gamma regularizer from Keras layer configuration . Currently unsupported .
19,931
private void getBetaRegularizerFromConfig ( Map < String , Object > layerConfig , boolean enforceTrainingConfig ) throws UnsupportedKerasConfigurationException , InvalidKerasConfigurationException { Map < String , Object > innerConfig = KerasLayerUtils . getInnerLayerConfigFromConfig ( layerConfig , conf ) ; if ( innerConfig . get ( LAYER_FIELD_BETA_REGULARIZER ) != null ) { if ( enforceTrainingConfig ) throw new UnsupportedKerasConfigurationException ( "Regularization for BatchNormalization beta parameter not supported" ) ; else log . warn ( "Regularization for BatchNormalization beta parameter not supported...ignoring." ) ; } }
Get BatchNormalization beta regularizer from Keras layer configuration . Currently unsupported .
19,932
private int getBatchNormMode ( Map < String , Object > layerConfig , boolean enforceTrainingConfig ) throws InvalidKerasConfigurationException , UnsupportedKerasConfigurationException { Map < String , Object > innerConfig = KerasLayerUtils . getInnerLayerConfigFromConfig ( layerConfig , conf ) ; int batchNormMode = 0 ; if ( this . kerasMajorVersion == 1 & ! innerConfig . containsKey ( LAYER_FIELD_MODE ) ) throw new InvalidKerasConfigurationException ( "Keras BatchNorm layer config missing " + LAYER_FIELD_MODE + " field" ) ; if ( this . kerasMajorVersion == 1 ) batchNormMode = ( int ) innerConfig . get ( LAYER_FIELD_MODE ) ; switch ( batchNormMode ) { case LAYER_BATCHNORM_MODE_1 : throw new UnsupportedKerasConfigurationException ( "Keras BatchNormalization mode " + LAYER_BATCHNORM_MODE_1 + " (sample-wise) not supported" ) ; case LAYER_BATCHNORM_MODE_2 : throw new UnsupportedKerasConfigurationException ( "Keras BatchNormalization (per-batch statistics during testing) " + LAYER_BATCHNORM_MODE_2 + " not supported" ) ; } return batchNormMode ; }
Get BatchNormalization mode from Keras layer configuration . Most modes currently unsupported .
19,933
private int getBatchNormAxis ( Map < String , Object > layerConfig ) throws InvalidKerasConfigurationException { Map < String , Object > innerConfig = KerasLayerUtils . getInnerLayerConfigFromConfig ( layerConfig , conf ) ; return ( int ) innerConfig . get ( LAYER_FIELD_AXIS ) ; }
Get BatchNormalization axis from Keras layer configuration . Currently unused .
19,934
public int compareTo ( Object o ) { int thisValue = this . value ; int thatValue = ( ( IntWritable ) o ) . value ; return ( thisValue < thatValue ? - 1 : ( thisValue == thatValue ? 0 : 1 ) ) ; }
Compares two IntWritables .
19,935
public Sequence < T > nextSequence ( ) { Sequence < T > sequence = currentIterator . next ( ) ; sequence . setSequenceId ( tagger . getAndIncrement ( ) ) ; return sequence ; }
Returns next sequence out of iterator
19,936
public static void validateConvolutionModePadding ( ConvolutionMode mode , int padding ) { if ( mode == ConvolutionMode . Same ) { boolean nullPadding = true ; if ( padding != 0 ) nullPadding = false ; if ( ! nullPadding ) throw new IllegalArgumentException ( "Padding cannot be used when using the `same' convolution mode" ) ; } }
Check that the convolution mode is consistent with the padding specification
19,937
public static int getSameModeTopLeftPadding ( int outSize , int inSize , int kernel , int strides , int dilation ) { int eKernel = effectiveKernelSize ( kernel , dilation ) ; int outPad = ( ( outSize - 1 ) * strides + eKernel - inSize ) / 2 ; Preconditions . checkState ( outPad >= 0 , "Invalid padding values calculated: %s - " + "layer configuration is invalid? Input size %s, output size %s, kernel %s, " + "strides %s, dilation %s" , outPad , inSize , outSize , kernel , strides , dilation ) ; return outPad ; }
Get top padding for same mode only .
19,938
public boolean validateEvent ( ListenerEvent event , long argument ) { try { locker . acquire ( ) ; if ( event == targetEvent && argument % targetFrequency == 0 ) { return true ; } else return false ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } finally { locker . release ( ) ; } }
This method is called prior each processEvent call to check if this specific VectorsListener implementation is viable for specific event
19,939
public void processEvent ( ListenerEvent event , SequenceVectors < T > sequenceVectors , long argument ) { try { locker . acquire ( ) ; SimpleDateFormat sdf = new SimpleDateFormat ( "yyyy-MM-dd HH:mm:ss.SSS" ) ; StringBuilder builder = new StringBuilder ( targetFolder . getAbsolutePath ( ) ) ; builder . append ( "/" ) . append ( modelPrefix ) . append ( "_" ) . append ( sdf . format ( new Date ( ) ) ) . append ( ".seqvec" ) ; File targetFile = new File ( builder . toString ( ) ) ; if ( useBinarySerialization ) { SerializationUtils . saveObject ( sequenceVectors , targetFile ) ; } else { throw new UnsupportedOperationException ( "Not implemented yet" ) ; } } catch ( Exception e ) { e . printStackTrace ( ) ; } finally { locker . release ( ) ; } }
This method is called at each epoch end
19,940
public static INDArray [ ] arraysFromBase64 ( String base64 ) throws IOException { String [ ] base64Arr = base64 . split ( "\t" ) ; INDArray [ ] ret = new INDArray [ base64Arr . length ] ; for ( int i = 0 ; i < base64Arr . length ; i ++ ) { byte [ ] decode = Base64 . decodeBase64 ( base64Arr [ i ] ) ; ByteArrayInputStream bis = new ByteArrayInputStream ( decode ) ; DataInputStream dis = new DataInputStream ( bis ) ; INDArray predict = Nd4j . read ( dis ) ; ret [ i ] = predict ; } return ret ; }
Returns a set of arrays from base 64 that is tab delimited .
19,941
public static String arraysToBase64 ( INDArray [ ] arrays ) throws IOException { StringBuilder sb = new StringBuilder ( ) ; for ( INDArray outputArr : arrays ) { ByteArrayOutputStream bos = new ByteArrayOutputStream ( ) ; DataOutputStream dos = new DataOutputStream ( bos ) ; Nd4j . write ( outputArr , dos ) ; String base64 = Base64 . encodeBase64String ( bos . toByteArray ( ) ) ; sb . append ( base64 ) ; sb . append ( "\t" ) ; } return sb . toString ( ) ; }
Returns a tab delimited base 64 representation of the given arrays
19,942
public static String base64String ( INDArray arr ) throws IOException { ByteArrayOutputStream bos = new ByteArrayOutputStream ( ) ; DataOutputStream dos = new DataOutputStream ( bos ) ; Nd4j . write ( arr , dos ) ; String base64 = Base64 . encodeBase64String ( bos . toByteArray ( ) ) ; return base64 ; }
Returns an ndarray as base 64
19,943
public static INDArray fromBase64 ( String base64 ) throws IOException { byte [ ] arr = Base64 . decodeBase64 ( base64 ) ; ByteArrayInputStream bis = new ByteArrayInputStream ( arr ) ; DataInputStream dis = new DataInputStream ( bis ) ; INDArray predict = Nd4j . read ( dis ) ; return predict ; }
Create an ndarray from a base 64 representation
19,944
public static INDArray generate ( long [ ] shape , Values values ) { return generate ( shape , Nd4j . order ( ) , values ) ; }
Generate a random array with the specified shape
19,945
protected INDArray createBias ( NeuralNetConfiguration conf , INDArray biasView , boolean initializeParams ) { org . deeplearning4j . nn . conf . layers . ConvolutionLayer layerConf = ( org . deeplearning4j . nn . conf . layers . ConvolutionLayer ) conf . getLayer ( ) ; if ( initializeParams ) biasView . assign ( layerConf . getBiasInit ( ) ) ; return biasView ; }
1 bias per feature map
19,946
public Map < String , Number > status ( ) { Map < String , Number > ret = new HashMap < > ( ) ; ret . put ( "workers" , workers ) ; ret . put ( "accumulatedUpdates" , numUpdates ( ) ) ; return ret ; }
Returns the current status of this parameter server updater
19,947
public String toJson ( ) { try { return objectMapper . writeValueAsString ( status ( ) ) ; } catch ( JsonProcessingException e ) { throw new RuntimeException ( e ) ; } }
Serialize this updater as json
19,948
public void update ( NDArrayMessage message ) { updateStorage . addUpdate ( message ) ; INDArray arr = message . getArr ( ) ; int [ ] dimensions = message . getDimensions ( ) ; boolean whole = dimensions . length == 1 && dimensions [ 0 ] == - 1 ; if ( ! whole ) partialUpdate ( arr , ndArrayHolder . get ( ) , message . getIndex ( ) , dimensions ) ; else update ( arr , ndArrayHolder . get ( ) ) ; }
Do an update based on the ndarray message .
19,949
public LabelledDocument nextDocument ( ) { LabelledDocument document = currentIterator . next ( ) ; for ( String label : document . getLabels ( ) ) { labels . storeLabel ( label ) ; } return document ; }
This method returns next LabelledDocument from underlying iterator
19,950
public void queueReport ( WebTarget target , Entity entity ) { queue . add ( Pair . makePair ( target , entity ) ) ; }
This method queues UI report for sending
19,951
public void postReport ( WebTarget target , Entity entity ) { Response resp = target . request ( MediaType . APPLICATION_JSON ) . accept ( MediaType . APPLICATION_JSON ) . post ( entity ) ; log . debug ( "{}" , resp ) ; }
This method immediately sends UI report to specified target using POST request
19,952
public static < K , T , V > MultiDimensionalMap < K , T , V > newThreadSafeTreeBackedMap ( ) { return new MultiDimensionalMap < > ( new ConcurrentSkipListMap < Pair < K , T > , V > ( ) ) ; }
Thread safe sorted map implementation
19,953
public static < K , T , V > MultiDimensionalMap < K , T , V > newThreadSafeHashBackedMap ( ) { return new MultiDimensionalMap < > ( new ConcurrentHashMap < Pair < K , T > , V > ( ) ) ; }
Thread safe hash map implementation
19,954
public static < K , T , V > MultiDimensionalMap < K , T , V > newHashBackedMap ( ) { return new MultiDimensionalMap < > ( new HashMap < Pair < K , T > , V > ( ) ) ; }
Hash map implementation ( not thread safe )
19,955
public static < K , T , V > MultiDimensionalMap < K , T , V > newTreeBackedMap ( ) { return new MultiDimensionalMap < > ( new TreeMap < Pair < K , T > , V > ( ) ) ; }
Tree map implementation
19,956
public INDArray getRange ( ) { if ( range == null ) { try ( MemoryWorkspace ws = Nd4j . getMemoryManager ( ) . scopeOutOfWorkspaces ( ) ) { range = upper . sub ( lower ) ; } } return range ; }
Get the feature wise range for the statistics . Note that this is a lazy getter . It is only computed when needed .
19,957
public INDArray average ( INDArray target , INDArray [ ] arrays ) { if ( arrays == null || arrays . length == 0 ) throw new RuntimeException ( "Input arrays are missing" ) ; if ( arrays . length == 1 ) { if ( target == null ) { return null ; } return target . assign ( arrays [ 0 ] ) ; } long len = target != null ? target . lengthLong ( ) : arrays [ 0 ] . length ( ) ; PointerPointer dataPointers = new PointerPointer ( arrays . length ) ; val firstType = arrays [ 0 ] . dataType ( ) ; for ( int i = 0 ; i < arrays . length ; i ++ ) { Nd4j . getCompressor ( ) . autoDecompress ( arrays [ i ] ) ; Preconditions . checkArgument ( arrays [ i ] . dataType ( ) == firstType , "All arrays must have the same data type" ) ; if ( arrays [ i ] . elementWiseStride ( ) != 1 ) throw new ND4JIllegalStateException ( "Native averaging is applicable only to continuous INDArrays" ) ; if ( arrays [ i ] . lengthLong ( ) != len ) throw new ND4JIllegalStateException ( "All arrays should have equal length for averaging" ) ; dataPointers . put ( i , arrays [ i ] . data ( ) . addressPointer ( ) ) ; } nativeOps . average ( null , dataPointers , ( LongPointer ) arrays [ 0 ] . shapeInfoDataBuffer ( ) . addressPointer ( ) , null , null , target == null ? null : target . data ( ) . addressPointer ( ) , target == null ? null : ( LongPointer ) target . shapeInfoDataBuffer ( ) . addressPointer ( ) , null , null , arrays . length , len , true ) ; return target ; }
This method averages input arrays and returns averaged array
19,958
public static int numChunksForMessage ( NDArrayMessage message , int chunkSize ) { int sizeOfMessage = NDArrayMessage . byteBufferSizeForMessage ( message ) ; int numMessages = sizeOfMessage / chunkSize ; if ( numMessages * chunkSize < sizeOfMessage ) numMessages ++ ; return numMessages ; }
Determine the number of chunks
19,959
public static NDArrayMessage [ ] chunkedMessages ( NDArrayMessage arrayMessage , int chunkSize ) { int sizeOfMessage = NDArrayMessage . byteBufferSizeForMessage ( arrayMessage ) - 4 ; int numMessages = sizeOfMessage / chunkSize ; ByteBuffer direct = NDArrayMessage . toBuffer ( arrayMessage ) . byteBuffer ( ) ; NDArrayMessage [ ] ret = new NDArrayMessage [ numMessages ] ; for ( int i = 0 ; i < numMessages ; i ++ ) { byte [ ] chunk = new byte [ chunkSize ] ; direct . get ( chunk , i * chunkSize , chunkSize ) ; ret [ i ] = NDArrayMessage . builder ( ) . chunk ( chunk ) . numChunks ( numMessages ) . build ( ) ; } return ret ; }
Create an array of messages to send based on a specified chunk size
19,960
public static long getCurrentTimeUtc ( ) { Instant instant = Instant . now ( ) ; ZonedDateTime dateTime = instant . atZone ( ZoneOffset . UTC ) ; return dateTime . toInstant ( ) . toEpochMilli ( ) ; }
Get the current time in utc in milliseconds
19,961
public void trackStatePointer ( NativePack random ) { if ( random . getStatePointer ( ) != null ) { GarbageStateReference reference = new GarbageStateReference ( random , queue ) ; referenceMap . put ( random . getStatePointer ( ) . address ( ) , reference ) ; } }
This method is used internally from NativeRandom deallocators . This method doesn't accept Random interface implementations intentionally .
19,962
public void relocate ( AllocationStatus currentStatus , AllocationStatus targetStatus , AllocationPoint point , AllocationShape shape , CudaContext context ) { if ( currentStatus == AllocationStatus . DEVICE && targetStatus == AllocationStatus . HOST ) { DataBuffer targetBuffer = point . getBuffer ( ) ; if ( targetBuffer == null ) throw new IllegalStateException ( "Target buffer is NULL!" ) ; Pointer devicePointer = new CudaPointer ( point . getPointers ( ) . getDevicePointer ( ) . address ( ) ) ; } else if ( currentStatus == AllocationStatus . HOST && targetStatus == AllocationStatus . DEVICE ) { if ( point . isConstant ( ) ) { return ; } if ( point . getPointers ( ) . getDevicePointer ( ) == null ) { throw new IllegalStateException ( "devicePointer is NULL!" ) ; } val profD = PerformanceTracker . getInstance ( ) . helperStartTransaction ( ) ; if ( nativeOps . memcpyAsync ( point . getPointers ( ) . getDevicePointer ( ) , point . getPointers ( ) . getHostPointer ( ) , AllocationUtils . getRequiredMemory ( shape ) , CudaConstants . cudaMemcpyHostToDevice , context . getSpecialStream ( ) ) == 0 ) throw new IllegalStateException ( "MemcpyAsync relocate H2D failed: [" + point . getHostPointer ( ) . address ( ) + "] -> [" + point . getDevicePointer ( ) . address ( ) + "]" ) ; flowController . commitTransfer ( context . getSpecialStream ( ) ) ; PerformanceTracker . getInstance ( ) . helperRegisterTransaction ( point . getDeviceId ( ) , profD , point . getNumberOfBytes ( ) , MemcpyDirection . HOST_TO_DEVICE ) ; } else throw new UnsupportedOperationException ( "Can't relocate data in requested direction: [" + currentStatus + "] -> [" + targetStatus + "]" ) ; }
Copies specific chunk of memory from one storage to another
19,963
public void copyforward ( AllocationPoint point , AllocationShape shape ) { log . info ( "copyforward() called on tp[" + point . getObjectId ( ) + "], shape: " + point . getShape ( ) ) ; throw new UnsupportedOperationException ( "Deprecated call" ) ; }
Copies memory from host buffer to device . Host copy is preserved as is .
19,964
public void fallback ( AllocationPoint point , AllocationShape shape ) { throw new IllegalStateException ( "Can't fallback from [" + point . getAllocationStatus ( ) + "]" ) ; }
Copies memory from device to zero - copy memory
19,965
public void free ( AllocationPoint point , AllocationStatus target ) { if ( point . getAllocationStatus ( ) == AllocationStatus . DEVICE ) deviceMemoryTracker . subFromAllocation ( Thread . currentThread ( ) . getId ( ) , point . getDeviceId ( ) , AllocationUtils . getRequiredMemory ( point . getShape ( ) ) ) ; memoryProvider . free ( point ) ; }
This method frees memory chunk specified by pointer and location
19,966
/**
 * Asynchronously copies {@code length} bytes from an arbitrary source pointer into the
 * destination DataBuffer at byte offset {@code dstOffset}.
 *
 * Three paths:
 *  1. Constant destination: a plain synchronous host-side memcpy into the host pointer,
 *     then a host-read tick.
 *  2. Otherwise: async H2H copy into the host mirror on the special stream, committed via
 *     the flow controller.
 *  3. If the destination additionally resides on DEVICE: a follow-up async H2D copy from
 *     the freshly written host mirror to the device pointer at the same offset.
 * Every transfer is timed and registered with the PerformanceTracker.
 *
 * NOTE(review): the final {@code point.tickDeviceWrite()} runs on every non-constant path,
 * even when the data only landed in host memory — confirm that is intentional.
 *
 * @param dstBuffer destination buffer (must be a BaseCudaDataBuffer)
 * @param srcPointer source host pointer
 * @param length number of bytes to copy
 * @param dstOffset byte offset into the destination
 * @throws IllegalStateException if an async memcpy call fails
 */
public void memcpyAsync ( DataBuffer dstBuffer , Pointer srcPointer , long length , long dstOffset ) { AllocationPoint point = ( ( BaseCudaDataBuffer ) dstBuffer ) . getAllocationPoint ( ) ; Pointer dP = new CudaPointer ( ( point . getPointers ( ) . getHostPointer ( ) . address ( ) ) + dstOffset ) ; CudaContext tContext = null ; if ( dstBuffer . isConstant ( ) ) { org . bytedeco . javacpp . Pointer dstPointer = new CudaPointer ( point . getPointers ( ) . getHostPointer ( ) . address ( ) + dstOffset , 0L ) ; org . bytedeco . javacpp . Pointer srcPointerJ = new CudaPointer ( srcPointer , length ) ; val profD = PerformanceTracker . getInstance ( ) . helperStartTransaction ( ) ; org . bytedeco . javacpp . Pointer . memcpy ( dstPointer , srcPointerJ , length ) ; PerformanceTracker . getInstance ( ) . helperRegisterTransaction ( point . getDeviceId ( ) , profD , point . getNumberOfBytes ( ) , MemcpyDirection . HOST_TO_HOST ) ; point . tickHostRead ( ) ; } else { CudaContext context = flowController . prepareAction ( point ) ; tContext = context ; val prof = PerformanceTracker . getInstance ( ) . helperStartTransaction ( ) ; if ( nativeOps . memcpyAsync ( dP , srcPointer , length , CudaConstants . cudaMemcpyHostToHost , context . getSpecialStream ( ) ) == 0 ) throw new IllegalStateException ( "MemcpyAsync H2H failed: [" + srcPointer . address ( ) + "] -> [" + dP . address ( ) + "]" ) ; flowController . commitTransfer ( tContext . getSpecialStream ( ) ) ; PerformanceTracker . getInstance ( ) . helperRegisterTransaction ( point . getDeviceId ( ) , prof , point . getNumberOfBytes ( ) , MemcpyDirection . HOST_TO_HOST ) ; if ( point . getAllocationStatus ( ) == AllocationStatus . HOST ) flowController . registerAction ( context , point ) ; } if ( point . getAllocationStatus ( ) == AllocationStatus . DEVICE ) { Pointer rDP = new CudaPointer ( point . getPointers ( ) . getDevicePointer ( ) . address ( ) + dstOffset ) ; if ( tContext == null ) tContext = flowController . 
prepareAction ( point ) ; val prof = PerformanceTracker . getInstance ( ) . helperStartTransaction ( ) ; if ( nativeOps . memcpyAsync ( rDP , dP , length , CudaConstants . cudaMemcpyHostToDevice , tContext . getSpecialStream ( ) ) == 0 ) throw new IllegalStateException ( "MemcpyAsync H2D failed: [" + dP . address ( ) + "] -> [" + rDP . address ( ) + "]" ) ; flowController . commitTransfer ( tContext . getSpecialStream ( ) ) ; PerformanceTracker . getInstance ( ) . helperRegisterTransaction ( point . getDeviceId ( ) , prof , point . getNumberOfBytes ( ) , MemcpyDirection . HOST_TO_DEVICE ) ; flowController . registerAction ( tContext , point ) ; } point . tickDeviceWrite ( ) ; }
Asynchronous version of memcpy
19,967
/**
 * Special memcpy variant used for shapeInfoDataBuffer copies.
 *
 * Copies {@code length} bytes from {@code srcPointer} into the destination buffer's host
 * mirror at {@code dstOffset} (H2H on the old stream), and — if the buffer also resides
 * on DEVICE — mirrors the same region to the device pointer (H2D), synchronizing the old
 * stream after the device copy and again before returning. Both transfers are timed and
 * registered with the PerformanceTracker; a device write is ticked at the end.
 *
 * @param dstBuffer destination buffer (must be a BaseCudaDataBuffer)
 * @param srcPointer source pointer
 * @param length number of bytes to copy
 * @param dstOffset byte offset into the destination
 * @throws ND4JIllegalStateException if either async memcpy fails
 */
public void memcpySpecial ( DataBuffer dstBuffer , Pointer srcPointer , long length , long dstOffset ) { CudaContext context = getCudaContext ( ) ; AllocationPoint point = ( ( BaseCudaDataBuffer ) dstBuffer ) . getAllocationPoint ( ) ; Pointer dP = new CudaPointer ( ( point . getPointers ( ) . getHostPointer ( ) . address ( ) ) + dstOffset ) ; val profH = PerformanceTracker . getInstance ( ) . helperStartTransaction ( ) ; if ( nativeOps . memcpyAsync ( dP , srcPointer , length , CudaConstants . cudaMemcpyHostToHost , context . getOldStream ( ) ) == 0 ) throw new ND4JIllegalStateException ( "memcpyAsync failed" ) ; PerformanceTracker . getInstance ( ) . helperRegisterTransaction ( point . getDeviceId ( ) , profH , point . getNumberOfBytes ( ) , MemcpyDirection . HOST_TO_HOST ) ; if ( point . getAllocationStatus ( ) == AllocationStatus . DEVICE ) { Pointer rDP = new CudaPointer ( point . getPointers ( ) . getDevicePointer ( ) . address ( ) + dstOffset ) ; val profD = PerformanceTracker . getInstance ( ) . helperStartTransaction ( ) ; if ( nativeOps . memcpyAsync ( rDP , dP , length , CudaConstants . cudaMemcpyHostToDevice , context . getOldStream ( ) ) == 0 ) throw new ND4JIllegalStateException ( "memcpyAsync failed" ) ; context . syncOldStream ( ) ; PerformanceTracker . getInstance ( ) . helperRegisterTransaction ( point . getDeviceId ( ) , profD , point . getNumberOfBytes ( ) , MemcpyDirection . HOST_TO_DEVICE ) ; } context . syncOldStream ( ) ; point . tickDeviceWrite ( ) ; }
Special memcpy version addressing shapeInfoDataBuffer copies
19,968
/**
 * Attempts to promote the given buffer from zero-copy host memory into device memory.
 *
 * Only HOST-resident allocations under the DELAYED memory model are promoted. Constant
 * buffers are moved to constant space instead; regular buffers get a device allocation
 * from the memory provider, after which all bookkeeping (device registry, zero-copy
 * bucket, per-device memory tracker) is updated.
 *
 * @param buffer buffer to promote
 * @return false if the buffer is not HOST-resident (nothing to do); true otherwise
 * @throws RuntimeException if device memory allocation fails
 */
public boolean promoteObject(DataBuffer buffer) {
    AllocationPoint dstPoint = AtomicAllocator.getInstance().getAllocationPoint(buffer);

    // only zero-copy/host allocations can be promoted
    if (dstPoint.getAllocationStatus() != AllocationStatus.HOST)
        return false;

    if (configuration.getMemoryModel() == Configuration.MemoryModel.DELAYED
                    && dstPoint.getAllocationStatus() == AllocationStatus.HOST) {

        if (buffer.isConstant()) {
            // constant buffers live in constant space, not regular device memory
            Nd4j.getConstantHandler().moveToConstantSpace(buffer);
        } else {
            PointersPair pair = memoryProvider.malloc(dstPoint.getShape(), dstPoint, AllocationStatus.DEVICE);
            if (pair != null) {
                Integer deviceId = getDeviceId();
                dstPoint.getPointers().setDevicePointer(pair.getDevicePointer());
                dstPoint.setAllocationStatus(AllocationStatus.DEVICE);

                // move bookkeeping from the zero-copy bucket into the device registry
                deviceAllocations.get(deviceId).put(dstPoint.getObjectId(), dstPoint.getObjectId());
                zeroAllocations.get(dstPoint.getBucketId()).remove(dstPoint.getObjectId());
                deviceMemoryTracker.addToAllocation(Thread.currentThread().getId(), deviceId,
                                AllocationUtils.getRequiredMemory(dstPoint.getShape()));

                dstPoint.tickHostWrite();
            } else
                // fix: was a meaningless "PewPew" message — report the actual failure
                throw new RuntimeException("Device memory allocation failed during promoteObject()");
        }
    }

    return true;
}
This method moves specific object from zero - copy memory to device memory
19,969
/**
 * Builds a snapshot of allocated memory per tier: one HOST row (keyed on device 0)
 * holding the zero-copy usage counter, plus one DEVICE row per available device.
 *
 * @return table of (status, deviceId) -> allocated bytes
 */
public Table<AllocationStatus, Integer, Long> getAllocationStatistics() {
    Table<AllocationStatus, Integer, Long> stats = HashBasedTable.create();
    stats.put(AllocationStatus.HOST, 0, zeroUseCounter.get());
    for (Integer device : configuration.getAvailableDevices())
        stats.put(AllocationStatus.DEVICE, device, getAllocatedDeviceMemory(device));
    return stats;
}
This method returns total amount of memory allocated within system
19,970
/**
 * Returns the number of allocated objects tracked within a specific zero-copy bucket.
 *
 * @param bucketId id of the bucket to inspect
 * @return number of tracked objects, or 0 if the bucket is unknown
 */
public long getAllocatedHostObjects(Long bucketId) {
    // single get() instead of containsKey()+get(): one lookup, and no window for the
    // bucket to be removed between the two calls
    val bucket = zeroAllocations.get(bucketId);
    return bucket == null ? 0L : bucket.size();
}
This method returns number of allocated objects within specific bucket
19,971
/**
 * Returns the total number of allocated objects tracked in host memory,
 * summed across all zero-copy buckets.
 *
 * @return total object count
 */
public long getAllocatedHostObjects() {
    // iterate values() directly: one traversal, no per-key map lookup as in the
    // previous keySet()+get() form, and no AtomicLong needed for a local sum
    long count = 0L;
    for (val bucket : zeroAllocations.values())
        count += bucket.size();
    return count;
}
This method returns total number of allocated objects in host memory
19,972
/**
 * Returns the set of allocation-tracking IDs registered for a specific device.
 *
 * @param deviceId device to inspect
 * @return tracked object IDs, or an empty set if the device has no registry entry
 *         (previously this threw NPE; now mirrors getHostTrackingPoints())
 */
public Set<Long> getDeviceTrackingPoints(Integer deviceId) {
    val allocations = deviceAllocations.get(deviceId);
    if (allocations == null)
        return new HashSet<>();
    return allocations.keySet();
}
This method returns set of allocation tracking IDs for specific device
19,973
/**
 * Returns the set of allocation-tracking IDs registered for a specific zero-copy bucket.
 *
 * @param bucketId bucket to inspect
 * @return tracked object IDs, or an empty set for an unknown bucket
 */
public Set<Long> getHostTrackingPoints(Long bucketId) {
    if (zeroAllocations.containsKey(bucketId))
        return zeroAllocations.get(bucketId).keySet();
    return new HashSet<>();
}
This method returns sets of allocation tracking IDs for specific bucket
19,974
/**
 * Explicitly removes an object from device memory and demotes its allocation point to HOST.
 *
 * Waits for any in-flight operations on the point, frees the device chunk, drops it from
 * the device registry (with sanity checks before and after the forget), subtracts its
 * bytes from the per-device tracker, and finally marks the point HOST-resident.
 *
 * @param threadId id used for the device-memory tracker bookkeeping
 * @param deviceId device the object resides on
 * @param objectId tracking id of the object
 * @param point    allocation point being purged
 * @param copyback unused here; presumably a copy-to-host flag honored by callers — confirm
 * @throws IllegalStateException if the registry is in an inconsistent state
 */
public void purgeDeviceObject ( Long threadId , Integer deviceId , Long objectId , AllocationPoint point , boolean copyback ) { if ( point . getAllocationStatus ( ) != AllocationStatus . DEVICE ) return ; flowController . waitTillReleased ( point ) ; free ( point , AllocationStatus . DEVICE ) ; if ( ! deviceAllocations . get ( deviceId ) . containsKey ( objectId ) ) throw new IllegalStateException ( "Can't happen ever" ) ; forget ( point , AllocationStatus . DEVICE ) ; if ( deviceAllocations . get ( deviceId ) . containsKey ( objectId ) ) throw new IllegalStateException ( "Can't happen ever" ) ; deviceMemoryTracker . subFromAllocation ( threadId , deviceId , AllocationUtils . getRequiredMemory ( point . getShape ( ) ) ) ; point . setAllocationStatus ( AllocationStatus . HOST ) ; }
This method explicitly removes object from device memory .
19,975
/**
 * Explicitly removes an object from zero-copy (host) memory.
 *
 * Forgets the point from the host registry, waits until all in-flight operations on it
 * complete, frees the host chunk, marks the point DEALLOCATED, and subtracts its bytes
 * from the zero-copy usage counter.
 *
 * @param bucketId bucket the object belongs to (not read here; registry removal is
 *                 handled by forget())
 * @param objectId tracking id of the object (not read here)
 * @param point    allocation point being purged
 * @param copyback unused here — presumably honored by callers; confirm
 */
public void purgeZeroObject ( Long bucketId , Long objectId , AllocationPoint point , boolean copyback ) { forget ( point , AllocationStatus . HOST ) ; flowController . waitTillReleased ( point ) ; free ( point , AllocationStatus . HOST ) ; point . setAllocationStatus ( AllocationStatus . DEALLOCATED ) ; long reqMem = AllocationUtils . getRequiredMemory ( point . getShape ( ) ) * - 1 ; zeroUseCounter . addAndGet ( reqMem ) ; }
This method explicitly removes object from zero - copy memory .
19,976
/**
 * Performs per-thread CUDA initialization: binds the calling thread to a device and
 * creates a CudaContext with its handle and both streams initialized and associated.
 *
 * NOTE(review): the created context is a local that is never stored or returned here —
 * presumably the native-side association (setDevice/associateHandle) is the real effect;
 * confirm the context is not expected to be registered somewhere.
 *
 * @param threadId id of the thread being initialized (not read in this body)
 */
protected void initCudaContextForThread ( Long threadId ) { nativeOps . setDevice ( getDeviceId ( ) ) ; CudaContext context = new CudaContext ( ) ; context . initHandle ( ) ; context . initOldStream ( ) ; context . initStream ( ) ; context . associateHandle ( ) ; }
This method does initialization for thread .
19,977
/**
 * Forces memory synchronization of the given allocation point to the host side
 * by delegating to the flow controller. threadId/deviceId are accepted for
 * interface compatibility but not read here.
 *
 * @param threadId calling thread id (unused)
 * @param deviceId device id (unused)
 * @param point    allocation point to synchronize to host
 */
public void synchronizeThreadDevice ( Long threadId , Integer deviceId , AllocationPoint point ) { flowController . synchronizeToHost ( point ) ; }
This method causes memory synchronization on the host side . Viable only for device-dependent MemoryHandlers
19,978
/**
 * Reflectively invokes JobConfigurable.configure(JobConf) on the given object when both
 * the Hadoop mapred classes are present and the types match. Reflection avoids a
 * compile-time dependency on the mapred package (HADOOP-1230 backward compatibility).
 *
 * @param theObject object that may implement org.apache.hadoop.mapred.JobConfigurable
 * @param conf      configuration, possibly an org.apache.hadoop.mapred.JobConf
 * @throws RuntimeException if the reflective configure call fails;
 *         ClassNotFoundException is deliberately swallowed — absence of the mapred
 *         classes simply means there is nothing to configure
 */
private static void setJobConf ( Object theObject , Configuration conf ) { try { Class < ? > jobConfClass = conf . getClassByName ( "org.apache.hadoop.mapred.JobConf" ) ; Class < ? > jobConfigurableClass = conf . getClassByName ( "org.apache.hadoop.mapred.JobConfigurable" ) ; if ( jobConfClass . isAssignableFrom ( conf . getClass ( ) ) && jobConfigurableClass . isAssignableFrom ( theObject . getClass ( ) ) ) { Method configureMethod = jobConfigurableClass . getMethod ( "configure" , jobConfClass ) ; configureMethod . invoke ( theObject , conf ) ; } } catch ( ClassNotFoundException e ) { } catch ( Exception e ) { throw new RuntimeException ( "Error in configuring object" , e ) ; } }
This code is to support backward compatibility and break the compile time dependency of core on mapred . This should be made deprecated along with the mapred package HADOOP - 1230 . Should be removed when mapred package is removed .
19,979
/**
 * Makes a deep copy of {@code src} into {@code dst} by serializing to a thread-local
 * buffer and deserializing back, using the SerializationFactory configured for the
 * runtime class of {@code src}.
 *
 * @param conf configuration used to look up the serialization factory
 * @param src  object to copy
 * @param dst  object to deserialize into; may be replaced — always use the return value
 * @return the copied object (not necessarily the same instance as {@code dst})
 * @throws IOException if serialization or deserialization fails
 */
@ SuppressWarnings ( "unchecked" ) public static < T > T copy ( Configuration conf , T src , T dst ) throws IOException { CopyInCopyOutBuffer buffer = cloneBuffers . get ( ) ; buffer . outBuffer . reset ( ) ; SerializationFactory factory = getFactory ( conf ) ; Class < T > cls = ( Class < T > ) src . getClass ( ) ; Serializer < T > serializer = factory . getSerializer ( cls ) ; serializer . open ( buffer . outBuffer ) ; serializer . serialize ( src ) ; buffer . moveData ( ) ; Deserializer < T > deserializer = factory . getDeserializer ( cls ) ; deserializer . open ( buffer . inBuffer ) ; dst = deserializer . deserialize ( dst ) ; return dst ; }
Make a copy of the writable object using serialization to a buffer
19,980
/**
 * Co-groups two lists of key/value pairs by key. For every key present in either input,
 * the result maps the key to a pair of (values from left, values from right); a key
 * absent from one side gets an empty list on that side.
 *
 * @param left  left-hand key/value pairs
 * @param right right-hand key/value pairs
 * @return map from key to (left values, right values)
 */
public static <K, V> Map<K, Pair<List<V>, List<V>>> cogroup(List<Pair<K, V>> left, List<Pair<K, V>> right) {
    Map<K, Pair<List<V>, List<V>>> ret = new HashMap<>();

    // computeIfAbsent replaces the duplicated containsKey/new-lists/put boilerplate
    for (Map.Entry<K, List<V>> entry : groupByKey(left).entrySet()) {
        ret.computeIfAbsent(entry.getKey(),
                        k -> Pair.<List<V>, List<V>>of(new ArrayList<>(), new ArrayList<>()))
                .getFirst().addAll(entry.getValue());
    }

    for (Map.Entry<K, List<V>> entry : groupByKey(right).entrySet()) {
        ret.computeIfAbsent(entry.getKey(),
                        k -> Pair.<List<V>, List<V>>of(new ArrayList<>(), new ArrayList<>()))
                .getSecond().addAll(entry.getValue());
    }

    return ret;
}
For each key in left and right cogroup returns the list of values as a pair for each value present in left as well as right .
19,981
/**
 * Groups the input pairs by the key of each pair, preserving input order within
 * each key's value list.
 *
 * @param listInput pairs to group
 * @return map from key to the list of values seen under that key
 */
public static <K, V> Map<K, List<V>> groupByKey(List<Pair<K, V>> listInput) {
    Map<K, List<V>> ret = new HashMap<>();
    for (Pair<K, V> pair : listInput) {
        // computeIfAbsent: one lookup, no explicit null check / put
        ret.computeIfAbsent(pair.getFirst(), k -> new ArrayList<>()).add(pair.getSecond());
    }
    return ret;
}
Group the input pairs by the key of each pair .
19,982
/**
 * Returns the TensorFlow op definition registered under the given name.
 *
 * @param name tensorflow op name
 * @return the matching OpDef
 * @throws ND4JIllegalStateException if no op with that name is registered
 */
public OpDef getOpDefByTensorflowName(String name) {
    // single get() instead of containsKey()+get(): one lookup
    OpDef result = tensorflowOpDescriptors.get(name);
    if (result == null) {
        throw new ND4JIllegalStateException("No op found with name " + name);
    }
    return result;
}
Get the op definition of a given tensorflow op .
19,983
/**
 * Stores the array under the given key only if no entry exists yet.
 * When emulateIsAbsent is set, the check-then-store is made atomic by holding the
 * write lock for the whole operation.
 *
 * @param key    storage key
 * @param object array to store (compressed by store())
 * @return true if the value was stored, false if the key was already present
 */
public boolean storeIfAbsent ( T key , INDArray object ) { try { if ( emulateIsAbsent ) lock . writeLock ( ) . lock ( ) ; if ( compressedEntries . containsKey ( key ) ) { return false ; } else { store ( key , object ) ; return true ; } } finally { if ( emulateIsAbsent ) lock . writeLock ( ) . unlock ( ) ; } }
Store object into storage if it doesn't already exist
19,984
/**
 * Retrieves and decompresses the array stored under the given key.
 * When emulateIsAbsent is set, the lookup happens under the read lock.
 *
 * @param key storage key
 * @return the decompressed array, or null if the key is absent
 */
public INDArray get ( T key ) { try { if ( emulateIsAbsent ) lock . readLock ( ) . lock ( ) ; if ( containsKey ( key ) ) { INDArray result = compressedEntries . get ( key ) ; return compressor . decompress ( result ) ; } else { return null ; } } finally { if ( emulateIsAbsent ) lock . readLock ( ) . unlock ( ) ; } }
Get object from the storage by key
19,985
/**
 * Checks whether the storage contains an entry for the given key,
 * taking the read lock when emulateIsAbsent is set.
 *
 * @param key storage key
 * @return true if an entry exists
 */
public boolean containsKey ( T key ) { try { if ( emulateIsAbsent ) lock . readLock ( ) . lock ( ) ; return compressedEntries . containsKey ( key ) ; } finally { if ( emulateIsAbsent ) lock . readLock ( ) . unlock ( ) ; } }
This method checks if storage contains specified key
19,986
/**
 * Purges everything from the storage.
 *
 * Fix: the write lock is now released in a finally block, matching the locking
 * discipline of get()/containsKey()/size(). Previously an exception thrown while
 * clearing would have left the write lock held forever, deadlocking all other
 * storage operations.
 */
public void clear() {
    try {
        if (emulateIsAbsent)
            lock.writeLock().lock();
        compressedEntries.clear();
    } finally {
        if (emulateIsAbsent)
            lock.writeLock().unlock();
    }
}
This method purges everything from storage
19,987
/**
 * Removes the value stored under the specified key.
 *
 * Fix: the write lock is now released in a finally block, matching the locking
 * discipline of get()/containsKey()/size(). Previously an exception thrown by
 * remove() would have left the write lock held forever.
 *
 * @param key key to remove
 */
public void drop(T key) {
    try {
        if (emulateIsAbsent)
            lock.writeLock().lock();
        compressedEntries.remove(key);
    } finally {
        if (emulateIsAbsent)
            lock.writeLock().unlock();
    }
}
This method removes value by specified key
19,988
/**
 * Returns the number of entries currently held in the storage,
 * taking the read lock when emulateIsAbsent is set.
 *
 * @return entry count
 */
public long size() {
    if (emulateIsAbsent)
        lock.readLock().lock();
    try {
        return compressedEntries.size();
    } finally {
        if (emulateIsAbsent)
            lock.readLock().unlock();
    }
}
This method returns number of entries available in storage
19,989
/**
 * Increments the frequency count of the given word by {@code increment}, updating the
 * matching vocabulary token (if one exists) and the running total of word occurrences.
 * Synchronized so the three updates are applied atomically with respect to other callers.
 *
 * @param word      word whose count to bump; must be non-null and non-empty
 * @param increment amount to add
 * @throws IllegalArgumentException if word is null or empty
 */
public synchronized void incrementWordCount ( String word , int increment ) { if ( word == null || word . isEmpty ( ) ) throw new IllegalArgumentException ( "Word can't be empty or null" ) ; wordFrequencies . incrementCount ( word , increment ) ; if ( hasToken ( word ) ) { VocabWord token = tokenFor ( word ) ; token . increaseElementFrequency ( increment ) ; } totalWordOccurrences . set ( totalWordOccurrences . get ( ) + increment ) ; }
Increment the count for the given word by the amount increment
19,990
/**
 * Computes log(PMF) of a Binomial(n, p) distribution at x via the saddle-point expansion,
 * which stays numerically stable for large n where the direct factorial form overflows.
 *
 * The x == 0 and x == n boundary cases use either the deviance-part correction (when the
 * relevant probability is small, < 0.1) or the plain n*log(q) / n*log(p) form; the general
 * case combines Stirling errors and deviance parts with a final -0.5*log(2*pi*x*(n-x)/n)
 * normalization term.
 *
 * @param x number of successes, 0 <= x <= n
 * @param n number of trials
 * @param p success probability
 * @param q failure probability; callers pass 1 - p
 * @return log of the binomial probability mass at x
 */
public static double logBinomialProbability ( int x , int n , double p , double q ) { double ret ; if ( x == 0 ) { if ( p < 0.1 ) { ret = - getDeviancePart ( n , n * q ) - n * p ; } else { ret = n * FastMath . log ( q ) ; } } else if ( x == n ) { if ( q < 0.1 ) { ret = - getDeviancePart ( n , n * p ) - n * q ; } else { ret = n * FastMath . log ( p ) ; } } else { ret = getStirlingError ( n ) - getStirlingError ( x ) - getStirlingError ( n - x ) - getDeviancePart ( x , n * p ) - getDeviancePart ( n - x , n * q ) ; double f = ( MathUtils . TWO_PI * x * ( n - x ) ) / n ; ret = - 0.5 * FastMath . log ( f ) + ret ; } return ret ; }
Compute the logarithm of the PMF for a binomial distribution using the saddle point expansion .
19,991
/**
 * Renders a Graphviz DOT edge from one trie node to another, carrying the given
 * edge label and tail port, in the file's muted edge style.
 *
 * @param from     source node
 * @param to       target node
 * @param label    edge label text
 * @param tailport DOT tailport attribute value
 * @return a single DOT edge statement terminated by a newline
 */
private String formatPointer(PatriciaTrie.PatriciaNode<V> from, PatriciaTrie.PatriciaNode<V> to, String label, String tailport) {
    return getNodeId(from) + " -> " + getNodeId(to)
            + " [ "
            + "label=\"" + label + " \""
            + "tailport=\"" + tailport + " \""
            + "fontcolor=\"#666666\" "
            + " ]"
            + "\n";
}
Formats a link between two nodes
19,992
/**
 * Builds the HTML-like Graphviz label for a trie node: key (green), critical bit index
 * (blue), optionally the full bit string with the critical bit highlighted in red, and
 * the node's value (teal), laid out as a borderless table.
 *
 * NOTE(review): the highlighted-character index is node.getBit() + node.getBit()/4 —
 * presumably the bit string contains a separator every 4 bits that must be skipped over;
 * confirm against KeyMapper.toBitString().
 *
 * @param node            node to render
 * @param keyMapper       mapper producing the node key's bit-string representation
 * @param formatBitString whether to include the bit-string row
 * @return DOT HTML label string (wrapped in << ... >>)
 */
private String formatNodeLabel ( PatriciaTrie . PatriciaNode < V > node , KeyMapper < String > keyMapper , boolean formatBitString ) { StringBuilder builder = new StringBuilder ( ) ; builder . append ( "<<table border=\"0\" cellborder=\"0\">" ) ; builder . append ( "<tr><td>" ) ; builder . append ( "key: <font color=\"#00a000\">" ) ; builder . append ( getNodeLabel ( node ) ) ; builder . append ( "</font> </td></tr>" ) ; builder . append ( "<tr><td>" ) ; builder . append ( "bit: <font color=\"blue\">" ) ; builder . append ( node . getBit ( ) ) ; builder . append ( "</font> </td></tr>" ) ; if ( formatBitString ) { builder . append ( "<tr><td>" ) ; builder . append ( "bitString: <font color=\"blue\">" ) ; String bitString = keyMapper . toBitString ( node . getKey ( ) ) ; int c = node . getBit ( ) + node . getBit ( ) / 4 ; builder . append ( bitString . substring ( 0 , c ) ) ; builder . append ( "<font color=\"red\">" ) ; builder . append ( bitString . charAt ( c ) ) ; builder . append ( "</font>" ) ; builder . append ( bitString . substring ( c + 1 ) ) ; builder . append ( "</font> </td></tr>" ) ; } builder . append ( "<tr><td>" ) ; builder . append ( "value: <font color=\"#00a0a0\">" ) ; builder . append ( node . getValue ( ) ) ; builder . append ( "</font> </td></tr>" ) ; builder . append ( "</table>>" ) ; return builder . toString ( ) ; }
Format node label
19,993
/**
 * Returns the identifier used to reference a node in the DOT output:
 * the node's key, or the literal "null" for a null node.
 */
private String getNodeId(PatriciaTrie.PatriciaNode<V> node) {
    return node == null ? "null" : node.getKey();
}
Get node id used to distinguish nodes internally
19,994
/**
 * Validates the Spark context's Kryo setup for ND4J use. If KryoSerializer is enabled but
 * spark.kryo.registrator is not org.nd4j.Nd4jRegistrator, performs a round-trip
 * serialize/deserialize of a small INDArray as a smoke test and throws (with
 * KRYO_EXCEPTION_MSG) if serialization is broken.
 *
 * NOTE(review): every reachable path returns true — the method either succeeds or throws;
 * the doc line's "logging a warning" and the {@code log} parameter are not exercised in
 * this body.
 *
 * @param javaSparkContext context whose configuration is inspected
 * @param log logger (unused in this body)
 * @return true if the configuration is usable
 * @throws RuntimeException if Kryo is misconfigured for INDArray serialization
 */
public static boolean checkKryoConfiguration ( JavaSparkContext javaSparkContext , Logger log ) { String serializer = javaSparkContext . getConf ( ) . get ( "spark.serializer" , null ) ; if ( serializer != null && serializer . equals ( "org.apache.spark.serializer.KryoSerializer" ) ) { String kryoRegistrator = javaSparkContext . getConf ( ) . get ( "spark.kryo.registrator" , null ) ; if ( kryoRegistrator == null || ! kryoRegistrator . equals ( "org.nd4j.Nd4jRegistrator" ) ) { SerializerInstance si ; ByteBuffer bb ; try { si = javaSparkContext . env ( ) . serializer ( ) . newInstance ( ) ; bb = si . serialize ( Nd4j . linspace ( 1 , 5 , 5 ) , null ) ; } catch ( Exception e ) { throw new RuntimeException ( KRYO_EXCEPTION_MSG , e ) ; } if ( bb == null ) { throw new RuntimeException ( KRYO_EXCEPTION_MSG + "\n(Got: null ByteBuffer from Spark SerializerInstance)" ) ; } else { boolean equals ; INDArray deserialized ; try { deserialized = ( INDArray ) si . deserialize ( bb , null ) ; equals = Nd4j . linspace ( 1 , 5 , 5 ) . equals ( deserialized ) ; } catch ( Exception e ) { throw new RuntimeException ( KRYO_EXCEPTION_MSG , e ) ; } if ( ! equals ) { throw new RuntimeException ( KRYO_EXCEPTION_MSG + "\n(Error during deserialization: test array" + " was not deserialized successfully)" ) ; } return true ; } } } return true ; }
Check the spark configuration for incorrect Kryo configuration logging a warning message if necessary
19,995
/**
 * Randomly shuffles the individual examples across an RDD of DataSets and recombines
 * them into new DataSet objects of the requested batch size: splits every DataSet into
 * single-example DataSets keyed by a random partition index, repartitions with a
 * HashPartitioner, then re-batches each partition.
 *
 * @param rdd           input DataSets (possibly multi-example)
 * @param newBatchSize  number of examples per output DataSet
 * @param numPartitions number of partitions used for the shuffle
 * @return RDD of re-batched, shuffled DataSets
 */
public static JavaRDD < DataSet > shuffleExamples ( JavaRDD < DataSet > rdd , int newBatchSize , int numPartitions ) { JavaPairRDD < Integer , DataSet > singleExampleDataSets = rdd . flatMapToPair ( new SplitDataSetExamplesPairFlatMapFunction ( numPartitions ) ) ; singleExampleDataSets = singleExampleDataSets . partitionBy ( new HashPartitioner ( numPartitions ) ) ; return singleExampleDataSets . values ( ) . mapPartitions ( new BatchDataSetsFunction ( newBatchSize ) ) ; }
Randomly shuffle the examples in each DataSet object and recombine them into new DataSet objects with the specified BatchSize
19,996
/**
 * Dispatches a queued op descriptor: optionally flushes the grid queue first, then routes
 * the op to the matching superclass execution path based on its concrete type (transform,
 * variance, reduce, scalar, broadcast, meta, grid).
 *
 * NOTE(review): the IndexAccumulation branch flushes the queue but never invokes the op —
 * unlike every other branch. Presumably a missing super.exec/invoke call; confirm against
 * the upstream executioner before relying on index-accumulation ops through this path.
 *
 * @param descriptor op plus its reduction dimensions
 * @param flush      whether to flush the pending queue before executing this op
 */
protected void pushToGrid ( OpDescriptor descriptor , boolean flush ) { execCounter . incrementAndGet ( ) ; Op op = descriptor . getOp ( ) ; int [ ] dimensions = descriptor . getDimensions ( ) ; if ( op instanceof TransformOp ) { TransformOp t = ( TransformOp ) op ; if ( flush ) flushQueue ( ) ; super . invoke ( t ) ; } else if ( op instanceof Variance ) { Variance acc = ( Variance ) op ; if ( flush ) flushQueue ( ) ; super . naiveExec ( acc , dimensions ) ; } else if ( op instanceof ReduceOp ) { ReduceOp acc = ( ReduceOp ) op ; if ( flush ) flushQueue ( ) ; super . naiveExec ( acc , dimensions ) ; } else if ( op instanceof ScalarOp ) { ScalarOp sc = ( ScalarOp ) op ; if ( flush ) flushQueue ( ) ; super . invoke ( sc ) ; } else if ( op instanceof BroadcastOp ) { BroadcastOp broadcastOp = ( BroadcastOp ) op ; if ( flush ) flushQueue ( ) ; if ( dimensions != null ) { super . exec ( broadcastOp ) ; } else { super . invoke ( broadcastOp ) ; } } else if ( op instanceof IndexAccumulation ) { IndexAccumulation indexAccumulation = ( IndexAccumulation ) op ; if ( flush ) flushQueue ( ) ; } else if ( op instanceof MetaOp ) { metaCounter . incrementAndGet ( ) ; exec ( ( MetaOp ) op ) ; } else if ( op instanceof GridOp ) { exec ( ( GridOp ) op ) ; } }
This method adds op into GridOp queue
19,997
/**
 * Checks whether opA and opB effectively share one operand array:
 * opA.x == opB.x, opA.z == opB.z, and opA.x == opB.z (identity comparison).
 */
protected boolean isMatchingZX(Op opA, Op opB) {
    return opA.x() == opB.x() && opA.z() == opB.z() && opA.x() == opB.z();
}
This method checks if opA and opB are sharing the same operands
19,998
/**
 * Additional pairing check: qualifies the possibility of an InvertedPredicate MetaOp
 * by testing whether opA's output feeds either input of opB (identity comparison).
 */
protected boolean isMatchingZXY(Op opA, Op opB) {
    return opA.z() == opB.x() || opA.z() == opB.y();
}
This method is an additional check ; basically it qualifies the possibility of an InvertedPredicate MetaOp
19,999
/**
 * Converts an op into the set of device pointers required to execute it on the grid:
 * x/z (and optionally y) data and shape-info pointers, plus — for dimensional ops —
 * the dimensions constant buffer and the TAD (tensor-along-dimension) shape/offset
 * pointers.
 *
 * @param op         op to pointerize
 * @param dimensions reduction dimensions, if any
 * @return populated GridPointers for the op
 */
protected GridPointers pointerizeOp(Op op, int... dimensions) {
    GridPointers pointers = new GridPointers(op, dimensions);

    AtomicAllocator allocator = AtomicAllocator.getInstance();
    CudaContext context = (CudaContext) allocator.getDeviceContext().getContext();

    pointers.setX(allocator.getPointer(op.x(), context));
    pointers.setXShapeInfo(allocator.getPointer(op.x().shapeInfoDataBuffer(), context));
    pointers.setZ(allocator.getPointer(op.z(), context));
    pointers.setZShapeInfo(allocator.getPointer(op.z().shapeInfoDataBuffer(), context));
    pointers.setZLength(op.z().length());

    if (op.y() != null) {
        pointers.setY(allocator.getPointer(op.y(), context));
        pointers.setYShapeInfo(allocator.getPointer(op.y().shapeInfoDataBuffer(), context));
    }

    if (dimensions != null && dimensions.length > 0) {
        // merged: the original repeated this exact guard in two adjacent blocks
        DataBuffer dimensionBuffer = Nd4j.getConstantHandler().getConstantBuffer(dimensions, DataType.INT);
        pointers.setDimensions(allocator.getPointer(dimensionBuffer, context));
        pointers.setDimensionsLength(dimensions.length);

        // TAD shape info and offsets for dimensional (reduction-style) ops
        Pair<DataBuffer, DataBuffer> tadBuffers = tadManager.getTADOnlyShapeInfo(op.x(), dimensions);
        Pointer devTadShapeInfo = AtomicAllocator.getInstance().getPointer(tadBuffers.getFirst(), context);
        Pointer devTadOffsets = tadBuffers.getSecond() == null ? null
                        : AtomicAllocator.getInstance().getPointer(tadBuffers.getSecond(), context);
        pointers.setTadShape(devTadShapeInfo);
        pointers.setTadOffsets(devTadOffsets);
    }

    return pointers;
}
This method returns Op as set of required pointers for it