idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
20,000 | public void aggregate ( Aggregate op , long key ) { int deviceId = Nd4j . getAffinityManager ( ) . getDeviceForCurrentThread ( ) ; if ( opCounter . get ( ) == null ) opCounter . set ( new AtomicLong ( 0 ) ) ; aggregates . get ( deviceId ) . add ( new AggregateDescriptor ( op , key , opCounter . get ( ) . getAndIncrement ( ) ) ) ; } | This method enqueues aggregate op for future invocation . Key value will be used to batch individual ops |
20,001 | public static String getModuleNameFor ( Op op ) { String moduleName = null ; if ( op instanceof ReduceOp ) { moduleName = "reduce" ; if ( op . opName ( ) . equals ( "cosinesimilarity" ) ) { moduleName = "reduce3" ; } else if ( op . opName ( ) . equals ( "euclidean" ) ) { moduleName = "reduce3" ; } else if ( op . opName ( ) . equals ( "manhattan" ) ) { moduleName = "reduce3" ; } } else if ( op instanceof TransformOp ) { if ( op . opName ( ) . equals ( "add" ) ) { moduleName = "pairWiseTransform" ; } else if ( op . opName ( ) . equals ( "copy" ) ) { moduleName = "pairWiseTransform" ; } else if ( op . opName ( ) . equals ( "div" ) ) { moduleName = "pairWiseTransform" ; } else if ( op . opName ( ) . equals ( "mul" ) ) { moduleName = "pairWiseTransform" ; } else if ( op . opName ( ) . equals ( "rdiv" ) ) { moduleName = "pairWiseTransform" ; } else if ( op . opName ( ) . equals ( "rsub" ) ) { moduleName = "pairWiseTransform" ; } else if ( op . opName ( ) . equals ( "sub" ) ) { moduleName = "pairWiseTransform" ; } else { moduleName = "transform" ; } } else if ( op instanceof ScalarOp ) { moduleName = "scalar" ; } else if ( op instanceof BroadcastOp ) { moduleName = "broadcast" ; } else if ( op instanceof IndexAccumulation ) { moduleName = "indexReduce" ; } return moduleName ; } | For invoking a cuda kernel this returns the module opName for the given op |
20,002 | public static int convertMPtoCores ( int ccMajor , int ccMinor , int numberOfProcessors ) { if ( ccMajor == 1 ) return 8 ; if ( ccMajor == 2 && ccMinor == 1 ) return 48 ; if ( ccMajor == 2 ) return 32 ; if ( ccMajor == 3 ) return 192 ; if ( ccMajor == 5 ) return 128 ; return - 1 ; } | Returns number of SMs based on device compute capability and number of processors . |
20,003 | public Gradient [ ] calcGradient ( IActorCritic iac , Stack < MiniTrans < Integer > > rewards ) { MiniTrans < Integer > minTrans = rewards . pop ( ) ; int size = rewards . size ( ) ; boolean recurrent = getAsyncGlobal ( ) . getCurrent ( ) . isRecurrent ( ) ; int [ ] shape = getHistoryProcessor ( ) == null ? mdp . getObservationSpace ( ) . getShape ( ) : getHistoryProcessor ( ) . getConf ( ) . getShape ( ) ; int [ ] nshape = recurrent ? Learning . makeShape ( 1 , shape , size ) : Learning . makeShape ( size , shape ) ; INDArray input = Nd4j . create ( nshape ) ; INDArray targets = recurrent ? Nd4j . create ( 1 , 1 , size ) : Nd4j . create ( size , 1 ) ; INDArray logSoftmax = recurrent ? Nd4j . zeros ( 1 , mdp . getActionSpace ( ) . getSize ( ) , size ) : Nd4j . zeros ( size , mdp . getActionSpace ( ) . getSize ( ) ) ; double r = minTrans . getReward ( ) ; for ( int i = size - 1 ; i >= 0 ; i -- ) { minTrans = rewards . pop ( ) ; r = minTrans . getReward ( ) + conf . getGamma ( ) * r ; if ( recurrent ) { input . get ( NDArrayIndex . point ( 0 ) , NDArrayIndex . all ( ) , NDArrayIndex . point ( i ) ) . assign ( minTrans . getObs ( ) ) ; } else { input . putRow ( i , minTrans . getObs ( ) ) ; } targets . putScalar ( i , r ) ; double expectedV = minTrans . getOutput ( ) [ 0 ] . getDouble ( 0 ) ; double advantage = r - expectedV ; if ( recurrent ) { logSoftmax . putScalar ( 0 , minTrans . getAction ( ) , i , advantage ) ; } else { logSoftmax . putScalar ( i , minTrans . getAction ( ) , advantage ) ; } } return iac . gradient ( input , new INDArray [ ] { targets , logSoftmax } ) ; } | calc the gradients based on the n - step rewards |
20,004 | public int numChunksSoFar ( String id ) { if ( ! chunks . containsKey ( id ) ) return 0 ; return chunks . get ( id ) . size ( ) ; } | Returns the number of chunks accumulated for a given id so far |
20,005 | public boolean allPresent ( String id ) { if ( ! chunks . containsKey ( id ) ) return false ; List < NDArrayMessageChunk > chunkList = chunks . get ( id ) ; return chunkList . size ( ) == chunkList . get ( 0 ) . getNumChunks ( ) ; } | Returns true if all chunks are present |
20,006 | public NDArrayMessage reassemble ( String id ) { List < NDArrayMessageChunk > chunkList = chunks . get ( id ) ; if ( chunkList . size ( ) != chunkList . get ( 0 ) . getNumChunks ( ) ) throw new IllegalStateException ( "Unable to reassemble message chunk " + id + " missing " + ( chunkList . get ( 0 ) . getNumChunks ( ) - chunkList . size ( ) ) + "chunks" ) ; NDArrayMessageChunk [ ] inOrder = new NDArrayMessageChunk [ chunkList . size ( ) ] ; for ( NDArrayMessageChunk chunk : chunkList ) { inOrder [ chunk . getChunkIndex ( ) ] = chunk ; } NDArrayMessage message = NDArrayMessage . fromChunks ( inOrder ) ; chunkList . clear ( ) ; chunks . remove ( id ) ; return message ; } | Reassemble an ndarray message from a set of chunks |
20,007 | public FeedForwardLayer getLossLayer ( InputType type ) throws UnsupportedKerasConfigurationException { if ( type instanceof InputType . InputTypeFeedForward ) { this . layer = new LossLayer . Builder ( loss ) . name ( this . layerName ) . build ( ) ; } else if ( type instanceof InputType . InputTypeRecurrent ) { this . layer = new RnnLossLayer . Builder ( loss ) . name ( this . layerName ) . build ( ) ; } else if ( type instanceof InputType . InputTypeConvolutional ) { this . layer = new CnnLossLayer . Builder ( loss ) . name ( this . layerName ) . build ( ) ; } else { throw new UnsupportedKerasConfigurationException ( "Unsupported output layer type" + "got : " + type . toString ( ) ) ; } return ( FeedForwardLayer ) this . layer ; } | Get DL4J LossLayer . |
20,008 | public void uploadForDeployment ( String from , String to ) throws Exception { File fromFile = new File ( from ) ; if ( ! to . isEmpty ( ) && fromFile . isDirectory ( ) ) mkDir ( to ) ; else upload ( from , to ) ; } | Creates the directory for the file if necessary and uploads the file |
20,009 | private void mkDir ( String dir ) throws Exception { Session session = getSession ( ) ; session . connect ( ) ; Channel channel = session . openChannel ( "sftp" ) ; channel . connect ( ) ; ChannelSftp c = ( ChannelSftp ) channel ; if ( ! fileExists ( dir , c ) ) c . mkdir ( dir ) ; c . exit ( ) ; session . disconnect ( ) ; } | creates the directory to upload to |
20,010 | private void upload ( String fileOrDir , String uploadRootDir ) throws Exception { if ( uploadRootDir . isEmpty ( ) ) uploadRootDir = "." ; File origin = new File ( fileOrDir ) ; if ( fileOrDir . endsWith ( ".tar" ) || fileOrDir . endsWith ( ".tar.gz" ) ) { upload ( new File ( fileOrDir ) , uploadRootDir ) ; untar ( uploadRootDir ) ; } else if ( origin . isFile ( ) ) { upload ( new File ( fileOrDir ) , uploadRootDir ) ; } else { File [ ] childFiles = origin . listFiles ( ) ; if ( childFiles != null ) upload ( Arrays . asList ( childFiles ) , uploadRootDir ) ; } } | uploads the file or listed files in a directory |
20,011 | protected void init ( ) { if ( storage . size ( ) != vocabCache . numWords ( ) ) throw new RuntimeException ( "Number of words in Vocab isn't matching number of stored Vectors. vocab: [" + vocabCache . numWords ( ) + "]; storage: [" + storage . size ( ) + "]" ) ; for ( int i = 0 ; i < Nd4j . getAffinityManager ( ) . getNumberOfDevices ( ) ; i ++ ) { cacheWrtDevice . add ( new ConcurrentHashMap < Integer , INDArray > ( ) ) ; } } | Init method validates configuration defined using |
20,012 | public double similarity ( String label1 , String label2 ) { if ( label1 == null || label2 == null ) { log . debug ( "LABELS: " + label1 + ": " + ( label1 == null ? "null" : "exists" ) + ";" + label2 + " vec2:" + ( label2 == null ? "null" : "exists" ) ) ; return Double . NaN ; } INDArray vec1 = getWordVectorMatrix ( label1 ) . dup ( ) ; INDArray vec2 = getWordVectorMatrix ( label2 ) . dup ( ) ; if ( vec1 == null || vec2 == null ) { log . debug ( label1 + ": " + ( vec1 == null ? "null" : "exists" ) + ";" + label2 + " vec2:" + ( vec2 == null ? "null" : "exists" ) ) ; return Double . NaN ; } if ( label1 . equals ( label2 ) ) return 1.0 ; vec1 = Transforms . unitVec ( vec1 ) ; vec2 = Transforms . unitVec ( vec2 ) ; return Transforms . cosineSim ( vec1 , vec2 ) ; } | Returns the similarity of 2 words |
20,013 | public void processMessage ( ) { CbowRequestMessage cbrm = new CbowRequestMessage ( rowsA , rowsB , w1 , codes , negSamples , alpha , 119 ) ; if ( negSamples > 0 ) { int negatives [ ] = Arrays . copyOfRange ( rowsB , codes . length , rowsB . length ) ; cbrm . setNegatives ( negatives ) ; } cbrm . setFrameId ( - 119L ) ; cbrm . setTaskId ( this . taskId ) ; cbrm . setOriginatorId ( this . getOriginatorId ( ) ) ; CbowTrainer cbt = ( CbowTrainer ) trainer ; cbt . pickTraining ( cbrm ) ; INDArray words = Nd4j . pullRows ( storage . getArray ( WordVectorStorage . SYN_0 ) , 1 , rowsA , 'c' ) ; INDArray mean = words . mean ( 0 ) ; int resultLength = codes . length + ( negSamples > 0 ? ( negSamples + 1 ) : 0 ) ; INDArray result = Nd4j . createUninitialized ( resultLength , 1 ) ; int e = 0 ; for ( ; e < codes . length ; e ++ ) { double dot = Nd4j . getBlasWrapper ( ) . dot ( mean , storage . getArray ( WordVectorStorage . SYN_1 ) . getRow ( rowsB [ e ] ) ) ; result . putScalar ( e , dot ) ; } for ( ; e < resultLength ; e ++ ) { double dot = Nd4j . getBlasWrapper ( ) . dot ( mean , storage . getArray ( WordVectorStorage . SYN_1_NEGATIVE ) . getRow ( rowsB [ e ] ) ) ; result . putScalar ( e , dot ) ; } if ( voidConfiguration . getExecutionMode ( ) == ExecutionMode . AVERAGING ) { DotAggregation dot = new DotAggregation ( taskId , ( short ) 1 , shardIndex , result ) ; dot . setTargetId ( ( short ) - 1 ) ; dot . setOriginatorId ( getOriginatorId ( ) ) ; transport . putMessage ( dot ) ; } else if ( voidConfiguration . getExecutionMode ( ) == ExecutionMode . SHARDED ) { DotAggregation dot = new DotAggregation ( taskId , ( short ) voidConfiguration . getNumberOfShards ( ) , shardIndex , result ) ; dot . setTargetId ( ( short ) - 1 ) ; dot . setOriginatorId ( getOriginatorId ( ) ) ; transport . sendMessage ( dot ) ; } } | This method calculates dot of gives rows with averaging applied to rowsA as required by CBoW |
20,014 | public INDArray hash ( INDArray data ) { if ( data . shape ( ) [ 1 ] != inDimension ) { throw new ND4JIllegalStateException ( String . format ( "Invalid shape: Requested INDArray shape %s, this table expects dimension %d" , Arrays . toString ( data . shape ( ) ) , inDimension ) ) ; } INDArray projected = data . mmul ( randomProjection ) ; INDArray res = Nd4j . getExecutioner ( ) . exec ( new Sign ( projected ) ) ; return res ; } | Returns hash values for a particular query |
20,015 | INDArray rawBucketOf ( INDArray query ) { INDArray pattern = hash ( query ) ; INDArray res = Nd4j . zeros ( DataType . BOOL , index . shape ( ) ) ; Nd4j . getExecutioner ( ) . exec ( new BroadcastEqualTo ( index , pattern , res , - 1 ) ) ; return res . castTo ( Nd4j . defaultFloatingPointType ( ) ) . min ( - 1 ) ; } | data elements in the same bucket as the query without entropy |
20,016 | INDArray bucketData ( INDArray query ) { INDArray mask = bucket ( query ) ; int nRes = mask . sum ( 0 ) . getInt ( 0 ) ; INDArray res = Nd4j . create ( new int [ ] { nRes , inDimension } ) ; int j = 0 ; for ( int i = 0 ; i < nRes ; i ++ ) { while ( mask . getInt ( j ) == 0 && j < mask . length ( ) - 1 ) { j += 1 ; } if ( mask . getInt ( j ) == 1 ) res . putRow ( i , indexData . getRow ( j ) ) ; j += 1 ; } return res ; } | data elements in the same entropy bucket as the query |
20,017 | public static INDArray deserializeRawJson ( String serializedRawArray ) { String cleanedRawArray = serializedRawArray ; JsonArray jsonArray = JSON_PARSER . parse ( cleanedRawArray ) . getAsJsonArray ( ) ; List < Integer > dimensions = new ArrayList < > ( ) ; dimensions . add ( jsonArray . size ( ) ) ; getSizeMultiDimensionalArray ( jsonArray , dimensions ) ; return buildArray ( dimensions , cleanedRawArray ) ; } | Deserialize an ndarray form json |
20,018 | public void memcpy ( DataBuffer dstBuffer , DataBuffer srcBuffer ) { CudaContext context = ( CudaContext ) AtomicAllocator . getInstance ( ) . getDeviceContext ( ) . getContext ( ) ; if ( dstBuffer instanceof CompressedDataBuffer && ! ( srcBuffer instanceof CompressedDataBuffer ) ) { AllocationPoint srcPoint = AtomicAllocator . getInstance ( ) . getAllocationPoint ( srcBuffer ) ; long size = srcBuffer . getElementSize ( ) * srcBuffer . length ( ) ; if ( ! srcPoint . isActualOnHostSide ( ) ) { AtomicAllocator . getInstance ( ) . synchronizeHostData ( srcBuffer ) ; } Pointer src = AtomicAllocator . getInstance ( ) . getHostPointer ( srcBuffer ) ; Pointer . memcpy ( dstBuffer . addressPointer ( ) , src , size ) ; } else if ( ! ( dstBuffer instanceof CompressedDataBuffer ) && srcBuffer instanceof CompressedDataBuffer ) { AllocationPoint dstPoint = AtomicAllocator . getInstance ( ) . getAllocationPoint ( dstBuffer ) ; long size = srcBuffer . getElementSize ( ) * srcBuffer . length ( ) ; Pointer . memcpy ( dstBuffer . addressPointer ( ) , srcBuffer . addressPointer ( ) , size ) ; dstPoint . tickHostWrite ( ) ; } else if ( dstBuffer instanceof CompressedDataBuffer && srcBuffer instanceof CompressedDataBuffer ) { Pointer . memcpy ( dstBuffer . addressPointer ( ) , srcBuffer . addressPointer ( ) , srcBuffer . length ( ) * srcBuffer . getElementSize ( ) ) ; } else { AtomicAllocator . getInstance ( ) . memcpy ( dstBuffer , srcBuffer ) ; } } | This method provides basic memcpy functionality with respect to target environment |
20,019 | public void release ( Pointer pointer , MemoryKind kind ) { if ( kind == MemoryKind . DEVICE ) { NativeOpsHolder . getInstance ( ) . getDeviceNativeOps ( ) . freeDevice ( pointer , 0 ) ; pointer . setNull ( ) ; } else if ( kind == MemoryKind . HOST ) { NativeOpsHolder . getInstance ( ) . getDeviceNativeOps ( ) . freeHost ( pointer ) ; pointer . setNull ( ) ; } } | This method releases previously allocated memory chunk |
20,020 | public T get ( int deviceId ) { try { locksMap . get ( deviceId ) . readLock ( ) . lock ( ) ; return backingMap . get ( deviceId ) ; } finally { locksMap . get ( deviceId ) . readLock ( ) . unlock ( ) ; } } | This method returns object local to target device |
20,021 | public void set ( int deviceId , T object ) { try { locksMap . get ( deviceId ) . writeLock ( ) . lock ( ) ; backingMap . put ( deviceId , object ) ; } finally { locksMap . get ( deviceId ) . writeLock ( ) . unlock ( ) ; } } | This method sets object for specific device |
20,022 | public void clear ( ) { int deviceId = Nd4j . getAffinityManager ( ) . getDeviceForCurrentThread ( ) ; try { locksMap . get ( deviceId ) . writeLock ( ) . lock ( ) ; backingMap . remove ( deviceId ) ; } finally { locksMap . get ( deviceId ) . writeLock ( ) . unlock ( ) ; } } | This method removes object stored for current device |
20,023 | protected void getDeviceBuffers ( CudaContext context , int deviceId ) { NativeOps nativeOps = NativeOpsHolder . getInstance ( ) . getDeviceNativeOps ( ) ; int sizeOf = 8 ; val reductionPointer = nativeOps . mallocDevice ( 16384 * sizeOf , deviceId , 0 ) ; if ( reductionPointer == null ) throw new IllegalStateException ( "Can't allocate [DEVICE] reduction buffer memory!" ) ; nativeOps . memsetAsync ( reductionPointer , 0 , 16384 * sizeOf , 0 , context . getOldStream ( ) ) ; context . syncOldStream ( ) ; val allocationPointer = nativeOps . mallocDevice ( 16384 * sizeOf , deviceId , 0 ) ; if ( allocationPointer == null ) throw new IllegalStateException ( "Can't allocate [DEVICE] allocation buffer memory!" ) ; val scalarPointer = nativeOps . mallocHost ( sizeOf , 0 ) ; if ( scalarPointer == null ) throw new IllegalStateException ( "Can't allocate [HOST] scalar buffer memory!" ) ; context . setBufferScalar ( scalarPointer ) ; context . setBufferAllocation ( allocationPointer ) ; context . setBufferReduction ( reductionPointer ) ; val specialPointer = nativeOps . mallocDevice ( 16384 * sizeOf , deviceId , 0 ) ; if ( specialPointer == null ) throw new IllegalStateException ( "Can't allocate [DEVICE] special buffer memory!" ) ; nativeOps . memsetAsync ( specialPointer , 0 , 16384 * sizeOf , 0 , context . getOldStream ( ) ) ; context . setBufferSpecial ( specialPointer ) ; } | This method is used to allocate |
20,024 | public void updateProperties ( InputStream inputStream ) { try { conf . load ( inputStream ) ; conf . putAll ( System . getProperties ( ) ) ; } catch ( IOException e ) { log . warn ( "Error loading system properties from input stream" , e ) ; } } | Load the additional properties from an input stream and load all system properties |
20,025 | public static synchronized Map < String , OpDef > opDescs ( ) { if ( DESCRIPTORS != null ) { return DESCRIPTORS ; } try ( InputStream contents = new ClassPathResource ( "ops.proto" ) . getInputStream ( ) ; BufferedInputStream bis2 = new BufferedInputStream ( contents ) ; BufferedReader reader = new BufferedReader ( new InputStreamReader ( bis2 ) ) ) { org . tensorflow . framework . OpList . Builder builder = org . tensorflow . framework . OpList . newBuilder ( ) ; StringBuilder str = new StringBuilder ( ) ; String line = null ; while ( ( line = reader . readLine ( ) ) != null ) { str . append ( line ) ; } TextFormat . getParser ( ) . merge ( str . toString ( ) , builder ) ; List < OpDef > list = builder . getOpList ( ) ; Map < String , OpDef > map = new HashMap < > ( ) ; for ( OpDef opDef : list ) { map . put ( opDef . getName ( ) , opDef ) ; } DESCRIPTORS = map ; return DESCRIPTORS ; } catch ( Exception e ) { throw new ND4JIllegalStateException ( "Unable to load tensorflow descriptors" , e ) ; } } | Get the op descriptors for tensorflow |
20,026 | public void processMessage ( ) { VectorAggregation aggregation = new VectorAggregation ( rowIndex , ( short ) voidConfiguration . getNumberOfShards ( ) , getShardIndex ( ) , storage . getArray ( key ) . getRow ( rowIndex ) . dup ( ) ) ; aggregation . setOriginatorId ( this . getOriginatorId ( ) ) ; clipboard . pin ( aggregation ) ; DistributedVectorMessage dvm = new DistributedVectorMessage ( key , rowIndex ) ; dvm . setOriginatorId ( this . originatorId ) ; if ( voidConfiguration . getNumberOfShards ( ) > 1 ) transport . sendMessageToAllShards ( dvm ) ; else { aggregation . extractContext ( this ) ; aggregation . processMessage ( ) ; } } | This message is possible to get only as Shard |
20,027 | private BufferedImage restoreRGBImage ( INDArray tensor3D ) { INDArray arrayR = null ; INDArray arrayG = null ; INDArray arrayB = null ; if ( tensor3D . shape ( ) [ 0 ] == 3 ) { arrayR = tensor3D . tensorAlongDimension ( 2 , 2 , 1 ) ; arrayG = tensor3D . tensorAlongDimension ( 1 , 2 , 1 ) ; arrayB = tensor3D . tensorAlongDimension ( 0 , 2 , 1 ) ; } else { arrayB = tensor3D . tensorAlongDimension ( 0 , 2 , 1 ) ; arrayG = arrayB ; arrayR = arrayB ; } BufferedImage imageToRender = new BufferedImage ( arrayR . columns ( ) , arrayR . rows ( ) , BufferedImage . TYPE_INT_RGB ) ; for ( int x = 0 ; x < arrayR . columns ( ) ; x ++ ) { for ( int y = 0 ; y < arrayR . rows ( ) ; y ++ ) { Color pix = new Color ( ( int ) ( 255 * arrayR . getRow ( y ) . getDouble ( x ) ) , ( int ) ( 255 * arrayG . getRow ( y ) . getDouble ( x ) ) , ( int ) ( 255 * arrayB . getRow ( y ) . getDouble ( x ) ) ) ; int rgb = pix . getRGB ( ) ; imageToRender . setRGB ( x , y , rgb ) ; } } return imageToRender ; } | Returns RGB image out of 3D tensor |
20,028 | private BufferedImage renderImageGrayscale ( INDArray array ) { BufferedImage imageToRender = new BufferedImage ( array . columns ( ) , array . rows ( ) , BufferedImage . TYPE_BYTE_GRAY ) ; for ( int x = 0 ; x < array . columns ( ) ; x ++ ) { for ( int y = 0 ; y < array . rows ( ) ; y ++ ) { imageToRender . getRaster ( ) . setSample ( x , y , 0 , ( int ) ( 255 * array . getRow ( y ) . getDouble ( x ) ) ) ; } } return imageToRender ; } | Renders 2D INDArray into BufferedImage |
20,029 | public void append ( byte [ ] utf8 , int start , int len ) { setCapacity ( length + len , true ) ; System . arraycopy ( utf8 , start , bytes , length , len ) ; length += len ; } | Append a range of bytes to the end of the given text |
20,030 | public void write ( DataOutput out ) throws IOException { WritableUtils . writeVInt ( out , length ) ; out . write ( bytes , 0 , length ) ; } | serialize write this object to out length uses zero - compressed encoding |
20,031 | public static void validateUTF8 ( byte [ ] utf8 , int start , int len ) throws MalformedInputException { int count = start ; int leadByte = 0 ; int length = 0 ; int state = LEAD_BYTE ; while ( count < start + len ) { int aByte = ( ( int ) utf8 [ count ] & 0xFF ) ; switch ( state ) { case LEAD_BYTE : leadByte = aByte ; length = bytesFromUTF8 [ aByte ] ; switch ( length ) { case 0 : if ( leadByte > 0x7F ) throw new MalformedInputException ( count ) ; break ; case 1 : if ( leadByte < 0xC2 || leadByte > 0xDF ) throw new MalformedInputException ( count ) ; state = TRAIL_BYTE_1 ; break ; case 2 : if ( leadByte < 0xE0 || leadByte > 0xEF ) throw new MalformedInputException ( count ) ; state = TRAIL_BYTE_1 ; break ; case 3 : if ( leadByte < 0xF0 || leadByte > 0xF4 ) throw new MalformedInputException ( count ) ; state = TRAIL_BYTE_1 ; break ; default : throw new MalformedInputException ( count ) ; } break ; case TRAIL_BYTE_1 : if ( leadByte == 0xF0 && aByte < 0x90 ) throw new MalformedInputException ( count ) ; if ( leadByte == 0xF4 && aByte > 0x8F ) throw new MalformedInputException ( count ) ; if ( leadByte == 0xE0 && aByte < 0xA0 ) throw new MalformedInputException ( count ) ; if ( leadByte == 0xED && aByte > 0x9F ) throw new MalformedInputException ( count ) ; case TRAIL_BYTE : if ( aByte < 0x80 || aByte > 0xBF ) throw new MalformedInputException ( count ) ; if ( -- length == 0 ) { state = LEAD_BYTE ; } else { state = TRAIL_BYTE ; } break ; } count ++ ; } } | Check to see if a byte array is valid utf - 8 |
20,032 | public static int bytesToCodePoint ( ByteBuffer bytes ) { bytes . mark ( ) ; byte b = bytes . get ( ) ; bytes . reset ( ) ; int extraBytesToRead = bytesFromUTF8 [ ( b & 0xFF ) ] ; if ( extraBytesToRead < 0 ) return - 1 ; int ch = 0 ; switch ( extraBytesToRead ) { case 5 : ch += ( bytes . get ( ) & 0xFF ) ; ch <<= 6 ; case 4 : ch += ( bytes . get ( ) & 0xFF ) ; ch <<= 6 ; case 3 : ch += ( bytes . get ( ) & 0xFF ) ; ch <<= 6 ; case 2 : ch += ( bytes . get ( ) & 0xFF ) ; ch <<= 6 ; case 1 : ch += ( bytes . get ( ) & 0xFF ) ; ch <<= 6 ; case 0 : ch += ( bytes . get ( ) & 0xFF ) ; } ch -= offsetsFromUTF8 [ extraBytesToRead ] ; return ch ; } | Returns the next code point at the current position in the buffer . The buffer s position will be incremented . Any mark set on this buffer will be changed by this method! |
20,033 | public static int utf8Length ( String string ) { CharacterIterator iter = new StringCharacterIterator ( string ) ; char ch = iter . first ( ) ; int size = 0 ; while ( ch != CharacterIterator . DONE ) { if ( ( ch >= 0xD800 ) && ( ch < 0xDC00 ) ) { char trail = iter . next ( ) ; if ( ( trail > 0xDBFF ) && ( trail < 0xE000 ) ) { size += 4 ; } else { size += 3 ; iter . previous ( ) ; } } else if ( ch < 0x80 ) { size ++ ; } else if ( ch < 0x800 ) { size += 2 ; } else { size += 3 ; } ch = iter . next ( ) ; } return size ; } | For the given string returns the number of UTF - 8 bytes required to encode the string . |
20,034 | public long getApproximateFreeMemory ( int deviceId ) { val externalAllocations = getTotalMemory ( deviceId ) - getFreeMemory ( deviceId ) ; val active = getActiveMemory ( deviceId ) ; val free = getTotalMemory ( deviceId ) - ( active + externalAllocations ) ; return free ; } | This method returns approximate free memory on specified device |
20,035 | public long getPreciseFreeMemory ( int deviceId ) { val extFree = NativeOpsHolder . getInstance ( ) . getDeviceNativeOps ( ) . getDeviceFreeMemory ( deviceId ) ; return extFree ; } | This method returns precise amount of free memory on specified device |
20,036 | public double nrm2 ( INDArray arr ) { switch ( arr . data ( ) . dataType ( ) ) { case DOUBLE : DefaultOpExecutioner . validateDataType ( DataType . DOUBLE , arr ) ; return dnrm2 ( arr . length ( ) , arr , 1 ) ; case FLOAT : DefaultOpExecutioner . validateDataType ( DataType . FLOAT , arr ) ; return snrm2 ( arr . length ( ) , arr , 1 ) ; case HALF : return hnrm2 ( arr . length ( ) , arr , 1 ) ; default : } throw new UnsupportedOperationException ( ) ; } | Computes the Euclidean norm of a vector . |
20,037 | public double asum ( INDArray arr ) { switch ( arr . data ( ) . dataType ( ) ) { case DOUBLE : DefaultOpExecutioner . validateDataType ( DataType . DOUBLE , arr ) ; return dasum ( arr . length ( ) , arr , 1 ) ; case FLOAT : DefaultOpExecutioner . validateDataType ( DataType . FLOAT , arr ) ; return sasum ( arr . length ( ) , arr , 1 ) ; case HALF : DefaultOpExecutioner . validateDataType ( DataType . HALF , arr ) ; return hasum ( arr . length ( ) , arr , 1 ) ; default : } throw new UnsupportedOperationException ( ) ; } | Compute the sum of magnitude of the vector elements |
20,038 | public int iamin ( INDArray arr ) { switch ( arr . data ( ) . dataType ( ) ) { case DOUBLE : DefaultOpExecutioner . validateDataType ( DataType . DOUBLE , arr ) ; return idamin ( arr . length ( ) , arr , 1 ) ; case FLOAT : DefaultOpExecutioner . validateDataType ( DataType . FLOAT , arr ) ; return isamin ( arr . length ( ) , arr , 1 ) ; case HALF : DefaultOpExecutioner . validateDataType ( DataType . HALF , arr ) ; return ihamin ( arr . length ( ) , arr , 1 ) ; default : } throw new UnsupportedOperationException ( ) ; } | Find the index of the element with maximum absolute value |
20,039 | public void axpy ( long n , double alpha , INDArray x , INDArray y ) { BaseSparseNDArray sparseX = ( BaseSparseNDArray ) x ; DataBuffer pointers = sparseX . getVectorCoordinates ( ) ; switch ( x . data ( ) . dataType ( ) ) { case DOUBLE : DefaultOpExecutioner . validateDataType ( DataType . DOUBLE , x ) ; DefaultOpExecutioner . validateDataType ( DataType . DOUBLE , y ) ; daxpyi ( n , alpha , x , pointers , y ) ; break ; case FLOAT : DefaultOpExecutioner . validateDataType ( DataType . FLOAT , x ) ; DefaultOpExecutioner . validateDataType ( DataType . FLOAT , y ) ; saxpyi ( n , alpha , x , pointers , y ) ; break ; case HALF : DefaultOpExecutioner . validateDataType ( DataType . HALF , x ) ; DefaultOpExecutioner . validateDataType ( DataType . HALF , y ) ; haxpyi ( n , alpha , x , pointers , y ) ; break ; default : throw new UnsupportedOperationException ( ) ; } } | Adds a scalar multiple of compressed sparse vector to a full - storage vector . |
20,040 | public void scal ( long N , double alpha , INDArray X ) { switch ( X . data ( ) . dataType ( ) ) { case DOUBLE : dscal ( N , alpha , X , 1 ) ; break ; case FLOAT : sscal ( N , alpha , X , 1 ) ; break ; case HALF : hscal ( N , alpha , X , 1 ) ; break ; default : throw new UnsupportedOperationException ( ) ; } } | Computes the product of a vector by a scalar . |
20,041 | public static void createHtmlAnalysisFile ( DataAnalysis dataAnalysis , File output ) throws Exception { String str = createHtmlAnalysisString ( dataAnalysis ) ; FileUtils . writeStringToFile ( output , str , StandardCharsets . UTF_8 ) ; } | Render a data analysis object as a HTML file . This will produce a summary table along charts for numerical columns |
20,042 | public String getLabel ( String path ) { if ( labelGenerator != null ) { return labelGenerator . getLabelForPath ( path ) . toString ( ) ; } if ( fileNameMap != null && fileNameMap . containsKey ( path ) ) return fileNameMap . get ( path ) ; return ( new File ( path ) ) . getParentFile ( ) . getName ( ) ; } | Get the label from the given path |
20,043 | protected void accumulateLabel ( String path ) { String name = getLabel ( path ) ; if ( ! labels . contains ( name ) ) labels . add ( name ) ; } | Accumulate the label from the path |
20,044 | public static INDArray symmetricGeneralizedEigenvalues ( INDArray A , boolean calculateVectors ) { INDArray eigenvalues = Nd4j . create ( A . rows ( ) ) ; Nd4j . getBlasWrapper ( ) . syev ( 'V' , 'L' , ( calculateVectors ? A : A . dup ( ) ) , eigenvalues ) ; return eigenvalues ; } | Compute generalized eigenvalues of the problem A x = L x . Matrix A is modified in the process holding eigenvectors as columns after execution . |
20,045 | public static INDArray symmetricGeneralizedEigenvalues ( INDArray A , INDArray B ) { Preconditions . checkArgument ( A . isMatrix ( ) && A . isSquare ( ) , "Argument A must be a square matrix: has shape %s" , A . shape ( ) ) ; Preconditions . checkArgument ( B . isMatrix ( ) && B . isSquare ( ) , "Argument B must be a square matrix: has shape %s" , B . shape ( ) ) ; INDArray W = Nd4j . create ( A . rows ( ) ) ; A = InvertMatrix . invert ( B , false ) . mmuli ( A ) ; Nd4j . getBlasWrapper ( ) . syev ( 'V' , 'L' , A , W ) ; return W ; } | Compute generalized eigenvalues of the problem A x = L B x . The data will be unchanged no eigenvectors returned . |
20,046 | private boolean streamHasMoreTokens ( ) { if ( streamTokenizer . ttype != StreamTokenizer . TT_EOF ) { try { streamTokenizer . nextToken ( ) ; } catch ( IOException e1 ) { throw new RuntimeException ( e1 ) ; } } return streamTokenizer . ttype != StreamTokenizer . TT_EOF && streamTokenizer . ttype != - 1 ; } | Checks if underlying stream has any tokens left |
20,047 | public boolean hasMoreTokens ( ) { log . info ( "Tokens size: [" + tokens . size ( ) + "], position: [" + position . get ( ) + "]" ) ; if ( ! tokens . isEmpty ( ) ) return position . get ( ) < tokens . size ( ) ; else return streamHasMoreTokens ( ) ; } | Checks if any prebuffered tokens left otherswise checks underlying stream |
20,048 | public List < String > getTokens ( ) { if ( ! tokens . isEmpty ( ) ) return tokens ; log . info ( "Starting prebuffering..." ) ; while ( streamHasMoreTokens ( ) ) { tokens . add ( nextTokenFromStream ( ) ) ; } log . info ( "Tokens prefetch finished. Tokens size: [" + tokens . size ( ) + "]" ) ; return tokens ; } | Returns all tokens as list of Strings |
20,049 | public String [ ] columnNames ( ) { return getInputSchema ( ) . getColumnNames ( ) . toArray ( new String [ getInputSchema ( ) . getColumnNames ( ) . size ( ) ] ) ; } | Returns column names this op is meant to run on |
20,050 | public INDArray getGradient ( INDArray gradient , int iteration ) { if ( historicalGradient == null ) throw new IllegalStateException ( "Updater has not been initialized with view state" ) ; historicalGradient . addi ( gradient . mul ( gradient ) ) ; INDArray sqrtHistory = sqrt ( historicalGradient . dup ( gradientReshapeOrder ) , false ) . addi ( epsilon ) ; INDArray ret = gradient . muli ( sqrtHistory . rdivi ( learningRate ) ) ; numIterations ++ ; return ret ; } | Gets feature specific learning rates Adagrad keeps a history of gradients being passed in . Note that each gradient passed in becomes adapted over time hence the opName adagrad |
20,051 | public static MissionSpec loadMissionXML ( String filename ) { MissionSpec mission = null ; try { String xml = new String ( Files . readAllBytes ( Paths . get ( filename ) ) ) ; mission = new MissionSpec ( xml , true ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } return mission ; } | Convenience method to load a Malmo mission specification from an XML - file |
20,052 | public DataBuffer createDouble ( double [ ] data , boolean copy , MemoryWorkspace workspace ) { return new CudaDoubleDataBuffer ( data , copy , workspace ) ; } | Creates a double data buffer |
20,053 | public RecordReader createReader ( InputSplit split ) throws IOException , InterruptedException { RecordReader reader = new ListStringRecordReader ( ) ; reader . initialize ( split ) ; return reader ; } | Creates a reader from an input split |
20,054 | public boolean matches ( SameDiff sameDiff , DifferentialFunction rootFn ) { if ( ! root . matches ( sameDiff , rootFn ) ) { return false ; } SDVariable [ ] inputs = rootFn . args ( ) ; int inCount = inputs == null ? 0 : inputs . length ; if ( inputCount != null ) { if ( inCount != this . inputCount ) return false ; } SDVariable [ ] outputs = rootFn . outputVariables ( ) ; int outCount = outputs == null ? 0 : outputs . length ; if ( outputCount != null ) { if ( outCount != outputCount ) return false ; } for ( Map < Integer , OpPredicate > m : Arrays . asList ( opInputMatchPredicates , opInputSubgraphPredicates ) ) { for ( Map . Entry < Integer , OpPredicate > e : m . entrySet ( ) ) { int inNum = e . getKey ( ) ; if ( inNum >= inCount ) { return false ; } SDVariable in = inputs [ inNum ] ; DifferentialFunction df = sameDiff . getVariableOutputFunction ( in . getVarName ( ) ) ; if ( df == null || ! e . getValue ( ) . matches ( sameDiff , df ) ) { return false ; } } } return true ; } | Determine if the subgraph starting with the root function matches the predicate |
20,055 | public SubGraph getSubGraph ( SameDiff sd , DifferentialFunction rootFn ) { Preconditions . checkState ( matches ( sd , rootFn ) , "Root function does not match predicate" ) ; List < DifferentialFunction > childNodes = new ArrayList < > ( ) ; if ( ! opInputSubgraphPredicates . isEmpty ( ) ) { for ( Map . Entry < Integer , OpPredicate > entry : opInputSubgraphPredicates . entrySet ( ) ) { OpPredicate p2 = entry . getValue ( ) ; SDVariable arg = rootFn . arg ( entry . getKey ( ) ) ; DifferentialFunction df = sd . getVariableOutputFunction ( arg . getVarName ( ) ) ; if ( df != null ) { childNodes . add ( df ) ; if ( p2 instanceof SubGraphPredicate ) { SubGraph sg = ( ( SubGraphPredicate ) p2 ) . getSubGraph ( sd , df ) ; childNodes . addAll ( sg . childNodes ) ; } } } } SubGraph sg = SubGraph . builder ( ) . sameDiff ( sd ) . rootNode ( rootFn ) . childNodes ( childNodes ) . build ( ) ; return sg ; } | Get the SubGraph that matches the predicate |
20,056 | public void create ( ) { RunInstancesRequest runInstancesRequest = new RunInstancesRequest ( ) . withImageId ( amiId ) . withInstanceType ( size ) . withKeyName ( keyPair ) . withMinCount ( 1 ) . withSecurityGroupIds ( securityGroupId ) . withMaxCount ( numBoxes ) ; AmazonEC2 ec2 = getEc2 ( ) ; ec2 . setRegion ( com . amazonaws . regions . Region . getRegion ( regions ) ) ; List < Instance > boxes = ec2 . runInstances ( runInstancesRequest ) . getReservation ( ) . getInstances ( ) ; if ( boxesCreated == null ) { boxesCreated = new ArrayList < > ( ) ; for ( Instance i : boxes ) boxesCreated . add ( i . getInstanceId ( ) ) ; log . info ( "Boxes created " + boxesCreated ) ; } else { blowupBoxes ( ) ; boxesCreated . clear ( ) ; for ( Instance i : boxes ) boxesCreated . add ( i . getInstanceId ( ) ) ; } } | Create the instances |
20,057 | public void subDivide ( ) { MemoryWorkspace workspace = workspaceMode == WorkspaceMode . NONE ? new DummyWorkspace ( ) : Nd4j . getWorkspaceManager ( ) . getWorkspaceForCurrentThread ( workspaceConfigurationExternal , workspaceExternal ) ; try ( MemoryWorkspace ws = workspace . notifyScopeEntered ( ) ) { INDArray newCorner = Nd4j . create ( D ) ; INDArray newWidth = Nd4j . create ( D ) ; for ( int i = 0 ; i < numChildren ; i ++ ) { int div = 1 ; for ( int d = 0 ; d < D ; d ++ ) { newWidth . putScalar ( d , .5 * boundary . width ( d ) ) ; if ( ( i / div ) % 2 == 1 ) newCorner . putScalar ( d , boundary . corner ( d ) - .5 * boundary . width ( d ) ) ; else newCorner . putScalar ( d , boundary . corner ( d ) + .5 * boundary . width ( d ) ) ; div *= 2 ; } children [ i ] = new SpTree ( this , data , newCorner , newWidth , indices ) ; } for ( int i = 0 ; i < size ; i ++ ) { boolean success = false ; for ( int j = 0 ; j < this . numChildren ; j ++ ) if ( ! success ) success = children [ j ] . insert ( index [ i ] ) ; index [ i ] = - 1 ; } size = 0 ; isLeaf = false ; } } | Subdivide the node into 4 children
20,058 | public static IActivation getIActivationFromConfig ( Map < String , Object > layerConfig , KerasLayerConfiguration conf ) throws InvalidKerasConfigurationException , UnsupportedKerasConfigurationException { return getActivationFromConfig ( layerConfig , conf ) . getActivationFunction ( ) ; } | Get activation function from Keras layer configuration . |
20,059 | public static Activation getActivationFromConfig ( Map < String , Object > layerConfig , KerasLayerConfiguration conf ) throws InvalidKerasConfigurationException , UnsupportedKerasConfigurationException { Map < String , Object > innerConfig = KerasLayerUtils . getInnerLayerConfigFromConfig ( layerConfig , conf ) ; if ( ! innerConfig . containsKey ( conf . getLAYER_FIELD_ACTIVATION ( ) ) ) throw new InvalidKerasConfigurationException ( "Keras layer is missing " + conf . getLAYER_FIELD_ACTIVATION ( ) + " field" ) ; return mapToActivation ( ( String ) innerConfig . get ( conf . getLAYER_FIELD_ACTIVATION ( ) ) , conf ) ; } | Get activation enum value from Keras layer configuration . |
20,060 | protected void postInit ( ) { Collection < TrainingListener > oldListeners = new ArrayList < > ( ) ; Collection < TrainingListener > replicatedListeners = new ArrayList < > ( ) ; if ( parallelWrapper . getListeners ( ) != null ) { oldListeners . addAll ( parallelWrapper . getListeners ( ) ) ; } configureListeners ( uuid , oldListeners , replicatedListeners ) ; this . replicatedModel . setListeners ( replicatedListeners ) ; } | This method does post - initialization configuration of Model . Good place to configure listeners and all such things
20,061 | public void free ( AllocationPoint point ) { if ( point . getAllocationStatus ( ) == AllocationStatus . DEVICE ) { super . free ( point ) ; } else { AllocationShape shape = point . getShape ( ) ; long reqMemory = AllocationUtils . getRequiredMemory ( shape ) ; if ( reqMemory > CudaEnvironment . getInstance ( ) . getConfiguration ( ) . getMaximumHostCacheableLength ( ) || zeroCachedAmount . get ( ) >= CudaEnvironment . getInstance ( ) . getConfiguration ( ) . getMaximumHostCache ( ) ) { super . free ( point ) ; return ; } ensureCacheHolder ( shape ) ; CacheHolder cache = zeroCache . get ( shape ) ; if ( reqMemory <= FORCED_CACHE_THRESHOLD ) { Pointer . memset ( point . getHostPointer ( ) , 0 , reqMemory ) ; cache . put ( new CudaPointer ( point . getHostPointer ( ) . address ( ) ) ) ; } else { long cacheEntries = cache . size ( ) ; long cacheHeight = zeroCache . size ( ) ; long cacheDepth = cacheEntries * reqMemory ; Pointer . memset ( point . getHostPointer ( ) , 0 , reqMemory ) ; cache . put ( new CudaPointer ( point . getHostPointer ( ) . address ( ) ) ) ; } MemoryTracker . getInstance ( ) . decrementAllocatedHostAmount ( reqMemory ) ; MemoryTracker . getInstance ( ) . incrementCachedHostAmount ( reqMemory ) ; } } | This method frees specific chunk of memory described by AllocationPoint passed in . |
20,062 | public static INDArray convertArrowVector ( FieldVector fieldVector , ColumnType type ) { DataBuffer buffer = null ; int cols = fieldVector . getValueCount ( ) ; ByteBuffer direct = ByteBuffer . allocateDirect ( fieldVector . getDataBuffer ( ) . capacity ( ) ) ; direct . order ( ByteOrder . nativeOrder ( ) ) ; fieldVector . getDataBuffer ( ) . getBytes ( 0 , direct ) ; direct . rewind ( ) ; switch ( type ) { case Integer : buffer = Nd4j . createBuffer ( direct , DataType . INT , cols , 0 ) ; break ; case Float : buffer = Nd4j . createBuffer ( direct , DataType . FLOAT , cols ) ; break ; case Double : buffer = Nd4j . createBuffer ( direct , DataType . DOUBLE , cols ) ; break ; case Long : buffer = Nd4j . createBuffer ( direct , DataType . LONG , cols ) ; break ; } return Nd4j . create ( buffer , new int [ ] { cols , 1 } ) ; } | Convert a field vector to a column vector |
20,063 | public static List < Writable > toArrowWritablesSingle ( List < FieldVector > fieldVectors , Schema schema ) { return toArrowWritables ( fieldVectors , schema ) . get ( 0 ) ; } | Return a singular record based on the converted writables result . |
20,064 | public static Field field ( String name , ArrowType arrowType ) { return new Field ( name , FieldType . nullable ( arrowType ) , new ArrayList < Field > ( ) ) ; } | Shortcut method for returning a field given an arrow type and name with no sub fields |
20,065 | public static List < FieldVector > toArrowColumns ( final BufferAllocator bufferAllocator , final Schema schema , List < List < Writable > > dataVecRecord ) { int numRows = dataVecRecord . size ( ) ; List < FieldVector > ret = createFieldVectors ( bufferAllocator , schema , numRows ) ; for ( int j = 0 ; j < schema . numColumns ( ) ; j ++ ) { FieldVector fieldVector = ret . get ( j ) ; int row = 0 ; for ( List < Writable > record : dataVecRecord ) { Writable writable = record . get ( j ) ; setValue ( schema . getType ( j ) , fieldVector , writable , row ) ; row ++ ; } } return ret ; } | Given a buffer allocator and datavec schema convert the passed in batch of records to a set of arrow columns |
20,066 | public static void assertNInNOutSet ( String layerType , String layerName , long layerIndex , long nIn , long nOut ) { if ( nIn <= 0 || nOut <= 0 ) { if ( layerName == null ) layerName = "(name not set)" ; throw new DL4JInvalidConfigException ( layerType + " (index=" + layerIndex + ", name=" + layerName + ") nIn=" + nIn + ", nOut=" + nOut + "; nIn and nOut must be > 0" ) ; } } | Asserts that the layer nIn and nOut values are set for the layer |
20,067 | public static void assertNOutSet ( String layerType , String layerName , long layerIndex , long nOut ) { if ( nOut <= 0 ) { if ( layerName == null ) layerName = "(name not set)" ; throw new DL4JInvalidConfigException ( layerType + " (index=" + layerIndex + ", name=" + layerName + ") nOut=" + nOut + "; nOut must be > 0" ) ; } } | Asserts that the layer nOut value is set for the layer |
20,068 | public SDVariable jaccardDistance ( SDVariable x , SDVariable y , int ... dimensions ) { return jaccardDistance ( null , x , y , dimensions ) ; } | Jaccard distance reduction operation . The output contains the Jaccard distance for each tensor along the specified dimensions .
20,069 | public SDVariable [ ] normalizeMoments ( String [ ] name , SDVariable counts , SDVariable means , SDVariable variances , double shift ) { SDVariable [ ] res = f ( ) . normalizeMoments ( counts , means , variances , shift ) ; return sd . updateVariableNamesAndReferences ( res , name ) ; } | Calculate the mean and variance from the sufficient statistics |
20,070 | public void createCluster ( ) { AmazonElasticMapReduce emr = sparkEmrClientBuilder . build ( ) ; Optional < ClusterSummary > csr = findClusterWithName ( emr , sparkClusterName ) ; if ( csr . isPresent ( ) ) { String msg = String . format ( "A cluster with name %s and id %s is already deployed" , sparkClusterName , csr . get ( ) . getId ( ) ) ; log . error ( msg ) ; throw new IllegalStateException ( msg ) ; } else { RunJobFlowResult res = emr . runJobFlow ( sparkRunJobFlowRequest ) ; String msg = String . format ( "Your cluster is launched with name %s and id %s." , sparkClusterName , res . getJobFlowId ( ) ) ; log . info ( msg ) ; } } | Creates the current cluster |
20,071 | public boolean isReadyForNext ( ) { if ( objectMapper == null ) objectMapper = new ObjectMapper ( ) ; try { int masterStream = Integer . parseInt ( ndarraySendUrl . split ( ":" ) [ 2 ] ) ; SubscriberState subscriberState = objectMapper . readValue ( Unirest . get ( String . format ( "http://%s:%d/state/%d" , masterStatusHost , masterStatusPort , masterStream ) ) . asJson ( ) . getBody ( ) . toString ( ) , SubscriberState . class ) ; return subscriberState . isReady ( ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } return false ; } | Returns true if the client is ready for a next array or not |
20,072 | public boolean masterStarted ( ) { if ( objectMapper == null ) objectMapper = new ObjectMapper ( ) ; try { String type = objectMapper . readValue ( Unirest . get ( String . format ( "http://%s:%d/opType" , masterStatusHost , masterStatusPort ) ) . asJson ( ) . getBody ( ) . toString ( ) , ServerTypeJson . class ) . getType ( ) ; if ( ! type . equals ( "master" ) ) throw new IllegalStateException ( "Wrong opType " + type ) ; Unirest . get ( String . format ( "http://%s:%d/started" , masterStatusHost , masterStatusPort ) ) . asJson ( ) . getBody ( ) ; return objectMapper . readValue ( Unirest . get ( String . format ( "http://%s:%d/started" , masterStatusHost , masterStatusPort ) ) . asJson ( ) . getBody ( ) . toString ( ) , MasterStatus . class ) . started ( ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } return false ; } | Sends a GET request to the status server to determine if the master node is started .
20,073 | public INDArray getArray ( ) { if ( subscriber == null ) { running = new AtomicBoolean ( true ) ; subscriber = AeronNDArraySubscriber . startSubscriber ( aeron , subscriberHost , subscriberPort , this , subscriberStream , running ) ; log . debug ( "Started parameter server client on " + subscriber . connectionUrl ( ) ) ; } if ( arr == null ) arr = new AtomicReference < > ( none ) ; log . debug ( "Parameter server client retrieving url from " + ndarrayRetrieveUrl ) ; String [ ] split = ndarrayRetrieveUrl . split ( ":" ) ; int port = Integer . parseInt ( split [ 1 ] ) ; int streamToPublish = Integer . parseInt ( split [ 2 ] ) ; String channel = AeronUtil . aeronChannel ( split [ 0 ] , port ) ; try ( HostPortPublisher hostPortPublisher = HostPortPublisher . builder ( ) . channel ( channel ) . aeron ( aeron ) . streamId ( streamToPublish ) . uriToSend ( AeronConnectionInformation . of ( subscriberHost , subscriberPort , subscriberStream ) . toString ( ) ) . build ( ) ) { hostPortPublisher . send ( ) ; log . debug ( "Sent subscriber information " + AeronConnectionInformation . of ( subscriberHost , subscriberPort , subscriberStream ) . toString ( ) ) ; while ( arr . get ( ) == none ) { Thread . sleep ( 1000 ) ; log . info ( "Waiting on array to be updated." ) ; } } catch ( Exception e ) { log . error ( "Error with publishing" , e ) ; } INDArray arr2 = arr . get ( ) ; arr . set ( none ) ; return arr2 ; } | Get an ndarray from the designated ndarray retrieve url . This will pull the current ndarray from the master |
20,074 | public void apply ( InMemoryLookupTable table ) { for ( Map . Entry < Integer , Set < INDArray > > entry : changes . entrySet ( ) ) { Set < INDArray > changes = entry . getValue ( ) ; INDArray toChange = table . getSyn0 ( ) . slice ( entry . getKey ( ) ) ; for ( INDArray syn1 : changes ) Nd4j . getBlasWrapper ( ) . level1 ( ) . axpy ( toChange . length ( ) , 1 , syn1 , toChange ) ; } } | Take the changes and apply them to the given table |
20,075 | public DataSet loadFromMetaData ( List < RecordMetaData > list ) throws IOException { if ( underlying == null ) { Record r = recordReader . loadFromMetaData ( list . get ( 0 ) ) ; initializeUnderlying ( r ) ; } List < RecordMetaData > l = new ArrayList < > ( list . size ( ) ) ; for ( RecordMetaData m : list ) { l . add ( new RecordMetaDataComposableMap ( Collections . singletonMap ( READER_KEY , m ) ) ) ; } MultiDataSet m = underlying . loadFromMetaData ( l ) ; return mdsToDataSet ( m ) ; } | Load multiple examples into a DataSet using the provided RecordMetaData instances .
20,076 | public static < V , E > Graph < V , E > loadGraph ( String path , EdgeLineProcessor < E > lineProcessor , VertexFactory < V > vertexFactory , int numVertices , boolean allowMultipleEdges ) throws IOException { Graph < V , E > graph = new Graph < > ( numVertices , allowMultipleEdges , vertexFactory ) ; try ( BufferedReader br = new BufferedReader ( new FileReader ( new File ( path ) ) ) ) { String line ; while ( ( line = br . readLine ( ) ) != null ) { Edge < E > edge = lineProcessor . processLine ( line ) ; if ( edge != null ) { graph . addEdge ( edge ) ; } } } return graph ; } | Load a graph into memory using a given EdgeLineProcessor . Assume one edge per line |
20,077 | public static < V , E > Graph < V , E > loadGraph ( String vertexFilePath , String edgeFilePath , VertexLoader < V > vertexLoader , EdgeLineProcessor < E > edgeLineProcessor , boolean allowMultipleEdges ) throws IOException { List < Vertex < V > > vertices = vertexLoader . loadVertices ( vertexFilePath ) ; Graph < V , E > graph = new Graph < > ( vertices , allowMultipleEdges ) ; try ( BufferedReader br = new BufferedReader ( new FileReader ( new File ( edgeFilePath ) ) ) ) { String line ; while ( ( line = br . readLine ( ) ) != null ) { Edge < E > edge = edgeLineProcessor . processLine ( line ) ; if ( edge != null ) { graph . addEdge ( edge ) ; } } } return graph ; } | Load graph assuming vertices are in one file and edges are in another file . |
20,078 | public static INDArray normalizeZeroMeanAndUnitVariance ( INDArray toNormalize ) { INDArray columnMeans = toNormalize . mean ( 0 ) ; INDArray columnStds = toNormalize . std ( 0 ) ; toNormalize . subiRowVector ( columnMeans ) ; columnStds . addi ( Nd4j . EPS_THRESHOLD ) ; toNormalize . diviRowVector ( columnStds ) ; return toNormalize ; } | Normalize data to zero mean and unit variance : subtract the mean and divide by the standard deviation
20,079 | public static INDArray pow ( INDArray ndArray , INDArray power , boolean dup ) { INDArray result = ( dup ? ndArray . ulike ( ) : ndArray ) ; return exec ( new PowPairwise ( ndArray , power , result ) ) ; } | Element - wise power function - x^y performed element - wise |
20,080 | public static INDArray tan ( INDArray ndArray , boolean dup ) { return exec ( dup ? new Tan ( ndArray , ndArray . ulike ( ) ) : new Tan ( ndArray ) ) ; } | Element - wise tan function . Copies the array if dup is true , otherwise operates in place
20,081 | public static INDArray log ( INDArray ndArray , double base , boolean duplicate ) { return Nd4j . getExecutioner ( ) . exec ( new LogX ( ndArray , duplicate ? ndArray . ulike ( ) : ndArray , base ) ) ; } | Log on arbitrary base |
20,082 | public static INDArray sign ( INDArray toSign , boolean dup ) { return exec ( dup ? new Sign ( toSign , toSign . ulike ( ) ) : new Sign ( toSign ) ) ; } | Signum function of this ndarray |
20,083 | public static INDArray max ( INDArray ndArray , double k , boolean dup ) { return exec ( dup ? new ScalarMax ( ndArray , null , ndArray . ulike ( ) , k ) : new ScalarMax ( ndArray , k ) ) ; } | Maximum function with a scalar |
20,084 | public static INDArray min ( INDArray ndArray , double k , boolean dup ) { return exec ( dup ? new ScalarMin ( ndArray , null , ndArray . ulike ( ) , k ) : new ScalarMin ( ndArray , k ) ) ; } | Minimum function with a scalar |
20,085 | public static INDArray min ( INDArray first , INDArray second ) { return min ( first , second , true ) ; } | Element wise minimum function between 2 INDArrays |
20,086 | public void destroyWorkspace ( boolean extended ) { if ( workspace . getHostPointer ( ) != null && workspace . getHostPointer ( ) . getOriginalPointer ( ) != null && workspace . getHostPointer ( ) . getOriginalPointer ( ) instanceof BytePointer ) workspace . getHostPointer ( ) . getOriginalPointer ( ) . deallocate ( ) ; workspace . setHostPointer ( null ) ; currentSize . set ( 0 ) ; reset ( ) ; if ( extended ) { clearExternalAllocations ( ) ; } } | This method basically deallocates workspace memory |
20,087 | public MemoryWorkspace notifyScopeBorrowed ( ) { if ( isBorrowed . get ( ) ) throw new ND4JIllegalStateException ( "Workspace [" + id + "]: Can't borrow from borrowed workspace" ) ; borrowingWorkspace = Nd4j . getMemoryManager ( ) . getCurrentWorkspace ( ) ; isBorrowed . set ( true ) ; Nd4j . getMemoryManager ( ) . setCurrentWorkspace ( this ) ; return this ; } | This method TEMPORARY enters this workspace without reset applied |
20,088 | public KerasModelBuilder modelJsonFilename ( String modelJsonFilename ) throws IOException { checkForExistence ( modelJsonFilename ) ; this . modelJson = new String ( Files . readAllBytes ( Paths . get ( modelJsonFilename ) ) ) ; return this ; } | Set model architecture from file name pointing to model JSON string . |
20,089 | public KerasModelBuilder modelYamlFilename ( String modelYamlFilename ) throws IOException { checkForExistence ( modelYamlFilename ) ; this . modelJson = new String ( Files . readAllBytes ( Paths . get ( modelYamlFilename ) ) ) ; return this ; } | Set model architecture from file name pointing to model YAML string . |
20,090 | public KerasModelBuilder modelJsonInputStream ( InputStream modelJsonInputStream ) throws IOException { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream ( ) ; IOUtils . copy ( modelJsonInputStream , byteArrayOutputStream ) ; this . modelJson = new String ( byteArrayOutputStream . toByteArray ( ) ) ; return this ; } | Set model architecture from input stream of model JSON . |
20,091 | public KerasModelBuilder modelYamlInputStream ( InputStream modelYamlInputStream ) throws IOException { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream ( ) ; IOUtils . copy ( modelYamlInputStream , byteArrayOutputStream ) ; this . modelJson = new String ( byteArrayOutputStream . toByteArray ( ) ) ; return this ; } | Set model architecture from input stream of model YAML . |
20,092 | public KerasModelBuilder trainingJsonInputStream ( InputStream trainingJsonInputStream ) throws IOException { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream ( ) ; IOUtils . copy ( trainingJsonInputStream , byteArrayOutputStream ) ; this . trainingJson = new String ( byteArrayOutputStream . toByteArray ( ) ) ; return this ; } | Provide training configuration as file input stream from JSON |
20,093 | public KerasModelBuilder trainingYamlInputStream ( InputStream trainingYamlInputStream ) throws IOException { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream ( ) ; IOUtils . copy ( trainingYamlInputStream , byteArrayOutputStream ) ; this . trainingYaml = new String ( byteArrayOutputStream . toByteArray ( ) ) ; return this ; } | Provide training configuration as file input stream from YAML |
20,094 | public KerasModelBuilder weightsHdf5FilenameNoRoot ( String weightsHdf5Filename ) throws IOException { checkForExistence ( weightsHdf5Filename ) ; this . weightsArchive = new Hdf5Archive ( weightsHdf5Filename ) ; return this ; } | Set weights of the model by providing the file name of the corresponding weights HDF5 file . The root of the HDF5 group containing weights won't be set by this method .
20,095 | public KerasModelBuilder weightsHdf5Filename ( String weightsHdf5Filename ) throws IOException { checkForExistence ( weightsHdf5Filename ) ; this . weightsArchive = new Hdf5Archive ( weightsHdf5Filename ) ; this . weightsRoot = config . getTrainingWeightsRoot ( ) ; return this ; } | Set weights of the model by providing the file name of the corresponding weights HDF5 file . The root of the HDF5 group containing weights will be read and set from the configuration of this model builder instance . |
20,096 | public void close ( ) { if ( trainingArchive != null && trainingArchive != weightsArchive ) { trainingArchive . close ( ) ; trainingArchive = null ; } if ( weightsArchive != null ) { weightsArchive . close ( ) ; weightsArchive = null ; } } | Close all HDF5 archives for this model builder . |
20,097 | protected double ddoti ( long N , INDArray X , DataBuffer indx , INDArray Y ) { return cblas_ddoti ( ( int ) N , ( DoublePointer ) X . data ( ) . addressPointer ( ) , ( IntPointer ) indx . addressPointer ( ) , ( DoublePointer ) Y . data ( ) . addressPointer ( ) ) ; } | Computes the dot product of a compressed sparse double vector by a full - storage real vector . |
20,098 | protected double sdoti ( long N , INDArray X , DataBuffer indx , INDArray Y ) { return cblas_sdoti ( ( int ) N , ( FloatPointer ) X . data ( ) . addressPointer ( ) , ( IntPointer ) indx . addressPointer ( ) , ( FloatPointer ) Y . data ( ) . addressPointer ( ) ) ; } | Computes the dot product of a compressed sparse float vector by a full - storage real vector . |
20,099 | protected double dnrm2 ( long N , INDArray X , int incx ) { return cblas_dnrm2 ( ( int ) N , ( DoublePointer ) X . data ( ) . addressPointer ( ) , incx ) ; } | Computes the Euclidean norm of a double vector |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.