idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
15,100 | public Csv schema ( TypeInformation < Row > schemaType ) { Preconditions . checkNotNull ( schemaType ) ; internalProperties . putString ( FORMAT_SCHEMA , TypeStringUtils . writeTypeInfo ( schemaType ) ) ; return this ; } | Sets the format schema with field names and the types . Required if schema is not derived . |
15,101 | public static void main ( String [ ] args ) throws Exception { Configuration globalConfig = GlobalConfiguration . loadConfiguration ( ) ; PythonPlanBinder binder = new PythonPlanBinder ( globalConfig ) ; try { binder . runPlan ( args ) ; } catch ( Exception e ) { System . out . println ( "Failed to run plan: " + e . getMessage ( ) ) ; LOG . error ( "Failed to run plan." , e ) ; } } | Entry point for the execution of a python plan . |
15,102 | public < V , A extends Serializable > void addAccumulator ( String name , Accumulator < V , A > accumulator ) { getRuntimeContext ( ) . addAccumulator ( id + SEPARATOR + name , accumulator ) ; } | Adds an accumulator by prepending the given name with a random string . |
15,103 | static void processLastRow ( BaseRow currentRow , boolean generateRetraction , ValueState < BaseRow > state , Collector < BaseRow > out ) throws Exception { Preconditions . checkArgument ( BaseRowUtil . isAccumulateMsg ( currentRow ) ) ; if ( generateRetraction ) { BaseRow preRow = state . value ( ) ; state . update ( currentRow ) ; if ( preRow != null ) { preRow . setHeader ( BaseRowUtil . RETRACT_MSG ) ; out . collect ( preRow ) ; } } out . collect ( currentRow ) ; } | Processes element to deduplicate on keys sends current element as last row retracts previous element if needed . |
15,104 | static void processFirstRow ( BaseRow currentRow , ValueState < Boolean > state , Collector < BaseRow > out ) throws Exception { Preconditions . checkArgument ( BaseRowUtil . isAccumulateMsg ( currentRow ) ) ; if ( state . value ( ) != null ) { return ; } state . update ( true ) ; out . collect ( currentRow ) ; } | Processes element to deduplicate on keys sends current element if it is first row . |
15,105 | public Iterator < T > sample ( final Iterator < T > input ) { if ( fraction == 0 ) { return emptyIterable ; } return new SampledIterator < T > ( ) { T current = null ; public boolean hasNext ( ) { if ( current == null ) { current = getNextSampledElement ( ) ; } return current != null ; } public T next ( ) { if ( current == null ) { return getNextSampledElement ( ) ; } else { T result = current ; current = null ; return result ; } } private T getNextSampledElement ( ) { if ( fraction <= THRESHOLD ) { double rand = random . nextDouble ( ) ; double u = Math . max ( rand , EPSILON ) ; int gap = ( int ) ( Math . log ( u ) / Math . log ( 1 - fraction ) ) ; int elementCount = 0 ; if ( input . hasNext ( ) ) { T element = input . next ( ) ; while ( input . hasNext ( ) && elementCount < gap ) { element = input . next ( ) ; elementCount ++ ; } if ( elementCount < gap ) { return null ; } else { return element ; } } else { return null ; } } else { while ( input . hasNext ( ) ) { T element = input . next ( ) ; if ( random . nextDouble ( ) <= fraction ) { return element ; } } return null ; } } } ; } | Sample the input elements for each input element take a Bernoulli trail for sampling . |
15,106 | public static < T > byte [ ] serializeValue ( T value , TypeSerializer < T > serializer ) throws IOException { if ( value != null ) { DataOutputSerializer dos = new DataOutputSerializer ( 32 ) ; serializer . serialize ( value , dos ) ; return dos . getCopyOfBuffer ( ) ; } else { return null ; } } | Serializes the value with the given serializer . |
15,107 | public static < T > T deserializeValue ( byte [ ] serializedValue , TypeSerializer < T > serializer ) throws IOException { if ( serializedValue == null ) { return null ; } else { final DataInputDeserializer deser = new DataInputDeserializer ( serializedValue , 0 , serializedValue . length ) ; final T value = serializer . deserialize ( deser ) ; if ( deser . available ( ) > 0 ) { throw new IOException ( "Unconsumed bytes in the deserialized value. " + "This indicates a mismatch in the value serializers " + "used by the KvState instance and this access." ) ; } return value ; } } | Deserializes the value with the given serializer . |
15,108 | public static < T > List < T > deserializeList ( byte [ ] serializedValue , TypeSerializer < T > serializer ) throws IOException { if ( serializedValue != null ) { final DataInputDeserializer in = new DataInputDeserializer ( serializedValue , 0 , serializedValue . length ) ; try { final List < T > result = new ArrayList < > ( ) ; while ( in . available ( ) > 0 ) { result . add ( serializer . deserialize ( in ) ) ; if ( in . available ( ) > 0 ) { in . readByte ( ) ; } } return result ; } catch ( IOException e ) { throw new IOException ( "Unable to deserialize value. " + "This indicates a mismatch in the value serializers " + "used by the KvState instance and this access." , e ) ; } } else { return null ; } } | Deserializes all values with the given serializer . |
15,109 | public static < UK , UV > byte [ ] serializeMap ( Iterable < Map . Entry < UK , UV > > entries , TypeSerializer < UK > keySerializer , TypeSerializer < UV > valueSerializer ) throws IOException { if ( entries != null ) { DataOutputSerializer dos = new DataOutputSerializer ( 32 ) ; for ( Map . Entry < UK , UV > entry : entries ) { keySerializer . serialize ( entry . getKey ( ) , dos ) ; if ( entry . getValue ( ) == null ) { dos . writeBoolean ( true ) ; } else { dos . writeBoolean ( false ) ; valueSerializer . serialize ( entry . getValue ( ) , dos ) ; } } return dos . getCopyOfBuffer ( ) ; } else { return null ; } } | Serializes all values of the Iterable with the given serializer . |
15,110 | public static < UK , UV > Map < UK , UV > deserializeMap ( byte [ ] serializedValue , TypeSerializer < UK > keySerializer , TypeSerializer < UV > valueSerializer ) throws IOException { if ( serializedValue != null ) { DataInputDeserializer in = new DataInputDeserializer ( serializedValue , 0 , serializedValue . length ) ; Map < UK , UV > result = new HashMap < > ( ) ; while ( in . available ( ) > 0 ) { UK key = keySerializer . deserialize ( in ) ; boolean isNull = in . readBoolean ( ) ; UV value = isNull ? null : valueSerializer . deserialize ( in ) ; result . put ( key , value ) ; } return result ; } else { return null ; } } | Deserializes all kv pairs with the given serializer . |
15,111 | protected void seekInput ( MemorySegment segment , int positionInSegment , int limitInSegment ) { this . currentSegment = segment ; this . positionInSegment = positionInSegment ; this . limitInSegment = limitInSegment ; } | Sets the internal state of the view such that the next bytes will be read from the given memory segment starting at the given position . The memory segment will provide bytes up to the given limit position . |
15,112 | public void setShardAssigner ( KinesisShardAssigner shardAssigner ) { this . shardAssigner = checkNotNull ( shardAssigner , "function can not be null" ) ; ClosureCleaner . clean ( shardAssigner , true ) ; } | Provide a custom assigner to influence how shards are distributed over subtasks . |
15,113 | protected KinesisDataFetcher < T > createFetcher ( List < String > streams , SourceFunction . SourceContext < T > sourceContext , RuntimeContext runtimeContext , Properties configProps , KinesisDeserializationSchema < T > deserializationSchema ) { return new KinesisDataFetcher < > ( streams , sourceContext , runtimeContext , configProps , deserializationSchema , shardAssigner , periodicWatermarkAssigner ) ; } | This method is exposed for tests that need to mock the KinesisDataFetcher in the consumer . |
15,114 | public static void openChainedTasks ( List < ChainedDriver < ? , ? > > tasks , AbstractInvokable parent ) throws Exception { for ( int i = 0 ; i < tasks . size ( ) ; i ++ ) { final ChainedDriver < ? , ? > task = tasks . get ( i ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( constructLogString ( "Start task code" , task . getTaskName ( ) , parent ) ) ; } task . openTask ( ) ; } } | Opens all chained tasks in the order as they are stored in the array . The opening process creates a standardized log info message . |
15,115 | public static void closeChainedTasks ( List < ChainedDriver < ? , ? > > tasks , AbstractInvokable parent ) throws Exception { for ( int i = 0 ; i < tasks . size ( ) ; i ++ ) { final ChainedDriver < ? , ? > task = tasks . get ( i ) ; task . closeTask ( ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( constructLogString ( "Finished task code" , task . getTaskName ( ) , parent ) ) ; } } } | Closes all chained tasks in the order as they are stored in the array . The closing process creates a standardized log info message . |
15,116 | public static < T > T instantiateUserCode ( TaskConfig config , ClassLoader cl , Class < ? super T > superClass ) { try { T stub = config . < T > getStubWrapper ( cl ) . getUserCodeObject ( superClass , cl ) ; if ( superClass != null && ! superClass . isAssignableFrom ( stub . getClass ( ) ) ) { throw new RuntimeException ( "The class '" + stub . getClass ( ) . getName ( ) + "' is not a subclass of '" + superClass . getName ( ) + "' as is required." ) ; } return stub ; } catch ( ClassCastException ccex ) { throw new RuntimeException ( "The UDF class is not a proper subclass of " + superClass . getName ( ) , ccex ) ; } } | Instantiates a user code class from is definition in the task configuration . The class is instantiated without arguments using the null - ary constructor . Instantiation will fail if this constructor does not exist or is not public . |
15,117 | public void setResources ( ResourceSpec minResources , ResourceSpec preferredResources ) { this . minResources = minResources ; this . preferredResources = preferredResources ; } | Sets the minimum and preferred resources for this contract instance . The resource denotes how many memories and cpu cores of the user function will be consumed during the execution . |
15,118 | protected void runAsyncWithoutFencing ( Runnable runnable ) { if ( rpcServer instanceof FencedMainThreadExecutable ) { ( ( FencedMainThreadExecutable ) rpcServer ) . runAsyncWithoutFencing ( runnable ) ; } else { throw new RuntimeException ( "FencedRpcEndpoint has not been started with a FencedMainThreadExecutable RpcServer." ) ; } } | Run the given runnable in the main thread of the RpcEndpoint without checking the fencing token . This allows to run operations outside of the fencing token scope . |
15,119 | protected < V > CompletableFuture < V > callAsyncWithoutFencing ( Callable < V > callable , Time timeout ) { if ( rpcServer instanceof FencedMainThreadExecutable ) { return ( ( FencedMainThreadExecutable ) rpcServer ) . callAsyncWithoutFencing ( callable , timeout ) ; } else { throw new RuntimeException ( "FencedRpcEndpoint has not been started with a FencedMainThreadExecutable RpcServer." ) ; } } | Run the given callable in the main thread of the RpcEndpoint without checking the fencing token . This allows to run operations outside of the fencing token scope . |
15,120 | static FileSystemKind getKindForScheme ( String scheme ) { scheme = scheme . toLowerCase ( Locale . US ) ; if ( scheme . startsWith ( "s3" ) || scheme . startsWith ( "emr" ) || scheme . startsWith ( "oss" ) ) { return FileSystemKind . OBJECT_STORE ; } else if ( scheme . startsWith ( "http" ) || scheme . startsWith ( "ftp" ) ) { return FileSystemKind . OBJECT_STORE ; } else { return FileSystemKind . FILE_SYSTEM ; } } | Gets the kind of the file system from its scheme . |
15,121 | void updateSummary ( CompletedCheckpointStats completed ) { stateSize . add ( completed . getStateSize ( ) ) ; duration . add ( completed . getEndToEndDuration ( ) ) ; alignmentBuffered . add ( completed . getAlignmentBuffered ( ) ) ; } | Updates the summary with the given completed checkpoint . |
15,122 | private int getPartitioningFanOutNoEstimates ( ) { return Math . max ( 11 , findSmallerPrime ( ( int ) Math . min ( buildRowCount * avgRecordLen / ( 10 * segmentSize ) , MAX_NUM_PARTITIONS ) ) ) ; } | Gets the number of partitions to be used for an initial hash - table . |
15,123 | public void freeCurrent ( ) { int beforeReleaseNum = availableMemory . size ( ) ; memManager . release ( availableMemory ) ; allocatedFloatingNum -= ( beforeReleaseNum - availableMemory . size ( ) ) ; } | Free the memory not used . |
15,124 | public static void addDeprecations ( DeprecationDelta [ ] deltas ) { DeprecationContext prev , next ; do { prev = deprecationContext . get ( ) ; next = new DeprecationContext ( prev , deltas ) ; } while ( ! deprecationContext . compareAndSet ( prev , next ) ) ; } | Adds a set of deprecated keys to the global deprecations . |
15,125 | public void setDeprecatedProperties ( ) { DeprecationContext deprecations = deprecationContext . get ( ) ; Properties props = getProps ( ) ; Properties overlay = getOverlay ( ) ; for ( Map . Entry < String , DeprecatedKeyInfo > entry : deprecations . getDeprecatedKeyMap ( ) . entrySet ( ) ) { String depKey = entry . getKey ( ) ; if ( ! overlay . contains ( depKey ) ) { for ( String newKey : entry . getValue ( ) . newKeys ) { String val = overlay . getProperty ( newKey ) ; if ( val != null ) { props . setProperty ( depKey , val ) ; overlay . setProperty ( depKey , val ) ; break ; } } } } } | Sets all deprecated properties that are not currently set but have a corresponding new property that is set . Useful for iterating the properties when all deprecated properties for currently set properties need to be present . |
15,126 | public static synchronized void reloadExistingConfigurations ( ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Reloading " + REGISTRY . keySet ( ) . size ( ) + " existing configurations" ) ; } for ( Configuration conf : REGISTRY . keySet ( ) ) { conf . reloadConfiguration ( ) ; } } | Reload existing configuration instances . |
15,127 | public double getStorageSize ( String name , String defaultValue , StorageUnit targetUnit ) { Preconditions . checkState ( isNotBlank ( name ) , "Key cannot be blank." ) ; String vString = get ( name ) ; if ( isBlank ( vString ) ) { vString = defaultValue ; } StorageSize measure = StorageSize . parse ( vString ) ; return convertStorageUnit ( measure . getValue ( ) , measure . getUnit ( ) , targetUnit ) ; } | Gets the Storage Size from the config or returns the defaultValue . The unit of return value is specified in target unit . |
15,128 | public void setStorageSize ( String name , double value , StorageUnit unit ) { set ( name , value + unit . getShortName ( ) ) ; } | Sets Storage Size for the specified key . |
15,129 | private double convertStorageUnit ( double value , StorageUnit sourceUnit , StorageUnit targetUnit ) { double byteValue = sourceUnit . toBytes ( value ) ; return targetUnit . fromBytes ( byteValue ) ; } | convert the value from one storage unit to another . |
15,130 | public IntegerRanges getRange ( String name , String defaultValue ) { return new IntegerRanges ( get ( name , defaultValue ) ) ; } | Parse the given attribute as a set of integer ranges |
15,131 | public char [ ] getPassword ( String name ) throws IOException { char [ ] pass = null ; pass = getPasswordFromCredentialProviders ( name ) ; if ( pass == null ) { pass = getPasswordFromConfig ( name ) ; } return pass ; } | Get the value for a known password configuration element . In order to enable the elimination of clear text passwords in config this method attempts to resolve the property name as an alias through the CredentialProvider API and conditionally fallsback to config . |
15,132 | private CredentialEntry getCredentialEntry ( CredentialProvider provider , String name ) throws IOException { CredentialEntry entry = provider . getCredentialEntry ( name ) ; if ( entry != null ) { return entry ; } String oldName = getDeprecatedKey ( name ) ; if ( oldName != null ) { entry = provider . getCredentialEntry ( oldName ) ; if ( entry != null ) { logDeprecationOnce ( oldName , provider . toString ( ) ) ; return entry ; } } DeprecatedKeyInfo keyInfo = getDeprecatedKeyInfo ( name ) ; if ( keyInfo != null && keyInfo . newKeys != null ) { for ( String newName : keyInfo . newKeys ) { entry = provider . getCredentialEntry ( newName ) ; if ( entry != null ) { logDeprecationOnce ( name , null ) ; return entry ; } } } return null ; } | Get the credential entry by name from a credential provider . |
15,133 | public Class < ? > getClassByNameOrNull ( String name ) { Map < String , WeakReference < Class < ? > > > map ; synchronized ( CACHE_CLASSES ) { map = CACHE_CLASSES . get ( classLoader ) ; if ( map == null ) { map = Collections . synchronizedMap ( new WeakHashMap < String , WeakReference < Class < ? > > > ( ) ) ; CACHE_CLASSES . put ( classLoader , map ) ; } } Class < ? > clazz = null ; WeakReference < Class < ? > > ref = map . get ( name ) ; if ( ref != null ) { clazz = ref . get ( ) ; } if ( clazz == null ) { try { clazz = Class . forName ( name , true , classLoader ) ; } catch ( ClassNotFoundException e ) { map . put ( name , new WeakReference < Class < ? > > ( NEGATIVE_CACHE_SENTINEL ) ) ; return null ; } map . put ( name , new WeakReference < Class < ? > > ( clazz ) ) ; return clazz ; } else if ( clazz == NEGATIVE_CACHE_SENTINEL ) { return null ; } else { return clazz ; } } | Load a class by name returning null rather than throwing an exception if it couldn t be loaded . This is to avoid the overhead of creating an exception . |
15,134 | public Set < String > getFinalParameters ( ) { Set < String > setFinalParams = Collections . newSetFromMap ( new ConcurrentHashMap < String , Boolean > ( ) ) ; setFinalParams . addAll ( finalParameters ) ; return setFinalParams ; } | Get the set of parameters marked final . |
15,135 | private void checkForOverride ( Properties properties , String name , String attr , String value ) { String propertyValue = properties . getProperty ( attr ) ; if ( propertyValue != null && ! propertyValue . equals ( value ) ) { LOG . warn ( name + ":an attempt to override final parameter: " + attr + "; Ignoring." ) ; } } | Print a warning if a property with a given name already exists with a different value |
15,136 | public static boolean hasWarnedDeprecation ( String name ) { DeprecationContext deprecations = deprecationContext . get ( ) ; if ( deprecations . getDeprecatedKeyMap ( ) . containsKey ( name ) ) { if ( deprecations . getDeprecatedKeyMap ( ) . get ( name ) . accessed . get ( ) ) { return true ; } } return false ; } | Returns whether or not a deprecated name has been warned . If the name is not deprecated then always return false |
15,137 | public void putBuildRow ( BaseRow row ) throws IOException { final int hashCode = hash ( this . buildSideProjection . apply ( row ) . hashCode ( ) , 0 ) ; insertIntoTable ( originBuildSideSerializer . baseRowToBinary ( row ) , hashCode ) ; } | Put a build side row to hash table . |
15,138 | public void endBuild ( ) throws IOException { int buildWriteBuffers = 0 ; for ( BinaryHashPartition p : this . partitionsBeingBuilt ) { buildWriteBuffers += p . finalizeBuildPhase ( this . ioManager , this . currentEnumerator ) ; } buildSpillRetBufferNumbers += buildWriteBuffers ; this . probeIterator = new ProbeIterator ( this . binaryProbeSideSerializer . createInstance ( ) ) ; this . bucketIterator = new LookupBucketIterator ( this ) ; } | End build phase . |
15,139 | public boolean tryProbe ( BaseRow record ) throws IOException { if ( ! this . probeIterator . hasSource ( ) ) { this . probeIterator . setInstance ( record ) ; } BinaryRow probeKey = probeSideProjection . apply ( record ) ; final int hash = hash ( probeKey . hashCode ( ) , this . currentRecursionDepth ) ; BinaryHashPartition p = this . partitionsBeingBuilt . get ( hash % partitionsBeingBuilt . size ( ) ) ; if ( p . isInMemory ( ) ) { this . probeKey = probeKey ; this . probeRow = record ; p . bucketArea . startLookup ( hash ) ; return true ; } else { if ( p . testHashBloomFilter ( hash ) ) { BinaryRow row = originProbeSideSerializer . baseRowToBinary ( record ) ; p . insertIntoProbeBuffer ( row ) ; } return false ; } } | Find matched build side rows for a probe row . |
15,140 | public boolean isProperlyShutDown ( ) { for ( File path : paths ) { if ( path != null && path . exists ( ) ) { return false ; } } return true ; } | Utility method to check whether the IO manager has been properly shut down . For this base implementation this means that all files have been removed . |
15,141 | public void deleteChannel ( FileIOChannel . ID channel ) throws IOException { if ( channel != null ) { if ( channel . getPathFile ( ) . exists ( ) && ! channel . getPathFile ( ) . delete ( ) ) { LOG . warn ( "IOManager failed to delete temporary file {}" , channel . getPath ( ) ) ; } } } | Deletes the file underlying the given channel . If the channel is still open this call may fail . |
15,142 | public static String byteToHexString ( final byte [ ] bytes , final int start , final int end ) { if ( bytes == null ) { throw new IllegalArgumentException ( "bytes == null" ) ; } int length = end - start ; char [ ] out = new char [ length * 2 ] ; for ( int i = start , j = 0 ; i < end ; i ++ ) { out [ j ++ ] = HEX_CHARS [ ( 0xF0 & bytes [ i ] ) >>> 4 ] ; out [ j ++ ] = HEX_CHARS [ 0x0F & bytes [ i ] ] ; } return new String ( out ) ; } | Given an array of bytes it will convert the bytes to a hex string representation of the bytes . |
15,143 | public static String generateRandomAlphanumericString ( Random rnd , int length ) { checkNotNull ( rnd ) ; checkArgument ( length >= 0 ) ; StringBuilder buffer = new StringBuilder ( length ) ; for ( int i = 0 ; i < length ; i ++ ) { buffer . append ( nextAlphanumericChar ( rnd ) ) ; } return buffer . toString ( ) ; } | Creates a random alphanumeric string of given length . |
15,144 | public void startThreads ( ) { if ( this . sortThread != null ) { this . sortThread . start ( ) ; } if ( this . spillThread != null ) { this . spillThread . start ( ) ; } if ( this . mergeThread != null ) { this . mergeThread . start ( ) ; } } | Starts all the threads that are used by this sorter . |
15,145 | public void dispose ( ) { IOUtils . closeQuietly ( cancelStreamRegistry ) ; if ( kvStateRegistry != null ) { kvStateRegistry . unregisterAll ( ) ; } lastName = null ; lastState = null ; keyValueStatesByName . clear ( ) ; } | Closes the state backend releasing all internal resources but does not delete any persistent checkpoint data . |
15,146 | @ SuppressWarnings ( "unchecked" ) public static < T > T stripProxy ( final WrappingProxy < T > wrappingProxy ) { if ( wrappingProxy == null ) { return null ; } T delegate = wrappingProxy . getWrappedDelegate ( ) ; int numProxiesStripped = 0 ; while ( delegate instanceof WrappingProxy ) { throwIfSafetyNetExceeded ( ++ numProxiesStripped ) ; delegate = ( ( WrappingProxy < T > ) delegate ) . getWrappedDelegate ( ) ; } return delegate ; } | Expects a proxy and returns the unproxied delegate . |
15,147 | public void close ( ) { IOUtils . closeQuietly ( defaultColumnFamilyHandle ) ; IOUtils . closeQuietly ( nativeMetricMonitor ) ; IOUtils . closeQuietly ( db ) ; columnFamilyDescriptors . forEach ( ( cfd ) -> IOUtils . closeQuietly ( cfd . getOptions ( ) ) ) ; } | Necessary clean up iff restore operation failed . |
15,148 | public static boolean isRestSSLEnabled ( Configuration sslConfig ) { @ SuppressWarnings ( "deprecation" ) final boolean fallbackFlag = sslConfig . getBoolean ( SecurityOptions . SSL_ENABLED ) ; return sslConfig . getBoolean ( SecurityOptions . SSL_REST_ENABLED , fallbackFlag ) ; } | Checks whether SSL for the external REST endpoint is enabled . |
15,149 | public static boolean isRestSSLAuthenticationEnabled ( Configuration sslConfig ) { checkNotNull ( sslConfig , "sslConfig" ) ; return isRestSSLEnabled ( sslConfig ) && sslConfig . getBoolean ( SecurityOptions . SSL_REST_AUTHENTICATION_ENABLED ) ; } | Checks whether mutual SSL authentication for the external REST endpoint is enabled . |
15,150 | public static ServerSocketFactory createSSLServerSocketFactory ( Configuration config ) throws Exception { SSLContext sslContext = createInternalSSLContext ( config ) ; if ( sslContext == null ) { throw new IllegalConfigurationException ( "SSL is not enabled" ) ; } String [ ] protocols = getEnabledProtocols ( config ) ; String [ ] cipherSuites = getEnabledCipherSuites ( config ) ; SSLServerSocketFactory factory = sslContext . getServerSocketFactory ( ) ; return new ConfiguringSSLServerSocketFactory ( factory , protocols , cipherSuites ) ; } | Creates a factory for SSL Server Sockets from the given configuration . SSL Server Sockets are always part of internal communication . |
15,151 | public static SocketFactory createSSLClientSocketFactory ( Configuration config ) throws Exception { SSLContext sslContext = createInternalSSLContext ( config ) ; if ( sslContext == null ) { throw new IllegalConfigurationException ( "SSL is not enabled" ) ; } return sslContext . getSocketFactory ( ) ; } | Creates a factory for SSL Client Sockets from the given configuration . SSL Client Sockets are always part of internal communication . |
15,152 | public static SSLHandlerFactory createInternalServerSSLEngineFactory ( final Configuration config ) throws Exception { SSLContext sslContext = createInternalSSLContext ( config ) ; if ( sslContext == null ) { throw new IllegalConfigurationException ( "SSL is not enabled for internal communication." ) ; } return new SSLHandlerFactory ( sslContext , getEnabledProtocols ( config ) , getEnabledCipherSuites ( config ) , false , true , config . getInteger ( SecurityOptions . SSL_INTERNAL_HANDSHAKE_TIMEOUT ) , config . getInteger ( SecurityOptions . SSL_INTERNAL_CLOSE_NOTIFY_FLUSH_TIMEOUT ) ) ; } | Creates a SSLEngineFactory to be used by internal communication server endpoints . |
15,153 | private static SSLContext createInternalSSLContext ( Configuration config ) throws Exception { checkNotNull ( config , "config" ) ; if ( ! isInternalSSLEnabled ( config ) ) { return null ; } String keystoreFilePath = getAndCheckOption ( config , SecurityOptions . SSL_INTERNAL_KEYSTORE , SecurityOptions . SSL_KEYSTORE ) ; String keystorePassword = getAndCheckOption ( config , SecurityOptions . SSL_INTERNAL_KEYSTORE_PASSWORD , SecurityOptions . SSL_KEYSTORE_PASSWORD ) ; String certPassword = getAndCheckOption ( config , SecurityOptions . SSL_INTERNAL_KEY_PASSWORD , SecurityOptions . SSL_KEY_PASSWORD ) ; String trustStoreFilePath = getAndCheckOption ( config , SecurityOptions . SSL_INTERNAL_TRUSTSTORE , SecurityOptions . SSL_TRUSTSTORE ) ; String trustStorePassword = getAndCheckOption ( config , SecurityOptions . SSL_INTERNAL_TRUSTSTORE_PASSWORD , SecurityOptions . SSL_TRUSTSTORE_PASSWORD ) ; String sslProtocolVersion = config . getString ( SecurityOptions . SSL_PROTOCOL ) ; int sessionCacheSize = config . getInteger ( SecurityOptions . SSL_INTERNAL_SESSION_CACHE_SIZE ) ; int sessionTimeoutMs = config . getInteger ( SecurityOptions . SSL_INTERNAL_SESSION_TIMEOUT ) ; KeyStore keyStore = KeyStore . getInstance ( KeyStore . getDefaultType ( ) ) ; try ( InputStream keyStoreFile = Files . newInputStream ( new File ( keystoreFilePath ) . toPath ( ) ) ) { keyStore . load ( keyStoreFile , keystorePassword . toCharArray ( ) ) ; } KeyStore trustStore = KeyStore . getInstance ( KeyStore . getDefaultType ( ) ) ; try ( InputStream trustStoreFile = Files . newInputStream ( new File ( trustStoreFilePath ) . toPath ( ) ) ) { trustStore . load ( trustStoreFile , trustStorePassword . toCharArray ( ) ) ; } KeyManagerFactory kmf = KeyManagerFactory . getInstance ( KeyManagerFactory . getDefaultAlgorithm ( ) ) ; kmf . init ( keyStore , certPassword . toCharArray ( ) ) ; TrustManagerFactory tmf = TrustManagerFactory . getInstance ( TrustManagerFactory . 
getDefaultAlgorithm ( ) ) ; tmf . init ( trustStore ) ; SSLContext sslContext = SSLContext . getInstance ( sslProtocolVersion ) ; sslContext . init ( kmf . getKeyManagers ( ) , tmf . getTrustManagers ( ) , null ) ; if ( sessionCacheSize >= 0 ) { sslContext . getClientSessionContext ( ) . setSessionCacheSize ( sessionCacheSize ) ; } if ( sessionTimeoutMs >= 0 ) { sslContext . getClientSessionContext ( ) . setSessionTimeout ( sessionTimeoutMs / 1000 ) ; } return sslContext ; } | Creates the SSL Context for internal SSL if internal SSL is configured . For internal SSL the client and server side configuration are identical because of mutual authentication . |
15,154 | private static SSLContext createRestSSLContext ( Configuration config , RestSSLContextConfigMode configMode ) throws Exception { checkNotNull ( config , "config" ) ; if ( ! isRestSSLEnabled ( config ) ) { return null ; } KeyManager [ ] keyManagers = null ; if ( configMode == RestSSLContextConfigMode . SERVER || configMode == RestSSLContextConfigMode . MUTUAL ) { String keystoreFilePath = getAndCheckOption ( config , SecurityOptions . SSL_REST_KEYSTORE , SecurityOptions . SSL_KEYSTORE ) ; String keystorePassword = getAndCheckOption ( config , SecurityOptions . SSL_REST_KEYSTORE_PASSWORD , SecurityOptions . SSL_KEYSTORE_PASSWORD ) ; String certPassword = getAndCheckOption ( config , SecurityOptions . SSL_REST_KEY_PASSWORD , SecurityOptions . SSL_KEY_PASSWORD ) ; KeyStore keyStore = KeyStore . getInstance ( KeyStore . getDefaultType ( ) ) ; try ( InputStream keyStoreFile = Files . newInputStream ( new File ( keystoreFilePath ) . toPath ( ) ) ) { keyStore . load ( keyStoreFile , keystorePassword . toCharArray ( ) ) ; } KeyManagerFactory kmf = KeyManagerFactory . getInstance ( KeyManagerFactory . getDefaultAlgorithm ( ) ) ; kmf . init ( keyStore , certPassword . toCharArray ( ) ) ; keyManagers = kmf . getKeyManagers ( ) ; } TrustManager [ ] trustManagers = null ; if ( configMode == RestSSLContextConfigMode . CLIENT || configMode == RestSSLContextConfigMode . MUTUAL ) { String trustStoreFilePath = getAndCheckOption ( config , SecurityOptions . SSL_REST_TRUSTSTORE , SecurityOptions . SSL_TRUSTSTORE ) ; String trustStorePassword = getAndCheckOption ( config , SecurityOptions . SSL_REST_TRUSTSTORE_PASSWORD , SecurityOptions . SSL_TRUSTSTORE_PASSWORD ) ; KeyStore trustStore = KeyStore . getInstance ( KeyStore . getDefaultType ( ) ) ; try ( InputStream trustStoreFile = Files . newInputStream ( new File ( trustStoreFilePath ) . toPath ( ) ) ) { trustStore . load ( trustStoreFile , trustStorePassword . 
toCharArray ( ) ) ; } TrustManagerFactory tmf = TrustManagerFactory . getInstance ( TrustManagerFactory . getDefaultAlgorithm ( ) ) ; tmf . init ( trustStore ) ; trustManagers = tmf . getTrustManagers ( ) ; } String sslProtocolVersion = config . getString ( SecurityOptions . SSL_PROTOCOL ) ; SSLContext sslContext = SSLContext . getInstance ( sslProtocolVersion ) ; sslContext . init ( keyManagers , trustManagers , null ) ; return sslContext ; } | Creates an SSL context for the external REST SSL . If mutual authentication is configured the client and the server side configuration are identical . |
15,155 | public static SSLContext createRestServerSSLContext ( Configuration config ) throws Exception { final RestSSLContextConfigMode configMode ; if ( isRestSSLAuthenticationEnabled ( config ) ) { configMode = RestSSLContextConfigMode . MUTUAL ; } else { configMode = RestSSLContextConfigMode . SERVER ; } return createRestSSLContext ( config , configMode ) ; } | Creates an SSL context for the external REST endpoint server . |
15,156 | public static SSLContext createRestClientSSLContext ( Configuration config ) throws Exception { final RestSSLContextConfigMode configMode ; if ( isRestSSLAuthenticationEnabled ( config ) ) { configMode = RestSSLContextConfigMode . MUTUAL ; } else { configMode = RestSSLContextConfigMode . CLIENT ; } return createRestSSLContext ( config , configMode ) ; } | Creates an SSL context for clients against the external REST endpoint . |
15,157 | public final TypeSerializer < T > currentSchemaSerializer ( ) { if ( registeredSerializer != null ) { checkState ( ! isRegisteredWithIncompatibleSerializer , "Unable to provide a serializer with the current schema, because the restored state was " + "registered with a new serializer that has incompatible schema." ) ; return registeredSerializer ; } return previousSchemaSerializer ( ) ; } | Gets the serializer that recognizes the current serialization schema of the state . This is the serializer that should be used for regular state serialization and deserialization after state has been restored . |
15,158 | public final TypeSerializer < T > previousSchemaSerializer ( ) { if ( cachedRestoredSerializer != null ) { return cachedRestoredSerializer ; } if ( previousSerializerSnapshot == null ) { throw new UnsupportedOperationException ( "This provider does not contain the state's previous serializer's snapshot. Cannot provider a serializer for previous schema." ) ; } this . cachedRestoredSerializer = previousSerializerSnapshot . restoreSerializer ( ) ; return cachedRestoredSerializer ; } | Gets the serializer that recognizes the previous serialization schema of the state . This is the serializer that should be used for restoring the state i . e . when the state is still in the previous serialization schema . |
15,159 | public CompletableFuture < Acknowledge > deregisterApplication ( final ApplicationStatus finalStatus , final String diagnostics ) { log . info ( "Shut down cluster because application is in {}, diagnostics {}." , finalStatus , diagnostics ) ; try { internalDeregisterApplication ( finalStatus , diagnostics ) ; } catch ( ResourceManagerException e ) { log . warn ( "Could not properly shutdown the application." , e ) ; } return CompletableFuture . completedFuture ( Acknowledge . get ( ) ) ; } | Cleanup application and shut down cluster . |
15,160 | private RegistrationResponse registerJobMasterInternal ( final JobMasterGateway jobMasterGateway , JobID jobId , String jobManagerAddress , ResourceID jobManagerResourceId ) { if ( jobManagerRegistrations . containsKey ( jobId ) ) { JobManagerRegistration oldJobManagerRegistration = jobManagerRegistrations . get ( jobId ) ; if ( Objects . equals ( oldJobManagerRegistration . getJobMasterId ( ) , jobMasterGateway . getFencingToken ( ) ) ) { log . debug ( "Job manager {}@{} was already registered." , jobMasterGateway . getFencingToken ( ) , jobManagerAddress ) ; } else { disconnectJobManager ( oldJobManagerRegistration . getJobID ( ) , new Exception ( "New job leader for job " + jobId + " found." ) ) ; JobManagerRegistration jobManagerRegistration = new JobManagerRegistration ( jobId , jobManagerResourceId , jobMasterGateway ) ; jobManagerRegistrations . put ( jobId , jobManagerRegistration ) ; jmResourceIdRegistrations . put ( jobManagerResourceId , jobManagerRegistration ) ; } } else { JobManagerRegistration jobManagerRegistration = new JobManagerRegistration ( jobId , jobManagerResourceId , jobMasterGateway ) ; jobManagerRegistrations . put ( jobId , jobManagerRegistration ) ; jmResourceIdRegistrations . put ( jobManagerResourceId , jobManagerRegistration ) ; } log . info ( "Registered job manager {}@{} for job {}." , jobMasterGateway . getFencingToken ( ) , jobManagerAddress , jobId ) ; jobManagerHeartbeatManager . monitorTarget ( jobManagerResourceId , new HeartbeatTarget < Void > ( ) { public void receiveHeartbeat ( ResourceID resourceID , Void payload ) { } public void requestHeartbeat ( ResourceID resourceID , Void payload ) { jobMasterGateway . heartbeatFromResourceManager ( resourceID ) ; } } ) ; return new JobMasterRegistrationSuccess ( getFencingToken ( ) , resourceId ) ; } | Registers a new JobMaster . |
15,161 | private RegistrationResponse registerTaskExecutorInternal ( TaskExecutorGateway taskExecutorGateway , String taskExecutorAddress , ResourceID taskExecutorResourceId , int dataPort , HardwareDescription hardwareDescription ) { WorkerRegistration < WorkerType > oldRegistration = taskExecutors . remove ( taskExecutorResourceId ) ; if ( oldRegistration != null ) { log . debug ( "Replacing old registration of TaskExecutor {}." , taskExecutorResourceId ) ; slotManager . unregisterTaskManager ( oldRegistration . getInstanceID ( ) ) ; } final WorkerType newWorker = workerStarted ( taskExecutorResourceId ) ; if ( newWorker == null ) { log . warn ( "Discard registration from TaskExecutor {} at ({}) because the framework did " + "not recognize it" , taskExecutorResourceId , taskExecutorAddress ) ; return new RegistrationResponse . Decline ( "unrecognized TaskExecutor" ) ; } else { WorkerRegistration < WorkerType > registration = new WorkerRegistration < > ( taskExecutorGateway , newWorker , dataPort , hardwareDescription ) ; log . info ( "Registering TaskManager with ResourceID {} ({}) at ResourceManager" , taskExecutorResourceId , taskExecutorAddress ) ; taskExecutors . put ( taskExecutorResourceId , registration ) ; taskManagerHeartbeatManager . monitorTarget ( taskExecutorResourceId , new HeartbeatTarget < Void > ( ) { public void receiveHeartbeat ( ResourceID resourceID , Void payload ) { } public void requestHeartbeat ( ResourceID resourceID , Void payload ) { taskExecutorGateway . heartbeatFromResourceManager ( resourceID ) ; } } ) ; return new TaskExecutorRegistrationSuccess ( registration . getInstanceID ( ) , resourceId , clusterInformation ) ; } } | Registers a new TaskExecutor . |
15,162 | protected void closeJobManagerConnection ( JobID jobId , Exception cause ) { JobManagerRegistration jobManagerRegistration = jobManagerRegistrations . remove ( jobId ) ; if ( jobManagerRegistration != null ) { final ResourceID jobManagerResourceId = jobManagerRegistration . getJobManagerResourceID ( ) ; final JobMasterGateway jobMasterGateway = jobManagerRegistration . getJobManagerGateway ( ) ; final JobMasterId jobMasterId = jobManagerRegistration . getJobMasterId ( ) ; log . info ( "Disconnect job manager {}@{} for job {} from the resource manager." , jobMasterId , jobMasterGateway . getAddress ( ) , jobId ) ; jobManagerHeartbeatManager . unmonitorTarget ( jobManagerResourceId ) ; jmResourceIdRegistrations . remove ( jobManagerResourceId ) ; jobMasterGateway . disconnectResourceManager ( getFencingToken ( ) , cause ) ; } else { log . debug ( "There was no registered job manager for job {}." , jobId ) ; } } | This method should be called by the framework once it detects that a currently registered job manager has failed . |
15,163 | protected void closeTaskManagerConnection ( final ResourceID resourceID , final Exception cause ) { taskManagerHeartbeatManager . unmonitorTarget ( resourceID ) ; WorkerRegistration < WorkerType > workerRegistration = taskExecutors . remove ( resourceID ) ; if ( workerRegistration != null ) { log . info ( "Closing TaskExecutor connection {} because: {}" , resourceID , cause . getMessage ( ) ) ; slotManager . unregisterTaskManager ( workerRegistration . getInstanceID ( ) ) ; workerRegistration . getTaskExecutorGateway ( ) . disconnectResourceManager ( cause ) ; } else { log . debug ( "No open TaskExecutor connection {}. Ignoring close TaskExecutor connection. Closing reason was: {}" , resourceID , cause . getMessage ( ) ) ; } } | This method should be called by the framework once it detects that a currently registered task executor has failed . |
15,164 | protected void onFatalError ( Throwable t ) { try { log . error ( "Fatal error occurred in ResourceManager." , t ) ; } catch ( Throwable ignored ) { } fatalErrorHandler . onFatalError ( t ) ; } | Notifies the ResourceManager that a fatal error has occurred and it cannot proceed . |
15,165 | public void notifyKvStateRegistered ( JobVertexID jobVertexId , KeyGroupRange keyGroupRange , String registrationName , KvStateID kvStateId , InetSocketAddress kvStateServerAddress ) { KvStateLocation location = lookupTable . get ( registrationName ) ; if ( location == null ) { ExecutionJobVertex vertex = jobVertices . get ( jobVertexId ) ; if ( vertex != null ) { int parallelism = vertex . getMaxParallelism ( ) ; location = new KvStateLocation ( jobId , jobVertexId , parallelism , registrationName ) ; lookupTable . put ( registrationName , location ) ; } else { throw new IllegalArgumentException ( "Unknown JobVertexID " + jobVertexId ) ; } } if ( ! location . getJobVertexId ( ) . equals ( jobVertexId ) ) { IllegalStateException duplicate = new IllegalStateException ( "Registration name clash. KvState with name '" + registrationName + "' has already been registered by another operator (" + location . getJobVertexId ( ) + ")." ) ; ExecutionJobVertex vertex = jobVertices . get ( jobVertexId ) ; if ( vertex != null ) { vertex . fail ( new SuppressRestartsException ( duplicate ) ) ; } throw duplicate ; } location . registerKvState ( keyGroupRange , kvStateId , kvStateServerAddress ) ; } | Notifies the registry about a registered KvState instance . |
15,166 | public void notifyKvStateUnregistered ( JobVertexID jobVertexId , KeyGroupRange keyGroupRange , String registrationName ) { KvStateLocation location = lookupTable . get ( registrationName ) ; if ( location != null ) { if ( ! location . getJobVertexId ( ) . equals ( jobVertexId ) ) { throw new IllegalArgumentException ( "Another operator (" + location . getJobVertexId ( ) + ") registered the KvState " + "under '" + registrationName + "'." ) ; } location . unregisterKvState ( keyGroupRange ) ; if ( location . getNumRegisteredKeyGroups ( ) == 0 ) { lookupTable . remove ( registrationName ) ; } } else { throw new IllegalArgumentException ( "Unknown registration name '" + registrationName + "'. " + "Probably registration/unregistration race." ) ; } } | Notifies the registry about an unregistered KvState instance . |
15,167 | private static void extractIntersectingState ( Collection < KeyedStateHandle > originalSubtaskStateHandles , KeyGroupRange rangeToExtract , List < KeyedStateHandle > extractedStateCollector ) { for ( KeyedStateHandle keyedStateHandle : originalSubtaskStateHandles ) { if ( keyedStateHandle != null ) { KeyedStateHandle intersectedKeyedStateHandle = keyedStateHandle . getIntersection ( rangeToExtract ) ; if ( intersectedKeyedStateHandle != null ) { extractedStateCollector . add ( intersectedKeyedStateHandle ) ; } } } } | Extracts certain key group ranges from the given state handles and adds them to the collector . |
15,168 | private static void checkParallelismPreconditions ( OperatorState operatorState , ExecutionJobVertex executionJobVertex ) { if ( operatorState . getMaxParallelism ( ) < executionJobVertex . getParallelism ( ) ) { throw new IllegalStateException ( "The state for task " + executionJobVertex . getJobVertexId ( ) + " can not be restored. The maximum parallelism (" + operatorState . getMaxParallelism ( ) + ") of the restored state is lower than the configured parallelism (" + executionJobVertex . getParallelism ( ) + "). Please reduce the parallelism of the task to be lower or equal to the maximum parallelism." ) ; } if ( operatorState . getMaxParallelism ( ) != executionJobVertex . getMaxParallelism ( ) ) { if ( ! executionJobVertex . isMaxParallelismConfigured ( ) ) { LOG . debug ( "Overriding maximum parallelism for JobVertex {} from {} to {}" , executionJobVertex . getJobVertexId ( ) , executionJobVertex . getMaxParallelism ( ) , operatorState . getMaxParallelism ( ) ) ; executionJobVertex . setMaxParallelism ( operatorState . getMaxParallelism ( ) ) ; } else { throw new IllegalStateException ( "The maximum parallelism (" + operatorState . getMaxParallelism ( ) + ") with which the latest " + "checkpoint of the execution job vertex " + executionJobVertex + " has been taken and the current maximum parallelism (" + executionJobVertex . getMaxParallelism ( ) + ") changed. This " + "is currently not supported." ) ; } } } | Verifies conditions in regards to parallelism and maxParallelism that must be met when restoring state . |
15,169 | private static void checkStateMappingCompleteness ( boolean allowNonRestoredState , Map < OperatorID , OperatorState > operatorStates , Map < JobVertexID , ExecutionJobVertex > tasks ) { Set < OperatorID > allOperatorIDs = new HashSet < > ( ) ; for ( ExecutionJobVertex executionJobVertex : tasks . values ( ) ) { allOperatorIDs . addAll ( executionJobVertex . getOperatorIDs ( ) ) ; } for ( Map . Entry < OperatorID , OperatorState > operatorGroupStateEntry : operatorStates . entrySet ( ) ) { OperatorState operatorState = operatorGroupStateEntry . getValue ( ) ; if ( ! allOperatorIDs . contains ( operatorGroupStateEntry . getKey ( ) ) ) { if ( allowNonRestoredState ) { LOG . info ( "Skipped checkpoint state for operator {}." , operatorState . getOperatorID ( ) ) ; } else { throw new IllegalStateException ( "There is no operator for the state " + operatorState . getOperatorID ( ) ) ; } } } } | Verifies that all operator states can be mapped to an execution job vertex . |
15,170 | public void shutdownAndWait ( ) { try { client . shutdown ( ) . get ( ) ; LOG . info ( "The Queryable State Client was shutdown successfully." ) ; } catch ( Exception e ) { LOG . warn ( "The Queryable State Client shutdown failed: " , e ) ; } } | Shuts down the client and waits until shutdown is completed . |
15,171 | private CompletableFuture < KvStateResponse > getKvState ( final JobID jobId , final String queryableStateName , final int keyHashCode , final byte [ ] serializedKeyAndNamespace ) { LOG . debug ( "Sending State Request to {}." , remoteAddress ) ; try { KvStateRequest request = new KvStateRequest ( jobId , queryableStateName , keyHashCode , serializedKeyAndNamespace ) ; return client . sendRequest ( remoteAddress , request ) ; } catch ( Exception e ) { LOG . error ( "Unable to send KVStateRequest: " , e ) ; return FutureUtils . getFailedFuture ( e ) ; } } | Returns a future holding the serialized request result . |
15,172 | public TypeSerializer < T > getElementSerializer ( ) { final TypeSerializer < List < T > > rawSerializer = getSerializer ( ) ; if ( ! ( rawSerializer instanceof ListSerializer ) ) { throw new IllegalStateException ( ) ; } return ( ( ListSerializer < T > ) rawSerializer ) . getElementSerializer ( ) ; } | Gets the serializer for the elements contained in the list . |
15,173 | public static void main ( String [ ] args ) { EnvironmentInformation . logEnvironmentInfo ( LOG , "YARN TaskExecutor runner" , args ) ; SignalHandler . register ( LOG ) ; JvmShutdownSafeguard . installAsShutdownHook ( LOG ) ; run ( args ) ; } | The entry point for the YARN task executor runner . |
15,174 | public void returnLogicalSlot ( LogicalSlot logicalSlot ) { checkNotNull ( logicalSlot ) ; checkArgument ( logicalSlot instanceof Slot ) ; final Slot slot = ( ( Slot ) logicalSlot ) ; checkArgument ( ! slot . isAlive ( ) , "slot is still alive" ) ; checkArgument ( slot . getOwner ( ) == this , "slot belongs to the wrong TaskManager." ) ; if ( slot . markReleased ( ) ) { LOG . debug ( "Return allocated slot {}." , slot ) ; synchronized ( instanceLock ) { if ( isDead ) { return ; } if ( this . allocatedSlots . remove ( slot ) ) { this . availableSlots . add ( slot . getSlotNumber ( ) ) ; if ( this . slotAvailabilityListener != null ) { this . slotAvailabilityListener . newSlotAvailable ( this ) ; } } else { throw new IllegalArgumentException ( "Slot was not allocated from this TaskManager." ) ; } } } } | Returns a slot that has been allocated from this instance . The slot needs to have been canceled prior to calling this method .
15,175 | public void close ( ) throws IOException { Throwable throwable = null ; try { socket . close ( ) ; sender . close ( ) ; receiver . close ( ) ; } catch ( Throwable t ) { throwable = t ; } try { destroyProcess ( process ) ; } catch ( Throwable t ) { throwable = ExceptionUtils . firstOrSuppressed ( t , throwable ) ; } ShutdownHookUtil . removeShutdownHook ( shutdownThread , getClass ( ) . getSimpleName ( ) , LOG ) ; ExceptionUtils . tryRethrowIOException ( throwable ) ; } | Closes this streamer . |
15,176 | public final void sendBroadCastVariables ( Configuration config ) throws IOException { try { int broadcastCount = config . getInteger ( PLANBINDER_CONFIG_BCVAR_COUNT , 0 ) ; String [ ] names = new String [ broadcastCount ] ; for ( int x = 0 ; x < names . length ; x ++ ) { names [ x ] = config . getString ( PLANBINDER_CONFIG_BCVAR_NAME_PREFIX + x , null ) ; } out . write ( new IntSerializer ( ) . serializeWithoutTypeInfo ( broadcastCount ) ) ; StringSerializer stringSerializer = new StringSerializer ( ) ; for ( String name : names ) { Iterator < byte [ ] > bcv = function . getRuntimeContext ( ) . < byte [ ] > getBroadcastVariable ( name ) . iterator ( ) ; out . write ( stringSerializer . serializeWithoutTypeInfo ( name ) ) ; while ( bcv . hasNext ( ) ) { out . writeByte ( 1 ) ; out . write ( bcv . next ( ) ) ; } out . writeByte ( 0 ) ; } } catch ( SocketTimeoutException ignored ) { throw new RuntimeException ( "External process for task " + function . getRuntimeContext ( ) . getTaskName ( ) + " stopped responding." + msg ) ; } } | Sends all broadcast - variables encoded in the configuration to the external process . |
15,177 | protected static Path getCheckpointDirectoryForJob ( Path baseCheckpointPath , JobID jobId ) { return new Path ( baseCheckpointPath , jobId . toString ( ) ) ; } | Builds directory into which a specific job checkpoints meaning the directory inside which it creates the checkpoint - specific subdirectories . |
15,178 | public static CheckpointStorageLocationReference encodePathAsReference ( Path path ) { byte [ ] refBytes = path . toString ( ) . getBytes ( StandardCharsets . UTF_8 ) ; byte [ ] bytes = new byte [ REFERENCE_MAGIC_NUMBER . length + refBytes . length ] ; System . arraycopy ( REFERENCE_MAGIC_NUMBER , 0 , bytes , 0 , REFERENCE_MAGIC_NUMBER . length ) ; System . arraycopy ( refBytes , 0 , bytes , REFERENCE_MAGIC_NUMBER . length , refBytes . length ) ; return new CheckpointStorageLocationReference ( bytes ) ; } | Encodes the given path as a reference in bytes . The path is encoded as a UTF - 8 string and prepended with a magic number .
15,179 | @ SuppressWarnings ( "unchecked" ) public T newInstance ( ClassLoader classLoader ) { try { return ( T ) compile ( classLoader ) . getConstructor ( Object [ ] . class ) . newInstance ( new Object [ ] { references } ) ; } catch ( Exception e ) { throw new RuntimeException ( "Could not instantiate generated class '" + className + "'" , e ) ; } } | Create a new instance of this generated class . |
15,180 | private void restoreWithoutRescaling ( KeyedStateHandle keyedStateHandle ) throws Exception { if ( keyedStateHandle instanceof IncrementalRemoteKeyedStateHandle ) { IncrementalRemoteKeyedStateHandle incrementalRemoteKeyedStateHandle = ( IncrementalRemoteKeyedStateHandle ) keyedStateHandle ; restorePreviousIncrementalFilesStatus ( incrementalRemoteKeyedStateHandle ) ; restoreFromRemoteState ( incrementalRemoteKeyedStateHandle ) ; } else if ( keyedStateHandle instanceof IncrementalLocalKeyedStateHandle ) { IncrementalLocalKeyedStateHandle incrementalLocalKeyedStateHandle = ( IncrementalLocalKeyedStateHandle ) keyedStateHandle ; restorePreviousIncrementalFilesStatus ( incrementalLocalKeyedStateHandle ) ; restoreFromLocalState ( incrementalLocalKeyedStateHandle ) ; } else { throw new BackendBuildingException ( "Unexpected state handle type, " + "expected " + IncrementalRemoteKeyedStateHandle . class + " or " + IncrementalLocalKeyedStateHandle . class + ", but found " + keyedStateHandle . getClass ( ) ) ; } } | Recovery from a single remote incremental state without rescaling . |
15,181 | private void restoreWithRescaling ( Collection < KeyedStateHandle > restoreStateHandles ) throws Exception { KeyedStateHandle initialHandle = RocksDBIncrementalCheckpointUtils . chooseTheBestStateHandleForInitial ( restoreStateHandles , keyGroupRange ) ; if ( initialHandle != null ) { restoreStateHandles . remove ( initialHandle ) ; initDBWithRescaling ( initialHandle ) ; } else { openDB ( ) ; } byte [ ] startKeyGroupPrefixBytes = new byte [ keyGroupPrefixBytes ] ; RocksDBKeySerializationUtils . serializeKeyGroup ( keyGroupRange . getStartKeyGroup ( ) , startKeyGroupPrefixBytes ) ; byte [ ] stopKeyGroupPrefixBytes = new byte [ keyGroupPrefixBytes ] ; RocksDBKeySerializationUtils . serializeKeyGroup ( keyGroupRange . getEndKeyGroup ( ) + 1 , stopKeyGroupPrefixBytes ) ; for ( KeyedStateHandle rawStateHandle : restoreStateHandles ) { if ( ! ( rawStateHandle instanceof IncrementalRemoteKeyedStateHandle ) ) { throw new IllegalStateException ( "Unexpected state handle type, " + "expected " + IncrementalRemoteKeyedStateHandle . class + ", but found " + rawStateHandle . getClass ( ) ) ; } Path temporaryRestoreInstancePath = new Path ( instanceBasePath . getAbsolutePath ( ) + UUID . randomUUID ( ) . toString ( ) ) ; try ( RestoredDBInstance tmpRestoreDBInfo = restoreDBInstanceFromStateHandle ( ( IncrementalRemoteKeyedStateHandle ) rawStateHandle , temporaryRestoreInstancePath ) ; RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper ( this . db ) ) { List < ColumnFamilyDescriptor > tmpColumnFamilyDescriptors = tmpRestoreDBInfo . columnFamilyDescriptors ; List < ColumnFamilyHandle > tmpColumnFamilyHandles = tmpRestoreDBInfo . columnFamilyHandles ; for ( int i = 0 ; i < tmpColumnFamilyDescriptors . size ( ) ; ++ i ) { ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles . get ( i ) ; ColumnFamilyHandle targetColumnFamilyHandle = getOrRegisterStateColumnFamilyHandle ( null , tmpRestoreDBInfo . stateMetaInfoSnapshots . get ( i ) ) . 
columnFamilyHandle ; try ( RocksIteratorWrapper iterator = RocksDBOperationUtils . getRocksIterator ( tmpRestoreDBInfo . db , tmpColumnFamilyHandle ) ) { iterator . seek ( startKeyGroupPrefixBytes ) ; while ( iterator . isValid ( ) ) { if ( RocksDBIncrementalCheckpointUtils . beforeThePrefixBytes ( iterator . key ( ) , stopKeyGroupPrefixBytes ) ) { writeBatchWrapper . put ( targetColumnFamilyHandle , iterator . key ( ) , iterator . value ( ) ) ; } else { break ; } iterator . next ( ) ; } } } } finally { cleanUpPathQuietly ( temporaryRestoreInstancePath ) ; } } } | Recovery from multi incremental states with rescaling . For rescaling this method creates a temporary RocksDB instance for a key - groups shard . All contents from the temporary instance are copied into the real restore instance and then the temporary instance is discarded . |
15,182 | private KeyedBackendSerializationProxy < K > readMetaData ( StreamStateHandle metaStateHandle ) throws Exception { FSDataInputStream inputStream = null ; try { inputStream = metaStateHandle . openInputStream ( ) ; cancelStreamRegistry . registerCloseable ( inputStream ) ; DataInputView in = new DataInputViewStreamWrapper ( inputStream ) ; return readMetaData ( in ) ; } finally { if ( cancelStreamRegistry . unregisterCloseable ( inputStream ) ) { inputStream . close ( ) ; } } } | Reads Flink s state meta data file from the state handle . |
15,183 | private static void deleteRange ( RocksDB db , List < ColumnFamilyHandle > columnFamilyHandles , byte [ ] beginKeyBytes , byte [ ] endKeyBytes ) throws RocksDBException { for ( ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles ) { try ( RocksIteratorWrapper iteratorWrapper = RocksDBOperationUtils . getRocksIterator ( db , columnFamilyHandle ) ; RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper ( db ) ) { iteratorWrapper . seek ( beginKeyBytes ) ; while ( iteratorWrapper . isValid ( ) ) { final byte [ ] currentKey = iteratorWrapper . key ( ) ; if ( beforeThePrefixBytes ( currentKey , endKeyBytes ) ) { writeBatchWrapper . remove ( columnFamilyHandle , currentKey ) ; } else { break ; } iteratorWrapper . next ( ) ; } } } } | Delete the record falls into [ beginKeyBytes endKeyBytes ) of the db . |
15,184 | public DoubleParameter setDefaultValue ( double defaultValue ) { super . setDefaultValue ( defaultValue ) ; if ( hasMinimumValue ) { if ( minimumValueInclusive ) { Util . checkParameter ( defaultValue >= minimumValue , "Default value (" + defaultValue + ") must be greater than or equal to minimum (" + minimumValue + ")" ) ; } else { Util . checkParameter ( defaultValue > minimumValue , "Default value (" + defaultValue + ") must be greater than minimum (" + minimumValue + ")" ) ; } } if ( hasMaximumValue ) { if ( maximumValueInclusive ) { Util . checkParameter ( defaultValue <= maximumValue , "Default value (" + defaultValue + ") must be less than or equal to maximum (" + maximumValue + ")" ) ; } else { Util . checkParameter ( defaultValue < maximumValue , "Default value (" + defaultValue + ") must be less than maximum (" + maximumValue + ")" ) ; } } return this ; } | Set the default value . |
15,185 | public DoubleParameter setMinimumValue ( double minimumValue , boolean inclusive ) { if ( hasDefaultValue ) { if ( inclusive ) { Util . checkParameter ( minimumValue <= defaultValue , "Minimum value (" + minimumValue + ") must be less than or equal to default (" + defaultValue + ")" ) ; } else { Util . checkParameter ( minimumValue < defaultValue , "Minimum value (" + minimumValue + ") must be less than default (" + defaultValue + ")" ) ; } } else if ( hasMaximumValue ) { if ( inclusive && maximumValueInclusive ) { Util . checkParameter ( minimumValue <= maximumValue , "Minimum value (" + minimumValue + ") must be less than or equal to maximum (" + maximumValue + ")" ) ; } else { Util . checkParameter ( minimumValue < maximumValue , "Minimum value (" + minimumValue + ") must be less than maximum (" + maximumValue + ")" ) ; } } this . hasMinimumValue = true ; this . minimumValue = minimumValue ; this . minimumValueInclusive = inclusive ; return this ; } | Set the minimum value . The minimum value is an acceptable value if and only if inclusive is set to true . |
15,186 | public DoubleParameter setMaximumValue ( double maximumValue , boolean inclusive ) { if ( hasDefaultValue ) { if ( inclusive ) { Util . checkParameter ( maximumValue >= defaultValue , "Maximum value (" + maximumValue + ") must be greater than or equal to default (" + defaultValue + ")" ) ; } else { Util . checkParameter ( maximumValue > defaultValue , "Maximum value (" + maximumValue + ") must be greater than default (" + defaultValue + ")" ) ; } } else if ( hasMinimumValue ) { if ( inclusive && minimumValueInclusive ) { Util . checkParameter ( maximumValue >= minimumValue , "Maximum value (" + maximumValue + ") must be greater than or equal to minimum (" + minimumValue + ")" ) ; } else { Util . checkParameter ( maximumValue > minimumValue , "Maximum value (" + maximumValue + ") must be greater than minimum (" + minimumValue + ")" ) ; } } this . hasMaximumValue = true ; this . maximumValue = maximumValue ; this . maximumValueInclusive = inclusive ; return this ; } | Set the maximum value . The maximum value is an acceptable value if and only if inclusive is set to true . |
15,187 | public void addBroadcastSetForSumFunction ( String name , DataSet < ? > data ) { this . bcVarsSum . add ( new Tuple2 < > ( name , data ) ) ; } | Adds a data set as a broadcast set to the sum function . |
15,188 | public void addBroadcastSetForApplyFunction ( String name , DataSet < ? > data ) { this . bcVarsApply . add ( new Tuple2 < > ( name , data ) ) ; } | Adds a data set as a broadcast set to the apply function . |
15,189 | @ SuppressWarnings ( "unchecked" ) public < X > StreamRecord < X > replace ( X element ) { this . value = ( T ) element ; return ( StreamRecord < X > ) this ; } | Replace the currently stored value by the given new value . This returns a StreamElement with the generic type parameter that matches the new value while keeping the old timestamp . |
15,190 | @ SuppressWarnings ( "unchecked" ) public < X > StreamRecord < X > replace ( X value , long timestamp ) { this . timestamp = timestamp ; this . value = ( T ) value ; this . hasTimestamp = true ; return ( StreamRecord < X > ) this ; } | Replace the currently stored value by the given new value and the currently stored timestamp with the new timestamp . This returns a StreamElement with the generic type parameter that matches the new value . |
15,191 | public StreamRecord < T > copy ( T valueCopy ) { StreamRecord < T > copy = new StreamRecord < > ( valueCopy ) ; copy . timestamp = this . timestamp ; copy . hasTimestamp = this . hasTimestamp ; return copy ; } | Creates a copy of this stream record . Uses the copied value as the value for the new record i . e . only copies timestamp fields . |
15,192 | public void copyTo ( T valueCopy , StreamRecord < T > target ) { target . value = valueCopy ; target . timestamp = this . timestamp ; target . hasTimestamp = this . hasTimestamp ; } | Copies this record into the new stream record . Uses the copied value as the value for the new record i . e . only copies timestamp fields . |
15,193 | public void onEvent ( TaskEvent event ) { if ( event instanceof TerminationEvent ) { terminationSignaled = true ; } else if ( event instanceof AllWorkersDoneEvent ) { AllWorkersDoneEvent wde = ( AllWorkersDoneEvent ) event ; aggregatorNames = wde . getAggregatorNames ( ) ; aggregates = wde . getAggregates ( userCodeClassLoader ) ; } else { throw new IllegalArgumentException ( "Unknown event type." ) ; } latch . countDown ( ) ; } | Barrier will release the waiting thread if an event occurs . |
15,194 | public static MesosTaskManagerParameters create ( Configuration flinkConfig ) { List < ConstraintEvaluator > constraints = parseConstraints ( flinkConfig . getString ( MESOS_CONSTRAINTS_HARD_HOSTATTR ) ) ; ContaineredTaskManagerParameters containeredParameters = ContaineredTaskManagerParameters . create ( flinkConfig , flinkConfig . getInteger ( MESOS_RM_TASKS_MEMORY_MB ) , flinkConfig . getInteger ( MESOS_RM_TASKS_SLOTS ) ) ; double cpus = flinkConfig . getDouble ( MESOS_RM_TASKS_CPUS ) ; if ( cpus <= 0.0 ) { cpus = Math . max ( containeredParameters . numSlots ( ) , 1.0 ) ; } int gpus = flinkConfig . getInteger ( MESOS_RM_TASKS_GPUS ) ; if ( gpus < 0 ) { throw new IllegalConfigurationException ( MESOS_RM_TASKS_GPUS . key ( ) + " cannot be negative" ) ; } int disk = flinkConfig . getInteger ( MESOS_RM_TASKS_DISK_MB ) ; String imageName = flinkConfig . getString ( MESOS_RM_CONTAINER_IMAGE_NAME ) ; ContainerType containerType ; String containerTypeString = flinkConfig . getString ( MESOS_RM_CONTAINER_TYPE ) ; switch ( containerTypeString ) { case MESOS_RESOURCEMANAGER_TASKS_CONTAINER_TYPE_MESOS : containerType = ContainerType . MESOS ; break ; case MESOS_RESOURCEMANAGER_TASKS_CONTAINER_TYPE_DOCKER : containerType = ContainerType . DOCKER ; if ( imageName == null || imageName . length ( ) == 0 ) { throw new IllegalConfigurationException ( MESOS_RM_CONTAINER_IMAGE_NAME . key ( ) + " must be specified for docker container type" ) ; } break ; default : throw new IllegalConfigurationException ( "invalid container type: " + containerTypeString ) ; } Option < String > containerVolOpt = Option . < String > apply ( flinkConfig . getString ( MESOS_RM_CONTAINER_VOLUMES ) ) ; Option < String > dockerParamsOpt = Option . < String > apply ( flinkConfig . getString ( MESOS_RM_CONTAINER_DOCKER_PARAMETERS ) ) ; Option < String > uriParamsOpt = Option . < String > apply ( flinkConfig . getString ( MESOS_TM_URIS ) ) ; boolean dockerForcePullImage = flinkConfig . 
getBoolean ( MESOS_RM_CONTAINER_DOCKER_FORCE_PULL_IMAGE ) ; List < Protos . Volume > containerVolumes = buildVolumes ( containerVolOpt ) ; List < Protos . Parameter > dockerParameters = buildDockerParameters ( dockerParamsOpt ) ; List < String > uris = buildUris ( uriParamsOpt ) ; Option < String > taskManagerHostname = Option . apply ( flinkConfig . getString ( MESOS_TM_HOSTNAME ) ) ; String tmCommand = flinkConfig . getString ( MESOS_TM_CMD ) ; Option < String > tmBootstrapCommand = Option . apply ( flinkConfig . getString ( MESOS_TM_BOOTSTRAP_CMD ) ) ; return new MesosTaskManagerParameters ( cpus , gpus , disk , containerType , Option . apply ( imageName ) , containeredParameters , containerVolumes , dockerParameters , dockerForcePullImage , constraints , tmCommand , tmBootstrapCommand , taskManagerHostname , uris ) ; } | Create the Mesos TaskManager parameters . |
15,195 | public static List < Protos . Volume > buildVolumes ( Option < String > containerVolumes ) { if ( containerVolumes . isEmpty ( ) ) { return Collections . emptyList ( ) ; } else { String [ ] volumeSpecifications = containerVolumes . get ( ) . split ( "," ) ; List < Protos . Volume > volumes = new ArrayList < > ( volumeSpecifications . length ) ; for ( String volumeSpecification : volumeSpecifications ) { if ( ! volumeSpecification . trim ( ) . isEmpty ( ) ) { Protos . Volume . Builder volume = Protos . Volume . newBuilder ( ) ; volume . setMode ( Protos . Volume . Mode . RW ) ; String [ ] parts = volumeSpecification . split ( ":" ) ; switch ( parts . length ) { case 1 : volume . setContainerPath ( parts [ 0 ] ) ; break ; case 2 : try { Protos . Volume . Mode mode = Protos . Volume . Mode . valueOf ( parts [ 1 ] . trim ( ) . toUpperCase ( ) ) ; volume . setMode ( mode ) . setContainerPath ( parts [ 0 ] ) ; } catch ( IllegalArgumentException e ) { volume . setHostPath ( parts [ 0 ] ) . setContainerPath ( parts [ 1 ] ) ; } break ; case 3 : Protos . Volume . Mode mode = Protos . Volume . Mode . valueOf ( parts [ 2 ] . trim ( ) . toUpperCase ( ) ) ; volume . setMode ( mode ) . setHostPath ( parts [ 0 ] ) . setContainerPath ( parts [ 1 ] ) ; break ; default : throw new IllegalArgumentException ( "volume specification is invalid, given: " + volumeSpecification ) ; } volumes . add ( volume . build ( ) ) ; } } return volumes ; } } | Used to build volume specs for mesos . This allows for mounting additional volumes into a container |
15,196 | public static List < String > buildUris ( Option < String > uris ) { if ( uris . isEmpty ( ) ) { return Collections . emptyList ( ) ; } else { List < String > urisList = new ArrayList < > ( ) ; for ( String uri : uris . get ( ) . split ( "," ) ) { urisList . add ( uri . trim ( ) ) ; } return urisList ; } } | Build a list of URIs for providing custom artifacts to Mesos tasks . |
15,197 | public static < T extends RestfulGateway > Optional < StaticFileServerHandler < T > > tryLoadWebContent ( GatewayRetriever < ? extends T > leaderRetriever , Time timeout , File tmpDir ) throws IOException { if ( isFlinkRuntimeWebInClassPath ( ) ) { return Optional . of ( new StaticFileServerHandler < > ( leaderRetriever , timeout , tmpDir ) ) ; } else { return Optional . empty ( ) ; } } | Checks whether the flink - runtime - web dependency is available and if so returns a StaticFileServerHandler which can serve the static file contents . |
/**
 * Samples the input elements; for each input element its emission count follows a
 * Poisson distribution (sampling with replacement).
 *
 * <p>NOTE(review): relies on enclosing-class state that is not visible here —
 * {@code fraction}, {@code THRESHOLD}, {@code EPSILON}, {@code random},
 * {@code poissonDistribution} and {@code emptyIterable}; presumably {@code fraction}
 * is the Poisson mean per element — confirm against the enclosing sampler class.
 *
 * @param input the source elements to sample
 * @return an iterator over the sampled elements (possibly repeating elements)
 */
public Iterator < T > sample ( final Iterator < T > input ) {
    // A zero fraction samples nothing; short-circuit with the shared empty iterator.
    if ( fraction == 0 ) {
        return emptyIterable ;
    }
    return new SampledIterator < T > ( ) {
        // Element to be emitted next; emitted currentCount times before advancing.
        T currentElement ;
        int currentCount = 0 ;

        public boolean hasNext ( ) {
            if ( currentCount > 0 ) {
                return true ;
            } else {
                // Advance through the input until an element with a positive count is found
                // (or the input is exhausted, leaving currentCount at 0).
                samplingProcess ( ) ;
                if ( currentCount > 0 ) {
                    return true ;
                } else {
                    return false ;
                }
            }
        }

        public T next ( ) {
            // NOTE(review): does not throw NoSuchElementException when exhausted;
            // callers are expected to check hasNext() first.
            if ( currentCount <= 0 ) {
                samplingProcess ( ) ;
            }
            currentCount -- ;
            return currentElement ;
        }

        // Draws from a Poisson(p) distribution conditioned on the value being >= 1,
        // via sequential multiplication of uniforms (inverse-CDF style rejection).
        public int poisson_ge1 ( double p ) {
            // q = P(X == 0) for Poisson with mean p.
            double q = Math . pow ( Math . E , - p ) ;
            // Uniform draw shifted into (q, 1] so at least one event is produced.
            double t = q + ( 1 - q ) * random . nextDouble ( ) ;
            int k = 1 ;
            t = t * random . nextDouble ( ) ;
            while ( t > q ) {
                k ++ ;
                t = t * random . nextDouble ( ) ;
            }
            return k ;
        }

        // Skips up to num elements of the input, leaving the last skipped one
        // in currentElement.
        private void skipGapElements ( int num ) {
            int elementCount = 0 ;
            while ( input . hasNext ( ) && elementCount < num ) {
                currentElement = input . next ( ) ;
                elementCount ++ ;
            }
        }

        private void samplingProcess ( ) {
            if ( fraction <= THRESHOLD ) {
                // Small fraction: geometric gap-skipping — draw the distance to the next
                // sampled element instead of testing every element individually.
                double u = Math . max ( random . nextDouble ( ) , EPSILON ) ;
                int gap = ( int ) ( Math . log ( u ) / - fraction ) ;
                skipGapElements ( gap ) ;
                if ( input . hasNext ( ) ) {
                    currentElement = input . next ( ) ;
                    // Count is conditioned on >= 1 since this element was selected.
                    currentCount = poisson_ge1 ( fraction ) ;
                }
            } else {
                // Larger fraction: sample a Poisson count per element directly and
                // advance until some element receives a positive count.
                while ( input . hasNext ( ) ) {
                    currentElement = input . next ( ) ;
                    currentCount = poissonDistribution . sample ( ) ;
                    if ( currentCount > 0 ) {
                        break ;
                    }
                }
            }
        }
    } ;
}
15,199 | public static SimpleDateFormat newDateFormat ( String format ) { SimpleDateFormat sdf = new SimpleDateFormat ( format , Locale . ROOT ) ; sdf . setLenient ( false ) ; return sdf ; } | Creates a new date formatter with Farrago specific options . Farrago parsing is strict and does not allow values such as day 0 month 13 etc . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.