idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
14,300 | public long cleanUpAllAllocatedMemory ( ) { synchronized ( this ) { for ( MemoryConsumer c : consumers ) { if ( c != null && c . getUsed ( ) > 0 ) { logger . debug ( "unreleased " + Utils . bytesToString ( c . getUsed ( ) ) + " memory from " + c ) ; } } consumers . clear ( ) ; for ( MemoryBlock page : pageTable ) { if ( page != null ) { logger . debug ( "unreleased page: " + page + " in task " + taskAttemptId ) ; page . pageNumber = MemoryBlock . FREED_IN_TMM_PAGE_NUMBER ; memoryManager . tungstenMemoryAllocator ( ) . free ( page ) ; } } Arrays . fill ( pageTable , null ) ; } memoryManager . releaseExecutionMemory ( acquiredButNotUsed , taskAttemptId , tungstenMemoryMode ) ; return memoryManager . releaseAllExecutionMemoryForTask ( taskAttemptId ) ; } | Clean up all allocated memory and pages . Returns the number of bytes freed . A non - zero return value can be used to detect memory leaks . |
14,301 | public synchronized byte [ ] response ( byte [ ] token ) { try { return saslServer != null ? saslServer . evaluateResponse ( token ) : new byte [ 0 ] ; } catch ( SaslException e ) { throw Throwables . propagate ( e ) ; } } | Used to respond to server SASL tokens . |
14,302 | public synchronized void dispose ( ) { if ( saslServer != null ) { try { saslServer . dispose ( ) ; } catch ( SaslException e ) { } finally { saslServer = null ; } } } | Disposes of any system resources or security - sensitive information the SaslServer might be using . |
14,303 | private static String getBase64EncodedString ( String str ) { ByteBuf byteBuf = null ; ByteBuf encodedByteBuf = null ; try { byteBuf = Unpooled . wrappedBuffer ( str . getBytes ( StandardCharsets . UTF_8 ) ) ; encodedByteBuf = Base64 . encode ( byteBuf ) ; return encodedByteBuf . toString ( StandardCharsets . UTF_8 ) ; } finally { if ( byteBuf != null ) { byteBuf . release ( ) ; if ( encodedByteBuf != null ) { encodedByteBuf . release ( ) ; } } } } | Return a Base64 - encoded string . |
14,304 | public static long packPointer ( long recordPointer , int partitionId ) { assert ( partitionId <= MAXIMUM_PARTITION_ID ) ; final long pageNumber = ( recordPointer & MASK_LONG_UPPER_13_BITS ) >>> 24 ; final long compressedAddress = pageNumber | ( recordPointer & MASK_LONG_LOWER_27_BITS ) ; return ( ( ( long ) partitionId ) << 40 ) | compressedAddress ; } | Pack a record address and partition id into a single word . |
14,305 | synchronized void dispose ( ) { if ( ! isDisposed ( ) ) { if ( connection != null ) { try { connection . waitForClose ( ) ; } catch ( IOException ioe ) { } } server . unregister ( this ) ; setState ( State . LOST , false ) ; this . disposed = true ; } } | Mark the handle as disposed and set it as LOST in case the current state is not final . |
14,306 | public synchronized String getDelegationTokenFromMetaStore ( String owner ) throws HiveSQLException , UnsupportedOperationException , LoginException , IOException { if ( ! hiveConf . getBoolVar ( HiveConf . ConfVars . METASTORE_USE_THRIFT_SASL ) || ! hiveConf . getBoolVar ( HiveConf . ConfVars . HIVE_SERVER2_ENABLE_DOAS ) ) { throw new UnsupportedOperationException ( "delegation token is can only be obtained for a secure remote metastore" ) ; } try { Hive . closeCurrent ( ) ; return Hive . get ( hiveConf ) . getDelegationToken ( owner , owner ) ; } catch ( HiveException e ) { if ( e . getCause ( ) instanceof UnsupportedOperationException ) { throw ( UnsupportedOperationException ) e . getCause ( ) ; } else { throw new HiveSQLException ( "Error connect metastore to setup impersonation" , e ) ; } } } | obtain delegation token for the give user from metastore |
14,307 | public Integer getDecimalDigits ( ) { switch ( this . type ) { case BOOLEAN_TYPE : case TINYINT_TYPE : case SMALLINT_TYPE : case INT_TYPE : case BIGINT_TYPE : return 0 ; case FLOAT_TYPE : return 7 ; case DOUBLE_TYPE : return 15 ; case DECIMAL_TYPE : return typeQualifiers . getScale ( ) ; case TIMESTAMP_TYPE : return 9 ; default : return null ; } } | The number of fractional digits for this type . Null is returned for data types where this is not applicable . |
14,308 | public static < OUT > Iterator < OUT > collect ( DataStream < OUT > stream ) throws IOException { TypeSerializer < OUT > serializer = stream . getType ( ) . createSerializer ( stream . getExecutionEnvironment ( ) . getConfig ( ) ) ; SocketStreamIterator < OUT > iter = new SocketStreamIterator < OUT > ( serializer ) ; StreamExecutionEnvironment env = stream . getExecutionEnvironment ( ) ; InetAddress clientAddress ; if ( env instanceof RemoteStreamEnvironment ) { String host = ( ( RemoteStreamEnvironment ) env ) . getHost ( ) ; int port = ( ( RemoteStreamEnvironment ) env ) . getPort ( ) ; try { clientAddress = ConnectionUtils . findConnectingAddress ( new InetSocketAddress ( host , port ) , 2000 , 400 ) ; } catch ( Exception e ) { throw new IOException ( "Could not determine an suitable network address to " + "receive back data from the streaming program." , e ) ; } } else if ( env instanceof LocalStreamEnvironment ) { clientAddress = InetAddress . getLoopbackAddress ( ) ; } else { try { clientAddress = InetAddress . getLocalHost ( ) ; } catch ( UnknownHostException e ) { throw new IOException ( "Could not determine this machines own local address to " + "receive back data from the streaming program." , e ) ; } } DataStreamSink < OUT > sink = stream . addSink ( new CollectSink < OUT > ( clientAddress , iter . getPort ( ) , serializer ) ) ; sink . setParallelism ( 1 ) ; ( new CallExecute ( env , iter ) ) . start ( ) ; return iter ; } | Returns an iterator to iterate over the elements of the DataStream . |
14,309 | public CommandLine getCommandLine ( Options commandLineOptions ) throws Exception { final List < String > args = new ArrayList < > ( ) ; properties . asMap ( ) . forEach ( ( k , v ) -> { if ( commandLineOptions . hasOption ( k ) ) { final Option o = commandLineOptions . getOption ( k ) ; final String argument = "--" + o . getLongOpt ( ) ; if ( ! o . hasArg ( ) ) { final Boolean flag = Boolean . parseBoolean ( v ) ; if ( flag ) { args . add ( argument ) ; } } else if ( ! o . hasArgs ( ) ) { args . add ( argument ) ; args . add ( v ) ; } else { throw new IllegalArgumentException ( "Option '" + o + "' is not supported yet." ) ; } } } ) ; return CliFrontendParser . parse ( commandLineOptions , args . toArray ( new String [ args . size ( ) ] ) , true ) ; } | Parses the given command line options from the deployment properties . Ignores properties that are not defined by options . |
14,310 | public static DeploymentEntry merge ( DeploymentEntry deployment1 , DeploymentEntry deployment2 ) { final Map < String , String > mergedProperties = new HashMap < > ( deployment1 . asMap ( ) ) ; mergedProperties . putAll ( deployment2 . asMap ( ) ) ; final DescriptorProperties properties = new DescriptorProperties ( true ) ; properties . putProperties ( mergedProperties ) ; return new DeploymentEntry ( properties ) ; } | Merges two deployments entries . The properties of the first deployment entry might be overwritten by the second one . |
14,311 | public static RpcService createRpcService ( final Configuration configuration , final HighAvailabilityServices haServices ) throws Exception { checkNotNull ( configuration ) ; checkNotNull ( haServices ) ; final String taskManagerAddress = determineTaskManagerBindAddress ( configuration , haServices ) ; final String portRangeDefinition = configuration . getString ( TaskManagerOptions . RPC_PORT ) ; return AkkaRpcServiceUtils . createRpcService ( taskManagerAddress , portRangeDefinition , configuration ) ; } | Create a RPC service for the task manager . |
14,312 | long refreshAndGetTotal ( ) { long total = 0 ; for ( ResultSubpartition part : partition . getAllPartitions ( ) ) { total += part . unsynchronizedGetNumberOfQueuedBuffers ( ) ; } return total ; } | Iterates over all sub - partitions and collects the total number of queued buffers in a best - effort way . |
14,313 | int refreshAndGetMin ( ) { int min = Integer . MAX_VALUE ; ResultSubpartition [ ] allPartitions = partition . getAllPartitions ( ) ; if ( allPartitions . length == 0 ) { return 0 ; } for ( ResultSubpartition part : allPartitions ) { int size = part . unsynchronizedGetNumberOfQueuedBuffers ( ) ; min = Math . min ( min , size ) ; } return min ; } | Iterates over all sub - partitions and collects the minimum number of queued buffers in a sub - partition in a best - effort way . |
14,314 | int refreshAndGetMax ( ) { int max = 0 ; for ( ResultSubpartition part : partition . getAllPartitions ( ) ) { int size = part . unsynchronizedGetNumberOfQueuedBuffers ( ) ; max = Math . max ( max , size ) ; } return max ; } | Iterates over all sub - partitions and collects the maximum number of queued buffers in a sub - partition in a best - effort way . |
14,315 | float refreshAndGetAvg ( ) { long total = 0 ; ResultSubpartition [ ] allPartitions = partition . getAllPartitions ( ) ; for ( ResultSubpartition part : allPartitions ) { int size = part . unsynchronizedGetNumberOfQueuedBuffers ( ) ; total += size ; } return total / ( float ) allPartitions . length ; } | Iterates over all sub - partitions and collects the average number of queued buffers in a sub - partition in a best - effort way . |
14,316 | public synchronized URL addFile ( File localFile , String remoteFile ) throws IOException , MalformedURLException { return addPath ( new Path ( localFile . toURI ( ) ) , new Path ( remoteFile ) ) ; } | Adds a file to the artifact server . |
14,317 | public synchronized URL addPath ( Path path , Path remoteFile ) throws IOException , MalformedURLException { if ( paths . containsKey ( remoteFile ) ) { throw new IllegalArgumentException ( "duplicate path registered" ) ; } if ( remoteFile . isAbsolute ( ) ) { throw new IllegalArgumentException ( "not expecting an absolute path" ) ; } URL fileURL = new URL ( baseURL , remoteFile . toString ( ) ) ; router . addAny ( fileURL . getPath ( ) , new VirtualFileServerHandler ( path ) ) ; paths . put ( remoteFile , fileURL ) ; return fileURL ; } | Adds a path to the artifact server . |
14,318 | public synchronized void stop ( ) throws Exception { if ( this . serverChannel != null ) { this . serverChannel . close ( ) . awaitUninterruptibly ( ) ; this . serverChannel = null ; } if ( bootstrap != null ) { if ( bootstrap . group ( ) != null ) { bootstrap . group ( ) . shutdownGracefully ( ) ; } bootstrap = null ; } } | Stops the artifact server . |
14,319 | private Object invokeRpc ( Method method , Object [ ] args ) throws Exception { String methodName = method . getName ( ) ; Class < ? > [ ] parameterTypes = method . getParameterTypes ( ) ; Annotation [ ] [ ] parameterAnnotations = method . getParameterAnnotations ( ) ; Time futureTimeout = extractRpcTimeout ( parameterAnnotations , args , timeout ) ; final RpcInvocation rpcInvocation = createRpcInvocationMessage ( methodName , parameterTypes , args ) ; Class < ? > returnType = method . getReturnType ( ) ; final Object result ; if ( Objects . equals ( returnType , Void . TYPE ) ) { tell ( rpcInvocation ) ; result = null ; } else { CompletableFuture < ? > resultFuture = ask ( rpcInvocation , futureTimeout ) ; CompletableFuture < ? > completableFuture = resultFuture . thenApply ( ( Object o ) -> { if ( o instanceof SerializedValue ) { try { return ( ( SerializedValue < ? > ) o ) . deserializeValue ( getClass ( ) . getClassLoader ( ) ) ; } catch ( IOException | ClassNotFoundException e ) { throw new CompletionException ( new RpcException ( "Could not deserialize the serialized payload of RPC method : " + methodName , e ) ) ; } } else { return o ; } } ) ; if ( Objects . equals ( returnType , CompletableFuture . class ) ) { result = completableFuture ; } else { try { result = completableFuture . get ( futureTimeout . getSize ( ) , futureTimeout . getUnit ( ) ) ; } catch ( ExecutionException ee ) { throw new RpcException ( "Failure while obtaining synchronous RPC result." , ExceptionUtils . stripExecutionException ( ee ) ) ; } } } return result ; } | Invokes a RPC method by sending the RPC invocation details to the rpc endpoint . |
14,320 | protected RpcInvocation createRpcInvocationMessage ( final String methodName , final Class < ? > [ ] parameterTypes , final Object [ ] args ) throws IOException { final RpcInvocation rpcInvocation ; if ( isLocal ) { rpcInvocation = new LocalRpcInvocation ( methodName , parameterTypes , args ) ; } else { try { RemoteRpcInvocation remoteRpcInvocation = new RemoteRpcInvocation ( methodName , parameterTypes , args ) ; if ( remoteRpcInvocation . getSize ( ) > maximumFramesize ) { throw new IOException ( "The rpc invocation size exceeds the maximum akka framesize." ) ; } else { rpcInvocation = remoteRpcInvocation ; } } catch ( IOException e ) { LOG . warn ( "Could not create remote rpc invocation message. Failing rpc invocation because..." , e ) ; throw e ; } } return rpcInvocation ; } | Create the RpcInvocation message for the given RPC . |
14,321 | protected CompletableFuture < ? > ask ( Object message , Time timeout ) { return FutureUtils . toJava ( Patterns . ask ( rpcEndpoint , message , timeout . toMilliseconds ( ) ) ) ; } | Sends the message to the RPC endpoint and returns a future containing its response . |
14,322 | public Result unregisterReference ( SharedStateRegistryKey registrationKey ) { Preconditions . checkNotNull ( registrationKey ) ; final Result result ; final StreamStateHandle scheduledStateDeletion ; SharedStateRegistry . SharedStateEntry entry ; synchronized ( registeredStates ) { entry = registeredStates . get ( registrationKey ) ; Preconditions . checkState ( entry != null , "Cannot unregister a state that is not registered." ) ; entry . decreaseReferenceCount ( ) ; if ( entry . getReferenceCount ( ) <= 0 ) { registeredStates . remove ( registrationKey ) ; scheduledStateDeletion = entry . getStateHandle ( ) ; result = new Result ( null , 0 ) ; } else { scheduledStateDeletion = null ; result = new Result ( entry ) ; } } LOG . trace ( "Unregistered shared state {} under key {}." , entry , registrationKey ) ; scheduleAsyncDelete ( scheduledStateDeletion ) ; return result ; } | Releases one reference to the given shared state in the registry . This decreases the reference count by one . Once the count reaches zero the shared state is deleted . |
14,323 | public void registerAll ( Iterable < ? extends CompositeStateHandle > stateHandles ) { if ( stateHandles == null ) { return ; } synchronized ( registeredStates ) { for ( CompositeStateHandle stateHandle : stateHandles ) { stateHandle . registerSharedStates ( this ) ; } } } | Register given shared states in the registry . |
14,324 | public boolean setCancellerHandle ( ScheduledFuture < ? > cancellerHandle ) { synchronized ( lock ) { if ( this . cancellerHandle == null ) { if ( ! discarded ) { this . cancellerHandle = cancellerHandle ; return true ; } else { return false ; } } else { throw new IllegalStateException ( "A canceller handle was already set" ) ; } } } | Sets the handle for the canceller to this pending checkpoint . This method fails with an exception if a handle has already been set . |
14,325 | public TaskAcknowledgeResult acknowledgeTask ( ExecutionAttemptID executionAttemptId , TaskStateSnapshot operatorSubtaskStates , CheckpointMetrics metrics ) { synchronized ( lock ) { if ( discarded ) { return TaskAcknowledgeResult . DISCARDED ; } final ExecutionVertex vertex = notYetAcknowledgedTasks . remove ( executionAttemptId ) ; if ( vertex == null ) { if ( acknowledgedTasks . contains ( executionAttemptId ) ) { return TaskAcknowledgeResult . DUPLICATE ; } else { return TaskAcknowledgeResult . UNKNOWN ; } } else { acknowledgedTasks . add ( executionAttemptId ) ; } List < OperatorID > operatorIDs = vertex . getJobVertex ( ) . getOperatorIDs ( ) ; int subtaskIndex = vertex . getParallelSubtaskIndex ( ) ; long ackTimestamp = System . currentTimeMillis ( ) ; long stateSize = 0L ; if ( operatorSubtaskStates != null ) { for ( OperatorID operatorID : operatorIDs ) { OperatorSubtaskState operatorSubtaskState = operatorSubtaskStates . getSubtaskStateByOperatorID ( operatorID ) ; if ( operatorSubtaskState == null ) { operatorSubtaskState = new OperatorSubtaskState ( ) ; } OperatorState operatorState = operatorStates . get ( operatorID ) ; if ( operatorState == null ) { operatorState = new OperatorState ( operatorID , vertex . getTotalNumberOfParallelSubtasks ( ) , vertex . getMaxParallelism ( ) ) ; operatorStates . put ( operatorID , operatorState ) ; } operatorState . putState ( subtaskIndex , operatorSubtaskState ) ; stateSize += operatorSubtaskState . getStateSize ( ) ; } } ++ numAcknowledgedTasks ; final PendingCheckpointStats statsCallback = this . statsCallback ; if ( statsCallback != null ) { long alignmentDurationMillis = metrics . getAlignmentDurationNanos ( ) / 1_000_000 ; SubtaskStateStats subtaskStateStats = new SubtaskStateStats ( subtaskIndex , ackTimestamp , stateSize , metrics . getSyncDurationMillis ( ) , metrics . getAsyncDurationMillis ( ) , metrics . getBytesBufferedInAlignment ( ) , alignmentDurationMillis ) ; statsCallback . 
reportSubtaskStats ( vertex . getJobvertexId ( ) , subtaskStateStats ) ; } return TaskAcknowledgeResult . SUCCESS ; } } | Acknowledges the task with the given execution attempt id and the given subtask state . |
14,326 | public void abort ( CheckpointFailureReason reason , Throwable cause ) { try { CheckpointException exception = new CheckpointException ( reason , cause ) ; onCompletionPromise . completeExceptionally ( exception ) ; reportFailedCheckpoint ( exception ) ; assertAbortSubsumedForced ( reason ) ; } finally { dispose ( true ) ; } } | Aborts a checkpoint with reason and cause . |
14,327 | private void reportFailedCheckpoint ( Exception cause ) { final PendingCheckpointStats statsCallback = this . statsCallback ; if ( statsCallback != null ) { long failureTimestamp = System . currentTimeMillis ( ) ; statsCallback . reportFailedCheckpoint ( failureTimestamp , cause ) ; } } | Reports a failed checkpoint with the given optional cause . |
14,328 | static TypeInformation schemaToTypeInfo ( TypeDescription schema ) { switch ( schema . getCategory ( ) ) { case BOOLEAN : return BasicTypeInfo . BOOLEAN_TYPE_INFO ; case BYTE : return BasicTypeInfo . BYTE_TYPE_INFO ; case SHORT : return BasicTypeInfo . SHORT_TYPE_INFO ; case INT : return BasicTypeInfo . INT_TYPE_INFO ; case LONG : return BasicTypeInfo . LONG_TYPE_INFO ; case FLOAT : return BasicTypeInfo . FLOAT_TYPE_INFO ; case DOUBLE : return BasicTypeInfo . DOUBLE_TYPE_INFO ; case DECIMAL : return BasicTypeInfo . BIG_DEC_TYPE_INFO ; case STRING : case CHAR : case VARCHAR : return BasicTypeInfo . STRING_TYPE_INFO ; case DATE : return SqlTimeTypeInfo . DATE ; case TIMESTAMP : return SqlTimeTypeInfo . TIMESTAMP ; case BINARY : return PrimitiveArrayTypeInfo . BYTE_PRIMITIVE_ARRAY_TYPE_INFO ; case STRUCT : List < TypeDescription > fieldSchemas = schema . getChildren ( ) ; TypeInformation [ ] fieldTypes = new TypeInformation [ fieldSchemas . size ( ) ] ; for ( int i = 0 ; i < fieldSchemas . size ( ) ; i ++ ) { fieldTypes [ i ] = schemaToTypeInfo ( fieldSchemas . get ( i ) ) ; } String [ ] fieldNames = schema . getFieldNames ( ) . toArray ( new String [ ] { } ) ; return new RowTypeInfo ( fieldTypes , fieldNames ) ; case LIST : TypeDescription elementSchema = schema . getChildren ( ) . get ( 0 ) ; TypeInformation < ? > elementType = schemaToTypeInfo ( elementSchema ) ; return ObjectArrayTypeInfo . getInfoFor ( elementType ) ; case MAP : TypeDescription keySchema = schema . getChildren ( ) . get ( 0 ) ; TypeDescription valSchema = schema . getChildren ( ) . get ( 1 ) ; TypeInformation < ? > keyType = schemaToTypeInfo ( keySchema ) ; TypeInformation < ? > valType = schemaToTypeInfo ( valSchema ) ; return new MapTypeInfo < > ( keyType , valType ) ; case UNION : throw new UnsupportedOperationException ( "UNION type is not supported yet." 
) ; default : throw new IllegalArgumentException ( "Unknown type " + schema ) ; } } | Converts an ORC schema to a Flink TypeInformation . |
14,329 | static int fillRows ( Row [ ] rows , TypeDescription schema , VectorizedRowBatch batch , int [ ] selectedFields ) { int rowsToRead = Math . min ( ( int ) batch . count ( ) , rows . length ) ; List < TypeDescription > fieldTypes = schema . getChildren ( ) ; for ( int fieldIdx = 0 ; fieldIdx < selectedFields . length ; fieldIdx ++ ) { int orcIdx = selectedFields [ fieldIdx ] ; readField ( rows , fieldIdx , fieldTypes . get ( orcIdx ) , batch . cols [ orcIdx ] , rowsToRead ) ; } return rowsToRead ; } | Fills an ORC batch into an array of Row . |
14,330 | private static void fillColumnWithRepeatingValue ( Object [ ] vals , int fieldIdx , Object repeatingValue , int childCount ) { if ( fieldIdx == - 1 ) { Arrays . fill ( vals , 0 , childCount , repeatingValue ) ; } else { Row [ ] rows = ( Row [ ] ) vals ; for ( int i = 0 ; i < childCount ; i ++ ) { rows [ i ] . setField ( fieldIdx , repeatingValue ) ; } } } | Sets a repeating value to all objects or row fields of the passed vals array . |
14,331 | public static String lpad ( String base , int len , String pad ) { if ( len < 0 || "" . equals ( pad ) ) { return null ; } else if ( len == 0 ) { return "" ; } char [ ] data = new char [ len ] ; char [ ] baseChars = base . toCharArray ( ) ; char [ ] padChars = pad . toCharArray ( ) ; int pos = Math . max ( len - base . length ( ) , 0 ) ; for ( int i = 0 ; i < pos ; i += pad . length ( ) ) { for ( int j = 0 ; j < pad . length ( ) && j < pos - i ; j ++ ) { data [ i + j ] = padChars [ j ] ; } } int i = 0 ; while ( pos + i < len && i < base . length ( ) ) { data [ pos + i ] = baseChars [ i ] ; i += 1 ; } return new String ( data ) ; } | Returns the string str left - padded with the string pad to a length of len characters . If str is longer than len the return value is shortened to len characters . |
14,332 | public static String rpad ( String base , int len , String pad ) { if ( len < 0 || "" . equals ( pad ) ) { return null ; } else if ( len == 0 ) { return "" ; } char [ ] data = new char [ len ] ; char [ ] baseChars = base . toCharArray ( ) ; char [ ] padChars = pad . toCharArray ( ) ; int pos = 0 ; while ( pos < base . length ( ) && pos < len ) { data [ pos ] = baseChars [ pos ] ; pos += 1 ; } while ( pos < len ) { int i = 0 ; while ( i < pad . length ( ) && i < len - pos ) { data [ pos + i ] = padChars [ i ] ; i += 1 ; } pos += pad . length ( ) ; } return new String ( data ) ; } | Returns the string str right - padded with the string pad to a length of len characters . If str is longer than len the return value is shortened to len characters . |
14,333 | public static String replace ( String str , String oldStr , String replacement ) { return str . replace ( oldStr , replacement ) ; } | Replaces all the old strings with the replacement string . |
14,334 | public static String regexpReplace ( String str , String regex , String replacement ) { if ( regex . isEmpty ( ) ) { return str ; } try { StringBuffer sb = new StringBuffer ( ) ; Matcher m = REGEXP_PATTERN_CACHE . get ( regex ) . matcher ( str ) ; while ( m . find ( ) ) { m . appendReplacement ( sb , replacement ) ; } m . appendTail ( sb ) ; return sb . toString ( ) ; } catch ( Exception e ) { LOG . error ( String . format ( "Exception in regexpReplace('%s', '%s', '%s')" , str , regex , replacement ) , e ) ; return null ; } } | Returns a string resulting from replacing all substrings that match the regular expression with replacement . |
14,335 | public static String regexpExtract ( String str , String regex , int extractIndex ) { if ( extractIndex < 0 ) { return null ; } try { Matcher m = REGEXP_PATTERN_CACHE . get ( regex ) . matcher ( str ) ; if ( m . find ( ) ) { MatchResult mr = m . toMatchResult ( ) ; return mr . group ( extractIndex ) ; } return null ; } catch ( Exception e ) { LOG . error ( String . format ( "Exception in regexpExtract('%s', '%s', '%d')" , str , regex , extractIndex ) , e ) ; return null ; } } | Returns a string extracted with a specified regular expression and a regex match group index . |
14,336 | public static String hash ( String algorithm , String str , String charsetName ) { try { byte [ ] digest = MessageDigest . getInstance ( algorithm ) . digest ( strToBytesWithCharset ( str , charsetName ) ) ; return EncodingUtils . hex ( digest ) ; } catch ( NoSuchAlgorithmException e ) { throw new IllegalArgumentException ( "Unsupported algorithm: " + algorithm , e ) ; } } | Calculate the hash value of a given string . |
14,337 | public static String parseUrl ( String urlStr , String partToExtract ) { URL url ; try { url = URL_CACHE . get ( urlStr ) ; } catch ( Exception e ) { LOG . error ( "Parse URL error: " + urlStr , e ) ; return null ; } if ( "HOST" . equals ( partToExtract ) ) { return url . getHost ( ) ; } if ( "PATH" . equals ( partToExtract ) ) { return url . getPath ( ) ; } if ( "QUERY" . equals ( partToExtract ) ) { return url . getQuery ( ) ; } if ( "REF" . equals ( partToExtract ) ) { return url . getRef ( ) ; } if ( "PROTOCOL" . equals ( partToExtract ) ) { return url . getProtocol ( ) ; } if ( "FILE" . equals ( partToExtract ) ) { return url . getFile ( ) ; } if ( "AUTHORITY" . equals ( partToExtract ) ) { return url . getAuthority ( ) ; } if ( "USERINFO" . equals ( partToExtract ) ) { return url . getUserInfo ( ) ; } return null ; } | Parse url and return various components of the URL . If accept any null arguments return null . |
14,338 | public static String parseUrl ( String urlStr , String partToExtract , String key ) { if ( ! "QUERY" . equals ( partToExtract ) ) { return null ; } String query = parseUrl ( urlStr , partToExtract ) ; if ( query == null ) { return null ; } Pattern p = Pattern . compile ( "(&|^)" + Pattern . quote ( key ) + "=([^&]*)" ) ; Matcher m = p . matcher ( query ) ; if ( m . find ( ) ) { return m . group ( 2 ) ; } return null ; } | Parse url and return various parameter of the URL . If accept any null arguments return null . |
14,339 | public static String hex ( String x ) { return EncodingUtils . hex ( x . getBytes ( StandardCharsets . UTF_8 ) ) . toUpperCase ( ) ; } | Returns the hex string of a string argument . |
14,340 | public static Map < String , String > strToMap ( String text , String listDelimiter , String keyValueDelimiter ) { if ( StringUtils . isEmpty ( text ) ) { return EMPTY_MAP ; } String [ ] keyValuePairs = text . split ( listDelimiter ) ; Map < String , String > ret = new HashMap < > ( keyValuePairs . length ) ; for ( String keyValuePair : keyValuePairs ) { String [ ] keyValue = keyValuePair . split ( keyValueDelimiter , 2 ) ; if ( keyValue . length < 2 ) { ret . put ( keyValuePair , null ) ; } else { ret . put ( keyValue [ 0 ] , keyValue [ 1 ] ) ; } } return ret ; } | Creates a map by parsing text . Split text into key - value pairs using two delimiters . The first delimiter separates pairs and the second delimiter separates key and value . |
14,341 | public ConsumerRecords < byte [ ] , byte [ ] > pollNext ( ) throws Exception { synchronized ( lock ) { while ( next == null && error == null ) { lock . wait ( ) ; } ConsumerRecords < byte [ ] , byte [ ] > n = next ; if ( n != null ) { next = null ; lock . notifyAll ( ) ; return n ; } else { ExceptionUtils . rethrowException ( error , error . getMessage ( ) ) ; return ConsumerRecords . empty ( ) ; } } } | Polls the next element from the Handover possibly blocking until the next element is available . This method behaves similar to polling from a blocking queue . |
14,342 | public void produce ( final ConsumerRecords < byte [ ] , byte [ ] > element ) throws InterruptedException , WakeupException , ClosedException { checkNotNull ( element ) ; synchronized ( lock ) { while ( next != null && ! wakeupProducer ) { lock . wait ( ) ; } wakeupProducer = false ; if ( next != null ) { throw new WakeupException ( ) ; } else if ( error == null ) { next = element ; lock . notifyAll ( ) ; } else { throw new ClosedException ( ) ; } } } | Hands over an element from the producer . If the Handover already has an element that was not yet picked up by the consumer thread this call blocks until the consumer picks up that previous element . |
14,343 | public boolean isCompatibleWith ( DeweyNumber other ) { if ( length ( ) > other . length ( ) ) { for ( int i = 0 ; i < other . length ( ) ; i ++ ) { if ( other . deweyNumber [ i ] != deweyNumber [ i ] ) { return false ; } } return true ; } else if ( length ( ) == other . length ( ) ) { int lastIndex = length ( ) - 1 ; for ( int i = 0 ; i < lastIndex ; i ++ ) { if ( other . deweyNumber [ i ] != deweyNumber [ i ] ) { return false ; } } return deweyNumber [ lastIndex ] >= other . deweyNumber [ lastIndex ] ; } else { return false ; } } | Checks whether this dewey number is compatible to the other dewey number . |
14,344 | public DeweyNumber increase ( int times ) { int [ ] newDeweyNumber = Arrays . copyOf ( deweyNumber , deweyNumber . length ) ; newDeweyNumber [ deweyNumber . length - 1 ] += times ; return new DeweyNumber ( newDeweyNumber ) ; } | Creates a new dewey number from this such that its last digit is increased by the supplied number . |
14,345 | public DeweyNumber addStage ( ) { int [ ] newDeweyNumber = Arrays . copyOf ( deweyNumber , deweyNumber . length + 1 ) ; return new DeweyNumber ( newDeweyNumber ) ; } | Creates a new dewey number from this such that a 0 is appended as new last digit . |
14,346 | public static DeweyNumber fromString ( final String deweyNumberString ) { String [ ] splits = deweyNumberString . split ( "\\." ) ; if ( splits . length == 0 ) { return new DeweyNumber ( Integer . parseInt ( deweyNumberString ) ) ; } else { int [ ] deweyNumber = new int [ splits . length ] ; for ( int i = 0 ; i < splits . length ; i ++ ) { deweyNumber [ i ] = Integer . parseInt ( splits [ i ] ) ; } return new DeweyNumber ( deweyNumber ) ; } } | Creates a dewey number from a string representation . The input string must be a dot separated string of integers . |
14,347 | public CompletableFuture < JobDetailsInfo > getJobDetails ( JobID jobId ) { final JobDetailsHeaders detailsHeaders = JobDetailsHeaders . getInstance ( ) ; final JobMessageParameters params = new JobMessageParameters ( ) ; params . jobPathParameter . resolve ( jobId ) ; return sendRequest ( detailsHeaders , params ) ; } | Requests the job details . |
14,348 | public int spillPartition ( List < MemorySegment > target , IOManager ioAccess , FileIOChannel . ID targetChannel , LinkedBlockingQueue < MemorySegment > bufferReturnQueue ) throws IOException { if ( ! isInMemory ( ) ) { throw new RuntimeException ( "Bug in Hybrid Hash Join: " + "Request to spill a partition that has already been spilled." ) ; } if ( getNumOccupiedMemorySegments ( ) < 2 ) { throw new RuntimeException ( "Bug in Hybrid Hash Join: " + "Request to spill a partition with less than two buffers." ) ; } for ( int i = 0 ; i < this . numOverflowSegments ; i ++ ) { target . add ( this . overflowSegments [ i ] ) ; } this . overflowSegments = null ; this . numOverflowSegments = 0 ; this . nextOverflowBucket = 0 ; this . buildSideChannel = ioAccess . createBlockChannelWriter ( targetChannel , bufferReturnQueue ) ; return this . buildSideWriteBuffer . spill ( this . buildSideChannel ) ; } | Spills this partition to disk and sets it up such that it continues spilling records that are added to it . The spilling process must free at least one buffer either in the partition s record buffers or in the memory segments for overflow buckets . The partition immediately takes back one buffer to use it for further spilling . |
14,349 | JobSubmissionResult finalizeExecute ( ) throws ProgramInvocationException { return client . run ( detachedPlan , jarFilesToAttach , classpathsToAttach , userCodeClassLoader , savepointSettings ) ; } | Finishes this Context Environment s execution by explicitly running the plan constructed . |
14,350 | public static void discardStateFuture ( RunnableFuture < ? extends StateObject > stateFuture ) throws Exception { if ( null != stateFuture ) { if ( ! stateFuture . cancel ( true ) ) { try { StateObject stateObject = FutureUtils . runIfNotDoneAndGet ( stateFuture ) ; if ( null != stateObject ) { stateObject . discardState ( ) ; } } catch ( CancellationException | ExecutionException ex ) { LOG . debug ( "Cancelled execution of snapshot future runnable. Cancellation produced the following " + "exception, which is expected an can be ignored." , ex ) ; } } } } | Discards the given state future by first trying to cancel it . If this is not possible then the state object contained in the future is calculated and afterwards discarded . |
14,351 | public static < T > T find ( Class < T > factoryClass , Descriptor descriptor ) { Preconditions . checkNotNull ( descriptor ) ; return findInternal ( factoryClass , descriptor . toProperties ( ) , Optional . empty ( ) ) ; } | Finds a table factory of the given class and descriptor . |
14,352 | public static < T > T find ( Class < T > factoryClass , Descriptor descriptor , ClassLoader classLoader ) { Preconditions . checkNotNull ( descriptor ) ; Preconditions . checkNotNull ( classLoader ) ; return findInternal ( factoryClass , descriptor . toProperties ( ) , Optional . of ( classLoader ) ) ; } | Finds a table factory of the given class descriptor and classloader . |
14,353 | public static < T > T find ( Class < T > factoryClass , Map < String , String > propertyMap ) { return findInternal ( factoryClass , propertyMap , Optional . empty ( ) ) ; } | Finds a table factory of the given class and property map . |
14,354 | private static List < TableFactory > discoverFactories ( Optional < ClassLoader > classLoader ) { try { List < TableFactory > result = new LinkedList < > ( ) ; if ( classLoader . isPresent ( ) ) { ServiceLoader . load ( TableFactory . class , classLoader . get ( ) ) . iterator ( ) . forEachRemaining ( result :: add ) ; } else { defaultLoader . iterator ( ) . forEachRemaining ( result :: add ) ; } return result ; } catch ( ServiceConfigurationError e ) { LOG . error ( "Could not load service provider for table factories." , e ) ; throw new TableException ( "Could not load service provider for table factories." , e ) ; } } | Searches for factories using Java service providers . |
14,355 | private static < T > List < TableFactory > filterByFactoryClass ( Class < T > factoryClass , Map < String , String > properties , List < TableFactory > foundFactories ) { List < TableFactory > classFactories = foundFactories . stream ( ) . filter ( p -> factoryClass . isAssignableFrom ( p . getClass ( ) ) ) . collect ( Collectors . toList ( ) ) ; if ( classFactories . isEmpty ( ) ) { throw new NoMatchingTableFactoryException ( String . format ( "No factory implements '%s'." , factoryClass . getCanonicalName ( ) ) , factoryClass , foundFactories , properties ) ; } return classFactories ; } | Filters factories with matching context by factory class . |
14,356 | private static < T > List < TableFactory > filterByContext ( Class < T > factoryClass , Map < String , String > properties , List < TableFactory > foundFactories , List < TableFactory > classFactories ) { List < TableFactory > matchingFactories = classFactories . stream ( ) . filter ( factory -> { Map < String , String > requestedContext = normalizeContext ( factory ) ; Map < String , String > plainContext = new HashMap < > ( requestedContext ) ; plainContext . remove ( CONNECTOR_PROPERTY_VERSION ) ; plainContext . remove ( FORMAT_PROPERTY_VERSION ) ; plainContext . remove ( METADATA_PROPERTY_VERSION ) ; plainContext . remove ( STATISTICS_PROPERTY_VERSION ) ; plainContext . remove ( CATALOG_PROPERTY_VERSION ) ; return plainContext . keySet ( ) . stream ( ) . allMatch ( e -> properties . containsKey ( e ) && properties . get ( e ) . equals ( plainContext . get ( e ) ) ) ; } ) . collect ( Collectors . toList ( ) ) ; if ( matchingFactories . isEmpty ( ) ) { throw new NoMatchingTableFactoryException ( "No context matches." , factoryClass , foundFactories , properties ) ; } return matchingFactories ; } | Filters for factories with matching context . |
14,357 | private static Map < String , String > normalizeContext ( TableFactory factory ) { Map < String , String > requiredContext = factory . requiredContext ( ) ; if ( requiredContext == null ) { throw new TableException ( String . format ( "Required context of factory '%s' must not be null." , factory . getClass ( ) . getName ( ) ) ) ; } return requiredContext . keySet ( ) . stream ( ) . collect ( Collectors . toMap ( key -> key . toLowerCase ( ) , key -> requiredContext . get ( key ) ) ) ; } | Prepares the properties of a context to be used for match operations . |
14,358 | private static < T > T filterBySupportedProperties ( Class < T > factoryClass , Map < String , String > properties , List < TableFactory > foundFactories , List < TableFactory > classFactories ) { final List < String > plainGivenKeys = new LinkedList < > ( ) ; properties . keySet ( ) . forEach ( k -> { String key = k . replaceAll ( ".\\d+" , ".#" ) ; if ( ! plainGivenKeys . contains ( key ) ) { plainGivenKeys . add ( key ) ; } } ) ; Optional < String > lastKey = Optional . empty ( ) ; List < TableFactory > supportedFactories = new LinkedList < > ( ) ; for ( TableFactory factory : classFactories ) { Set < String > requiredContextKeys = normalizeContext ( factory ) . keySet ( ) ; Tuple2 < List < String > , List < String > > tuple2 = normalizeSupportedProperties ( factory ) ; List < String > givenContextFreeKeys = plainGivenKeys . stream ( ) . filter ( p -> ! requiredContextKeys . contains ( p ) ) . collect ( Collectors . toList ( ) ) ; List < String > givenFilteredKeys = filterSupportedPropertiesFactorySpecific ( factory , givenContextFreeKeys ) ; Boolean allTrue = true ; for ( String k : givenFilteredKeys ) { lastKey = Optional . of ( k ) ; if ( ! ( tuple2 . f0 . contains ( k ) || tuple2 . f1 . stream ( ) . anyMatch ( p -> k . startsWith ( p ) ) ) ) { allTrue = false ; break ; } } if ( allTrue ) { supportedFactories . add ( factory ) ; } } if ( supportedFactories . isEmpty ( ) && classFactories . size ( ) == 1 && lastKey . isPresent ( ) ) { TableFactory factory = classFactories . get ( 0 ) ; Tuple2 < List < String > , List < String > > tuple2 = normalizeSupportedProperties ( factory ) ; String errorMessage = String . format ( "The matching factory '%s' doesn't support '%s'.\n\nSupported properties of " + "this factory are:\n%s" , factory . getClass ( ) . getName ( ) , lastKey . get ( ) , String . join ( "\n" , tuple2 . 
f0 ) ) ; throw new NoMatchingTableFactoryException ( errorMessage , factoryClass , foundFactories , properties ) ; } else if ( supportedFactories . isEmpty ( ) ) { throw new NoMatchingTableFactoryException ( "No factory supports all properties." , factoryClass , foundFactories , properties ) ; } else if ( supportedFactories . size ( ) > 1 ) { throw new AmbiguousTableFactoryException ( supportedFactories , factoryClass , foundFactories , properties ) ; } return ( T ) supportedFactories . get ( 0 ) ; } | Filters the matching class factories by supported properties . |
14,359 | private static Tuple2 < List < String > , List < String > > normalizeSupportedProperties ( TableFactory factory ) { List < String > supportedProperties = factory . supportedProperties ( ) ; if ( supportedProperties == null ) { throw new TableException ( String . format ( "Supported properties of factory '%s' must not be null." , factory . getClass ( ) . getName ( ) ) ) ; } List < String > supportedKeys = supportedProperties . stream ( ) . map ( p -> p . toLowerCase ( ) ) . collect ( Collectors . toList ( ) ) ; List < String > wildcards = extractWildcardPrefixes ( supportedKeys ) ; return Tuple2 . of ( supportedKeys , wildcards ) ; } | Prepares the supported properties of a factory to be used for match operations . |
14,360 | public void insertOrReplaceRecord ( T record ) throws IOException { if ( closed ) { return ; } T match = prober . getMatchFor ( record , reuse ) ; if ( match == null ) { prober . insertAfterNoMatch ( record ) ; } else { prober . updateMatch ( record ) ; } } | Searches the hash table for a record with the given key . If it is found then it is overridden with the specified record . Otherwise the specified record is inserted . |
14,361 | private void rebuild ( long newNumBucketSegments ) throws IOException { releaseBucketSegments ( ) ; allocateBucketSegments ( ( int ) newNumBucketSegments ) ; T record = buildSideSerializer . createInstance ( ) ; try { EntryIterator iter = getEntryIterator ( ) ; recordArea . resetAppendPosition ( ) ; recordArea . setWritePosition ( 0 ) ; while ( ( record = iter . next ( record ) ) != null && ! closed ) { final int hashCode = MathUtils . jenkinsHash ( buildSideComparator . hash ( record ) ) ; final int bucket = hashCode & numBucketsMask ; final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits ; final MemorySegment bucketSegment = bucketSegments [ bucketSegmentIndex ] ; final int bucketOffset = ( bucket & numBucketsPerSegmentMask ) << bucketSizeBits ; final long firstPointer = bucketSegment . getLong ( bucketOffset ) ; long ptrToAppended = recordArea . noSeekAppendPointerAndRecord ( firstPointer , record ) ; bucketSegment . putLong ( bucketOffset , ptrToAppended ) ; } recordArea . freeSegmentsAfterAppendPosition ( ) ; holes = 0 ; } catch ( EOFException ex ) { throw new RuntimeException ( "Bug in InPlaceMutableHashTable: we shouldn't get out of memory during a rebuild, " + "because we aren't allocating any new memory." ) ; } } | Same as above but the number of bucket segments of the new table can be specified . |
14,362 | public static StringifiedAccumulatorResult [ ] stringifyAccumulatorResults ( Map < String , OptionalFailure < Accumulator < ? , ? > > > accs ) { if ( accs == null || accs . isEmpty ( ) ) { return new StringifiedAccumulatorResult [ 0 ] ; } else { StringifiedAccumulatorResult [ ] results = new StringifiedAccumulatorResult [ accs . size ( ) ] ; int i = 0 ; for ( Map . Entry < String , OptionalFailure < Accumulator < ? , ? > > > entry : accs . entrySet ( ) ) { results [ i ++ ] = stringifyAccumulatorResult ( entry . getKey ( ) , entry . getValue ( ) ) ; } return results ; } } | Flatten a map of accumulator names to Accumulator instances into an array of StringifiedAccumulatorResult values . |
14,363 | public boolean isMatching ( ResourceProfile required ) { if ( required == UNKNOWN ) { return true ; } if ( cpuCores >= required . getCpuCores ( ) && heapMemoryInMB >= required . getHeapMemoryInMB ( ) && directMemoryInMB >= required . getDirectMemoryInMB ( ) && nativeMemoryInMB >= required . getNativeMemoryInMB ( ) && networkMemoryInMB >= required . getNetworkMemoryInMB ( ) ) { for ( Map . Entry < String , Resource > resource : required . extendedResources . entrySet ( ) ) { if ( ! extendedResources . containsKey ( resource . getKey ( ) ) || ! extendedResources . get ( resource . getKey ( ) ) . getResourceAggregateType ( ) . equals ( resource . getValue ( ) . getResourceAggregateType ( ) ) || extendedResources . get ( resource . getKey ( ) ) . getValue ( ) < resource . getValue ( ) . getValue ( ) ) { return false ; } } return true ; } return false ; } | Check whether required resource profile can be matched . |
14,364 | private void grow ( int minCapacity ) { int oldCapacity = segment . size ( ) ; int newCapacity = oldCapacity + ( oldCapacity >> 1 ) ; if ( newCapacity - minCapacity < 0 ) { newCapacity = minCapacity ; } segment = MemorySegmentFactory . wrap ( Arrays . copyOf ( segment . getArray ( ) , newCapacity ) ) ; afterGrow ( ) ; } | Increases the capacity to ensure that it can hold at least the minimum capacity argument . |
14,365 | public void releasePartitions ( Collection < ResultPartitionID > partitionIds ) { for ( ResultPartitionID partitionId : partitionIds ) { resultPartitionManager . releasePartition ( partitionId , null ) ; } } | Batch release intermediate result partitions . |
14,366 | public static RestartStrategyConfiguration fixedDelayRestart ( int restartAttempts , long delayBetweenAttempts ) { return fixedDelayRestart ( restartAttempts , Time . of ( delayBetweenAttempts , TimeUnit . MILLISECONDS ) ) ; } | Generates a FixedDelayRestartStrategyConfiguration . |
14,367 | public static FailureRateRestartStrategyConfiguration failureRateRestart ( int failureRate , Time failureInterval , Time delayInterval ) { return new FailureRateRestartStrategyConfiguration ( failureRate , failureInterval , delayInterval ) ; } | Generates a FailureRateRestartStrategyConfiguration . |
14,368 | private < T > void deployJob ( ExecutionContext < T > context , JobGraph jobGraph , Result < T > result ) { try ( final ClusterDescriptor < T > clusterDescriptor = context . createClusterDescriptor ( ) ) { try { if ( context . getClusterId ( ) == null ) { deployJobOnNewCluster ( clusterDescriptor , jobGraph , result , context . getClassLoader ( ) ) ; } else { deployJobOnExistingCluster ( context . getClusterId ( ) , clusterDescriptor , jobGraph , result ) ; } } catch ( Exception e ) { throw new SqlExecutionException ( "Could not retrieve or create a cluster." , e ) ; } } catch ( SqlExecutionException e ) { throw e ; } catch ( Exception e ) { throw new SqlExecutionException ( "Could not locate a cluster." , e ) ; } } | Deploys a job . Depending on the deployment creates a new job cluster . It saves the cluster id in the result and blocks until job completion . |
14,369 | void storeInitialHashTable ( ) throws IOException { if ( spilled ) { return ; } spilled = true ; for ( int partIdx = 0 ; partIdx < initialPartitions . size ( ) ; partIdx ++ ) { final ReOpenableHashPartition < BT , PT > p = ( ReOpenableHashPartition < BT , PT > ) initialPartitions . get ( partIdx ) ; if ( p . isInMemory ( ) ) { this . writeBehindBuffersAvailable += p . spillInMemoryPartition ( spilledInMemoryPartitions . next ( ) , ioManager , writeBehindBuffers ) ; } } } | This method stores the initial hash table s contents on disk if hash join needs the memory for further partition processing . The initial hash table is rebuilt before a new secondary input is opened .
14,370 | protected boolean checkNextIndexOffset ( ) { if ( this . currentSortIndexOffset > this . lastIndexEntryOffset ) { MemorySegment returnSegment = nextMemorySegment ( ) ; if ( returnSegment != null ) { this . currentSortIndexSegment = returnSegment ; this . sortIndex . add ( this . currentSortIndexSegment ) ; this . currentSortIndexOffset = 0 ; } else { return false ; } } return true ; } | Checks if we need to request the next index memory segment .
14,371 | protected void writeIndexAndNormalizedKey ( BaseRow record , long currOffset ) { this . currentSortIndexSegment . putLong ( this . currentSortIndexOffset , currOffset ) ; if ( this . numKeyBytes != 0 ) { normalizedKeyComputer . putKey ( record , this . currentSortIndexSegment , this . currentSortIndexOffset + OFFSET_LEN ) ; } this . currentSortIndexOffset += this . indexEntrySize ; this . numRecords ++ ; } | Write of index and normalizedKey . |
14,372 | @ SuppressWarnings ( { "rawtypes" , "unchecked" } ) public void collectBuffer ( Collector < OUT > c , int bufferSize ) throws IOException { fileBuffer . position ( 0 ) ; while ( fileBuffer . position ( ) < bufferSize ) { c . collect ( deserializer . deserialize ( ) ) ; } } | Reads a buffer of the given size from the memory - mapped file and collects all records contained . This method assumes that all values in the buffer are of the same type . This method does NOT take care of synchronization . The user must guarantee that the buffer was completely written before calling this method . |
14,373 | public void init ( Map < EventId , Lockable < V > > events , Map < NodeId , Lockable < SharedBufferNode > > entries ) throws Exception { eventsBuffer . putAll ( events ) ; this . entries . putAll ( entries ) ; Map < Long , Integer > maxIds = events . keySet ( ) . stream ( ) . collect ( Collectors . toMap ( EventId :: getTimestamp , EventId :: getId , Math :: max ) ) ; eventsCount . putAll ( maxIds ) ; } | Initializes underlying state with given map of events and entries . Should be used only in case of migration from old state . |
14,374 | public boolean isEmpty ( ) throws Exception { return Iterables . isEmpty ( eventsBufferCache . keySet ( ) ) && Iterables . isEmpty ( eventsBuffer . keys ( ) ) ; } | Checks if there are no elements in the buffer .
14,375 | void upsertEvent ( EventId eventId , Lockable < V > event ) { this . eventsBufferCache . put ( eventId , event ) ; } | Inserts or updates an event in cache . |
14,376 | void upsertEntry ( NodeId nodeId , Lockable < SharedBufferNode > entry ) { this . entryCache . put ( nodeId , entry ) ; } | Inserts or updates a shareBufferNode in cache . |
14,377 | void removeEvent ( EventId eventId ) throws Exception { this . eventsBufferCache . remove ( eventId ) ; this . eventsBuffer . remove ( eventId ) ; } | Removes an event from cache and state . |
14,378 | void removeEntry ( NodeId nodeId ) throws Exception { this . entryCache . remove ( nodeId ) ; this . entries . remove ( nodeId ) ; } | Removes a ShareBufferNode from cache and state . |
14,379 | Lockable < SharedBufferNode > getEntry ( NodeId nodeId ) { return entryCache . computeIfAbsent ( nodeId , id -> { try { return entries . get ( id ) ; } catch ( Exception ex ) { throw new WrappingRuntimeException ( ex ) ; } } ) ; } | It always returns node either from state or cache . |
14,380 | Lockable < V > getEvent ( EventId eventId ) { return eventsBufferCache . computeIfAbsent ( eventId , id -> { try { return eventsBuffer . get ( id ) ; } catch ( Exception ex ) { throw new WrappingRuntimeException ( ex ) ; } } ) ; } | It always returns event either from state or cache . |
14,381 | void flushCache ( ) throws Exception { if ( ! entryCache . isEmpty ( ) ) { entries . putAll ( entryCache ) ; entryCache . clear ( ) ; } if ( ! eventsBufferCache . isEmpty ( ) ) { eventsBuffer . putAll ( eventsBufferCache ) ; eventsBufferCache . clear ( ) ; } } | Flush the event and node from cache to state . |
14,382 | public TimeWindow cover ( TimeWindow other ) { return new TimeWindow ( Math . min ( start , other . start ) , Math . max ( end , other . end ) ) ; } | Returns the minimal window that covers both this window and the given window .
14,383 | protected void computeOperatorSpecificDefaultEstimates ( DataStatistics statistics ) { this . estimatedNumRecords = getPredecessorNode ( ) . getEstimatedNumRecords ( ) ; this . estimatedOutputSize = getPredecessorNode ( ) . getEstimatedOutputSize ( ) ; } | Computes the estimated outputs for the data sink . Since the sink does not modify anything it simply copies the output estimates from its direct predecessor . |
14,384 | public List < KafkaTopicPartition > discoverPartitions ( ) throws WakeupException , ClosedException { if ( ! closed && ! wakeup ) { try { List < KafkaTopicPartition > newDiscoveredPartitions ; if ( topicsDescriptor . isFixedTopics ( ) ) { newDiscoveredPartitions = getAllPartitionsForTopics ( topicsDescriptor . getFixedTopics ( ) ) ; } else { List < String > matchedTopics = getAllTopics ( ) ; Iterator < String > iter = matchedTopics . iterator ( ) ; while ( iter . hasNext ( ) ) { if ( ! topicsDescriptor . isMatchingTopic ( iter . next ( ) ) ) { iter . remove ( ) ; } } if ( matchedTopics . size ( ) != 0 ) { newDiscoveredPartitions = getAllPartitionsForTopics ( matchedTopics ) ; } else { newDiscoveredPartitions = null ; } } if ( newDiscoveredPartitions == null || newDiscoveredPartitions . isEmpty ( ) ) { throw new RuntimeException ( "Unable to retrieve any partitions with KafkaTopicsDescriptor: " + topicsDescriptor ) ; } else { Iterator < KafkaTopicPartition > iter = newDiscoveredPartitions . iterator ( ) ; KafkaTopicPartition nextPartition ; while ( iter . hasNext ( ) ) { nextPartition = iter . next ( ) ; if ( ! setAndCheckDiscoveredPartition ( nextPartition ) ) { iter . remove ( ) ; } } } return newDiscoveredPartitions ; } catch ( WakeupException e ) { wakeup = false ; throw e ; } } else if ( ! closed && wakeup ) { wakeup = false ; throw new WakeupException ( ) ; } else { throw new ClosedException ( ) ; } } | Execute a partition discovery attempt for this subtask . This method lets the partition discoverer update what partitions it has discovered so far . |
14,385 | public boolean setAndCheckDiscoveredPartition ( KafkaTopicPartition partition ) { if ( isUndiscoveredPartition ( partition ) ) { discoveredPartitions . add ( partition ) ; return KafkaTopicPartitionAssigner . assign ( partition , numParallelSubtasks ) == indexOfThisSubtask ; } return false ; } | Sets a partition as discovered . A partition is considered new if its partition id is larger than all partition ids previously seen for the topic it belongs to . Therefore for a set of discovered partitions the order that this method is invoked with each partition is important .
14,386 | public void shutdown ( JobStatus jobStatus ) throws Exception { synchronized ( lock ) { if ( ! shutdown ) { shutdown = true ; LOG . info ( "Stopping checkpoint coordinator for job {}." , job ) ; periodicScheduling = false ; triggerRequestQueued = false ; MasterHooks . close ( masterHooks . values ( ) , LOG ) ; masterHooks . clear ( ) ; timer . shutdownNow ( ) ; for ( PendingCheckpoint pending : pendingCheckpoints . values ( ) ) { pending . abort ( CheckpointFailureReason . CHECKPOINT_COORDINATOR_SHUTDOWN ) ; } pendingCheckpoints . clear ( ) ; completedCheckpointStore . shutdown ( jobStatus ) ; checkpointIdCounter . shutdown ( jobStatus ) ; } } } | Shuts down the checkpoint coordinator . |
14,387 | public CompletableFuture < CompletedCheckpoint > triggerSavepoint ( final long timestamp , final String targetLocation ) { final CheckpointProperties properties = CheckpointProperties . forSavepoint ( ) ; return triggerSavepointInternal ( timestamp , properties , false , targetLocation ) ; } | Triggers a savepoint with the given savepoint directory as a target . |
14,388 | public CompletableFuture < CompletedCheckpoint > triggerSynchronousSavepoint ( final long timestamp , final boolean advanceToEndOfEventTime , final String targetLocation ) { final CheckpointProperties properties = CheckpointProperties . forSyncSavepoint ( ) ; return triggerSavepointInternal ( timestamp , properties , advanceToEndOfEventTime , targetLocation ) ; } | Triggers a synchronous savepoint with the given savepoint directory as a target . |
14,389 | public boolean triggerCheckpoint ( long timestamp , boolean isPeriodic ) { try { triggerCheckpoint ( timestamp , checkpointProperties , null , isPeriodic , false ) ; return true ; } catch ( CheckpointException e ) { return false ; } } | Triggers a new standard checkpoint and uses the given timestamp as the checkpoint timestamp . |
14,390 | private void completePendingCheckpoint ( PendingCheckpoint pendingCheckpoint ) throws CheckpointException { final long checkpointId = pendingCheckpoint . getCheckpointId ( ) ; final CompletedCheckpoint completedCheckpoint ; Map < OperatorID , OperatorState > operatorStates = pendingCheckpoint . getOperatorStates ( ) ; sharedStateRegistry . registerAll ( operatorStates . values ( ) ) ; try { try { completedCheckpoint = pendingCheckpoint . finalizeCheckpoint ( ) ; } catch ( Exception e1 ) { if ( ! pendingCheckpoint . isDiscarded ( ) ) { pendingCheckpoint . abort ( CheckpointFailureReason . FINALIZE_CHECKPOINT_FAILURE , e1 ) ; } throw new CheckpointException ( "Could not finalize the pending checkpoint " + checkpointId + '.' , CheckpointFailureReason . FINALIZE_CHECKPOINT_FAILURE , e1 ) ; } Preconditions . checkState ( pendingCheckpoint . isDiscarded ( ) && completedCheckpoint != null ) ; try { completedCheckpointStore . addCheckpoint ( completedCheckpoint ) ; } catch ( Exception exception ) { executor . execute ( new Runnable ( ) { public void run ( ) { try { completedCheckpoint . discardOnFailedStoring ( ) ; } catch ( Throwable t ) { LOG . warn ( "Could not properly discard completed checkpoint {}." , completedCheckpoint . getCheckpointID ( ) , t ) ; } } } ) ; throw new CheckpointException ( "Could not complete the pending checkpoint " + checkpointId + '.' , CheckpointFailureReason . FINALIZE_CHECKPOINT_FAILURE , exception ) ; } } finally { pendingCheckpoints . remove ( checkpointId ) ; triggerQueuedRequests ( ) ; } rememberRecentCheckpointId ( checkpointId ) ; dropSubsumedCheckpoints ( checkpointId ) ; lastCheckpointCompletionNanos = System . nanoTime ( ) ; LOG . info ( "Completed checkpoint {} for job {} ({} bytes in {} ms)." , checkpointId , job , completedCheckpoint . getStateSize ( ) , completedCheckpoint . getDuration ( ) ) ; if ( LOG . isDebugEnabled ( ) ) { StringBuilder builder = new StringBuilder ( ) ; builder . 
append ( "Checkpoint state: " ) ; for ( OperatorState state : completedCheckpoint . getOperatorStates ( ) . values ( ) ) { builder . append ( state ) ; builder . append ( ", " ) ; } builder . setLength ( builder . length ( ) - 2 ) ; LOG . debug ( builder . toString ( ) ) ; } final long timestamp = completedCheckpoint . getTimestamp ( ) ; for ( ExecutionVertex ev : tasksToCommitTo ) { Execution ee = ev . getCurrentExecutionAttempt ( ) ; if ( ee != null ) { ee . notifyCheckpointComplete ( checkpointId , timestamp ) ; } } } | Try to complete the given pending checkpoint . |
14,391 | public void failUnacknowledgedPendingCheckpointsFor ( ExecutionAttemptID executionAttemptId , Throwable cause ) { synchronized ( lock ) { Iterator < PendingCheckpoint > pendingCheckpointIterator = pendingCheckpoints . values ( ) . iterator ( ) ; while ( pendingCheckpointIterator . hasNext ( ) ) { final PendingCheckpoint pendingCheckpoint = pendingCheckpointIterator . next ( ) ; if ( ! pendingCheckpoint . isAcknowledgedBy ( executionAttemptId ) ) { pendingCheckpointIterator . remove ( ) ; discardCheckpoint ( pendingCheckpoint , cause ) ; } } } } | Fails all pending checkpoints which have not been acknowledged by the given execution attempt id . |
14,392 | private void triggerQueuedRequests ( ) { if ( triggerRequestQueued ) { triggerRequestQueued = false ; if ( periodicScheduling ) { if ( currentPeriodicTrigger != null ) { currentPeriodicTrigger . cancel ( false ) ; } currentPeriodicTrigger = timer . scheduleAtFixedRate ( new ScheduledTrigger ( ) , 0L , baseInterval , TimeUnit . MILLISECONDS ) ; } else { timer . execute ( new ScheduledTrigger ( ) ) ; } } } | Triggers the queued request if there is one . |
14,393 | public boolean restoreSavepoint ( String savepointPointer , boolean allowNonRestored , Map < JobVertexID , ExecutionJobVertex > tasks , ClassLoader userClassLoader ) throws Exception { Preconditions . checkNotNull ( savepointPointer , "The savepoint path cannot be null." ) ; LOG . info ( "Starting job {} from savepoint {} ({})" , job , savepointPointer , ( allowNonRestored ? "allowing non restored state" : "" ) ) ; final CompletedCheckpointStorageLocation checkpointLocation = checkpointStorage . resolveCheckpoint ( savepointPointer ) ; CompletedCheckpoint savepoint = Checkpoints . loadAndValidateCheckpoint ( job , tasks , checkpointLocation , userClassLoader , allowNonRestored ) ; completedCheckpointStore . addCheckpoint ( savepoint ) ; long nextCheckpointId = savepoint . getCheckpointID ( ) + 1 ; checkpointIdCounter . setCount ( nextCheckpointId ) ; LOG . info ( "Reset the checkpoint ID of job {} to {}." , job , nextCheckpointId ) ; return restoreLatestCheckpointedState ( tasks , true , allowNonRestored ) ; } | Restore the state with given savepoint . |
14,394 | public void abortPendingCheckpoints ( CheckpointException exception ) { synchronized ( lock ) { for ( PendingCheckpoint p : pendingCheckpoints . values ( ) ) { p . abort ( exception . getCheckpointFailureReason ( ) ) ; } pendingCheckpoints . clear ( ) ; } } | Aborts all the pending checkpoints due to an exception .
14,395 | private void discardSubtaskState ( final JobID jobId , final ExecutionAttemptID executionAttemptID , final long checkpointId , final TaskStateSnapshot subtaskState ) { if ( subtaskState != null ) { executor . execute ( new Runnable ( ) { public void run ( ) { try { subtaskState . discardState ( ) ; } catch ( Throwable t2 ) { LOG . warn ( "Could not properly discard state object of checkpoint {} " + "belonging to task {} of job {}." , checkpointId , executionAttemptID , jobId , t2 ) ; } } } ) ; } } | Discards the given state object asynchronously belonging to the given job execution attempt id and checkpoint id . |
14,396 | public ResourceSpec merge ( ResourceSpec other ) { ResourceSpec target = new ResourceSpec ( Math . max ( this . cpuCores , other . cpuCores ) , this . heapMemoryInMB + other . heapMemoryInMB , this . directMemoryInMB + other . directMemoryInMB , this . nativeMemoryInMB + other . nativeMemoryInMB , this . stateSizeInMB + other . stateSizeInMB ) ; target . extendedResources . putAll ( extendedResources ) ; for ( Resource resource : other . extendedResources . values ( ) ) { target . extendedResources . merge ( resource . getName ( ) , resource , ( v1 , v2 ) -> v1 . merge ( v2 ) ) ; } return target ; } | Used by system internally to merge the other resources of chained operators when generating the job graph or merge the resource consumed by state backend . |
14,397 | public boolean isValid ( ) { if ( this . cpuCores >= 0 && this . heapMemoryInMB >= 0 && this . directMemoryInMB >= 0 && this . nativeMemoryInMB >= 0 && this . stateSizeInMB >= 0 ) { for ( Resource resource : extendedResources . values ( ) ) { if ( resource . getValue ( ) < 0 ) { return false ; } } return true ; } else { return false ; } } | Check whether all the field values are valid . |
14,398 | public void addUniqueField ( FieldSet uniqueFieldSet ) { if ( this . uniqueFields == null ) { this . uniqueFields = new HashSet < FieldSet > ( ) ; } this . uniqueFields . add ( uniqueFieldSet ) ; } | Adds a FieldSet to be unique |
14,399 | public void addUniqueField ( int field ) { if ( this . uniqueFields == null ) { this . uniqueFields = new HashSet < FieldSet > ( ) ; } this . uniqueFields . add ( new FieldSet ( field ) ) ; } | Adds a field as having only unique values . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.