idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
14,300
public long cleanUpAllAllocatedMemory ( ) { synchronized ( this ) { for ( MemoryConsumer c : consumers ) { if ( c != null && c . getUsed ( ) > 0 ) { logger . debug ( "unreleased " + Utils . bytesToString ( c . getUsed ( ) ) + " memory from " + c ) ; } } consumers . clear ( ) ; for ( MemoryBlock page : pageTable ) { if ...
Clean up all allocated memory and pages . Returns the number of bytes freed . A non - zero return value can be used to detect memory leaks .
14,301
public synchronized byte [ ] response ( byte [ ] token ) { try { return saslServer != null ? saslServer . evaluateResponse ( token ) : new byte [ 0 ] ; } catch ( SaslException e ) { throw Throwables . propagate ( e ) ; } }
Used to respond to server SASL tokens .
14,302
public synchronized void dispose ( ) { if ( saslServer != null ) { try { saslServer . dispose ( ) ; } catch ( SaslException e ) { } finally { saslServer = null ; } } }
Disposes of any system resources or security - sensitive information the SaslServer might be using .
14,303
private static String getBase64EncodedString ( String str ) { ByteBuf byteBuf = null ; ByteBuf encodedByteBuf = null ; try { byteBuf = Unpooled . wrappedBuffer ( str . getBytes ( StandardCharsets . UTF_8 ) ) ; encodedByteBuf = Base64 . encode ( byteBuf ) ; return encodedByteBuf . toString ( StandardCharsets . UTF_8 ) ;...
Return a Base64 - encoded string .
14,304
public static long packPointer ( long recordPointer , int partitionId ) { assert ( partitionId <= MAXIMUM_PARTITION_ID ) ; final long pageNumber = ( recordPointer & MASK_LONG_UPPER_13_BITS ) >>> 24 ; final long compressedAddress = pageNumber | ( recordPointer & MASK_LONG_LOWER_27_BITS ) ; return ( ( ( long ) partitionI...
Pack a record address and partition id into a single word .
14,305
synchronized void dispose ( ) { if ( ! isDisposed ( ) ) { if ( connection != null ) { try { connection . waitForClose ( ) ; } catch ( IOException ioe ) { } } server . unregister ( this ) ; setState ( State . LOST , false ) ; this . disposed = true ; } }
Mark the handle as disposed and set it as LOST in case the current state is not final .
14,306
public synchronized String getDelegationTokenFromMetaStore ( String owner ) throws HiveSQLException , UnsupportedOperationException , LoginException , IOException { if ( ! hiveConf . getBoolVar ( HiveConf . ConfVars . METASTORE_USE_THRIFT_SASL ) || ! hiveConf . getBoolVar ( HiveConf . ConfVars . HIVE_SERVER2_ENABLE_DOA...
Obtain a delegation token for the given user from the metastore .
14,307
public Integer getDecimalDigits ( ) { switch ( this . type ) { case BOOLEAN_TYPE : case TINYINT_TYPE : case SMALLINT_TYPE : case INT_TYPE : case BIGINT_TYPE : return 0 ; case FLOAT_TYPE : return 7 ; case DOUBLE_TYPE : return 15 ; case DECIMAL_TYPE : return typeQualifiers . getScale ( ) ; case TIMESTAMP_TYPE : return 9 ...
The number of fractional digits for this type . Null is returned for data types where this is not applicable .
14,308
public static < OUT > Iterator < OUT > collect ( DataStream < OUT > stream ) throws IOException { TypeSerializer < OUT > serializer = stream . getType ( ) . createSerializer ( stream . getExecutionEnvironment ( ) . getConfig ( ) ) ; SocketStreamIterator < OUT > iter = new SocketStreamIterator < OUT > ( serializer ) ; S...
Returns an iterator to iterate over the elements of the DataStream .
14,309
public CommandLine getCommandLine ( Options commandLineOptions ) throws Exception { final List < String > args = new ArrayList < > ( ) ; properties . asMap ( ) . forEach ( ( k , v ) -> { if ( commandLineOptions . hasOption ( k ) ) { final Option o = commandLineOptions . getOption ( k ) ; final String argument = "--" + ...
Parses the given command line options from the deployment properties . Ignores properties that are not defined by options .
14,310
public static DeploymentEntry merge ( DeploymentEntry deployment1 , DeploymentEntry deployment2 ) { final Map < String , String > mergedProperties = new HashMap < > ( deployment1 . asMap ( ) ) ; mergedProperties . putAll ( deployment2 . asMap ( ) ) ; final DescriptorProperties properties = new DescriptorProperties ( tr...
Merges two deployments entries . The properties of the first deployment entry might be overwritten by the second one .
14,311
public static RpcService createRpcService ( final Configuration configuration , final HighAvailabilityServices haServices ) throws Exception { checkNotNull ( configuration ) ; checkNotNull ( haServices ) ; final String taskManagerAddress = determineTaskManagerBindAddress ( configuration , haServices ) ; final String po...
Create an RPC service for the task manager .
14,312
long refreshAndGetTotal ( ) { long total = 0 ; for ( ResultSubpartition part : partition . getAllPartitions ( ) ) { total += part . unsynchronizedGetNumberOfQueuedBuffers ( ) ; } return total ; }
Iterates over all sub - partitions and collects the total number of queued buffers in a best - effort way .
14,313
int refreshAndGetMin ( ) { int min = Integer . MAX_VALUE ; ResultSubpartition [ ] allPartitions = partition . getAllPartitions ( ) ; if ( allPartitions . length == 0 ) { return 0 ; } for ( ResultSubpartition part : allPartitions ) { int size = part . unsynchronizedGetNumberOfQueuedBuffers ( ) ; min = Math . min ( min ,...
Iterates over all sub - partitions and collects the minimum number of queued buffers in a sub - partition in a best - effort way .
14,314
int refreshAndGetMax ( ) { int max = 0 ; for ( ResultSubpartition part : partition . getAllPartitions ( ) ) { int size = part . unsynchronizedGetNumberOfQueuedBuffers ( ) ; max = Math . max ( max , size ) ; } return max ; }
Iterates over all sub - partitions and collects the maximum number of queued buffers in a sub - partition in a best - effort way .
14,315
float refreshAndGetAvg ( ) { long total = 0 ; ResultSubpartition [ ] allPartitions = partition . getAllPartitions ( ) ; for ( ResultSubpartition part : allPartitions ) { int size = part . unsynchronizedGetNumberOfQueuedBuffers ( ) ; total += size ; } return total / ( float ) allPartitions . length ; }
Iterates over all sub - partitions and collects the average number of queued buffers in a sub - partition in a best - effort way .
14,316
public synchronized URL addFile ( File localFile , String remoteFile ) throws IOException , MalformedURLException { return addPath ( new Path ( localFile . toURI ( ) ) , new Path ( remoteFile ) ) ; }
Adds a file to the artifact server .
14,317
public synchronized URL addPath ( Path path , Path remoteFile ) throws IOException , MalformedURLException { if ( paths . containsKey ( remoteFile ) ) { throw new IllegalArgumentException ( "duplicate path registered" ) ; } if ( remoteFile . isAbsolute ( ) ) { throw new IllegalArgumentException ( "not expecting an abso...
Adds a path to the artifact server .
14,318
public synchronized void stop ( ) throws Exception { if ( this . serverChannel != null ) { this . serverChannel . close ( ) . awaitUninterruptibly ( ) ; this . serverChannel = null ; } if ( bootstrap != null ) { if ( bootstrap . group ( ) != null ) { bootstrap . group ( ) . shutdownGracefully ( ) ; } bootstrap = null ;...
Stops the artifact server .
14,319
private Object invokeRpc ( Method method , Object [ ] args ) throws Exception { String methodName = method . getName ( ) ; Class < ? > [ ] parameterTypes = method . getParameterTypes ( ) ; Annotation [ ] [ ] parameterAnnotations = method . getParameterAnnotations ( ) ; Time futureTimeout = extractRpcTimeout ( parameter...
Invokes an RPC method by sending the RPC invocation details to the rpc endpoint .
14,320
protected RpcInvocation createRpcInvocationMessage ( final String methodName , final Class < ? > [ ] parameterTypes , final Object [ ] args ) throws IOException { final RpcInvocation rpcInvocation ; if ( isLocal ) { rpcInvocation = new LocalRpcInvocation ( methodName , parameterTypes , args ) ; } else { try { RemoteRpc...
Create the RpcInvocation message for the given RPC .
14,321
protected CompletableFuture < ? > ask ( Object message , Time timeout ) { return FutureUtils . toJava ( Patterns . ask ( rpcEndpoint , message , timeout . toMilliseconds ( ) ) ) ; }
Sends the message to the RPC endpoint and returns a future containing its response .
14,322
public Result unregisterReference ( SharedStateRegistryKey registrationKey ) { Preconditions . checkNotNull ( registrationKey ) ; final Result result ; final StreamStateHandle scheduledStateDeletion ; SharedStateRegistry . SharedStateEntry entry ; synchronized ( registeredStates ) { entry = registeredStates . get ( reg...
Releases one reference to the given shared state in the registry . This decreases the reference count by one . Once the count reaches zero the shared state is deleted .
14,323
public void registerAll ( Iterable < ? extends CompositeStateHandle > stateHandles ) { if ( stateHandles == null ) { return ; } synchronized ( registeredStates ) { for ( CompositeStateHandle stateHandle : stateHandles ) { stateHandle . registerSharedStates ( this ) ; } } }
Register given shared states in the registry .
14,324
public boolean setCancellerHandle ( ScheduledFuture < ? > cancellerHandle ) { synchronized ( lock ) { if ( this . cancellerHandle == null ) { if ( ! discarded ) { this . cancellerHandle = cancellerHandle ; return true ; } else { return false ; } } else { throw new IllegalStateException ( "A canceller handle was already...
Sets the handle for the canceller to this pending checkpoint . This method fails with an exception if a handle has already been set .
14,325
public TaskAcknowledgeResult acknowledgeTask ( ExecutionAttemptID executionAttemptId , TaskStateSnapshot operatorSubtaskStates , CheckpointMetrics metrics ) { synchronized ( lock ) { if ( discarded ) { return TaskAcknowledgeResult . DISCARDED ; } final ExecutionVertex vertex = notYetAcknowledgedTasks . remove ( executi...
Acknowledges the task with the given execution attempt id and the given subtask state .
14,326
public void abort ( CheckpointFailureReason reason , Throwable cause ) { try { CheckpointException exception = new CheckpointException ( reason , cause ) ; onCompletionPromise . completeExceptionally ( exception ) ; reportFailedCheckpoint ( exception ) ; assertAbortSubsumedForced ( reason ) ; } finally { dispose ( true...
Aborts a checkpoint with reason and cause .
14,327
private void reportFailedCheckpoint ( Exception cause ) { final PendingCheckpointStats statsCallback = this . statsCallback ; if ( statsCallback != null ) { long failureTimestamp = System . currentTimeMillis ( ) ; statsCallback . reportFailedCheckpoint ( failureTimestamp , cause ) ; } }
Reports a failed checkpoint with the given optional cause .
14,328
static TypeInformation schemaToTypeInfo ( TypeDescription schema ) { switch ( schema . getCategory ( ) ) { case BOOLEAN : return BasicTypeInfo . BOOLEAN_TYPE_INFO ; case BYTE : return BasicTypeInfo . BYTE_TYPE_INFO ; case SHORT : return BasicTypeInfo . SHORT_TYPE_INFO ; case INT : return BasicTypeInfo . INT_TYPE_INFO ;...
Converts an ORC schema to a Flink TypeInformation .
14,329
static int fillRows ( Row [ ] rows , TypeDescription schema , VectorizedRowBatch batch , int [ ] selectedFields ) { int rowsToRead = Math . min ( ( int ) batch . count ( ) , rows . length ) ; List < TypeDescription > fieldTypes = schema . getChildren ( ) ; for ( int fieldIdx = 0 ; fieldIdx < selectedFields . length ; f...
Fills an ORC batch into an array of Row .
14,330
private static void fillColumnWithRepeatingValue ( Object [ ] vals , int fieldIdx , Object repeatingValue , int childCount ) { if ( fieldIdx == - 1 ) { Arrays . fill ( vals , 0 , childCount , repeatingValue ) ; } else { Row [ ] rows = ( Row [ ] ) vals ; for ( int i = 0 ; i < childCount ; i ++ ) { rows [ i ] . setField ...
Sets a repeating value to all objects or row fields of the passed vals array .
14,331
public static String lpad ( String base , int len , String pad ) { if ( len < 0 || "" . equals ( pad ) ) { return null ; } else if ( len == 0 ) { return "" ; } char [ ] data = new char [ len ] ; char [ ] baseChars = base . toCharArray ( ) ; char [ ] padChars = pad . toCharArray ( ) ; int pos = Math . max ( len - base ....
Returns the string str left - padded with the string pad to a length of len characters . If str is longer than len the return value is shortened to len characters .
14,332
public static String rpad ( String base , int len , String pad ) { if ( len < 0 || "" . equals ( pad ) ) { return null ; } else if ( len == 0 ) { return "" ; } char [ ] data = new char [ len ] ; char [ ] baseChars = base . toCharArray ( ) ; char [ ] padChars = pad . toCharArray ( ) ; int pos = 0 ; while ( pos < base . ...
Returns the string str right - padded with the string pad to a length of len characters . If str is longer than len the return value is shortened to len characters .
14,333
public static String replace ( String str , String oldStr , String replacement ) { return str . replace ( oldStr , replacement ) ; }
Replaces all the old strings with the replacement string .
14,334
public static String regexpReplace ( String str , String regex , String replacement ) { if ( regex . isEmpty ( ) ) { return str ; } try { StringBuffer sb = new StringBuffer ( ) ; Matcher m = REGEXP_PATTERN_CACHE . get ( regex ) . matcher ( str ) ; while ( m . find ( ) ) { m . appendReplacement ( sb , replacement ) ; } ...
Returns a string resulting from replacing all substrings that match the regular expression with replacement .
14,335
public static String regexpExtract ( String str , String regex , int extractIndex ) { if ( extractIndex < 0 ) { return null ; } try { Matcher m = REGEXP_PATTERN_CACHE . get ( regex ) . matcher ( str ) ; if ( m . find ( ) ) { MatchResult mr = m . toMatchResult ( ) ; return mr . group ( extractIndex ) ; } return null ; }...
Returns a string extracted with a specified regular expression and a regex match group index .
14,336
public static String hash ( String algorithm , String str , String charsetName ) { try { byte [ ] digest = MessageDigest . getInstance ( algorithm ) . digest ( strToBytesWithCharset ( str , charsetName ) ) ; return EncodingUtils . hex ( digest ) ; } catch ( NoSuchAlgorithmException e ) { throw new IllegalArgumentExcept...
Calculate the hash value of a given string .
14,337
public static String parseUrl ( String urlStr , String partToExtract ) { URL url ; try { url = URL_CACHE . get ( urlStr ) ; } catch ( Exception e ) { LOG . error ( "Parse URL error: " + urlStr , e ) ; return null ; } if ( "HOST" . equals ( partToExtract ) ) { return url . getHost ( ) ; } if ( "PATH" . equals ( partToEx...
Parse a URL and return various components of the URL . Returns null if any argument is null .
14,338
public static String parseUrl ( String urlStr , String partToExtract , String key ) { if ( ! "QUERY" . equals ( partToExtract ) ) { return null ; } String query = parseUrl ( urlStr , partToExtract ) ; if ( query == null ) { return null ; } Pattern p = Pattern . compile ( "(&|^)" + Pattern . quote ( key ) + "=([^&]*)" )...
Parse a URL and return a query parameter of the URL . Returns null if any argument is null .
14,339
public static String hex ( String x ) { return EncodingUtils . hex ( x . getBytes ( StandardCharsets . UTF_8 ) ) . toUpperCase ( ) ; }
Returns the hex string of a string argument .
14,340
public static Map < String , String > strToMap ( String text , String listDelimiter , String keyValueDelimiter ) { if ( StringUtils . isEmpty ( text ) ) { return EMPTY_MAP ; } String [ ] keyValuePairs = text . split ( listDelimiter ) ; Map < String , String > ret = new HashMap < > ( keyValuePairs . length ) ; for ( Str...
Creates a map by parsing text . Split text into key - value pairs using two delimiters . The first delimiter separates pairs and the second delimiter separates key and value .
14,341
public ConsumerRecords < byte [ ] , byte [ ] > pollNext ( ) throws Exception { synchronized ( lock ) { while ( next == null && error == null ) { lock . wait ( ) ; } ConsumerRecords < byte [ ] , byte [ ] > n = next ; if ( n != null ) { next = null ; lock . notifyAll ( ) ; return n ; } else { ExceptionUtils . rethrowExce...
Polls the next element from the Handover possibly blocking until the next element is available . This method behaves similar to polling from a blocking queue .
14,342
public void produce ( final ConsumerRecords < byte [ ] , byte [ ] > element ) throws InterruptedException , WakeupException , ClosedException { checkNotNull ( element ) ; synchronized ( lock ) { while ( next != null && ! wakeupProducer ) { lock . wait ( ) ; } wakeupProducer = false ; if ( next != null ) { throw new Wak...
Hands over an element from the producer . If the Handover already has an element that was not yet picked up by the consumer thread this call blocks until the consumer picks up that previous element .
14,343
public boolean isCompatibleWith ( DeweyNumber other ) { if ( length ( ) > other . length ( ) ) { for ( int i = 0 ; i < other . length ( ) ; i ++ ) { if ( other . deweyNumber [ i ] != deweyNumber [ i ] ) { return false ; } } return true ; } else if ( length ( ) == other . length ( ) ) { int lastIndex = length ( ) - 1 ; ...
Checks whether this dewey number is compatible to the other dewey number .
14,344
public DeweyNumber increase ( int times ) { int [ ] newDeweyNumber = Arrays . copyOf ( deweyNumber , deweyNumber . length ) ; newDeweyNumber [ deweyNumber . length - 1 ] += times ; return new DeweyNumber ( newDeweyNumber ) ; }
Creates a new dewey number from this such that its last digit is increased by the supplied number .
14,345
public DeweyNumber addStage ( ) { int [ ] newDeweyNumber = Arrays . copyOf ( deweyNumber , deweyNumber . length + 1 ) ; return new DeweyNumber ( newDeweyNumber ) ; }
Creates a new dewey number from this such that a 0 is appended as new last digit .
14,346
public static DeweyNumber fromString ( final String deweyNumberString ) { String [ ] splits = deweyNumberString . split ( "\\." ) ; if ( splits . length == 0 ) { return new DeweyNumber ( Integer . parseInt ( deweyNumberString ) ) ; } else { int [ ] deweyNumber = new int [ splits . length ] ; for ( int i = 0 ; i < split...
Creates a dewey number from a string representation . The input string must be a dot separated string of integers .
14,347
public CompletableFuture < JobDetailsInfo > getJobDetails ( JobID jobId ) { final JobDetailsHeaders detailsHeaders = JobDetailsHeaders . getInstance ( ) ; final JobMessageParameters params = new JobMessageParameters ( ) ; params . jobPathParameter . resolve ( jobId ) ; return sendRequest ( detailsHeaders , params ) ; }
Requests the job details .
14,348
public int spillPartition ( List < MemorySegment > target , IOManager ioAccess , FileIOChannel . ID targetChannel , LinkedBlockingQueue < MemorySegment > bufferReturnQueue ) throws IOException { if ( ! isInMemory ( ) ) { throw new RuntimeException ( "Bug in Hybrid Hash Join: " + "Request to spill a partition that has a...
Spills this partition to disk and sets it up such that it continues spilling records that are added to it . The spilling process must free at least one buffer either in the partition s record buffers or in the memory segments for overflow buckets . The partition immediately takes back one buffer to use it for further s...
14,349
JobSubmissionResult finalizeExecute ( ) throws ProgramInvocationException { return client . run ( detachedPlan , jarFilesToAttach , classpathsToAttach , userCodeClassLoader , savepointSettings ) ; }
Finishes this Context Environment s execution by explicitly running the plan constructed .
14,350
public static void discardStateFuture ( RunnableFuture < ? extends StateObject > stateFuture ) throws Exception { if ( null != stateFuture ) { if ( ! stateFuture . cancel ( true ) ) { try { StateObject stateObject = FutureUtils . runIfNotDoneAndGet ( stateFuture ) ; if ( null != stateObject ) { stateObject . discardSta...
Discards the given state future by first trying to cancel it . If this is not possible then the state object contained in the future is calculated and afterwards discarded .
14,351
public static < T > T find ( Class < T > factoryClass , Descriptor descriptor ) { Preconditions . checkNotNull ( descriptor ) ; return findInternal ( factoryClass , descriptor . toProperties ( ) , Optional . empty ( ) ) ; }
Finds a table factory of the given class and descriptor .
14,352
public static < T > T find ( Class < T > factoryClass , Descriptor descriptor , ClassLoader classLoader ) { Preconditions . checkNotNull ( descriptor ) ; Preconditions . checkNotNull ( classLoader ) ; return findInternal ( factoryClass , descriptor . toProperties ( ) , Optional . of ( classLoader ) ) ; }
Finds a table factory of the given class descriptor and classloader .
14,353
public static < T > T find ( Class < T > factoryClass , Map < String , String > propertyMap ) { return findInternal ( factoryClass , propertyMap , Optional . empty ( ) ) ; }
Finds a table factory of the given class and property map .
14,354
private static List < TableFactory > discoverFactories ( Optional < ClassLoader > classLoader ) { try { List < TableFactory > result = new LinkedList < > ( ) ; if ( classLoader . isPresent ( ) ) { ServiceLoader . load ( TableFactory . class , classLoader . get ( ) ) . iterator ( ) . forEachRemaining ( result :: add ) ;...
Searches for factories using Java service providers .
14,355
private static < T > List < TableFactory > filterByFactoryClass ( Class < T > factoryClass , Map < String , String > properties , List < TableFactory > foundFactories ) { List < TableFactory > classFactories = foundFactories . stream ( ) . filter ( p -> factoryClass . isAssignableFrom ( p . getClass ( ) ) ) . collect (...
Filters factories with matching context by factory class .
14,356
private static < T > List < TableFactory > filterByContext ( Class < T > factoryClass , Map < String , String > properties , List < TableFactory > foundFactories , List < TableFactory > classFactories ) { List < TableFactory > matchingFactories = classFactories . stream ( ) . filter ( factory -> { Map < String , String...
Filters for factories with matching context .
14,357
private static Map < String , String > normalizeContext ( TableFactory factory ) { Map < String , String > requiredContext = factory . requiredContext ( ) ; if ( requiredContext == null ) { throw new TableException ( String . format ( "Required context of factory '%s' must not be null." , factory . getClass ( ) . getNa...
Prepares the properties of a context to be used for match operations .
14,358
private static < T > T filterBySupportedProperties ( Class < T > factoryClass , Map < String , String > properties , List < TableFactory > foundFactories , List < TableFactory > classFactories ) { final List < String > plainGivenKeys = new LinkedList < > ( ) ; properties . keySet ( ) . forEach ( k -> { String key = k ....
Filters the matching class factories by supported properties .
14,359
private static Tuple2 < List < String > , List < String > > normalizeSupportedProperties ( TableFactory factory ) { List < String > supportedProperties = factory . supportedProperties ( ) ; if ( supportedProperties == null ) { throw new TableException ( String . format ( "Supported properties of factory '%s' must not b...
Prepares the supported properties of a factory to be used for match operations .
14,360
public void insertOrReplaceRecord ( T record ) throws IOException { if ( closed ) { return ; } T match = prober . getMatchFor ( record , reuse ) ; if ( match == null ) { prober . insertAfterNoMatch ( record ) ; } else { prober . updateMatch ( record ) ; } }
Searches the hash table for a record with the given key . If it is found then it is overridden with the specified record . Otherwise the specified record is inserted .
14,361
private void rebuild ( long newNumBucketSegments ) throws IOException { releaseBucketSegments ( ) ; allocateBucketSegments ( ( int ) newNumBucketSegments ) ; T record = buildSideSerializer . createInstance ( ) ; try { EntryIterator iter = getEntryIterator ( ) ; recordArea . resetAppendPosition ( ) ; recordArea . setWri...
Same as above but the number of bucket segments of the new table can be specified .
14,362
public static StringifiedAccumulatorResult [ ] stringifyAccumulatorResults ( Map < String , OptionalFailure < Accumulator < ? , ? > > > accs ) { if ( accs == null || accs . isEmpty ( ) ) { return new StringifiedAccumulatorResult [ 0 ] ; } else { StringifiedAccumulatorResult [ ] results = new StringifiedAccumulatorResul...
Flatten a map of accumulator names to Accumulator instances into an array of StringifiedAccumulatorResult values .
14,363
public boolean isMatching ( ResourceProfile required ) { if ( required == UNKNOWN ) { return true ; } if ( cpuCores >= required . getCpuCores ( ) && heapMemoryInMB >= required . getHeapMemoryInMB ( ) && directMemoryInMB >= required . getDirectMemoryInMB ( ) && nativeMemoryInMB >= required . getNativeMemoryInMB ( ) && n...
Check whether required resource profile can be matched .
14,364
private void grow ( int minCapacity ) { int oldCapacity = segment . size ( ) ; int newCapacity = oldCapacity + ( oldCapacity >> 1 ) ; if ( newCapacity - minCapacity < 0 ) { newCapacity = minCapacity ; } segment = MemorySegmentFactory . wrap ( Arrays . copyOf ( segment . getArray ( ) , newCapacity ) ) ; afterGrow ( ) ; ...
Increases the capacity to ensure that it can hold at least the minimum capacity argument .
14,365
public void releasePartitions ( Collection < ResultPartitionID > partitionIds ) { for ( ResultPartitionID partitionId : partitionIds ) { resultPartitionManager . releasePartition ( partitionId , null ) ; } }
Batch release intermediate result partitions .
14,366
public static RestartStrategyConfiguration fixedDelayRestart ( int restartAttempts , long delayBetweenAttempts ) { return fixedDelayRestart ( restartAttempts , Time . of ( delayBetweenAttempts , TimeUnit . MILLISECONDS ) ) ; }
Generates a FixedDelayRestartStrategyConfiguration .
14,367
public static FailureRateRestartStrategyConfiguration failureRateRestart ( int failureRate , Time failureInterval , Time delayInterval ) { return new FailureRateRestartStrategyConfiguration ( failureRate , failureInterval , delayInterval ) ; }
Generates a FailureRateRestartStrategyConfiguration .
14,368
private < T > void deployJob ( ExecutionContext < T > context , JobGraph jobGraph , Result < T > result ) { try ( final ClusterDescriptor < T > clusterDescriptor = context . createClusterDescriptor ( ) ) { try { if ( context . getClusterId ( ) == null ) { deployJobOnNewCluster ( clusterDescriptor , jobGraph , result , ...
Deploys a job . Depending on the deployment creates a new job cluster . It saves the cluster id in the result and blocks until job completion .
14,369
void storeInitialHashTable ( ) throws IOException { if ( spilled ) { return ; } spilled = true ; for ( int partIdx = 0 ; partIdx < initialPartitions . size ( ) ; partIdx ++ ) { final ReOpenableHashPartition < BT , PT > p = ( ReOpenableHashPartition < BT , PT > ) initialPartitions . get ( partIdx ) ; if ( p . isInMemory...
This method stores the initial hash table s contents on disk if hash join needs the memory for further partition processing . The initial hash table is rebuild before a new secondary input is opened .
14,370
protected boolean checkNextIndexOffset ( ) { if ( this . currentSortIndexOffset > this . lastIndexEntryOffset ) { MemorySegment returnSegment = nextMemorySegment ( ) ; if ( returnSegment != null ) { this . currentSortIndexSegment = returnSegment ; this . sortIndex . add ( this . currentSortIndexSegment ) ; this . curre...
Check if we need to request the next index memory segment .
14,371
protected void writeIndexAndNormalizedKey ( BaseRow record , long currOffset ) { this . currentSortIndexSegment . putLong ( this . currentSortIndexOffset , currOffset ) ; if ( this . numKeyBytes != 0 ) { normalizedKeyComputer . putKey ( record , this . currentSortIndexSegment , this . currentSortIndexOffset + OFFSET_LE...
Writes the index and the normalized key .
14,372
@ SuppressWarnings ( { "rawtypes" , "unchecked" } ) public void collectBuffer ( Collector < OUT > c , int bufferSize ) throws IOException { fileBuffer . position ( 0 ) ; while ( fileBuffer . position ( ) < bufferSize ) { c . collect ( deserializer . deserialize ( ) ) ; } }
Reads a buffer of the given size from the memory - mapped file and collects all records contained . This method assumes that all values in the buffer are of the same type . This method does NOT take care of synchronization . The user must guarantee that the buffer was completely written before calling this method .
14,373
public void init ( Map < EventId , Lockable < V > > events , Map < NodeId , Lockable < SharedBufferNode > > entries ) throws Exception { eventsBuffer . putAll ( events ) ; this . entries . putAll ( entries ) ; Map < Long , Integer > maxIds = events . keySet ( ) . stream ( ) . collect ( Collectors . toMap ( EventId :: g...
Initializes underlying state with given map of events and entries . Should be used only in case of migration from old state .
14,374
public boolean isEmpty ( ) throws Exception { return Iterables . isEmpty ( eventsBufferCache . keySet ( ) ) && Iterables . isEmpty ( eventsBuffer . keys ( ) ) ; }
Checks if there are no elements in the buffer .
14,375
void upsertEvent ( EventId eventId , Lockable < V > event ) { this . eventsBufferCache . put ( eventId , event ) ; }
Inserts or updates an event in cache .
14,376
void upsertEntry ( NodeId nodeId , Lockable < SharedBufferNode > entry ) { this . entryCache . put ( nodeId , entry ) ; }
Inserts or updates a SharedBufferNode in the cache .
14,377
void removeEvent ( EventId eventId ) throws Exception { this . eventsBufferCache . remove ( eventId ) ; this . eventsBuffer . remove ( eventId ) ; }
Removes an event from cache and state .
14,378
void removeEntry ( NodeId nodeId ) throws Exception { this . entryCache . remove ( nodeId ) ; this . entries . remove ( nodeId ) ; }
Removes a ShareBufferNode from cache and state .
14,379
Lockable < SharedBufferNode > getEntry ( NodeId nodeId ) { return entryCache . computeIfAbsent ( nodeId , id -> { try { return entries . get ( id ) ; } catch ( Exception ex ) { throw new WrappingRuntimeException ( ex ) ; } } ) ; }
Always returns the node , either from the cache or from the state .
14,380
Lockable < V > getEvent ( EventId eventId ) { return eventsBufferCache . computeIfAbsent ( eventId , id -> { try { return eventsBuffer . get ( id ) ; } catch ( Exception ex ) { throw new WrappingRuntimeException ( ex ) ; } } ) ; }
Always returns the event , either from the cache or from the state .
14,381
void flushCache ( ) throws Exception { if ( ! entryCache . isEmpty ( ) ) { entries . putAll ( entryCache ) ; entryCache . clear ( ) ; } if ( ! eventsBufferCache . isEmpty ( ) ) { eventsBuffer . putAll ( eventsBufferCache ) ; eventsBufferCache . clear ( ) ; } }
Flush the event and node from cache to state .
14,382
public TimeWindow cover ( TimeWindow other ) { return new TimeWindow ( Math . min ( start , other . start ) , Math . max ( end , other . end ) ) ; }
Returns the minimal window that covers both this window and the given window .
14,383
protected void computeOperatorSpecificDefaultEstimates ( DataStatistics statistics ) { this . estimatedNumRecords = getPredecessorNode ( ) . getEstimatedNumRecords ( ) ; this . estimatedOutputSize = getPredecessorNode ( ) . getEstimatedOutputSize ( ) ; }
Computes the estimated outputs for the data sink . Since the sink does not modify anything it simply copies the output estimates from its direct predecessor .
14,384
public List < KafkaTopicPartition > discoverPartitions ( ) throws WakeupException , ClosedException { if ( ! closed && ! wakeup ) { try { List < KafkaTopicPartition > newDiscoveredPartitions ; if ( topicsDescriptor . isFixedTopics ( ) ) { newDiscoveredPartitions = getAllPartitionsForTopics ( topicsDescriptor . getFixed...
Execute a partition discovery attempt for this subtask . This method lets the partition discoverer update what partitions it has discovered so far .
14,385
/**
 * Marks a partition as discovered and determines whether it should be consumed
 * by this subtask.
 *
 * @param partition the partition to register
 * @return true iff the partition was not previously discovered and is assigned
 *         to this subtask
 */
public boolean setAndCheckDiscoveredPartition(KafkaTopicPartition partition) {
    // Already-known partitions are never reported again.
    if (!isUndiscoveredPartition(partition)) {
        return false;
    }
    discoveredPartitions.add(partition);
    return KafkaTopicPartitionAssigner.assign(partition, numParallelSubtasks) == indexOfThisSubtask;
}
Sets a partition as discovered. Partitions are considered new if their partition id is larger than all partition ids previously seen for the topic they belong to. Therefore, for a set of discovered partitions, the order in which this method is invoked for each partition is important.
14,386
public void shutdown ( JobStatus jobStatus ) throws Exception { synchronized ( lock ) { if ( ! shutdown ) { shutdown = true ; LOG . info ( "Stopping checkpoint coordinator for job {}." , job ) ; periodicScheduling = false ; triggerRequestQueued = false ; MasterHooks . close ( masterHooks . values ( ) , LOG ) ; masterHo...
Shuts down the checkpoint coordinator .
14,387
/**
 * Triggers a savepoint with the given savepoint directory as a target.
 *
 * @param timestamp      the timestamp attached to the savepoint
 * @param targetLocation the target directory of the savepoint
 * @return a future that completes with the resulting completed checkpoint
 */
public CompletableFuture<CompletedCheckpoint> triggerSavepoint(final long timestamp, final String targetLocation) {
    return triggerSavepointInternal(timestamp, CheckpointProperties.forSavepoint(), false, targetLocation);
}
Triggers a savepoint with the given savepoint directory as a target .
14,388
public CompletableFuture < CompletedCheckpoint > triggerSynchronousSavepoint ( final long timestamp , final boolean advanceToEndOfEventTime , final String targetLocation ) { final CheckpointProperties properties = CheckpointProperties . forSyncSavepoint ( ) ; return triggerSavepointInternal ( timestamp , properties , a...
Triggers a synchronous savepoint with the given savepoint directory as a target .
14,389
/**
 * Triggers a new standard checkpoint, using the given timestamp as the
 * checkpoint timestamp.
 *
 * @param timestamp  the timestamp attached to the checkpoint
 * @param isPeriodic whether this trigger stems from the periodic scheduler
 * @return true if triggering succeeded, false if it failed with a CheckpointException
 */
public boolean triggerCheckpoint(long timestamp, boolean isPeriodic) {
    try {
        triggerCheckpoint(timestamp, checkpointProperties, null, isPeriodic, false);
        return true;
    } catch (CheckpointException ignored) {
        // Failure to trigger is reported to the caller via the return value.
        return false;
    }
}
Triggers a new standard checkpoint and uses the given timestamp as the checkpoint timestamp .
14,390
private void completePendingCheckpoint ( PendingCheckpoint pendingCheckpoint ) throws CheckpointException { final long checkpointId = pendingCheckpoint . getCheckpointId ( ) ; final CompletedCheckpoint completedCheckpoint ; Map < OperatorID , OperatorState > operatorStates = pendingCheckpoint . getOperatorStates ( ) ; ...
Try to complete the given pending checkpoint .
14,391
public void failUnacknowledgedPendingCheckpointsFor ( ExecutionAttemptID executionAttemptId , Throwable cause ) { synchronized ( lock ) { Iterator < PendingCheckpoint > pendingCheckpointIterator = pendingCheckpoints . values ( ) . iterator ( ) ; while ( pendingCheckpointIterator . hasNext ( ) ) { final PendingCheckpoin...
Fails all pending checkpoints which have not been acknowledged by the given execution attempt id .
14,392
private void triggerQueuedRequests ( ) { if ( triggerRequestQueued ) { triggerRequestQueued = false ; if ( periodicScheduling ) { if ( currentPeriodicTrigger != null ) { currentPeriodicTrigger . cancel ( false ) ; } currentPeriodicTrigger = timer . scheduleAtFixedRate ( new ScheduledTrigger ( ) , 0L , baseInterval , Ti...
Triggers the queued request if there is one .
14,393
public boolean restoreSavepoint ( String savepointPointer , boolean allowNonRestored , Map < JobVertexID , ExecutionJobVertex > tasks , ClassLoader userClassLoader ) throws Exception { Preconditions . checkNotNull ( savepointPointer , "The savepoint path cannot be null." ) ; LOG . info ( "Starting job {} from savepoint...
Restores the state from the given savepoint.
14,394
/**
 * Aborts all pending checkpoints due to the given exception.
 *
 * @param exception the cause used to abort each pending checkpoint
 */
public void abortPendingCheckpoints(CheckpointException exception) {
    synchronized (lock) {
        for (PendingCheckpoint pending : pendingCheckpoints.values()) {
            pending.abort(exception.getCheckpointFailureReason());
        }
        pendingCheckpoints.clear();
    }
}
Aborts all the pending checkpoints due to an exception.
14,395
private void discardSubtaskState ( final JobID jobId , final ExecutionAttemptID executionAttemptID , final long checkpointId , final TaskStateSnapshot subtaskState ) { if ( subtaskState != null ) { executor . execute ( new Runnable ( ) { public void run ( ) { try { subtaskState . discardState ( ) ; } catch ( Throwable ...
Asynchronously discards the given state object, which belongs to the given job, execution attempt id, and checkpoint id.
14,396
public ResourceSpec merge ( ResourceSpec other ) { ResourceSpec target = new ResourceSpec ( Math . max ( this . cpuCores , other . cpuCores ) , this . heapMemoryInMB + other . heapMemoryInMB , this . directMemoryInMB + other . directMemoryInMB , this . nativeMemoryInMB + other . nativeMemoryInMB , this . stateSizeInMB ...
Used internally by the system to merge the resources of chained operators when generating the job graph, or to merge the resources consumed by the state backend.
14,397
public boolean isValid ( ) { if ( this . cpuCores >= 0 && this . heapMemoryInMB >= 0 && this . directMemoryInMB >= 0 && this . nativeMemoryInMB >= 0 && this . stateSizeInMB >= 0 ) { for ( Resource resource : extendedResources . values ( ) ) { if ( resource . getValue ( ) < 0 ) { return false ; } } return true ; } else ...
Check whether all the field values are valid .
14,398
/**
 * Adds a FieldSet to be unique.
 *
 * @param uniqueFieldSet the field set to register as unique
 */
public void addUniqueField(FieldSet uniqueFieldSet) {
    // Lazily create the set on first registration.
    if (uniqueFields == null) {
        uniqueFields = new HashSet<FieldSet>();
    }
    uniqueFields.add(uniqueFieldSet);
}
Adds a FieldSet to be unique.
14,399
/**
 * Adds a single field as having only unique values.
 *
 * @param field the position of the field to register as unique
 */
public void addUniqueField(int field) {
    // Lazily create the set on first registration.
    if (uniqueFields == null) {
        uniqueFields = new HashSet<FieldSet>();
    }
    uniqueFields.add(new FieldSet(field));
}
Adds a field as having only unique values .