idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
15,600 | protected void validateValues ( SqlCall node , RelDataType targetRowType , final SqlValidatorScope scope ) { assert node . getKind ( ) == SqlKind . VALUES ; final List < SqlNode > operands = node . getOperandList ( ) ; for ( SqlNode operand : operands ) { if ( ! ( operand . getKind ( ) == SqlKind . ROW ) ) { throw Util . needToImplement ( "Values function where operands are scalars" ) ; } SqlCall rowConstructor = ( SqlCall ) operand ; if ( conformance . isInsertSubsetColumnsAllowed ( ) && targetRowType . isStruct ( ) && rowConstructor . operandCount ( ) < targetRowType . getFieldCount ( ) ) { targetRowType = typeFactory . createStructType ( targetRowType . getFieldList ( ) . subList ( 0 , rowConstructor . operandCount ( ) ) ) ; } else if ( targetRowType . isStruct ( ) && rowConstructor . operandCount ( ) != targetRowType . getFieldCount ( ) ) { return ; } inferUnknownTypes ( targetRowType , scope , rowConstructor ) ; if ( targetRowType . isStruct ( ) ) { for ( Pair < SqlNode , RelDataTypeField > pair : Pair . zip ( rowConstructor . getOperandList ( ) , targetRowType . getFieldList ( ) ) ) { if ( ! pair . right . getType ( ) . isNullable ( ) && SqlUtil . isNullLiteral ( pair . left , false ) ) { throw newValidationError ( node , RESOURCE . columnNotNullable ( pair . right . getName ( ) ) ) ; } } } } for ( SqlNode operand : operands ) { operand . validate ( this , scope ) ; } final int rowCount = operands . size ( ) ; if ( rowCount >= 2 ) { SqlCall firstRow = ( SqlCall ) operands . get ( 0 ) ; final int columnCount = firstRow . operandCount ( ) ; for ( SqlNode operand : operands ) { SqlCall thisRow = ( SqlCall ) operand ; if ( columnCount != thisRow . operandCount ( ) ) { throw newValidationError ( node , RESOURCE . incompatibleValueType ( SqlStdOperatorTable . VALUES . getName ( ) ) ) ; } } for ( int col = 0 ; col < columnCount ; col ++ ) { final int c = col ; final RelDataType type = typeFactory . 
leastRestrictive ( new AbstractList < RelDataType > ( ) { public RelDataType get ( int row ) { SqlCall thisRow = ( SqlCall ) operands . get ( row ) ; return deriveType ( scope , thisRow . operand ( c ) ) ; } public int size ( ) { return rowCount ; } } ) ; if ( null == type ) { throw newValidationError ( node , RESOURCE . incompatibleValueType ( SqlStdOperatorTable . VALUES . getName ( ) ) ) ; } } } } | Validates a VALUES clause . |
15,601 | private static String alias ( SqlNode item ) { assert item instanceof SqlCall ; assert item . getKind ( ) == SqlKind . AS ; final SqlIdentifier identifier = ( ( SqlCall ) item ) . operand ( 1 ) ; return identifier . getSimple ( ) ; } | Returns the alias of a expr AS alias expression . |
15,602 | public static OutputStreamAndPath createEntropyAware ( FileSystem fs , Path path , WriteMode writeMode ) throws IOException { final EntropyInjectingFileSystem efs = getEntropyFs ( fs ) ; final Path processedPath = efs == null ? path : resolveEntropy ( path , efs , true ) ; final FSDataOutputStream out = fs . create ( processedPath , writeMode ) ; return new OutputStreamAndPath ( out , processedPath ) ; } | Handles entropy injection across regular and entropy - aware file systems . |
15,603 | public FsStateBackend configure ( Configuration config , ClassLoader classLoader ) { return new FsStateBackend ( this , config , classLoader ) ; } | Creates a copy of this state backend that uses the values defined in the configuration for fields where that were not specified in this state backend . |
15,604 | public static List < ResolverRule > getResolverRules ( ) { return Arrays . asList ( ResolverRules . LOOKUP_CALL_BY_NAME , ResolverRules . FLATTEN_STAR_REFERENCE , ResolverRules . EXPAND_COLUMN_FUNCTIONS , ResolverRules . OVER_WINDOWS , ResolverRules . FIELD_RESOLVE , ResolverRules . FLATTEN_CALL , ResolverRules . RESOLVE_CALL_BY_ARGUMENTS , ResolverRules . VERIFY_NO_MORE_UNRESOLVED_EXPRESSIONS ) ; } | List of rules that will be applied during expression resolution . |
15,605 | public LogicalWindow resolveGroupWindow ( GroupWindow window ) { Expression alias = window . getAlias ( ) ; if ( ! ( alias instanceof UnresolvedReferenceExpression ) ) { throw new ValidationException ( "Alias of group window should be an UnresolvedFieldReference" ) ; } final String windowName = ( ( UnresolvedReferenceExpression ) alias ) . getName ( ) ; List < Expression > resolvedTimeFieldExpression = prepareExpressions ( Collections . singletonList ( window . getTimeField ( ) ) ) ; if ( resolvedTimeFieldExpression . size ( ) != 1 ) { throw new ValidationException ( "Group Window only supports a single time field column." ) ; } PlannerExpression timeField = resolvedTimeFieldExpression . get ( 0 ) . accept ( bridgeConverter ) ; WindowReference resolvedAlias = new WindowReference ( windowName , new Some < > ( timeField . resultType ( ) ) ) ; if ( window instanceof TumbleWithSizeOnTimeWithAlias ) { TumbleWithSizeOnTimeWithAlias tw = ( TumbleWithSizeOnTimeWithAlias ) window ; return new TumblingGroupWindow ( resolvedAlias , timeField , resolveFieldsInSingleExpression ( tw . getSize ( ) ) . accept ( bridgeConverter ) ) ; } else if ( window instanceof SlideWithSizeAndSlideOnTimeWithAlias ) { SlideWithSizeAndSlideOnTimeWithAlias sw = ( SlideWithSizeAndSlideOnTimeWithAlias ) window ; return new SlidingGroupWindow ( resolvedAlias , timeField , resolveFieldsInSingleExpression ( sw . getSize ( ) ) . accept ( bridgeConverter ) , resolveFieldsInSingleExpression ( sw . getSlide ( ) ) . accept ( bridgeConverter ) ) ; } else if ( window instanceof SessionWithGapOnTimeWithAlias ) { SessionWithGapOnTimeWithAlias sw = ( SessionWithGapOnTimeWithAlias ) window ; return new SessionGroupWindow ( resolvedAlias , timeField , resolveFieldsInSingleExpression ( sw . getGap ( ) ) . accept ( bridgeConverter ) ) ; } else { throw new TableException ( "Unknown window type" ) ; } } | Converts an API class to a logical window for planning with expressions already resolved . |
15,606 | public void start ( SlotActions initialSlotActions ) { this . slotActions = Preconditions . checkNotNull ( initialSlotActions ) ; timerService . start ( this ) ; started = true ; } | Start the task slot table with the given slot actions . |
15,607 | public boolean allocateSlot ( int index , JobID jobId , AllocationID allocationId , Time slotTimeout ) { checkInit ( ) ; TaskSlot taskSlot = taskSlots . get ( index ) ; boolean result = taskSlot . allocate ( jobId , allocationId ) ; if ( result ) { allocationIDTaskSlotMap . put ( allocationId , taskSlot ) ; timerService . registerTimeout ( allocationId , slotTimeout . getSize ( ) , slotTimeout . getUnit ( ) ) ; Set < AllocationID > slots = slotsPerJob . get ( jobId ) ; if ( slots == null ) { slots = new HashSet < > ( 4 ) ; slotsPerJob . put ( jobId , slots ) ; } slots . add ( allocationId ) ; } return result ; } | Allocate the slot with the given index for the given job and allocation id . Returns true if the slot could be allocated . Otherwise it returns false . |
15,608 | public int freeSlot ( AllocationID allocationId , Throwable cause ) throws SlotNotFoundException { checkInit ( ) ; TaskSlot taskSlot = getTaskSlot ( allocationId ) ; if ( taskSlot != null ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Free slot {}." , taskSlot , cause ) ; } else { LOG . info ( "Free slot {}." , taskSlot ) ; } final JobID jobId = taskSlot . getJobId ( ) ; if ( taskSlot . markFree ( ) ) { allocationIDTaskSlotMap . remove ( allocationId ) ; timerService . unregisterTimeout ( allocationId ) ; Set < AllocationID > slots = slotsPerJob . get ( jobId ) ; if ( slots == null ) { throw new IllegalStateException ( "There are no more slots allocated for the job " + jobId + ". This indicates a programming bug." ) ; } slots . remove ( allocationId ) ; if ( slots . isEmpty ( ) ) { slotsPerJob . remove ( jobId ) ; } return taskSlot . getIndex ( ) ; } else { taskSlot . markReleasing ( ) ; Iterator < Task > taskIterator = taskSlot . getTasks ( ) ; while ( taskIterator . hasNext ( ) ) { taskIterator . next ( ) . failExternally ( cause ) ; } return - 1 ; } } else { throw new SlotNotFoundException ( allocationId ) ; } } | Tries to free the slot . If the slot is empty it will set the state of the task slot to free and return its index . If the slot is not empty then it will set the state of the task slot to releasing fail all tasks and return - 1 . |
15,609 | public boolean isValidTimeout ( AllocationID allocationId , UUID ticket ) { checkInit ( ) ; return timerService . isValid ( allocationId , ticket ) ; } | Check whether the timeout with ticket is valid for the given allocation id . |
15,610 | public boolean isAllocated ( int index , JobID jobId , AllocationID allocationId ) { TaskSlot taskSlot = taskSlots . get ( index ) ; return taskSlot . isAllocated ( jobId , allocationId ) ; } | Check whether the slot for the given index is allocated for the given job and allocation id . |
15,611 | public boolean tryMarkSlotActive ( JobID jobId , AllocationID allocationId ) { TaskSlot taskSlot = getTaskSlot ( allocationId ) ; if ( taskSlot != null && taskSlot . isAllocated ( jobId , allocationId ) ) { return taskSlot . markActive ( ) ; } else { return false ; } } | Try to mark the specified slot as active if it has been allocated by the given job . |
15,612 | public boolean addTask ( Task task ) throws SlotNotFoundException , SlotNotActiveException { Preconditions . checkNotNull ( task ) ; TaskSlot taskSlot = getTaskSlot ( task . getAllocationId ( ) ) ; if ( taskSlot != null ) { if ( taskSlot . isActive ( task . getJobID ( ) , task . getAllocationId ( ) ) ) { if ( taskSlot . add ( task ) ) { taskSlotMappings . put ( task . getExecutionId ( ) , new TaskSlotMapping ( task , taskSlot ) ) ; return true ; } else { return false ; } } else { throw new SlotNotActiveException ( task . getJobID ( ) , task . getAllocationId ( ) ) ; } } else { throw new SlotNotFoundException ( task . getAllocationId ( ) ) ; } } | Add the given task to the slot identified by the task s allocation id . |
15,613 | public Task removeTask ( ExecutionAttemptID executionAttemptID ) { checkInit ( ) ; TaskSlotMapping taskSlotMapping = taskSlotMappings . remove ( executionAttemptID ) ; if ( taskSlotMapping != null ) { Task task = taskSlotMapping . getTask ( ) ; TaskSlot taskSlot = taskSlotMapping . getTaskSlot ( ) ; taskSlot . remove ( task . getExecutionId ( ) ) ; if ( taskSlot . isReleasing ( ) && taskSlot . isEmpty ( ) ) { slotActions . freeSlot ( taskSlot . getAllocationId ( ) ) ; } return task ; } else { return null ; } } | Remove the task with the given execution attempt id from its task slot . If the owning task slot is in state releasing and empty after removing the task the slot is freed via the slot actions . |
15,614 | public Task getTask ( ExecutionAttemptID executionAttemptID ) { TaskSlotMapping taskSlotMapping = taskSlotMappings . get ( executionAttemptID ) ; if ( taskSlotMapping != null ) { return taskSlotMapping . getTask ( ) ; } else { return null ; } } | Get the task for the given execution attempt id . If none could be found then return null . |
15,615 | public Kafka properties ( Properties properties ) { Preconditions . checkNotNull ( properties ) ; if ( this . kafkaProperties == null ) { this . kafkaProperties = new HashMap < > ( ) ; } this . kafkaProperties . clear ( ) ; properties . forEach ( ( k , v ) -> this . kafkaProperties . put ( ( String ) k , ( String ) v ) ) ; return this ; } | Sets the configuration properties for the Kafka consumer . Resets previously set properties . |
15,616 | public Kafka property ( String key , String value ) { Preconditions . checkNotNull ( key ) ; Preconditions . checkNotNull ( value ) ; if ( this . kafkaProperties == null ) { this . kafkaProperties = new HashMap < > ( ) ; } kafkaProperties . put ( key , value ) ; return this ; } | Adds a configuration properties for the Kafka consumer . |
15,617 | public Kafka startFromSpecificOffsets ( Map < Integer , Long > specificOffsets ) { this . startupMode = StartupMode . SPECIFIC_OFFSETS ; this . specificOffsets = Preconditions . checkNotNull ( specificOffsets ) ; return this ; } | Configures to start reading partitions from specific offsets set independently for each partition . Resets previously set offsets . |
15,618 | public Kafka startFromSpecificOffset ( int partition , long specificOffset ) { this . startupMode = StartupMode . SPECIFIC_OFFSETS ; if ( this . specificOffsets == null ) { this . specificOffsets = new HashMap < > ( ) ; } this . specificOffsets . put ( partition , specificOffset ) ; return this ; } | Configures to start reading partitions from specific offsets and specifies the given offset for the given partition . |
15,619 | public Kafka sinkPartitionerCustom ( Class < ? extends FlinkKafkaPartitioner > partitionerClass ) { sinkPartitionerType = CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM ; sinkPartitionerClass = Preconditions . checkNotNull ( partitionerClass ) ; return this ; } | Configures how to partition records from Flink s partitions into Kafka s partitions . |
15,620 | public static int assignKeyToParallelOperator ( Object key , int maxParallelism , int parallelism ) { return computeOperatorIndexForKeyGroup ( maxParallelism , parallelism , assignToKeyGroup ( key , maxParallelism ) ) ; } | Assigns the given key to a parallel operator index . |
15,621 | public static KeyGroupRange computeKeyGroupRangeForOperatorIndex ( int maxParallelism , int parallelism , int operatorIndex ) { checkParallelismPreconditions ( parallelism ) ; checkParallelismPreconditions ( maxParallelism ) ; Preconditions . checkArgument ( maxParallelism >= parallelism , "Maximum parallelism must not be smaller than parallelism." ) ; int start = ( ( operatorIndex * maxParallelism + parallelism - 1 ) / parallelism ) ; int end = ( ( operatorIndex + 1 ) * maxParallelism - 1 ) / parallelism ; return new KeyGroupRange ( start , end ) ; } | Computes the range of key - groups that are assigned to a given operator under the given parallelism and maximum parallelism . |
15,622 | public static int computeDefaultMaxParallelism ( int operatorParallelism ) { checkParallelismPreconditions ( operatorParallelism ) ; return Math . min ( Math . max ( MathUtils . roundUpToPowerOfTwo ( operatorParallelism + ( operatorParallelism / 2 ) ) , DEFAULT_LOWER_BOUND_MAX_PARALLELISM ) , UPPER_BOUND_MAX_PARALLELISM ) ; } | Computes a default maximum parallelism from the operator parallelism . This is used in case the user has not explicitly configured a maximum parallelism to still allow a certain degree of scale - up . |
15,623 | public JobExecutionResult executePlan ( Plan plan ) throws Exception { if ( plan == null ) { throw new IllegalArgumentException ( "The plan may not be null." ) ; } synchronized ( this . lock ) { final boolean shutDownAtEnd ; if ( jobExecutorService == null ) { shutDownAtEnd = true ; if ( this . taskManagerNumSlots == DEFAULT_TASK_MANAGER_NUM_SLOTS ) { int maxParallelism = plan . getMaximumParallelism ( ) ; if ( maxParallelism > 0 ) { this . taskManagerNumSlots = maxParallelism ; } } start ( ) ; } else { shutDownAtEnd = false ; } try { final int slotsPerTaskManager = jobExecutorServiceConfiguration . getInteger ( TaskManagerOptions . NUM_TASK_SLOTS , taskManagerNumSlots ) ; final int numTaskManagers = jobExecutorServiceConfiguration . getInteger ( ConfigConstants . LOCAL_NUMBER_TASK_MANAGER , 1 ) ; plan . setDefaultParallelism ( slotsPerTaskManager * numTaskManagers ) ; Optimizer pc = new Optimizer ( new DataStatistics ( ) , jobExecutorServiceConfiguration ) ; OptimizedPlan op = pc . compile ( plan ) ; JobGraphGenerator jgg = new JobGraphGenerator ( jobExecutorServiceConfiguration ) ; JobGraph jobGraph = jgg . compileJobGraph ( op , plan . getJobId ( ) ) ; return jobExecutorService . executeJobBlocking ( jobGraph ) ; } finally { if ( shutDownAtEnd ) { stop ( ) ; } } } } | Executes the given program on a local runtime and waits for the job to finish . |
15,624 | public static JobExecutionResult execute ( Program pa , String ... args ) throws Exception { return execute ( pa . getPlan ( args ) ) ; } | Executes the given program . |
15,625 | public static String optimizerPlanAsJSON ( Plan plan ) throws Exception { final int parallelism = plan . getDefaultParallelism ( ) == ExecutionConfig . PARALLELISM_DEFAULT ? 1 : plan . getDefaultParallelism ( ) ; Optimizer pc = new Optimizer ( new DataStatistics ( ) , new Configuration ( ) ) ; pc . setDefaultParallelism ( parallelism ) ; OptimizedPlan op = pc . compile ( plan ) ; return new PlanJSONDumpGenerator ( ) . getOptimizerPlanAsJSON ( op ) ; } | Creates a JSON representation of the given dataflow s execution plan . |
15,626 | public static String getPlanAsJSON ( Plan plan ) { List < DataSinkNode > sinks = Optimizer . createPreOptimizedPlan ( plan ) ; return new PlanJSONDumpGenerator ( ) . getPactPlanAsJSON ( sinks ) ; } | Creates a JSON representation of the given dataflow plan . |
15,627 | public final void resolve ( X value ) { Preconditions . checkState ( ! resolved , "This parameter was already resolved." ) ; this . value = Preconditions . checkNotNull ( value ) ; this . resolved = true ; } | Resolves this parameter for the given value . |
15,628 | protected ShardConsumer createShardConsumer ( Integer subscribedShardStateIndex , StreamShardHandle subscribedShard , SequenceNumber lastSequenceNum , ShardMetricsReporter shardMetricsReporter ) { return new ShardConsumer < > ( this , subscribedShardStateIndex , subscribedShard , lastSequenceNum , this . kinesisProxyFactory . create ( configProps ) , shardMetricsReporter ) ; } | Create a new shard consumer . Override this method to customize shard consumer behavior in subclasses . |
15,629 | public HashMap < StreamShardMetadata , SequenceNumber > snapshotState ( ) { assert Thread . holdsLock ( checkpointLock ) ; HashMap < StreamShardMetadata , SequenceNumber > stateSnapshot = new HashMap < > ( ) ; for ( KinesisStreamShardState shardWithState : subscribedShardsState ) { stateSnapshot . put ( shardWithState . getStreamShardMetadata ( ) , shardWithState . getLastProcessedSequenceNum ( ) ) ; } return stateSnapshot ; } | Creates a snapshot of the current last processed sequence numbers of each subscribed shard . |
15,630 | public void advanceLastDiscoveredShardOfStream ( String stream , String shardId ) { String lastSeenShardIdOfStream = this . subscribedStreamsToLastDiscoveredShardIds . get ( stream ) ; if ( lastSeenShardIdOfStream == null ) { this . subscribedStreamsToLastDiscoveredShardIds . put ( stream , shardId ) ; } else if ( shouldAdvanceLastDiscoveredShardId ( shardId , lastSeenShardIdOfStream ) ) { this . subscribedStreamsToLastDiscoveredShardIds . put ( stream , shardId ) ; } } | Updates the last discovered shard of a subscribed stream ; only updates if the update is valid . |
15,631 | public int registerNewSubscribedShardState ( KinesisStreamShardState newSubscribedShardState ) { synchronized ( checkpointLock ) { subscribedShardsState . add ( newSubscribedShardState ) ; if ( ! newSubscribedShardState . getLastProcessedSequenceNum ( ) . equals ( SentinelSequenceNumber . SENTINEL_SHARD_ENDING_SEQUENCE_NUM . get ( ) ) ) { this . numberOfActiveShards . incrementAndGet ( ) ; } int shardStateIndex = subscribedShardsState . size ( ) - 1 ; ShardWatermarkState sws = shardWatermarks . get ( shardStateIndex ) ; if ( sws == null ) { sws = new ShardWatermarkState ( ) ; try { sws . periodicWatermarkAssigner = InstantiationUtil . clone ( periodicWatermarkAssigner ) ; } catch ( Exception e ) { throw new RuntimeException ( "Failed to instantiate new WatermarkAssigner" , e ) ; } sws . lastUpdated = getCurrentTimeMillis ( ) ; sws . lastRecordTimestamp = Long . MIN_VALUE ; shardWatermarks . put ( shardStateIndex , sws ) ; } return shardStateIndex ; } } | Register a new subscribed shard state . |
15,632 | protected void emitWatermark ( ) { LOG . debug ( "Evaluating watermark for subtask {} time {}" , indexOfThisConsumerSubtask , getCurrentTimeMillis ( ) ) ; long potentialWatermark = Long . MAX_VALUE ; long idleTime = ( shardIdleIntervalMillis > 0 ) ? getCurrentTimeMillis ( ) - shardIdleIntervalMillis : Long . MAX_VALUE ; for ( Map . Entry < Integer , ShardWatermarkState > e : shardWatermarks . entrySet ( ) ) { Watermark w = e . getValue ( ) . periodicWatermarkAssigner . getCurrentWatermark ( ) ; if ( w != null && ( e . getValue ( ) . lastUpdated >= idleTime || w . getTimestamp ( ) > lastWatermark ) ) { potentialWatermark = Math . min ( potentialWatermark , w . getTimestamp ( ) ) ; } } if ( potentialWatermark == Long . MAX_VALUE ) { if ( shardWatermarks . isEmpty ( ) || shardIdleIntervalMillis > 0 ) { LOG . debug ( "No active shard for subtask {}, marking the source idle." , indexOfThisConsumerSubtask ) ; sourceContext . markAsTemporarilyIdle ( ) ; } } else if ( potentialWatermark > lastWatermark ) { LOG . debug ( "Emitting watermark {} from subtask {}" , potentialWatermark , indexOfThisConsumerSubtask ) ; sourceContext . emitWatermark ( new Watermark ( potentialWatermark ) ) ; lastWatermark = potentialWatermark ; } } | Called periodically to emit a watermark . Checks all shards for the current event time watermark and possibly emits the next watermark . |
15,633 | public GraphAnalyticBase < K , VV , EV , T > setParallelism ( int parallelism ) { Preconditions . checkArgument ( parallelism > 0 || parallelism == PARALLELISM_DEFAULT , "The parallelism must be at least one, or ExecutionConfig.PARALLELISM_DEFAULT (use system default)." ) ; this . parallelism = parallelism ; return this ; } | Set the parallelism for this analytic s operators . This parameter is necessary because processing a small amount of data with high operator parallelism is slow and wasteful with memory and buffers . |
15,634 | public static String createRandomName ( String prefix ) { Preconditions . checkNotNull ( prefix , "Prefix must not be null." ) ; long nameOffset ; do { nameOffset = nextNameOffset . get ( ) ; } while ( ! nextNameOffset . compareAndSet ( nameOffset , nameOffset + 1L ) ) ; return prefix + '_' + nameOffset ; } | Creates a random name of the form prefix_X where X is an increasing number . |
15,635 | public void dispose ( ) throws Exception { Exception exception = null ; StreamTask < ? , ? > containingTask = getContainingTask ( ) ; CloseableRegistry taskCloseableRegistry = containingTask != null ? containingTask . getCancelables ( ) : null ; try { if ( taskCloseableRegistry == null || taskCloseableRegistry . unregisterCloseable ( operatorStateBackend ) ) { operatorStateBackend . close ( ) ; } } catch ( Exception e ) { exception = e ; } try { if ( taskCloseableRegistry == null || taskCloseableRegistry . unregisterCloseable ( keyedStateBackend ) ) { keyedStateBackend . close ( ) ; } } catch ( Exception e ) { exception = ExceptionUtils . firstOrSuppressed ( e , exception ) ; } try { if ( operatorStateBackend != null ) { operatorStateBackend . dispose ( ) ; } } catch ( Exception e ) { exception = ExceptionUtils . firstOrSuppressed ( e , exception ) ; } try { if ( keyedStateBackend != null ) { keyedStateBackend . dispose ( ) ; } } catch ( Exception e ) { exception = ExceptionUtils . firstOrSuppressed ( e , exception ) ; } if ( exception != null ) { throw exception ; } } | This method is called at the very end of the operator s life both in the case of a successful completion of the operation and in the case of a failure and canceling . |
15,636 | public void snapshotState ( StateSnapshotContext context ) throws Exception { final KeyedStateBackend < ? > keyedStateBackend = getKeyedStateBackend ( ) ; if ( keyedStateBackend instanceof AbstractKeyedStateBackend && ( ( AbstractKeyedStateBackend < ? > ) keyedStateBackend ) . requiresLegacySynchronousTimerSnapshots ( ) ) { KeyedStateCheckpointOutputStream out ; try { out = context . getRawKeyedOperatorStateOutput ( ) ; } catch ( Exception exception ) { throw new Exception ( "Could not open raw keyed operator state stream for " + getOperatorName ( ) + '.' , exception ) ; } try { KeyGroupsList allKeyGroups = out . getKeyGroupList ( ) ; for ( int keyGroupIdx : allKeyGroups ) { out . startNewKeyGroup ( keyGroupIdx ) ; timeServiceManager . snapshotStateForKeyGroup ( new DataOutputViewStreamWrapper ( out ) , keyGroupIdx ) ; } } catch ( Exception exception ) { throw new Exception ( "Could not write timer service of " + getOperatorName ( ) + " to checkpoint state stream." , exception ) ; } finally { try { out . close ( ) ; } catch ( Exception closeException ) { LOG . warn ( "Could not close raw keyed operator state stream for {}. This " + "might have prevented deleting some state data." , getOperatorName ( ) , closeException ) ; } } } } | Stream operators with state which want to participate in a snapshot need to override this hook method . |
15,637 | public void putProperties ( Map < String , String > properties ) { for ( Map . Entry < String , String > property : properties . entrySet ( ) ) { put ( property . getKey ( ) , property . getValue ( ) ) ; } } | Adds a set of properties . |
15,638 | public void putProperties ( DescriptorProperties otherProperties ) { for ( Map . Entry < String , String > otherProperty : otherProperties . properties . entrySet ( ) ) { put ( otherProperty . getKey ( ) , otherProperty . getValue ( ) ) ; } } | Adds a set of descriptor properties . |
15,639 | public void putClass ( String key , Class < ? > clazz ) { checkNotNull ( key ) ; checkNotNull ( clazz ) ; final String error = InstantiationUtil . checkForInstantiationError ( clazz ) ; if ( error != null ) { throw new ValidationException ( "Class '" + clazz . getName ( ) + "' is not supported: " + error ) ; } put ( key , clazz . getName ( ) ) ; } | Adds a class under the given key . |
15,640 | public void putString ( String key , String str ) { checkNotNull ( key ) ; checkNotNull ( str ) ; put ( key , str ) ; } | Adds a string under the given key . |
15,641 | public void putBoolean ( String key , boolean b ) { checkNotNull ( key ) ; put ( key , Boolean . toString ( b ) ) ; } | Adds a boolean under the given key . |
15,642 | public void putLong ( String key , long l ) { checkNotNull ( key ) ; put ( key , Long . toString ( l ) ) ; } | Adds a long under the given key . |
15,643 | public void putInt ( String key , int i ) { checkNotNull ( key ) ; put ( key , Integer . toString ( i ) ) ; } | Adds an integer under the given key . |
15,644 | public void putCharacter ( String key , char c ) { checkNotNull ( key ) ; put ( key , Character . toString ( c ) ) ; } | Adds a character under the given key . |
15,645 | public void putTableSchema ( String key , TableSchema schema ) { checkNotNull ( key ) ; checkNotNull ( schema ) ; final String [ ] fieldNames = schema . getFieldNames ( ) ; final TypeInformation < ? > [ ] fieldTypes = schema . getFieldTypes ( ) ; final List < List < String > > values = new ArrayList < > ( ) ; for ( int i = 0 ; i < schema . getFieldCount ( ) ; i ++ ) { values . add ( Arrays . asList ( fieldNames [ i ] , TypeStringUtils . writeTypeInfo ( fieldTypes [ i ] ) ) ) ; } putIndexedFixedProperties ( key , Arrays . asList ( TABLE_SCHEMA_NAME , TABLE_SCHEMA_TYPE ) , values ) ; } | Adds a table schema under the given key . |
15,646 | public void putIndexedVariableProperties ( String key , List < Map < String , String > > subKeyValues ) { checkNotNull ( key ) ; checkNotNull ( subKeyValues ) ; for ( int idx = 0 ; idx < subKeyValues . size ( ) ; idx ++ ) { final Map < String , String > values = subKeyValues . get ( idx ) ; for ( Map . Entry < String , String > value : values . entrySet ( ) ) { put ( key + '.' + idx + '.' + value . getKey ( ) , value . getValue ( ) ) ; } } } | Adds an indexed mapping of properties under a common key . |
15,647 | public IntermediateResultPartition getPartitionById ( IntermediateResultPartitionID resultPartitionId ) { Integer partitionNumber = partitionLookupHelper . get ( checkNotNull ( resultPartitionId , "IntermediateResultPartitionID" ) ) ; if ( partitionNumber != null ) { return partitions [ partitionNumber ] ; } else { throw new IllegalArgumentException ( "Unknown intermediate result partition ID " + resultPartitionId ) ; } } | Returns the partition with the given ID . |
15,648 | public String get ( String key ) { addToDefaults ( key , null ) ; unrequestedParameters . remove ( key ) ; return data . get ( key ) ; } | Returns the String value for the given key . If the key does not exist it will return null . |
15,649 | public String get ( String key , String defaultValue ) { addToDefaults ( key , defaultValue ) ; String value = get ( key ) ; if ( value == null ) { return defaultValue ; } else { return value ; } } | Returns the String value for the given key . If the key does not exist it will return the given default value . |
15,650 | public boolean has ( String value ) { addToDefaults ( value , null ) ; unrequestedParameters . remove ( value ) ; return data . containsKey ( value ) ; } | Check if value is set . |
15,651 | public int getInt ( String key ) { addToDefaults ( key , null ) ; String value = getRequired ( key ) ; return Integer . parseInt ( value ) ; } | Returns the Integer value for the given key . The method fails if the key does not exist or the value is not an Integer . |
15,652 | public long getLong ( String key ) { addToDefaults ( key , null ) ; String value = getRequired ( key ) ; return Long . parseLong ( value ) ; } | Returns the Long value for the given key . The method fails if the key does not exist . |
15,653 | public float getFloat ( String key ) { addToDefaults ( key , null ) ; String value = getRequired ( key ) ; return Float . valueOf ( value ) ; } | Returns the Float value for the given key . The method fails if the key does not exist . |
15,654 | public float getFloat ( String key , float defaultValue ) { addToDefaults ( key , Float . toString ( defaultValue ) ) ; String value = get ( key ) ; if ( value == null ) { return defaultValue ; } else { return Float . valueOf ( value ) ; } } | Returns the Float value for the given key . If the key does not exists it will return the default value given . The method fails if the value is not a Float . |
15,655 | public double getDouble ( String key ) { addToDefaults ( key , null ) ; String value = getRequired ( key ) ; return Double . valueOf ( value ) ; } | Returns the Double value for the given key . The method fails if the key does not exist . |
15,656 | public double getDouble ( String key , double defaultValue ) { addToDefaults ( key , Double . toString ( defaultValue ) ) ; String value = get ( key ) ; if ( value == null ) { return defaultValue ; } else { return Double . valueOf ( value ) ; } } | Returns the Double value for the given key . If the key does not exists it will return the default value given . The method fails if the value is not a Double . |
15,657 | public boolean getBoolean ( String key ) { addToDefaults ( key , null ) ; String value = getRequired ( key ) ; return Boolean . valueOf ( value ) ; } | Returns the Boolean value for the given key . The method fails if the key does not exist . |
15,658 | public boolean getBoolean ( String key , boolean defaultValue ) { addToDefaults ( key , Boolean . toString ( defaultValue ) ) ; String value = get ( key ) ; if ( value == null ) { return defaultValue ; } else { return Boolean . valueOf ( value ) ; } } | Returns the Boolean value for the given key . If the key does not exists it will return the default value given . The method returns whether the string of the value is true ignoring cases . |
15,659 | public short getShort ( String key ) { addToDefaults ( key , null ) ; String value = getRequired ( key ) ; return Short . valueOf ( value ) ; } | Returns the Short value for the given key . The method fails if the key does not exist . |
15,660 | public short getShort ( String key , short defaultValue ) { addToDefaults ( key , Short . toString ( defaultValue ) ) ; String value = get ( key ) ; if ( value == null ) { return defaultValue ; } else { return Short . valueOf ( value ) ; } } | Returns the Short value for the given key . If the key does not exists it will return the default value given . The method fails if the value is not a Short . |
15,661 | public byte getByte ( String key ) { addToDefaults ( key , null ) ; String value = getRequired ( key ) ; return Byte . valueOf ( value ) ; } | Returns the Byte value for the given key . The method fails if the key does not exist . |
15,662 | public byte getByte ( String key , byte defaultValue ) { addToDefaults ( key , Byte . toString ( defaultValue ) ) ; String value = get ( key ) ; if ( value == null ) { return defaultValue ; } else { return Byte . valueOf ( value ) ; } } | Returns the Byte value for the given key . If the key does not exist , it will return the default value given . The method fails if the value is not a Byte .
15,663 | private static String getFqdnHostName ( InetAddress inetAddress ) { String fqdnHostName ; try { fqdnHostName = inetAddress . getCanonicalHostName ( ) ; } catch ( Throwable t ) { LOG . warn ( "Unable to determine the canonical hostname. Input split assignment (such as " + "for HDFS files) may be non-local when the canonical hostname is missing." ) ; LOG . debug ( "getCanonicalHostName() Exception:" , t ) ; fqdnHostName = inetAddress . getHostAddress ( ) ; } return fqdnHostName ; } | Gets the fully qualified hostname of the TaskManager based on the network address . |
15,664 | public static String getHostName ( InetAddress inetAddress ) { String hostName ; String fqdnHostName = getFqdnHostName ( inetAddress ) ; if ( fqdnHostName . equals ( inetAddress . getHostAddress ( ) ) ) { hostName = fqdnHostName ; LOG . warn ( "No hostname could be resolved for the IP address {}, using IP address as host name. " + "Local input split assignment (such as for HDFS files) may be impacted." , inetAddress . getHostAddress ( ) ) ; } else { hostName = NetUtils . getHostnameFromFQDN ( fqdnHostName ) ; } return hostName ; } | Gets the hostname of the TaskManager based on the network address . |
15,665 | public JDBCAppendTableSinkBuilder setParameterTypes ( TypeInformation < ? > ... types ) { int [ ] ty = new int [ types . length ] ; for ( int i = 0 ; i < types . length ; ++ i ) { ty [ i ] = JDBCTypeUtil . typeInformationToSqlType ( types [ i ] ) ; } this . parameterTypes = ty ; return this ; } | Specify the type of the rows that the sink will be accepting . |
15,666 | public JDBCAppendTableSink build ( ) { Preconditions . checkNotNull ( parameterTypes , "Types of the query parameters are not specified." + " Please specify types using the setParameterTypes() method." ) ; JDBCOutputFormat format = JDBCOutputFormat . buildJDBCOutputFormat ( ) . setUsername ( username ) . setPassword ( password ) . setDBUrl ( dbURL ) . setQuery ( query ) . setDrivername ( driverName ) . setBatchInterval ( batchSize ) . setSqlTypes ( parameterTypes ) . finish ( ) ; return new JDBCAppendTableSink ( format ) ; } | Finalizes the configuration and checks validity . |
15,667 | @ SuppressWarnings ( { "unchecked" , "deprecation" } ) private static < T > TypeSerializerSnapshot < T > configureForBackwardsCompatibility ( TypeSerializerSnapshot < ? > snapshot , TypeSerializer < ? > serializer ) { TypeSerializerSnapshot < T > typedSnapshot = ( TypeSerializerSnapshot < T > ) snapshot ; TypeSerializer < T > typedSerializer = ( TypeSerializer < T > ) serializer ; if ( snapshot instanceof TypeSerializerConfigSnapshot ) { ( ( TypeSerializerConfigSnapshot < T > ) typedSnapshot ) . setPriorSerializer ( typedSerializer ) ; } return typedSnapshot ; } | Utility method to bind the serializer and serializer snapshot to a common generic type variable . |
15,668 | public static < T > SerializedCheckpointData [ ] fromDeque ( ArrayDeque < Tuple2 < Long , Set < T > > > checkpoints , TypeSerializer < T > serializer ) throws IOException { return fromDeque ( checkpoints , serializer , new DataOutputSerializer ( 128 ) ) ; } | Converts a list of checkpoints with elements into an array of SerializedCheckpointData . |
15,669 | public static < T > SerializedCheckpointData [ ] fromDeque ( ArrayDeque < Tuple2 < Long , Set < T > > > checkpoints , TypeSerializer < T > serializer , DataOutputSerializer outputBuffer ) throws IOException { SerializedCheckpointData [ ] serializedCheckpoints = new SerializedCheckpointData [ checkpoints . size ( ) ] ; int pos = 0 ; for ( Tuple2 < Long , Set < T > > checkpoint : checkpoints ) { outputBuffer . clear ( ) ; Set < T > checkpointIds = checkpoint . f1 ; for ( T id : checkpointIds ) { serializer . serialize ( id , outputBuffer ) ; } serializedCheckpoints [ pos ++ ] = new SerializedCheckpointData ( checkpoint . f0 , outputBuffer . getCopyOfBuffer ( ) , checkpointIds . size ( ) ) ; } return serializedCheckpoints ; } | Converts a list of checkpoints into an array of SerializedCheckpointData . |
15,670 | public static < T > ArrayDeque < Tuple2 < Long , Set < T > > > toDeque ( SerializedCheckpointData [ ] data , TypeSerializer < T > serializer ) throws IOException { ArrayDeque < Tuple2 < Long , Set < T > > > deque = new ArrayDeque < > ( data . length ) ; DataInputDeserializer deser = null ; for ( SerializedCheckpointData checkpoint : data ) { byte [ ] serializedData = checkpoint . getSerializedData ( ) ; if ( deser == null ) { deser = new DataInputDeserializer ( serializedData , 0 , serializedData . length ) ; } else { deser . setBuffer ( serializedData ) ; } final Set < T > ids = new HashSet < > ( checkpoint . getNumIds ( ) ) ; final int numIds = checkpoint . getNumIds ( ) ; for ( int i = 0 ; i < numIds ; i ++ ) { ids . add ( serializer . deserialize ( deser ) ) ; } deque . addLast ( new Tuple2 < Long , Set < T > > ( checkpoint . checkpointId , ids ) ) ; } return deque ; } | De - serializes an array of SerializedCheckpointData back into an ArrayDeque of element checkpoints . |
15,671 | private static File generateDefaultConfigFile ( ) { final File jaasConfFile ; try { Path jaasConfPath = Files . createTempFile ( "jaas-" , ".conf" ) ; try ( InputStream resourceStream = JaasModule . class . getClassLoader ( ) . getResourceAsStream ( JAAS_CONF_RESOURCE_NAME ) ) { Files . copy ( resourceStream , jaasConfPath , StandardCopyOption . REPLACE_EXISTING ) ; } jaasConfFile = jaasConfPath . toFile ( ) ; jaasConfFile . deleteOnExit ( ) ; } catch ( IOException e ) { throw new RuntimeException ( "unable to generate a JAAS configuration file" , e ) ; } return jaasConfFile ; } | Generate the default JAAS config file . |
15,672 | public static List < Expression > renameColumns ( List < String > inputFields , List < Expression > newAliases ) { LinkedHashMap < String , Expression > finalFields = new LinkedHashMap < > ( ) ; inputFields . forEach ( field -> finalFields . put ( field , new UnresolvedReferenceExpression ( field ) ) ) ; newAliases . forEach ( expr -> { String name = expr . accept ( renameColumnExtractor ) ; finalFields . put ( name , expr ) ; } ) ; return new ArrayList < > ( finalFields . values ( ) ) ; } | Creates a projection list that renames existing columns to new names . |
15,673 | public static List < Expression > dropFields ( List < String > inputFields , List < Expression > dropExpressions ) { Set < String > columnsToDrop = dropExpressions . stream ( ) . map ( expr -> expr . accept ( dropColumnsExtractor ) ) . collect ( Collectors . toSet ( ) ) ; columnsToDrop . forEach ( c -> { if ( ! inputFields . contains ( c ) ) { throw new ValidationException ( format ( "Field %s does not exist in source table" , c ) ) ; } } ) ; return inputFields . stream ( ) . filter ( oldName -> ! columnsToDrop . contains ( oldName ) ) . map ( UnresolvedReferenceExpression :: new ) . collect ( Collectors . toList ( ) ) ; } | Creates a projection list that removes given columns . |
15,674 | public static < T > DataSet < Tuple2 < Integer , Long > > countElementsPerPartition ( DataSet < T > input ) { return input . mapPartition ( new RichMapPartitionFunction < T , Tuple2 < Integer , Long > > ( ) { public void mapPartition ( Iterable < T > values , Collector < Tuple2 < Integer , Long > > out ) throws Exception { long counter = 0 ; for ( T value : values ) { counter ++ ; } out . collect ( new Tuple2 < > ( getRuntimeContext ( ) . getIndexOfThisSubtask ( ) , counter ) ) ; } } ) ; } | Method that goes over all the elements in each partition in order to retrieve the total number of elements . |
15,675 | public static < T > PartitionOperator < T > partitionByRange ( DataSet < T > input , DataDistribution distribution , int ... fields ) { return new PartitionOperator < > ( input , PartitionOperatorBase . PartitionMethod . RANGE , new Keys . ExpressionKeys < > ( fields , input . getType ( ) , false ) , distribution , Utils . getCallLocationName ( ) ) ; } | Range - partitions a DataSet on the specified tuple field positions . |
15,676 | public static < T , K extends Comparable < K > > PartitionOperator < T > partitionByRange ( DataSet < T > input , DataDistribution distribution , KeySelector < T , K > keyExtractor ) { final TypeInformation < K > keyType = TypeExtractor . getKeySelectorTypes ( keyExtractor , input . getType ( ) ) ; return new PartitionOperator < > ( input , PartitionOperatorBase . PartitionMethod . RANGE , new Keys . SelectorFunctionKeys < > ( input . clean ( keyExtractor ) , input . getType ( ) , keyType ) , distribution , Utils . getCallLocationName ( ) ) ; } | Range - partitions a DataSet using the specified key selector function . |
15,677 | public static < R extends Tuple , T extends Tuple > R summarize ( DataSet < T > input ) throws Exception { if ( ! input . getType ( ) . isTupleType ( ) ) { throw new IllegalArgumentException ( "summarize() is only implemented for DataSet's of Tuples" ) ; } final TupleTypeInfoBase < ? > inType = ( TupleTypeInfoBase < ? > ) input . getType ( ) ; DataSet < TupleSummaryAggregator < R > > result = input . mapPartition ( new MapPartitionFunction < T , TupleSummaryAggregator < R > > ( ) { public void mapPartition ( Iterable < T > values , Collector < TupleSummaryAggregator < R > > out ) throws Exception { TupleSummaryAggregator < R > aggregator = SummaryAggregatorFactory . create ( inType ) ; for ( Tuple value : values ) { aggregator . aggregate ( value ) ; } out . collect ( aggregator ) ; } } ) . reduce ( new ReduceFunction < TupleSummaryAggregator < R > > ( ) { public TupleSummaryAggregator < R > reduce ( TupleSummaryAggregator < R > agg1 , TupleSummaryAggregator < R > agg2 ) throws Exception { agg1 . combine ( agg2 ) ; return agg1 ; } } ) ; return result . collect ( ) . get ( 0 ) . result ( ) ; } | Summarize a DataSet of Tuples by collecting single pass statistics for all columns . |
15,678 | public CsvReader ignoreComments ( String commentPrefix ) { if ( commentPrefix == null || commentPrefix . length ( ) == 0 ) { throw new IllegalArgumentException ( "The comment prefix must not be null or an empty string" ) ; } this . commentPrefix = commentPrefix ; return this ; } | Configures the string that starts comments . By default comments will be treated as invalid lines . This function only recognizes comments which start at the beginning of the line! |
15,679 | public < T > DataSource < T > pojoType ( Class < T > pojoType , String ... pojoFields ) { Preconditions . checkNotNull ( pojoType , "The POJO type class must not be null." ) ; Preconditions . checkNotNull ( pojoFields , "POJO fields must be specified (not null) if output type is a POJO." ) ; final TypeInformation < T > ti = TypeExtractor . createTypeInfo ( pojoType ) ; if ( ! ( ti instanceof PojoTypeInfo ) ) { throw new IllegalArgumentException ( "The specified class is not a POJO. The type class must meet the POJO requirements. Found: " + ti ) ; } final PojoTypeInfo < T > pti = ( PojoTypeInfo < T > ) ti ; CsvInputFormat < T > inputFormat = new PojoCsvInputFormat < T > ( path , this . lineDelimiter , this . fieldDelimiter , pti , pojoFields , this . includedMask ) ; configureInputFormat ( inputFormat ) ; return new DataSource < T > ( executionContext , inputFormat , pti , Utils . getCallLocationName ( ) ) ; } | Configures the reader to read the CSV data and parse it to the given type . All fields of the type must be public or have setter methods . The type information for the fields is obtained from the type class .
15,680 | public static TextElement code ( String text ) { TextElement element = text ( text ) ; element . textStyles . add ( TextStyle . CODE ) ; return element ; } | Creates a block of text formatted as code . |
15,681 | public void setAnyPartitioning ( FieldSet partitionedFields ) { if ( partitionedFields == null ) { throw new NullPointerException ( ) ; } this . partitioning = PartitioningProperty . ANY_PARTITIONING ; this . partitioningFields = partitionedFields ; this . ordering = null ; } | Sets these properties to request some partitioning on the given fields . This will allow both hash partitioning and range partitioning to match . |
15,682 | public void reset ( ) { this . partitioning = PartitioningProperty . RANDOM_PARTITIONED ; this . ordering = null ; this . partitioningFields = null ; this . dataDistribution = null ; this . customPartitioner = null ; } | This method resets the properties to a state where no properties are given . |
15,683 | public void parameterizeChannel ( Channel channel , boolean globalDopChange , ExecutionMode exchangeMode , boolean breakPipeline ) { if ( channel . getSource ( ) . getGlobalProperties ( ) . isFullyReplicated ( ) && ! ( this . partitioning == PartitioningProperty . FULL_REPLICATION || this . partitioning == PartitioningProperty . ANY_DISTRIBUTION ) ) { throw new CompilerException ( "Fully replicated input must be preserved " + "and may not be converted into another global property." ) ; } if ( isTrivial ( ) || this . partitioning == PartitioningProperty . ANY_DISTRIBUTION ) { ShipStrategyType shipStrategy = globalDopChange ? ShipStrategyType . PARTITION_RANDOM : ShipStrategyType . FORWARD ; DataExchangeMode em = DataExchangeMode . select ( exchangeMode , shipStrategy , breakPipeline ) ; channel . setShipStrategy ( shipStrategy , em ) ; return ; } final GlobalProperties inGlobals = channel . getSource ( ) . getGlobalProperties ( ) ; if ( ! globalDopChange && isMetBy ( inGlobals ) ) { DataExchangeMode em = DataExchangeMode . select ( exchangeMode , ShipStrategyType . FORWARD , breakPipeline ) ; channel . setShipStrategy ( ShipStrategyType . FORWARD , em ) ; return ; } ShipStrategyType shipType ; FieldList partitionKeys ; boolean [ ] sortDirection ; Partitioner < ? > partitioner ; switch ( this . partitioning ) { case FULL_REPLICATION : shipType = ShipStrategyType . BROADCAST ; partitionKeys = null ; sortDirection = null ; partitioner = null ; break ; case ANY_PARTITIONING : case HASH_PARTITIONED : shipType = ShipStrategyType . PARTITION_HASH ; partitionKeys = Utils . createOrderedFromSet ( this . partitioningFields ) ; sortDirection = null ; partitioner = null ; break ; case RANGE_PARTITIONED : shipType = ShipStrategyType . PARTITION_RANGE ; partitionKeys = this . ordering . getInvolvedIndexes ( ) ; sortDirection = this . ordering . getFieldSortDirections ( ) ; partitioner = null ; if ( this . dataDistribution != null ) { channel . 
setDataDistribution ( this . dataDistribution ) ; } break ; case FORCED_REBALANCED : shipType = ShipStrategyType . PARTITION_FORCED_REBALANCE ; partitionKeys = null ; sortDirection = null ; partitioner = null ; break ; case CUSTOM_PARTITIONING : shipType = ShipStrategyType . PARTITION_CUSTOM ; partitionKeys = Utils . createOrderedFromSet ( this . partitioningFields ) ; sortDirection = null ; partitioner = this . customPartitioner ; break ; default : throw new CompilerException ( "Invalid partitioning to create through a data exchange: " + this . partitioning . name ( ) ) ; } DataExchangeMode exMode = DataExchangeMode . select ( exchangeMode , shipType , breakPipeline ) ; channel . setShipStrategy ( shipType , partitionKeys , sortDirection , partitioner , exMode ) ; } | Parametrizes the ship strategy fields of a channel such that the channel produces the desired global properties . |
15,684 | public void addAppConfigurationEntry ( String name , AppConfigurationEntry ... entry ) { final AppConfigurationEntry [ ] existing = dynamicEntries . get ( name ) ; final AppConfigurationEntry [ ] updated ; if ( existing == null ) { updated = Arrays . copyOf ( entry , entry . length ) ; } else { updated = merge ( existing , entry ) ; } dynamicEntries . put ( name , updated ) ; } | Add entries for the given application name . |
15,685 | @ SuppressWarnings ( "restriction" ) public final char getChar ( int index ) { final long pos = address + index ; if ( index >= 0 && pos <= addressLimit - 2 ) { return UNSAFE . getChar ( heapMemory , pos ) ; } else if ( address > addressLimit ) { throw new IllegalStateException ( "This segment has been freed." ) ; } else { throw new IndexOutOfBoundsException ( ) ; } } | Reads a char value from the given position in the system's native byte order .
15,686 | @ SuppressWarnings ( "restriction" ) public final void putChar ( int index , char value ) { final long pos = address + index ; if ( index >= 0 && pos <= addressLimit - 2 ) { UNSAFE . putChar ( heapMemory , pos , value ) ; } else if ( address > addressLimit ) { throw new IllegalStateException ( "segment has been freed" ) ; } else { throw new IndexOutOfBoundsException ( ) ; } } | Writes a char value to the given position in the system's native byte order .
15,687 | public final void putShort ( int index , short value ) { final long pos = address + index ; if ( index >= 0 && pos <= addressLimit - 2 ) { UNSAFE . putShort ( heapMemory , pos , value ) ; } else if ( address > addressLimit ) { throw new IllegalStateException ( "segment has been freed" ) ; } else { throw new IndexOutOfBoundsException ( ) ; } } | Writes the given short value into this buffer at the given position using the native byte order of the system . |
15,688 | public final int compare ( MemorySegment seg2 , int offset1 , int offset2 , int len ) { while ( len >= 8 ) { long l1 = this . getLongBigEndian ( offset1 ) ; long l2 = seg2 . getLongBigEndian ( offset2 ) ; if ( l1 != l2 ) { return ( l1 < l2 ) ^ ( l1 < 0 ) ^ ( l2 < 0 ) ? - 1 : 1 ; } offset1 += 8 ; offset2 += 8 ; len -= 8 ; } while ( len > 0 ) { int b1 = this . get ( offset1 ) & 0xff ; int b2 = seg2 . get ( offset2 ) & 0xff ; int cmp = b1 - b2 ; if ( cmp != 0 ) { return cmp ; } offset1 ++ ; offset2 ++ ; len -- ; } return 0 ; } | Compares two memory segment regions . |
15,689 | public final void swapBytes ( byte [ ] tempBuffer , MemorySegment seg2 , int offset1 , int offset2 , int len ) { if ( ( offset1 | offset2 | len | ( tempBuffer . length - len ) ) >= 0 ) { final long thisPos = this . address + offset1 ; final long otherPos = seg2 . address + offset2 ; if ( thisPos <= this . addressLimit - len && otherPos <= seg2 . addressLimit - len ) { UNSAFE . copyMemory ( this . heapMemory , thisPos , tempBuffer , BYTE_ARRAY_BASE_OFFSET , len ) ; UNSAFE . copyMemory ( seg2 . heapMemory , otherPos , this . heapMemory , thisPos , len ) ; UNSAFE . copyMemory ( tempBuffer , BYTE_ARRAY_BASE_OFFSET , seg2 . heapMemory , otherPos , len ) ; return ; } else if ( this . address > this . addressLimit ) { throw new IllegalStateException ( "this memory segment has been freed." ) ; } else if ( seg2 . address > seg2 . addressLimit ) { throw new IllegalStateException ( "other memory segment has been freed." ) ; } } throw new IndexOutOfBoundsException ( String . format ( "offset1=%d, offset2=%d, len=%d, bufferSize=%d, address1=%d, address2=%d" , offset1 , offset2 , len , tempBuffer . length , this . address , seg2 . address ) ) ; } | Swaps bytes between two memory segments using the given auxiliary buffer . |
15,690 | public final boolean equalTo ( MemorySegment seg2 , int offset1 , int offset2 , int length ) { int i = 0 ; while ( i <= length - 8 ) { if ( getLong ( offset1 + i ) != seg2 . getLong ( offset2 + i ) ) { return false ; } i += 8 ; } while ( i < length ) { if ( get ( offset1 + i ) != seg2 . get ( offset2 + i ) ) { return false ; } i += 1 ; } return true ; } | Compares two memory segment regions for equality .
15,691 | public List < FlatFieldDescriptor > getFlatFields ( String fieldExpression ) { List < FlatFieldDescriptor > result = new ArrayList < FlatFieldDescriptor > ( ) ; this . getFlatFields ( fieldExpression , 0 , result ) ; return result ; } | Returns the flat field descriptors for the given field expression . |
15,692 | public TypeComparator < T > createComparator ( int [ ] logicalKeyFields , boolean [ ] orders , int logicalFieldOffset , ExecutionConfig config ) { TypeComparatorBuilder < T > builder = createTypeComparatorBuilder ( ) ; builder . initializeTypeComparatorBuilder ( logicalKeyFields . length ) ; for ( int logicalKeyFieldIndex = 0 ; logicalKeyFieldIndex < logicalKeyFields . length ; logicalKeyFieldIndex ++ ) { int logicalKeyField = logicalKeyFields [ logicalKeyFieldIndex ] ; int logicalField = logicalFieldOffset ; boolean comparatorAdded = false ; for ( int localFieldId = 0 ; localFieldId < this . getArity ( ) && logicalField <= logicalKeyField && ! comparatorAdded ; localFieldId ++ ) { TypeInformation < ? > localFieldType = this . getTypeAt ( localFieldId ) ; if ( localFieldType instanceof AtomicType && logicalField == logicalKeyField ) { builder . addComparatorField ( localFieldId , ( ( AtomicType < ? > ) localFieldType ) . createComparator ( orders [ logicalKeyFieldIndex ] , config ) ) ; comparatorAdded = true ; } else if ( localFieldType instanceof CompositeType && logicalField <= logicalKeyField && logicalKeyField <= logicalField + ( localFieldType . getTotalFields ( ) - 1 ) ) { builder . addComparatorField ( localFieldId , ( ( CompositeType < ? > ) localFieldType ) . createComparator ( new int [ ] { logicalKeyField } , new boolean [ ] { orders [ logicalKeyFieldIndex ] } , logicalField , config ) ) ; comparatorAdded = true ; } if ( localFieldType instanceof CompositeType ) { logicalField += localFieldType . getTotalFields ( ) - 1 ; } logicalField ++ ; } if ( ! comparatorAdded ) { throw new IllegalArgumentException ( "Could not add a comparator for the logical" + "key field index " + logicalKeyFieldIndex + "." ) ; } } return builder . createTypeComparator ( config ) ; } | Generic implementation of the comparator creation . Composite types are supplying the infrastructure to create the actual comparators |
15,693 | private Path getYarnFilesDir ( final ApplicationId appId ) throws IOException { final FileSystem fileSystem = FileSystem . get ( yarnConfiguration ) ; final Path homeDir = fileSystem . getHomeDirectory ( ) ; return new Path ( homeDir , ".flink/" + appId + '/' ) ; } | Returns the Path where the YARN application files should be uploaded to . |
15,694 | private void failSessionDuringDeployment ( YarnClient yarnClient , YarnClientApplication yarnApplication ) { LOG . info ( "Killing YARN application" ) ; try { yarnClient . killApplication ( yarnApplication . getNewApplicationResponse ( ) . getApplicationId ( ) ) ; } catch ( Exception e ) { LOG . debug ( "Error while killing YARN application" , e ) ; } yarnClient . stop ( ) ; } | Kills YARN application and stops YARN client . |
15,695 | private DataStreamSink < T > setResources ( ResourceSpec minResources , ResourceSpec preferredResources ) { Preconditions . checkNotNull ( minResources , "The min resources must be not null." ) ; Preconditions . checkNotNull ( preferredResources , "The preferred resources must be not null." ) ; Preconditions . checkArgument ( minResources . isValid ( ) && preferredResources . isValid ( ) && minResources . lessThanOrEqual ( preferredResources ) , "The values in resources must be not less than 0 and the preferred resources must be greater than the min resources." ) ; transformation . setResources ( minResources , preferredResources ) ; return this ; } | Sets the minimum and preferred resources for this sink and the lower and upper resource limits will be considered in resource resize feature for future plan . |
15,696 | public void shutdown ( ) { synchronized ( lock ) { ScheduledExecutorService es = this . executorService ; if ( es != null ) { es . shutdown ( ) ; try { es . awaitTermination ( cleanupInterval , TimeUnit . MILLISECONDS ) ; } catch ( InterruptedException e ) { } } entries . clear ( ) ; jobRefHolders . clear ( ) ; for ( File dir : storageDirectories ) { try { FileUtils . deleteDirectory ( dir ) ; LOG . info ( "removed file cache directory {}" , dir . getAbsolutePath ( ) ) ; } catch ( IOException e ) { LOG . error ( "File cache could not properly clean up storage directory: {}" , dir . getAbsolutePath ( ) , e ) ; } } ShutdownHookUtil . removeShutdownHook ( shutdownHook , getClass ( ) . getSimpleName ( ) , LOG ) ; } } | Shuts down the file cache by stopping its executor service , clearing all entries , and deleting the storage directories .
15,697 | public Future < Path > createTmpFile ( String name , DistributedCacheEntry entry , JobID jobID , ExecutionAttemptID executionId ) throws Exception { synchronized ( lock ) { Map < String , Future < Path > > jobEntries = entries . computeIfAbsent ( jobID , k -> new HashMap < > ( ) ) ; final Set < ExecutionAttemptID > refHolders = jobRefHolders . computeIfAbsent ( jobID , id -> new HashSet < > ( ) ) ; refHolders . add ( executionId ) ; Future < Path > fileEntry = jobEntries . get ( name ) ; if ( fileEntry != null ) { return fileEntry ; } else { File tempDirToUse = new File ( storageDirectories [ nextDirectory ++ ] , jobID . toString ( ) ) ; if ( nextDirectory >= storageDirectories . length ) { nextDirectory = 0 ; } Callable < Path > cp ; if ( entry . blobKey != null ) { cp = new CopyFromBlobProcess ( entry , jobID , blobService , new Path ( tempDirToUse . getAbsolutePath ( ) ) ) ; } else { cp = new CopyFromDFSProcess ( entry , new Path ( tempDirToUse . getAbsolutePath ( ) ) ) ; } FutureTask < Path > copyTask = new FutureTask < > ( cp ) ; executorService . submit ( copyTask ) ; jobEntries . put ( name , copyTask ) ; return copyTask ; } } } | If the file does not exist locally , retrieves it from the blob service or the distributed file system .
15,698 | public static NetworkEnvironmentConfiguration fromConfiguration ( Configuration configuration , long maxJvmHeapMemory , boolean localTaskManagerCommunication , InetAddress taskManagerAddress ) { final int dataport = getDataport ( configuration ) ; final int pageSize = getPageSize ( configuration ) ; final int numberOfNetworkBuffers = calculateNumberOfNetworkBuffers ( configuration , maxJvmHeapMemory ) ; final NettyConfig nettyConfig = createNettyConfig ( configuration , localTaskManagerCommunication , taskManagerAddress , dataport ) ; int initialRequestBackoff = configuration . getInteger ( TaskManagerOptions . NETWORK_REQUEST_BACKOFF_INITIAL ) ; int maxRequestBackoff = configuration . getInteger ( TaskManagerOptions . NETWORK_REQUEST_BACKOFF_MAX ) ; int buffersPerChannel = configuration . getInteger ( TaskManagerOptions . NETWORK_BUFFERS_PER_CHANNEL ) ; int extraBuffersPerGate = configuration . getInteger ( TaskManagerOptions . NETWORK_EXTRA_BUFFERS_PER_GATE ) ; boolean isCreditBased = nettyConfig != null && configuration . getBoolean ( TaskManagerOptions . NETWORK_CREDIT_MODEL ) ; return new NetworkEnvironmentConfiguration ( numberOfNetworkBuffers , pageSize , initialRequestBackoff , maxRequestBackoff , buffersPerChannel , extraBuffersPerGate , isCreditBased , nettyConfig ) ; } | Utility method to extract network related parameters from the configuration and to sanity check them . |
15,699 | public static long calculateNewNetworkBufferMemory ( Configuration config , long maxJvmHeapMemory ) { final long jvmHeapNoNet ; final MemoryType memoryType = ConfigurationParserUtils . getMemoryType ( config ) ; if ( memoryType == MemoryType . HEAP ) { jvmHeapNoNet = maxJvmHeapMemory ; } else if ( memoryType == MemoryType . OFF_HEAP ) { long configuredMemory = ConfigurationParserUtils . getManagedMemorySize ( config ) << 20 ; if ( configuredMemory > 0 ) { jvmHeapNoNet = maxJvmHeapMemory + configuredMemory ; } else { jvmHeapNoNet = ( long ) ( maxJvmHeapMemory / ( 1.0 - ConfigurationParserUtils . getManagedMemoryFraction ( config ) ) ) ; } } else { throw new RuntimeException ( "No supported memory type detected." ) ; } float networkBufFraction = config . getFloat ( TaskManagerOptions . NETWORK_BUFFERS_MEMORY_FRACTION ) ; long networkBufSize = ( long ) ( jvmHeapNoNet / ( 1.0 - networkBufFraction ) * networkBufFraction ) ; return calculateNewNetworkBufferMemory ( config , networkBufSize , maxJvmHeapMemory ) ; } | Calculates the amount of memory used for network buffers inside the current JVM instance based on the available heap or the max heap size and the according configuration parameters . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.