idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
36,500
/**
 * Submits a failure SLA event for the given dataset. Best-effort: any failure to
 * submit (including Errors) is logged at WARN and swallowed so event submission
 * can never mask the original compaction failure.
 */
private void submitFailureSlaEvent ( Dataset dataset , String eventName ) { try { CompactionSlaEventHelper . getEventSubmitterBuilder ( dataset , Optional . < Job > absent ( ) , this . fs ) . eventSubmitter ( this . eventSubmitter ) . eventName ( eventName ) . build ( ) . submit ( ) ; } catch ( Throwable t ) { LOG . warn ( "Failed to submit failure sla event:" + t , t ) ; } }
Submit a failure sla event
36,501
/**
 * Closes all resources. Stops the accumulator, then waits up to 60 seconds for the
 * executor service to finish sending outstanding requests, force-closing if it does
 * not terminate in time. The processor is always closed, even on failure.
 *
 * @throws IOException if closing the accumulator or processor fails
 */
public void close() throws IOException {
  try {
    this.running = false;
    this.accumulator.close();
    if (!this.service.awaitTermination(60, TimeUnit.SECONDS)) {
      forceClose();
    } else {
      LOG.info("Closed properly: elapsed " + (System.currentTimeMillis() - startTime) + " milliseconds");
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status so callers up the stack can observe it.
    Thread.currentThread().interrupt();
    LOG.error("Interruption happened during close " + e.toString());
  } finally {
    this.processor.close();
  }
}
Close all the resources. This call blocks until all requests have been sent and acknowledged.
36,502
/**
 * Attempts to get the specified number of tokens within the given timeout.
 *
 * @return false if the tokens cannot be reserved within the timeout; true when they
 *         are immediately available, or after sleeping for the wait returned by
 *         tryReserveTokens
 * @throws InterruptedException if interrupted while sleeping for the reservation
 */
public boolean getTokens ( long tokens , long timeout , TimeUnit timeoutUnit ) throws InterruptedException { long timeoutMillis = timeoutUnit . toMillis ( timeout ) ; long wait = tryReserveTokens ( tokens , timeoutMillis ) ; if ( wait < 0 ) { return false ; } if ( wait == 0 ) { return true ; } Thread . sleep ( wait ) ; return true ; }
Attempt to get the specified number of tokens within the specified timeout. If the tokens cannot be retrieved within the timeout, the call returns false immediately; otherwise the call blocks until the tokens are available.
36,503
/**
 * Adds lineage source info to a single work unit. No-op (logged at INFO) when lineage
 * is not enabled. The source descriptor is built from the configured platform
 * (default HDFS) and the scheme/authority-stripped source data directory.
 */
protected void addLineageSourceInfo ( WorkUnit workUnit , State state ) { if ( ! lineageInfo . isPresent ( ) ) { log . info ( "Lineage is not enabled" ) ; return ; } String platform = state . getProp ( ConfigurationKeys . SOURCE_FILEBASED_PLATFORM , DatasetConstants . PLATFORM_HDFS ) ; Path dataDir = new Path ( state . getProp ( ConfigurationKeys . SOURCE_FILEBASED_DATA_DIRECTORY ) ) ; String dataset = Path . getPathWithoutSchemeAndAuthority ( dataDir ) . toString ( ) ; DatasetDescriptor source = new DatasetDescriptor ( platform , dataset ) ; lineageInfo . get ( ) . setSource ( source , workUnit ) ; }
Add lineage source info to a single work unit
36,504
/**
 * Connects to the source and snapshots the data directory: lists files matching the
 * ls pattern, resolves relative entries against the configured data directory, and
 * rewrites each entry as "&lt;absolute-path&gt;&lt;splitPattern&gt;&lt;modification-time&gt;".
 * Best-effort: on a listing/URI error the exception is logged and the list built so
 * far is returned, which may be empty or only partially rewritten.
 */
public List < String > getcurrentFsSnapshot ( State state ) { List < String > results = new ArrayList < > ( ) ; String path = getLsPattern ( state ) ; try { log . info ( "Running ls command with input " + path ) ; results = this . fsHelper . ls ( path ) ; for ( int i = 0 ; i < results . size ( ) ; i ++ ) { URI uri = new URI ( results . get ( i ) ) ; String filePath = uri . toString ( ) ; if ( ! uri . isAbsolute ( ) ) { File file = new File ( state . getProp ( ConfigurationKeys . SOURCE_FILEBASED_DATA_DIRECTORY ) , uri . toString ( ) ) ; filePath = file . getAbsolutePath ( ) ; } results . set ( i , filePath + this . splitPattern + this . fsHelper . getFileMTime ( filePath ) ) ; } } catch ( FileBasedHelperException | URISyntaxException e ) { log . error ( "Not able to fetch the filename/file modified time to " + e . getMessage ( ) + " will not pull any files" , e ) ; } return results ; }
This method connects to the source and takes a snapshot of the folder where the data is present; it then returns a list of the files in String format.
36,505
/**
 * Runs task-level data quality checks and asks the task publisher whether results
 * may be published.
 *
 * For multi-branch tasks, copies expected/extracted row counts into the fork task
 * state; records writer rows written (0 when no writer) into both fork and task
 * state; stores the extract schema if present; then executes the task-level policies.
 *
 * @param schema optional extract schema recorded for the policy checker
 * @return true only when the publisher reports SUCCESS; false on cleanup failure,
 *         failed policy tests, unfinished components, or if the check itself throws
 *         (logged, not propagated)
 * @throws Exception from obtaining the policy checker or publisher
 */
private boolean checkDataQuality ( Optional < Object > schema ) throws Exception { if ( this . branches > 1 ) { this . forkTaskState . setProp ( ConfigurationKeys . EXTRACTOR_ROWS_EXPECTED , this . taskState . getProp ( ConfigurationKeys . EXTRACTOR_ROWS_EXPECTED ) ) ; this . forkTaskState . setProp ( ConfigurationKeys . EXTRACTOR_ROWS_EXTRACTED , this . taskState . getProp ( ConfigurationKeys . EXTRACTOR_ROWS_EXTRACTED ) ) ; } String writerRecordsWrittenKey = ForkOperatorUtils . getPropertyNameForBranch ( ConfigurationKeys . WRITER_RECORDS_WRITTEN , this . branches , this . index ) ; if ( this . writer . isPresent ( ) ) { this . forkTaskState . setProp ( ConfigurationKeys . WRITER_ROWS_WRITTEN , this . writer . get ( ) . recordsWritten ( ) ) ; this . taskState . setProp ( writerRecordsWrittenKey , this . writer . get ( ) . recordsWritten ( ) ) ; } else { this . forkTaskState . setProp ( ConfigurationKeys . WRITER_ROWS_WRITTEN , 0L ) ; this . taskState . setProp ( writerRecordsWrittenKey , 0L ) ; } if ( schema . isPresent ( ) ) { this . forkTaskState . setProp ( ConfigurationKeys . EXTRACT_SCHEMA , schema . get ( ) . toString ( ) ) ; } try { TaskLevelPolicyCheckResults taskResults = this . taskContext . getTaskLevelPolicyChecker ( this . forkTaskState , this . branches > 1 ? this . index : - 1 ) . executePolicies ( ) ; TaskPublisher publisher = this . taskContext . getTaskPublisher ( this . forkTaskState , taskResults ) ; switch ( publisher . canPublish ( ) ) { case SUCCESS : return true ; case CLEANUP_FAIL : this . logger . error ( "Cleanup failed for task " + this . taskId ) ; break ; case POLICY_TESTS_FAIL : this . logger . error ( "Not all quality checking passed for task " + this . taskId ) ; break ; case COMPONENTS_NOT_FINISHED : this . logger . error ( "Not all components completed for task " + this . taskId ) ; break ; default : break ; } return false ; } catch ( Throwable t ) { this . logger . 
error ( "Failed to check task-level data quality" , t ) ; return false ; } }
Check data quality .
36,506
/**
 * Commits task data: commits the writer (if present) and, when Gobblin metrics are
 * enabled for this task's work unit, updates record- and byte-level metrics.
 * Metric-update failures are non-fatal and logged with their cause.
 *
 * @throws IOException if the writer commit fails
 */
private void commitData() throws IOException {
  if (this.writer.isPresent()) {
    this.writer.get().commit();
  }
  try {
    if (GobblinMetrics.isEnabled(this.taskState.getWorkunit())) {
      updateRecordMetrics();
      updateByteMetrics();
    }
  } catch (IOException ioe) {
    // Log with the cause so the stack trace is not lost; the commit itself succeeded.
    this.logger.error("Failed to update record/byte-level metrics of task " + this.taskId, ioe);
  }
}
Commit task data .
36,507
/**
 * Computes the traversal for {@code node} when it is not already cached: visits each
 * neighbor, tracking the current path in {@code nodePath} (append before, pop after),
 * and accumulates imports in first-seen order without duplicates.
 *
 * @throws RuntimeException wrapping any ExecutionException from cached sub-traversals
 */
private LinkedList < T > computeRecursiveTraversal ( T node , NodePath < T > nodePath ) { try { LinkedList < T > imports = new LinkedList < > ( ) ; Set < T > alreadyIncludedImports = new HashSet < > ( ) ; for ( T neighbor : this . traversalFunction . apply ( node ) ) { nodePath . appendNode ( neighbor ) ; addSubtraversal ( neighbor , imports , alreadyIncludedImports , nodePath ) ; nodePath . popTail ( ) ; } return imports ; } catch ( ExecutionException ee ) { throw new RuntimeException ( ee ) ; } }
Actually compute the traversal if it is not in the cache .
36,508
/**
 * Adds a sub-traversal for a neighboring node: if the node itself is newly included,
 * recursively traverses it and folds its inherited nodes into {@code imports},
 * still de-duplicating against {@code alreadyIncludedImports}.
 *
 * @throws ExecutionException propagated from the cached recursive traversal
 */
private void addSubtraversal ( T node , LinkedList < T > imports , Set < T > alreadyIncludedImports , NodePath < T > nodePath ) throws ExecutionException { if ( addNodeIfNotAlreadyIncluded ( node , imports , alreadyIncludedImports ) ) { for ( T inheritedFromParent : doTraverseGraphRecursively ( node , nodePath ) ) { addNodeIfNotAlreadyIncluded ( inheritedFromParent , imports , alreadyIncludedImports ) ; } } }
Add a sub - traversal for a neighboring node .
36,509
/**
 * Appends {@code thisImport} to the traversal unless it has already been included.
 *
 * @return true if the node was newly added, false if it was already present
 */
private boolean addNodeIfNotAlreadyIncluded(T thisImport, LinkedList<T> imports, Set<T> alreadyIncludedImports) {
  // Set.add reports whether the element was absent, so one call both tests and records membership.
  boolean newlyAdded = alreadyIncludedImports.add(thisImport);
  if (newlyAdded) {
    imports.add(thisImport);
  }
  return newlyAdded;
}
Only add node to traversal if it is not already included in it .
36,510
/**
 * Unwraps the nested ExecutionException/UncheckedExecutionException layers produced
 * by the recursive, cache-backed traversal, then rethrows the root cause unchecked.
 * NOTE(review): Guava's Throwables.propagate is deprecated in recent Guava versions —
 * confirm the pinned Guava version before migrating to throwIfUnchecked.
 */
private RuntimeException unpackExecutionException ( Throwable exc ) { while ( exc instanceof ExecutionException || exc instanceof UncheckedExecutionException ) { exc = exc . getCause ( ) ; } return Throwables . propagate ( exc ) ; }
Due to the recursive nature of the algorithm, we may end up with multiple layers of exceptions. Unpack them.
36,511
/**
 * Returns the URI (scheme, user info, host, and actual bound port — no path, query,
 * or fragment) at which this server is listening.
 *
 * @throws RuntimeException if the rebuilt URI is malformed, which indicates a coding error
 */
public URI getListeningURI() {
  String scheme = this.serverUri.getScheme();
  String userInfo = this.serverUri.getUserInfo();
  String host = this.serverUri.getHost();
  try {
    return new URI(scheme, userInfo, host, this.port, null, null, null);
  } catch (URISyntaxException use) {
    throw new RuntimeException("Invalid URI. This is an error in code.", use);
  }
}
Get the scheme and authority at which this server is listening .
36,512
/**
 * Publishes staged JDBC data in a single transaction:
 * 1. Truncates each destination table (once per table) when replacement is requested.
 * 2. Copies every staging table for each branch into its destination table.
 * 3. Marks the corresponding work unit states COMMITTED.
 * On any failure the transaction is rolled back (best-effort, rollback errors are
 * logged) and a RuntimeException wrapping the cause is thrown. The connection is
 * always closed.
 *
 * @throws IOException declared by the interface; failures surface as RuntimeException
 */
public void publishData ( Collection < ? extends WorkUnitState > states ) throws IOException { LOG . info ( "Start publishing data" ) ; int branches = this . state . getPropAsInt ( ConfigurationKeys . FORK_BRANCHES_KEY , 1 ) ; Set < String > emptiedDestTables = Sets . newHashSet ( ) ; final Connection conn = createConnection ( ) ; final JdbcWriterCommands commands = this . jdbcWriterCommandsFactory . newInstance ( this . state , conn ) ; try { conn . setAutoCommit ( false ) ; for ( int i = 0 ; i < branches ; i ++ ) { final String destinationTable = this . state . getProp ( ForkOperatorUtils . getPropertyNameForBranch ( JDBC_PUBLISHER_FINAL_TABLE_NAME , branches , i ) ) ; final String databaseName = this . state . getProp ( ForkOperatorUtils . getPropertyNameForBranch ( JDBC_PUBLISHER_DATABASE_NAME , branches , i ) ) ; Preconditions . checkNotNull ( destinationTable ) ; if ( this . state . getPropAsBoolean ( ForkOperatorUtils . getPropertyNameForBranch ( JDBC_PUBLISHER_REPLACE_FINAL_TABLE , branches , i ) , false ) && ! emptiedDestTables . contains ( destinationTable ) ) { LOG . info ( "Deleting table " + destinationTable ) ; commands . deleteAll ( databaseName , destinationTable ) ; emptiedDestTables . add ( destinationTable ) ; } Map < String , List < WorkUnitState > > stagingTables = getStagingTables ( states , branches , i ) ; for ( Map . Entry < String , List < WorkUnitState > > entry : stagingTables . entrySet ( ) ) { String stagingTable = entry . getKey ( ) ; LOG . info ( "Copying data from staging table " + stagingTable + " into destination table " + destinationTable ) ; commands . copyTable ( databaseName , stagingTable , destinationTable ) ; for ( WorkUnitState workUnitState : entry . getValue ( ) ) { workUnitState . setWorkingState ( WorkUnitState . WorkingState . COMMITTED ) ; } } } LOG . info ( "Commit publish data" ) ; conn . commit ( ) ; } catch ( Exception e ) { try { LOG . error ( "Failed publishing. Rolling back." ) ; conn . 
rollback ( ) ; } catch ( SQLException se ) { LOG . error ( "Failed rolling back." , se ) ; } throw new RuntimeException ( "Failed publishing" , e ) ; } finally { try { conn . close ( ) ; } catch ( SQLException e ) { throw new RuntimeException ( e ) ; } } }
1 . Truncate destination table if requested 2 . Move data from staging to destination 3 . Update Workunit state
36,513
/**
 * Gives each job its own task-staging and task-output directory by appending the job
 * id to the configured writer staging/output dirs. Skipped when the staging dir
 * already contains the job id (case-insensitive), making the call idempotent. This
 * prevents uncleaned data from a previous execution from corrupting the final
 * published data of this execution.
 */
public synchronized static void setJobSpecificOutputPaths ( State state ) { if ( ! StringUtils . containsIgnoreCase ( state . getProp ( ConfigurationKeys . WRITER_STAGING_DIR ) , state . getProp ( ConfigurationKeys . JOB_ID_KEY ) ) ) { state . setProp ( ConfigurationKeys . WRITER_STAGING_DIR , new Path ( state . getProp ( ConfigurationKeys . WRITER_STAGING_DIR ) , state . getProp ( ConfigurationKeys . JOB_ID_KEY ) ) ) ; state . setProp ( ConfigurationKeys . WRITER_OUTPUT_DIR , new Path ( state . getProp ( ConfigurationKeys . WRITER_OUTPUT_DIR ) , state . getProp ( ConfigurationKeys . JOB_ID_KEY ) ) ) ; } }
Each job gets its own task - staging and task - output directory . Update the staging and output directories to contain job_id . This is to make sure uncleaned data from previous execution does not corrupt final published data produced by this execution .
36,514
/**
 * Called when a job configuration file changes. A file with the common-properties
 * extension triggers a reload of common config and a reschedule of affected jobs; a
 * file with a recognized job-config extension reloads and reschedules that job; all
 * other extensions are ignored.
 */
public void onFileChange ( Path path ) { String fileExtension = path . getName ( ) . substring ( path . getName ( ) . lastIndexOf ( '.' ) + 1 ) ; if ( fileExtension . equalsIgnoreCase ( SchedulerUtils . JOB_PROPS_FILE_EXTENSION ) ) { LOG . info ( "Detected change to common properties file " + path . toString ( ) ) ; loadNewCommonConfigAndHandleNewJob ( path , JobScheduler . Action . RESCHEDULE ) ; return ; } if ( ! jobScheduler . jobConfigFileExtensions . contains ( fileExtension ) ) { return ; } LOG . info ( "Detected change to job configuration file " + path . toString ( ) ) ; loadNewJobConfigAndHandleNewJob ( path , JobScheduler . Action . RESCHEDULE ) ; }
Called when a job configuration file is changed .
36,515
/**
 * Lists the children of {@code rootPath} and reports whether a file whose name
 * contains {@code noExtFileName} exists there.
 * NOTE(review): the return value is inverted relative to the method name — it returns
 * false when such a file IS found and true when none is found. Callers rely on this.
 *
 * @throws IOException if listing the directory fails
 */
private boolean checkCommonPropExistance ( Path rootPath , String noExtFileName ) throws IOException { Configuration conf = new Configuration ( ) ; FileStatus [ ] children = rootPath . getFileSystem ( conf ) . listStatus ( rootPath ) ; for ( FileStatus aChild : children ) { if ( aChild . getPath ( ) . getName ( ) . contains ( noExtFileName ) ) { return false ; } } return true ; }
Given the target rootPath, check whether a common properties file exists there. Returns false if one exists.
36,516
/**
 * Gets a schema from the schema registry by key, served from the local cache when
 * possible.
 *
 * @throws SchemaRegistryException if the schema cannot be loaded for the key
 */
public Schema getSchemaByKey(String key) throws SchemaRegistryException {
  try {
    return cachedSchemasByKeys.get(key);
  } catch (ExecutionException e) {
    String message = String.format("Schema with key %s cannot be retrieved", key);
    throw new SchemaRegistryException(message, e);
  }
}
Get schema from schema registry by key
36,517
/**
 * Fetches a schema from the remote registry by key over HTTP, using a client
 * borrowed from (and always returned to) the pool.
 *
 * @throws SchemaRegistryException on I/O failure, a non-200 HTTP status, or an
 *         unparseable schema body
 */
protected Schema fetchSchemaByKey ( String key ) throws SchemaRegistryException { String schemaUrl = KafkaAvroSchemaRegistry . this . url + GET_RESOURCE_BY_ID + key ; GetMethod get = new GetMethod ( schemaUrl ) ; int statusCode ; String schemaString ; HttpClient httpClient = this . borrowClient ( ) ; try { statusCode = httpClient . executeMethod ( get ) ; schemaString = get . getResponseBodyAsString ( ) ; } catch ( IOException e ) { throw new SchemaRegistryException ( e ) ; } finally { get . releaseConnection ( ) ; this . httpClientPool . returnObject ( httpClient ) ; } if ( statusCode != HttpStatus . SC_OK ) { throw new SchemaRegistryException ( String . format ( "Schema with key %s cannot be retrieved, statusCode = %d" , key , statusCode ) ) ; } Schema schema ; try { schema = new Schema . Parser ( ) . parse ( schemaString ) ; } catch ( Throwable t ) { throw new SchemaRegistryException ( String . format ( "Schema with ID = %s cannot be parsed" , key ) , t ) ; } return schema ; }
Fetch schema by key .
36,518
/**
 * Builds the dataset URI, relative to the store URI, for a Hive db/table pair:
 * the configured prefix followed by "&lt;db&gt;/&lt;table&gt;".
 */
public static String getDatasetUri(Table table) {
  String relativePath = String.join(Path.SEPARATOR, table.getDbName(), table.getTableName());
  return HIVE_DATASETS_CONFIG_PREFIX + relativePath;
}
Get the dataset uri for a hive db and table . The uri is relative to the store uri .
36,519
/**
 * Appends a record to the internal deque of bytes-bounded batches, under dqLock.
 *
 * Tries appending to the last batch first; when that fails (full, or the record was
 * too large for it) a fresh batch is created. A record too large even for a fresh
 * batch either fails fast with a RuntimeException or — under the DROP policy — is
 * dropped with an immediately-completed future and a success callback. When the
 * deque is at capacity the call blocks on the notFull condition until space frees
 * up; the new batch is then enqueued, tracked as incomplete, and notEmpty signalled.
 *
 * @return a future tracking acknowledgement of the appended (or dropped) record
 * @throws InterruptedException if interrupted while awaiting deque capacity
 */
public final Future < RecordMetadata > enqueue ( D record , WriteCallback callback ) throws InterruptedException { final ReentrantLock lock = this . dqLock ; lock . lock ( ) ; try { BytesBoundedBatch last = dq . peekLast ( ) ; if ( last != null ) { Future < RecordMetadata > future = null ; try { future = last . tryAppend ( record , callback , this . largeMessagePolicy ) ; } catch ( RecordTooLargeException e ) { } if ( future != null ) { return future ; } } BytesBoundedBatch batch = new BytesBoundedBatch ( this . memSizeLimit , this . expireInMilliSecond ) ; LOG . debug ( "Batch " + batch . getId ( ) + " is generated" ) ; Future < RecordMetadata > future = null ; try { future = batch . tryAppend ( record , callback , this . largeMessagePolicy ) ; } catch ( RecordTooLargeException e ) { throw new RuntimeException ( "Failed due to a message that was too large" , e ) ; } if ( future == null ) { assert largeMessagePolicy . equals ( LargeMessagePolicy . DROP ) ; LOG . error ( "Batch " + batch . getId ( ) + " is silently marked as complete, dropping a huge record: " + record ) ; future = Futures . immediateFuture ( new RecordMetadata ( 0 ) ) ; callback . onSuccess ( WriteResponse . EMPTY ) ; return future ; } while ( dq . size ( ) >= this . capacity ) { LOG . debug ( "Accumulator size {} is greater than capacity {}, waiting" , dq . size ( ) , this . capacity ) ; this . notFull . await ( ) ; } dq . addLast ( batch ) ; incomplete . add ( batch ) ; this . notEmpty . signal ( ) ; return future ; } finally { lock . unlock ( ) ; } }
Add a record to the internal deque data structure.
36,520
/**
 * Blocks until every currently incomplete batch has been acknowledged.
 * Best-effort: failures are logged with their cause rather than propagated, and the
 * thread's interrupt status is restored if the wait was interrupted.
 */
public void flush() {
  try {
    ArrayList<Batch> batches = this.incomplete.all();
    int numOutstandingRecords = 0;
    for (Batch batch : batches) {
      numOutstandingRecords += batch.getRecords().size();
    }
    LOG.debug("Flush called on {} batches with {} records total", batches.size(), numOutstandingRecords);
    for (Batch batch : batches) {
      batch.await();
    }
  } catch (Exception e) {
    if (e instanceof InterruptedException) {
      // Preserve the interrupt status for callers up the stack.
      Thread.currentThread().interrupt();
    }
    // Include the cause so the stack trace is not lost.
    LOG.error("Error happened while flushing batches", e);
  }
}
This will block until all the incomplete batches are acknowledged
36,521
/**
 * Creates a TopologySpec.Builder from topology properties. The spec URI is
 * "&lt;catalog scheme/authority&gt;/&lt;group&gt;/&lt;name&gt;" where group defaults to "default";
 * the optional description property is applied when present.
 *
 * @throws RuntimeException if the constructed topology URI is malformed
 */
public static TopologySpec . Builder builder ( URI catalogURI , Properties topologyProps ) { String name = topologyProps . getProperty ( ConfigurationKeys . TOPOLOGY_NAME_KEY ) ; String group = topologyProps . getProperty ( ConfigurationKeys . TOPOLOGY_GROUP_KEY , "default" ) ; try { URI topologyURI = new URI ( catalogURI . getScheme ( ) , catalogURI . getAuthority ( ) , "/" + group + "/" + name , null ) ; TopologySpec . Builder builder = new TopologySpec . Builder ( topologyURI ) . withConfigAsProperties ( topologyProps ) ; String descr = topologyProps . getProperty ( ConfigurationKeys . TOPOLOGY_DESCRIPTION_KEY , null ) ; if ( null != descr ) { builder = builder . withDescription ( descr ) ; } return builder ; } catch ( URISyntaxException e ) { throw new RuntimeException ( "Unable to create a TopologySpec URI: " + e , e ) ; } }
Creates a builder for the TopologySpec based on values in a topology properties config .
36,522
/**
 * Fetches the map-value type from this schema's dataType JSON: a JsonObject or
 * JsonArray is wrapped directly as a JsonSchema; a primitive is treated as a type
 * name (upper-cased) for a base schema.
 *
 * @throws UnsupportedOperationException for any other JSON element kind
 */
public JsonSchema getValuesWithinDataType ( ) { JsonElement element = this . getDataType ( ) . get ( MAP_ITEMS_KEY ) ; if ( element . isJsonObject ( ) ) { return new JsonSchema ( element . getAsJsonObject ( ) ) ; } if ( element . isJsonArray ( ) ) { return new JsonSchema ( element . getAsJsonArray ( ) ) ; } if ( element . isJsonPrimitive ( ) ) { return buildBaseSchema ( Type . valueOf ( element . getAsString ( ) . toUpperCase ( ) ) ) ; } throw new UnsupportedOperationException ( "Map values can only be defined using JsonObject, JsonArray or JsonPrimitive." ) ; }
Fetches dataType . values from the JsonObject
36,523
/**
 * Fetches the nested or primitive array item type from the schema.
 *
 * @throws DataConversionException when the item schema cannot be resolved (items are
 *         not primitive, null, or a JsonObject)
 */
public Type getTypeOfArrayItems ( ) throws DataConversionException { JsonSchema arrayValues = getItemsWithinDataType ( ) ; if ( arrayValues == null ) { throw new DataConversionException ( "Array types only allow values as primitive, null or JsonObject" ) ; } return arrayValues . getType ( ) ; }
Fetches the nested or primitive array items type from schema .
36,524
/**
 * Resolves the target column name: a column absent from metadata gets the alias, or
 * "unknown&lt;counter&gt;" when no alias was given (the counter increments either way); a
 * known column uses the alias, falling back to the source name when the alias is
 * blank. The result is case-converted and special characters are escaped to '_'.
 */
private String getTargetColumnName ( String sourceColumnName , String alias ) { String targetColumnName = alias ; Schema obj = this . getMetadataColumnMap ( ) . get ( sourceColumnName . toLowerCase ( ) ) ; if ( obj == null ) { targetColumnName = ( targetColumnName == null ? "unknown" + this . unknownColumnCounter : targetColumnName ) ; this . unknownColumnCounter ++ ; } else { targetColumnName = ( StringUtils . isNotBlank ( targetColumnName ) ? targetColumnName : sourceColumnName ) ; } targetColumnName = this . toCase ( targetColumnName ) ; return Utils . escapeSpecialCharacters ( targetColumnName , ConfigurationKeys . ESCAPE_CHARS_IN_COLUMN_NAME , "_" ) ; }
Get the target column name. If the column is not found in metadata, it is named as an unknown column; if no alias is found, the target column is simply the source column.
36,525
/**
 * Populates the metadata column map (lower-cased column name -> schema object) and
 * the ordered metadata column list from the given JSON array of column schemas.
 * A null array is a no-op.
 */
private void buildMetadataColumnMap(JsonArray array) {
  if (array == null) {
    return;
  }
  for (JsonElement columnElement : array) {
    Schema schemaObj = gson.fromJson(columnElement, Schema.class);
    String lowerCaseName = schemaObj.getColumnName().toLowerCase();
    this.metadataColumnMap.put(lowerCaseName, schemaObj);
    this.metadataColumnList.add(lowerCaseName);
  }
}
Build metadata column map with column name and column schema object . Build metadata column list with list columns in metadata
36,526
/**
 * Updates the delta-field (watermark column) property when the query defines an
 * alias, so downstream code sees the target column name instead of the source name.
 *
 * Uses literal substring replacement: the previous {@code String.replaceAll} treated
 * the column name as a regex and the target as a regex replacement, which broke on
 * names containing metacharacters such as '$' or '.'.
 */
private void updateDeltaFieldConfig(String srcColumnName, String tgtColumnName) {
  if (this.workUnitState.contains(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY)) {
    String watermarkCol = this.workUnitState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
    this.workUnitState.setProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY,
        watermarkCol.replace(srcColumnName, tgtColumnName));
  }
}
Update the watermark column property if an alias is defined in the query.
36,527
/**
 * Updates the primary-key column property when the query defines an alias, so
 * downstream code sees the target column name instead of the source name.
 *
 * Uses literal substring replacement: the previous {@code String.replaceAll} treated
 * the column name as a regex and the target as a regex replacement, which broke on
 * names containing metacharacters such as '$' or '.'.
 */
private void updatePrimaryKeyConfig(String srcColumnName, String tgtColumnName) {
  if (this.workUnitState.contains(ConfigurationKeys.EXTRACT_PRIMARY_KEY_FIELDS_KEY)) {
    String primarykey = this.workUnitState.getProp(ConfigurationKeys.EXTRACT_PRIMARY_KEY_FIELDS_KEY);
    this.workUnitState.setProp(ConfigurationKeys.EXTRACT_PRIMARY_KEY_FIELDS_KEY,
        primarykey.replace(srcColumnName, tgtColumnName));
  }
}
Update primary key column property if there is an alias defined in query
36,528
/**
 * Parses the pull-file query: records the input column projection and builds the
 * column/alias map. "select *" maps every metadata column to itself; otherwise each
 * projected column (split on commas that are outside parentheses) is parsed for an
 * optional " as " alias and a table-qualifier ("t.") prefix.
 *
 * NOTE(review): if the query lacks "select " then indexOf returns -1 and +7 yields 6,
 * so the startIndex &gt;= 0 guard does not catch that case — confirm queries are always
 * well-formed before relying on this.
 * NOTE(review): a projected column containing spaces but no " as " makes
 * lastIndexOf(" as ") return -1 and substring(0, -1) would throw — verify upstream
 * query validation covers this.
 */
private void parseInputQuery ( String query ) { List < String > projectedColumns = new ArrayList < > ( ) ; if ( StringUtils . isNotBlank ( query ) ) { String queryLowerCase = query . toLowerCase ( ) ; int startIndex = queryLowerCase . indexOf ( "select " ) + 7 ; int endIndex = queryLowerCase . indexOf ( " from " ) ; if ( startIndex >= 0 && endIndex >= 0 ) { String columnProjection = query . substring ( startIndex , endIndex ) ; this . setInputColumnProjection ( columnProjection ) ; StringBuffer sb = new StringBuffer ( ) ; int bracketCount = 0 ; for ( int i = 0 ; i < columnProjection . length ( ) ; i ++ ) { char c = columnProjection . charAt ( i ) ; if ( c == '(' ) { bracketCount ++ ; } if ( c == ')' ) { bracketCount -- ; } if ( bracketCount != 0 ) { sb . append ( c ) ; } else { if ( c != ',' ) { sb . append ( c ) ; } else { projectedColumns . add ( sb . toString ( ) ) ; sb = new StringBuffer ( ) ; } } } projectedColumns . add ( sb . toString ( ) ) ; } } if ( this . isSelectAllColumns ( ) ) { List < String > columnList = this . getMetadataColumnList ( ) ; for ( String columnName : columnList ) { ColumnAttributes col = new ColumnAttributes ( ) ; col . setColumnName ( columnName ) ; col . setAliasName ( columnName ) ; col . setSourceColumnName ( columnName ) ; this . addToColumnAliasMap ( col ) ; } } else { for ( String projectedColumn : projectedColumns ) { String column = projectedColumn . trim ( ) ; String alias = null ; String sourceColumn = column ; int spaceOccurences = StringUtils . countMatches ( column . trim ( ) , " " ) ; if ( spaceOccurences > 0 ) { int lastSpaceIndex = column . toLowerCase ( ) . lastIndexOf ( " as " ) ; sourceColumn = column . substring ( 0 , lastSpaceIndex ) ; alias = column . substring ( lastSpaceIndex + 4 ) ; } String columnName = sourceColumn ; if ( sourceColumn . contains ( "." ) ) { columnName = sourceColumn . substring ( sourceColumn . indexOf ( "." ) + 1 ) ; } ColumnAttributes col = new ColumnAttributes ( ) ; col . 
setColumnName ( columnName ) ; col . setAliasName ( alias ) ; col . setSourceColumnName ( sourceColumn ) ; this . addToColumnAliasMap ( col ) ; } } }
Parse the query provided in the pull file. Sets the input column projection (the column projection in the input query) and the columnAlias map (each column and its alias mentioned in the input query).
36,529
/**
 * Executes a query via a plain JDBC Statement (reusing/creating the connection), and
 * returns the open ResultSet keyed by the first command. The fetch size is applied
 * only when set and the expected record count exceeds 2000.
 * NOTE(review): the Statement is intentionally never closed here because the caller
 * consumes the open ResultSet; all exceptions are logged and swallowed, so the
 * returned output may carry a null ResultSet — callers must handle that.
 */
private CommandOutput < ? , ? > executeSql ( List < Command > cmds ) { String query = null ; int fetchSize = 0 ; for ( Command cmd : cmds ) { if ( cmd instanceof JdbcCommand ) { JdbcCommandType type = ( JdbcCommandType ) cmd . getCommandType ( ) ; switch ( type ) { case QUERY : query = cmd . getParams ( ) . get ( 0 ) ; break ; case FETCHSIZE : fetchSize = Integer . parseInt ( cmd . getParams ( ) . get ( 0 ) ) ; break ; default : this . log . error ( "Command " + type . toString ( ) + " not recognized" ) ; break ; } } } this . log . info ( "Executing query:" + query ) ; ResultSet resultSet = null ; try { this . jdbcSource = createJdbcSource ( ) ; if ( this . dataConnection == null ) { this . dataConnection = this . jdbcSource . getConnection ( ) ; } Statement statement = this . dataConnection . createStatement ( ) ; if ( fetchSize != 0 && this . getExpectedRecordCount ( ) > 2000 ) { statement . setFetchSize ( fetchSize ) ; } final boolean status = statement . execute ( query ) ; if ( status == false ) { this . log . error ( "Failed to execute sql:" + query ) ; } resultSet = statement . getResultSet ( ) ; } catch ( Exception e ) { this . log . error ( "Failed to execute sql:" + query + " ;error-" + e . getMessage ( ) , e ) ; } CommandOutput < JdbcCommand , ResultSet > output = new JdbcCommandOutput ( ) ; output . put ( ( JdbcCommand ) cmds . get ( 0 ) , resultSet ) ; return output ; }
Execute query using JDBC simple Statement Set fetch size
36,530
/**
 * Executes a query via a forward-only, read-only PreparedStatement, binding all query
 * parameters as strings in order and applying the fetch size when set; returns the
 * open ResultSet keyed by the first command.
 * NOTE(review): like executeSql, the statement is never closed here (the caller
 * consumes the open ResultSet) and exceptions are swallowed, so the returned output
 * may contain a null ResultSet.
 */
private CommandOutput < ? , ? > executePreparedSql ( List < Command > cmds ) { String query = null ; List < String > queryParameters = null ; int fetchSize = 0 ; for ( Command cmd : cmds ) { if ( cmd instanceof JdbcCommand ) { JdbcCommandType type = ( JdbcCommandType ) cmd . getCommandType ( ) ; switch ( type ) { case QUERY : query = cmd . getParams ( ) . get ( 0 ) ; break ; case QUERYPARAMS : queryParameters = cmd . getParams ( ) ; break ; case FETCHSIZE : fetchSize = Integer . parseInt ( cmd . getParams ( ) . get ( 0 ) ) ; break ; default : this . log . error ( "Command " + type . toString ( ) + " not recognized" ) ; break ; } } } this . log . info ( "Executing query:" + query ) ; ResultSet resultSet = null ; try { this . jdbcSource = createJdbcSource ( ) ; if ( this . dataConnection == null ) { this . dataConnection = this . jdbcSource . getConnection ( ) ; } PreparedStatement statement = this . dataConnection . prepareStatement ( query , ResultSet . TYPE_FORWARD_ONLY , ResultSet . CONCUR_READ_ONLY ) ; int parameterPosition = 1 ; if ( queryParameters != null && queryParameters . size ( ) > 0 ) { for ( String parameter : queryParameters ) { statement . setString ( parameterPosition , parameter ) ; parameterPosition ++ ; } } if ( fetchSize != 0 ) { statement . setFetchSize ( fetchSize ) ; } final boolean status = statement . execute ( ) ; if ( status == false ) { this . log . error ( "Failed to execute sql:" + query ) ; } resultSet = statement . getResultSet ( ) ; } catch ( Exception e ) { this . log . error ( "Failed to execute sql:" + query + " ;error-" + e . getMessage ( ) , e ) ; } CommandOutput < JdbcCommand , ResultSet > output = new JdbcCommandOutput ( ) ; output . put ( ( JdbcCommand ) cmds . get ( 0 ) , resultSet ) ; return output ; }
Execute query using JDBC PreparedStatement to pass query parameters Set fetch size
36,531
/**
 * Creates (or reuses) the JDBC connection provider for this work unit. A new
 * provider is built from the work unit's connection properties only when none exists
 * yet or the current one is closed; otherwise the existing provider is returned.
 */
protected JdbcProvider createJdbcSource() {
  String driver = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_DRIVER);
  String userName = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME);
  String password = PasswordManager.getInstance(this.workUnitState)
      .readPassword(this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PASSWORD));
  String connectionUrl = this.getConnectionUrl();
  String proxyHost = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL);
  // -1 signals "no proxy port configured".
  int proxyPort = -1;
  if (this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT) != null) {
    proxyPort = this.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT);
  }
  if (this.jdbcSource == null || this.jdbcSource.isClosed()) {
    this.jdbcSource = new JdbcProvider(driver, connectionUrl, userName, password, 1, this.getTimeOut(),
        "DEFAULT", proxyHost, proxyPort);
  }
  return this.jdbcSource;
}
Create JDBC source to get connection
36,532
/**
 * Joins the condition strings of all predicates with " and ", skipping null
 * conditions. Returns the empty string when there are no (non-null) conditions.
 */
protected String concatPredicates(List<Predicate> predicateList) {
  StringBuilder joined = new StringBuilder();
  for (Predicate predicate : predicateList) {
    String condition = predicate.getCondition();
    if (condition == null) {
      continue;  // match the original Joiner.skipNulls() behavior
    }
    if (joined.length() > 0) {
      joined.append(" and ");
    }
    joined.append(condition);
  }
  return joined.toString();
}
Concatenate all predicates with and clause
36,533
/**
 * Builds the schema (as a JsonObject) of the synthetic "derivedwatermarkcolumn" —
 * required when there are multiple watermarks. The data type follows the configured
 * watermark type (timestamp/date; anything else falls back to int); the column is
 * non-nullable, flagged as a watermark, and not a primary key.
 */
private JsonObject getDefaultWatermark ( ) { Schema schema = new Schema ( ) ; String dataType ; String columnName = "derivedwatermarkcolumn" ; schema . setColumnName ( columnName ) ; WatermarkType wmType = WatermarkType . valueOf ( this . workUnitState . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_WATERMARK_TYPE , "TIMESTAMP" ) . toUpperCase ( ) ) ; switch ( wmType ) { case TIMESTAMP : dataType = "timestamp" ; break ; case DATE : dataType = "date" ; break ; default : dataType = "int" ; break ; } String elementDataType = "string" ; List < String > mapSymbols = null ; JsonObject newDataType = this . convertDataType ( columnName , dataType , elementDataType , mapSymbols ) ; schema . setDataType ( newDataType ) ; schema . setWaterMark ( true ) ; schema . setPrimaryKey ( 0 ) ; schema . setLength ( 0 ) ; schema . setPrecision ( 0 ) ; schema . setScale ( 0 ) ; schema . setNullable ( false ) ; schema . setFormat ( null ) ; schema . setComment ( "Default watermark column" ) ; schema . setDefaultValue ( null ) ; schema . setUnique ( false ) ; String jsonStr = gson . toJson ( schema ) ; JsonObject obj = gson . fromJson ( jsonStr , JsonObject . class ) . getAsJsonObject ( ) ; return obj ; }
Schema of default watermark column - required if there are multiple watermarks
36,534
/**
 * Builds the schema for a custom string column — used when a projected column is not
 * found in metadata. The column is nullable, not a watermark, and not a primary key.
 */
private Schema getCustomColumnSchema ( String columnName ) { Schema schema = new Schema ( ) ; String dataType = "string" ; schema . setColumnName ( columnName ) ; String elementDataType = "string" ; List < String > mapSymbols = null ; JsonObject newDataType = this . convertDataType ( columnName , dataType , elementDataType , mapSymbols ) ; schema . setDataType ( newDataType ) ; schema . setWaterMark ( false ) ; schema . setPrimaryKey ( 0 ) ; schema . setLength ( 0 ) ; schema . setPrecision ( 0 ) ; schema . setScale ( 0 ) ; schema . setNullable ( true ) ; schema . setFormat ( null ) ; schema . setComment ( "Custom column" ) ; schema . setDefaultValue ( null ) ; schema . setUnique ( false ) ; return schema ; }
Schema of a custom column - required if column not found in metadata
36,535
/**
 * Checks whether the SELECT query's FROM clause is a JOIN, unwrapping an outer
 * ORDER BY if present. Returns false for null/empty input and also when the query
 * fails to parse (parse errors are swallowed).
 * NOTE(review): a parseable root that is neither SELECT nor ORDER BY throws
 * UnsupportedOperationException rather than returning false — confirm callers
 * expect that.
 */
public static boolean hasJoinOperation ( String selectQuery ) { if ( selectQuery == null || selectQuery . length ( ) == 0 ) { return false ; } SqlParser sqlParser = SqlParser . create ( selectQuery ) ; try { SqlNode all = sqlParser . parseQuery ( ) ; SqlSelect query ; if ( all instanceof SqlSelect ) { query = ( SqlSelect ) all ; } else if ( all instanceof SqlOrderBy ) { query = ( SqlSelect ) ( ( SqlOrderBy ) all ) . query ; } else { throw new UnsupportedOperationException ( "The select query is type of " + all . getClass ( ) + " which is not supported here" ) ; } return query . getFrom ( ) . getKind ( ) == SqlKind . JOIN ; } catch ( SqlParseException e ) { return false ; } }
Check if the SELECT query has join operation
36,536
/**
 * Applies the configured column-name case conversion: TOUPPER, TOLOWER, or no change
 * for any other configured value (the configured default).
 */
private String toCase(String targetColumnName) {
  String caseProp = this.workUnitState
      .getProp(ConfigurationKeys.SOURCE_COLUMN_NAME_CASE, ConfigurationKeys.DEFAULT_COLUMN_NAME_CASE)
      .toUpperCase();
  ColumnNameCase caseType = ColumnNameCase.valueOf(caseProp);
  if (caseType == ColumnNameCase.TOUPPER) {
    return targetColumnName.toUpperCase();
  }
  if (caseType == ColumnNameCase.TOLOWER) {
    return targetColumnName.toLowerCase();
  }
  return targetColumnName;
}
Change the column name case to upper, lower, or no change; the default is no change.
36,537
/**
 * Builds the GlobalOptions (host and port) from the parsed command-line options,
 * falling back to the defaults when unset; prints help and exits when the supplied
 * port is not a valid integer.
 */
private GlobalOptions createGlobalOptions(CommandLine parsedOpts) {
  String host = DEFAULT_REST_SERVER_HOST;
  if (parsedOpts.hasOption(HOST_OPT)) {
    host = parsedOpts.getOptionValue(HOST_OPT);
  }
  int port = DEFAULT_REST_SERVER_PORT;
  if (parsedOpts.hasOption(PORT_OPT)) {
    try {
      port = Integer.parseInt(parsedOpts.getOptionValue(PORT_OPT));
    } catch (NumberFormatException e) {
      printHelpAndExit("The port must be a valid integer.");
    }
  }
  return new GlobalOptions(host, port);
}
Build the GlobalOptions information from the raw parsed options
36,538
public String replacePortTokens ( String value ) { BiMap < String , Optional < Integer > > portMappings = HashBiMap . create ( ) ; Matcher regexMatcher = PORT_REGEX . matcher ( value ) ; while ( regexMatcher . find ( ) ) { String token = regexMatcher . group ( 0 ) ; if ( ! portMappings . containsKey ( token ) ) { Optional < Integer > portStart = Optional . absent ( ) ; Optional < Integer > portEnd = Optional . absent ( ) ; String unboundedStart = regexMatcher . group ( 1 ) ; if ( unboundedStart != null ) { int requestedEndPort = Integer . parseInt ( unboundedStart ) ; Preconditions . checkArgument ( requestedEndPort <= PortUtils . MAXIMUM_PORT ) ; portEnd = Optional . of ( requestedEndPort ) ; } else { String unboundedEnd = regexMatcher . group ( 2 ) ; if ( unboundedEnd != null ) { int requestedStartPort = Integer . parseInt ( unboundedEnd ) ; Preconditions . checkArgument ( requestedStartPort >= PortUtils . MINIMUM_PORT ) ; portStart = Optional . of ( requestedStartPort ) ; } else { String absolute = regexMatcher . group ( 3 ) ; if ( ! "?" . equals ( absolute ) ) { int requestedPort = Integer . parseInt ( absolute ) ; Preconditions . checkArgument ( requestedPort >= PortUtils . MINIMUM_PORT && requestedPort <= PortUtils . MAXIMUM_PORT ) ; portStart = Optional . of ( requestedPort ) ; portEnd = Optional . of ( requestedPort ) ; } } } Optional < Integer > port = takePort ( portStart , portEnd ) ; portMappings . put ( token , port ) ; } } for ( Map . Entry < String , Optional < Integer > > port : portMappings . entrySet ( ) ) { if ( port . getValue ( ) . isPresent ( ) ) { value = value . replace ( port . getKey ( ) , port . getValue ( ) . get ( ) . toString ( ) ) ; } } return value ; }
Replaces any port tokens in the specified string .
36,539
protected Schema generateSchemaWithNullifiedField ( WorkUnitState workUnitState , Schema currentAvroSchema ) { Configuration conf = new Configuration ( ) ; for ( String key : workUnitState . getPropertyNames ( ) ) { conf . set ( key , workUnitState . getProp ( key ) ) ; } Path originalSchemaPath = null ; if ( workUnitState . contains ( CONVERTER_AVRO_NULLIFY_FIELDS_ORIGINAL_SCHEMA_PATH ) ) { originalSchemaPath = new Path ( workUnitState . getProp ( CONVERTER_AVRO_NULLIFY_FIELDS_ORIGINAL_SCHEMA_PATH ) ) ; } else { LOG . info ( "Property " + CONVERTER_AVRO_NULLIFY_FIELDS_ORIGINAL_SCHEMA_PATH + "is not specified. Trying to get the orignal schema from previous avro files." ) ; originalSchemaPath = WriterUtils . getDataPublisherFinalDir ( workUnitState , workUnitState . getPropAsInt ( ConfigurationKeys . FORK_BRANCHES_KEY , 1 ) , workUnitState . getPropAsInt ( ConfigurationKeys . FORK_BRANCH_ID_KEY , 0 ) ) . getParent ( ) ; } try { Schema prevSchema = AvroUtils . getDirectorySchema ( originalSchemaPath , conf , false ) ; Schema mergedSchema = AvroUtils . nullifyFieldsForSchemaMerge ( prevSchema , currentAvroSchema ) ; return mergedSchema ; } catch ( IOException ioe ) { LOG . error ( "Unable to nullify fields. Will retain the current avro schema." , ioe ) ; return currentAvroSchema ; } }
Generate new avro schema by nullifying fields that previously existed but not in the current schema .
36,540
public static void setWorkUnitGuid ( State state , Guid guid ) { state . setProp ( WORK_UNIT_GUID , guid . toString ( ) ) ; }
Set a unique replicable guid for this work unit . Used for recovering partially successful work units .
36,541
protected Schema getFieldSchema ( GenericRecord record , String schemaIdLocation ) throws Exception { Optional < Object > schemaIdValue = AvroUtils . getFieldValue ( record , schemaIdLocation ) ; if ( ! schemaIdValue . isPresent ( ) ) { throw new Exception ( "Schema id with key " + schemaIdLocation + " not found in the record" ) ; } String schemaKey = String . valueOf ( schemaIdValue . get ( ) ) ; return ( Schema ) registry . getSchemaByKey ( schemaKey ) ; }
Get the schema of a field
36,542
protected byte [ ] getPayloadBytes ( GenericRecord inputRecord ) { try { return getFieldAsBytes ( inputRecord , payloadField ) ; } catch ( Exception e ) { return null ; } }
Get payload field and convert to byte array
36,543
protected byte [ ] getFieldAsBytes ( GenericRecord record , String fieldLocation ) throws Exception { Optional < Object > bytesValue = AvroUtils . getFieldValue ( record , fieldLocation ) ; if ( ! bytesValue . isPresent ( ) ) { throw new Exception ( "Bytes value with key " + fieldLocation + " not found in the record" ) ; } ByteBuffer bb = ( ByteBuffer ) bytesValue . get ( ) ; if ( bb . hasArray ( ) ) { return bb . array ( ) ; } else { byte [ ] payloadBytes = new byte [ bb . remaining ( ) ] ; bb . get ( payloadBytes ) ; return payloadBytes ; } }
Get field value byte array
36,544
protected P upConvertPayload ( GenericRecord inputRecord ) throws DataConversionException { try { Schema payloadSchema = getPayloadSchema ( inputRecord ) ; latestPayloadReader . setSchema ( payloadSchema ) ; byte [ ] payloadBytes = getPayloadBytes ( inputRecord ) ; Decoder decoder = DecoderFactory . get ( ) . binaryDecoder ( payloadBytes , null ) ; return latestPayloadReader . read ( null , decoder ) ; } catch ( Exception e ) { throw new DataConversionException ( e ) ; } }
Convert the payload in the input record to a deserialized object with the latest schema
36,545
protected void afterFork ( List < Boolean > forks , long startTimeNanos ) { int forksGenerated = 0 ; for ( Boolean fork : forks ) { forksGenerated += fork ? 1 : 0 ; } Instrumented . markMeter ( this . outputForks , forksGenerated ) ; Instrumented . updateTimer ( this . forkOperatorTimer , System . nanoTime ( ) - startTimeNanos , TimeUnit . NANOSECONDS ) ; }
Called after forkDataRecord .
36,546
@ SuppressWarnings ( "unchecked" ) public D readRecordImpl ( D reuse ) throws DataRecordException , IOException { if ( this . shutdownRequested . get ( ) ) { return null ; } long readStartTime = System . nanoTime ( ) ; while ( ! allPartitionsFinished ( ) ) { if ( currentPartitionFinished ( ) ) { moveToNextPartition ( ) ; continue ; } if ( this . messageIterator == null || ! this . messageIterator . hasNext ( ) ) { try { long fetchStartTime = System . nanoTime ( ) ; this . messageIterator = fetchNextMessageBuffer ( ) ; this . currentPartitionFetchMessageBufferTime += System . nanoTime ( ) - fetchStartTime ; } catch ( Exception e ) { LOG . error ( String . format ( "Failed to fetch next message buffer for partition %s. Will skip this partition." , getCurrentPartition ( ) ) , e ) ; moveToNextPartition ( ) ; continue ; } if ( this . messageIterator == null || ! this . messageIterator . hasNext ( ) ) { moveToNextPartition ( ) ; continue ; } } while ( ! currentPartitionFinished ( ) ) { if ( ! this . messageIterator . hasNext ( ) ) { break ; } KafkaConsumerRecord nextValidMessage = this . messageIterator . next ( ) ; if ( nextValidMessage . getOffset ( ) < this . nextWatermark . get ( this . currentPartitionIdx ) ) { continue ; } this . nextWatermark . set ( this . currentPartitionIdx , nextValidMessage . getNextOffset ( ) ) ; try { long decodeStartTime = System . nanoTime ( ) ; D record = decodeKafkaMessage ( nextValidMessage ) ; this . currentPartitionDecodeRecordTime += System . nanoTime ( ) - decodeStartTime ; this . currentPartitionRecordCount ++ ; this . currentPartitionTotalSize += nextValidMessage . getValueSizeInBytes ( ) ; this . currentPartitionReadRecordTime += System . nanoTime ( ) - readStartTime ; this . currentPartitionLastSuccessfulRecord = record ; return record ; } catch ( Throwable t ) { this . errorPartitions . add ( this . currentPartitionIdx ) ; this . undecodableMessageCount ++ ; if ( shouldLogError ( ) ) { LOG . error ( String . 
format ( "A record from partition %s cannot be decoded." , getCurrentPartition ( ) ) , t ) ; } incrementErrorCount ( ) ; } } } LOG . info ( "Finished pulling topic " + this . topicName ) ; this . currentPartitionReadRecordTime += System . nanoTime ( ) - readStartTime ; return null ; }
Return the next decodable record from the current partition . If the current partition has no more decodable record move on to the next partition . If all partitions have been processed return null .
36,547
private void moveToNextPartition ( ) { if ( this . currentPartitionIdx == INITIAL_PARTITION_IDX ) { LOG . info ( "Pulling topic " + this . topicName ) ; this . currentPartitionIdx = 0 ; } else { updateStatisticsForCurrentPartition ( ) ; this . currentPartitionIdx ++ ; this . currentPartitionRecordCount = 0 ; this . currentPartitionTotalSize = 0 ; this . currentPartitionDecodeRecordTime = 0 ; this . currentPartitionFetchMessageBufferTime = 0 ; this . currentPartitionReadRecordTime = 0 ; this . currentPartitionLastSuccessfulRecord = null ; } this . messageIterator = null ; if ( this . currentPartitionIdx < this . partitions . size ( ) ) { LOG . info ( String . format ( "Pulling partition %s from offset %d to %d, range=%d" , this . getCurrentPartition ( ) , this . nextWatermark . get ( this . currentPartitionIdx ) , this . highWatermark . get ( this . currentPartitionIdx ) , this . highWatermark . get ( this . currentPartitionIdx ) - this . nextWatermark . get ( this . currentPartitionIdx ) ) ) ; switchMetricContextToCurrentPartition ( ) ; } if ( ! allPartitionsFinished ( ) ) { this . startFetchEpochTime . put ( this . getCurrentPartition ( ) , System . currentTimeMillis ( ) ) ; } }
Record the avg time per record for the current partition then increment this . currentPartitionIdx and switch metric context to the new partition .
36,548
protected void onSuccess ( AsyncRequest < D , RQ > asyncRequest , ResponseStatus status ) { final WriteResponse response = WriteResponse . EMPTY ; for ( final AsyncRequest . Thunk thunk : asyncRequest . getThunks ( ) ) { WriteCallback callback = ( WriteCallback ) thunk . callback ; callback . onSuccess ( new WriteResponse ( ) { public Object getRawResponse ( ) { return response . getRawResponse ( ) ; } public String getStringResponse ( ) { return response . getStringResponse ( ) ; } public long bytesWritten ( ) { return thunk . sizeInBytes ; } } ) ; } }
Callback on sending the asyncRequest successfully
36,549
protected void onFailure ( AsyncRequest < D , RQ > asyncRequest , Throwable throwable ) { for ( AsyncRequest . Thunk thunk : asyncRequest . getThunks ( ) ) { thunk . callback . onFailure ( throwable ) ; } }
Callback on failing to send the asyncRequest
36,550
public synchronized boolean cancel ( ) { if ( this . taskFuture != null && this . taskFuture . cancel ( true ) ) { this . taskStateTracker . onTaskRunCompletion ( this ) ; return true ; } else { return false ; } }
return true if the task is successfully cancelled . This method is a copy of the method in parent class . We need this copy so TaskIFaceWrapper variables are not shared between this class and its parent class
36,551
public Schema convertSchema ( Schema inputSchema , WorkUnitState workUnit ) throws SchemaConversionException { if ( this . fieldRemover . isPresent ( ) ) { return this . fieldRemover . get ( ) . removeFields ( inputSchema ) ; } return inputSchema ; }
Remove the specified fields from inputSchema .
36,552
public Iterable < GenericRecord > convertRecordImpl ( Schema outputSchema , GenericRecord inputRecord , WorkUnitState workUnit ) throws DataConversionException { try { return new SingleRecordIterable < > ( AvroUtils . convertRecordSchema ( inputRecord , outputSchema ) ) ; } catch ( IOException e ) { throw new DataConversionException ( e ) ; } }
Convert the schema of inputRecord to outputSchema .
36,553
private Collection < String > getPages ( String startDate , String endDate , List < Dimension > dimensions , ApiDimensionFilter countryFilter , Queue < Pair < String , FilterOperator > > toProcess , int rowLimit ) { String country = GoogleWebmasterFilter . countryFilterToString ( countryFilter ) ; ConcurrentLinkedDeque < String > allPages = new ConcurrentLinkedDeque < > ( ) ; int r = 0 ; while ( r <= GET_PAGES_RETRIES ) { ++ r ; log . info ( String . format ( "Get pages at round %d with size %d." , r , toProcess . size ( ) ) ) ; ConcurrentLinkedDeque < Pair < String , FilterOperator > > nextRound = new ConcurrentLinkedDeque < > ( ) ; ExecutorService es = Executors . newFixedThreadPool ( 10 , ExecutorsUtils . newDaemonThreadFactory ( Optional . of ( log ) , Optional . of ( this . getClass ( ) . getSimpleName ( ) ) ) ) ; while ( ! toProcess . isEmpty ( ) ) { submitJob ( toProcess . poll ( ) , countryFilter , startDate , endDate , dimensions , es , allPages , nextRound , rowLimit ) ; } try { es . shutdown ( ) ; boolean terminated = es . awaitTermination ( 5 , TimeUnit . MINUTES ) ; if ( ! terminated ) { es . shutdownNow ( ) ; log . warn ( "Timed out while getting all pages for country-{} at round {}. Next round now has size {}." , country , r , nextRound . size ( ) ) ; } } catch ( InterruptedException e ) { throw new RuntimeException ( e ) ; } if ( nextRound . isEmpty ( ) ) { break ; } toProcess = nextRound ; coolDown ( r , PAGES_GET_COOLDOWN_TIME ) ; } if ( r == GET_PAGES_RETRIES + 1 ) { throw new RuntimeException ( String . format ( "Getting all pages reaches the maximum number of retires %d. Date range: %s ~ %s. Country: %s." , GET_PAGES_RETRIES , startDate , endDate , country ) ) ; } return allPages ; }
Get all pages in an async mode .
36,554
private ArrayList < String > getUrlPartitions ( String prefix ) { ArrayList < String > expanded = new ArrayList < > ( ) ; for ( char c = 'a' ; c <= 'z' ; ++ c ) { expanded . add ( prefix + c ) ; } for ( int num = 0 ; num <= 9 ; ++ num ) { expanded . add ( prefix + num ) ; } expanded . add ( prefix + "-" ) ; expanded . add ( prefix + "." ) ; expanded . add ( prefix + "_" ) ; expanded . add ( prefix + "~" ) ; expanded . add ( prefix + "/" ) ; expanded . add ( prefix + "%" ) ; expanded . add ( prefix + ":" ) ; expanded . add ( prefix + "?" ) ; expanded . add ( prefix + "#" ) ; expanded . add ( prefix + "@" ) ; expanded . add ( prefix + "!" ) ; expanded . add ( prefix + "$" ) ; expanded . add ( prefix + "&" ) ; expanded . add ( prefix + "+" ) ; expanded . add ( prefix + "*" ) ; expanded . add ( prefix + "'" ) ; expanded . add ( prefix + "=" ) ; return expanded ; }
This doesn't cover all cases but more than 99 . 9% captured .
36,555
public void addListener ( final PathAlterationListener listener ) { if ( listener != null ) { this . listeners . put ( listener , new ExceptionCatchingPathAlterationListenerDecorator ( listener ) ) ; } }
Add a file system listener .
36,556
public void initialize ( ) throws IOException { rootEntry . refresh ( rootEntry . getPath ( ) ) ; final FileStatusEntry [ ] children = doListPathsEntry ( rootEntry . getPath ( ) , rootEntry ) ; rootEntry . setChildren ( children ) ; }
Initialize the observer .
36,557
public void checkAndNotify ( ) throws IOException { for ( final PathAlterationListener listener : listeners . values ( ) ) { listener . onStart ( this ) ; } final Path rootPath = rootEntry . getPath ( ) ; if ( fs . exists ( rootPath ) ) { checkAndNotify ( rootEntry , rootEntry . getChildren ( ) , listPaths ( rootPath ) ) ; } else if ( rootEntry . isExists ( ) ) { checkAndNotify ( rootEntry , rootEntry . getChildren ( ) , EMPTY_PATH_ARRAY ) ; } else { } for ( final PathAlterationListener listener : listeners . values ( ) ) { listener . onStop ( this ) ; } }
Check whether the file and its children have been created modified or deleted .
36,558
private void checkAndNotify ( final FileStatusEntry parent , final FileStatusEntry [ ] previous , final Path [ ] currentPaths ) throws IOException { int c = 0 ; final FileStatusEntry [ ] current = currentPaths . length > 0 ? new FileStatusEntry [ currentPaths . length ] : FileStatusEntry . EMPTY_ENTRIES ; for ( final FileStatusEntry previousEntry : previous ) { while ( c < currentPaths . length && comparator . compare ( previousEntry . getPath ( ) , currentPaths [ c ] ) > 0 ) { current [ c ] = createPathEntry ( parent , currentPaths [ c ] ) ; doCreate ( current [ c ] ) ; c ++ ; } if ( c < currentPaths . length && comparator . compare ( previousEntry . getPath ( ) , currentPaths [ c ] ) == 0 ) { doMatch ( previousEntry , currentPaths [ c ] ) ; checkAndNotify ( previousEntry , previousEntry . getChildren ( ) , listPaths ( currentPaths [ c ] ) ) ; current [ c ] = previousEntry ; c ++ ; } else { checkAndNotify ( previousEntry , previousEntry . getChildren ( ) , EMPTY_PATH_ARRAY ) ; doDelete ( previousEntry ) ; } } for ( ; c < currentPaths . length ; c ++ ) { current [ c ] = createPathEntry ( parent , currentPaths [ c ] ) ; doCreate ( current [ c ] ) ; } parent . setChildren ( current ) ; }
Compare two file lists for files which have been created modified or deleted .
36,559
private FileStatusEntry createPathEntry ( final FileStatusEntry parent , final Path childPath ) throws IOException { final FileStatusEntry entry = parent . newChildInstance ( childPath ) ; entry . refresh ( childPath ) ; final FileStatusEntry [ ] children = doListPathsEntry ( childPath , entry ) ; entry . setChildren ( children ) ; return entry ; }
Create a new FileStatusEntry for the specified file .
36,560
private FileStatusEntry [ ] doListPathsEntry ( Path path , FileStatusEntry entry ) throws IOException { final Path [ ] paths = listPaths ( path ) ; final FileStatusEntry [ ] children = paths . length > 0 ? new FileStatusEntry [ paths . length ] : FileStatusEntry . EMPTY_ENTRIES ; for ( int i = 0 ; i < paths . length ; i ++ ) { children [ i ] = createPathEntry ( entry , paths [ i ] ) ; } return children ; }
List the path in the format of FileStatusEntry array
36,561
private Path [ ] listPaths ( final Path path ) throws IOException { Path [ ] children = null ; ArrayList < Path > tmpChildrenPath = new ArrayList < > ( ) ; if ( fs . isDirectory ( path ) ) { FileStatus [ ] chiledrenFileStatus = pathFilter == null ? fs . listStatus ( path ) : fs . listStatus ( path , pathFilter ) ; for ( FileStatus childFileStatus : chiledrenFileStatus ) { tmpChildrenPath . add ( childFileStatus . getPath ( ) ) ; } children = tmpChildrenPath . toArray ( new Path [ tmpChildrenPath . size ( ) ] ) ; } if ( children == null ) { children = EMPTY_PATH_ARRAY ; } if ( comparator != null && children . length > 1 ) { Arrays . sort ( children , comparator ) ; } return children ; }
List the contents of a directory denoted by Path
36,562
public FlowStatus getFlowStatus ( String flowName , String flowGroup , long flowExecutionId ) { FlowStatus flowStatus = null ; Iterator < JobStatus > jobStatusIterator = jobStatusRetriever . getJobStatusesForFlowExecution ( flowName , flowGroup , flowExecutionId ) ; if ( jobStatusIterator . hasNext ( ) ) { flowStatus = new FlowStatus ( flowName , flowGroup , flowExecutionId , jobStatusIterator ) ; } return flowStatus ; }
Get the flow status for a specific execution .
36,563
public boolean isFlowRunning ( String flowName , String flowGroup ) { List < FlowStatus > flowStatusList = getLatestFlowStatus ( flowName , flowGroup , 1 ) ; if ( flowStatusList == null || flowStatusList . isEmpty ( ) ) { return false ; } else { FlowStatus flowStatus = flowStatusList . get ( 0 ) ; Iterator < JobStatus > jobStatusIterator = flowStatus . getJobStatusIterator ( ) ; while ( jobStatusIterator . hasNext ( ) ) { JobStatus jobStatus = jobStatusIterator . next ( ) ; if ( isJobRunning ( jobStatus ) ) { return true ; } } return false ; } }
Return true if another instance of a flow is running . A flow is determined to be in the RUNNING state if any of the jobs in the flow are in the RUNNING state .
36,564
@ SuppressWarnings ( value = "unchecked" ) < T , K extends SharedResourceKey > SharedResourceFactoryResponse < T > getScopedFromCache ( final SharedResourceFactory < T , K , S > factory , final K key , final ScopeWrapper < S > scope , final SharedResourcesBrokerImpl < S > broker ) throws ExecutionException { RawJobBrokerKey fullKey = new RawJobBrokerKey ( scope , factory . getName ( ) , key ) ; Object obj = this . sharedResourceCache . get ( fullKey , new Callable < Object > ( ) { public Object call ( ) throws Exception { return factory . createResource ( broker . getScopedView ( scope . getType ( ) ) , broker . getConfigView ( scope . getType ( ) , key , factory . getName ( ) ) ) ; } } ) ; return ( SharedResourceFactoryResponse < T > ) obj ; }
Get a scoped object from the cache .
36,565
private Iterator < JsonElement > getSoftDeletedRecords ( String schema , String entity , WorkUnit workUnit , List < Predicate > predicateList ) throws DataRecordException { return this . getRecordSet ( schema , entity , workUnit , predicateList ) ; }
Get soft deleted records using Rest Api
36,566
public boolean bulkApiLogin ( ) throws Exception { log . info ( "Authenticating salesforce bulk api" ) ; boolean success = false ; String hostName = this . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_HOST_NAME ) ; String apiVersion = this . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_VERSION ) ; if ( Strings . isNullOrEmpty ( apiVersion ) ) { apiVersion = this . bulkApiUseQueryAll ? "42.0" : "29.0" ; } String soapAuthEndPoint = hostName + SALESFORCE_SOAP_SERVICE + "/" + apiVersion ; try { ConnectorConfig partnerConfig = new ConnectorConfig ( ) ; if ( super . workUnitState . contains ( ConfigurationKeys . SOURCE_CONN_USE_PROXY_URL ) && ! super . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_USE_PROXY_URL ) . isEmpty ( ) ) { partnerConfig . setProxy ( super . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_USE_PROXY_URL ) , super . workUnitState . getPropAsInt ( ConfigurationKeys . SOURCE_CONN_USE_PROXY_PORT ) ) ; } String accessToken = sfConnector . getAccessToken ( ) ; if ( accessToken == null ) { boolean isConnectSuccess = sfConnector . connect ( ) ; if ( isConnectSuccess ) { accessToken = sfConnector . getAccessToken ( ) ; } } if ( accessToken != null ) { String serviceEndpoint = sfConnector . getInstanceUrl ( ) + SALESFORCE_SOAP_SERVICE + "/" + apiVersion ; partnerConfig . setSessionId ( accessToken ) ; partnerConfig . setServiceEndpoint ( serviceEndpoint ) ; } else { String securityToken = this . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_SECURITY_TOKEN ) ; String password = PasswordManager . getInstance ( this . workUnitState ) . readPassword ( this . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_PASSWORD ) ) ; partnerConfig . setUsername ( this . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_USERNAME ) ) ; partnerConfig . setPassword ( password + securityToken ) ; } partnerConfig . 
setAuthEndpoint ( soapAuthEndPoint ) ; new PartnerConnection ( partnerConfig ) ; String soapEndpoint = partnerConfig . getServiceEndpoint ( ) ; String restEndpoint = soapEndpoint . substring ( 0 , soapEndpoint . indexOf ( "Soap/" ) ) + "async/" + apiVersion ; ConnectorConfig config = new ConnectorConfig ( ) ; config . setSessionId ( partnerConfig . getSessionId ( ) ) ; config . setRestEndpoint ( restEndpoint ) ; config . setCompression ( true ) ; config . setTraceFile ( "traceLogs.txt" ) ; config . setTraceMessage ( false ) ; config . setPrettyPrintXml ( true ) ; if ( super . workUnitState . contains ( ConfigurationKeys . SOURCE_CONN_USE_PROXY_URL ) && ! super . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_USE_PROXY_URL ) . isEmpty ( ) ) { config . setProxy ( super . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_USE_PROXY_URL ) , super . workUnitState . getPropAsInt ( ConfigurationKeys . SOURCE_CONN_USE_PROXY_PORT ) ) ; } this . bulkConnection = new BulkConnection ( config ) ; success = true ; } catch ( RuntimeException e ) { throw new RuntimeException ( "Failed to connect to salesforce bulk api; error - " + e , e ) ; } return success ; }
Login to salesforce
36,567
private BufferedReader getBulkBufferedReader ( int index ) throws AsyncApiException { return new BufferedReader ( new InputStreamReader ( this . bulkConnection . getQueryResultStream ( this . bulkJob . getId ( ) , this . bulkResultIdList . get ( index ) . getBatchId ( ) , this . bulkResultIdList . get ( index ) . getResultId ( ) ) , ConfigurationKeys . DEFAULT_CHARSET_ENCODING ) ) ; }
Get a buffered reader wrapping the query result stream for the result with the specified index
36,568
private void fetchResultBatchWithRetry ( RecordSetList < JsonElement > rs ) throws AsyncApiException , DataRecordException , IOException { boolean success = false ; int retryCount = 0 ; int recordCountBeforeFetch = this . bulkRecordCount ; do { try { if ( retryCount > 0 ) { reinitializeBufferedReader ( ) ; } fetchResultBatch ( rs , this . bulkRecordCount - recordCountBeforeFetch ) ; success = true ; } catch ( IOException e ) { if ( retryCount < this . fetchRetryLimit ) { log . info ( "Exception while fetching data, retrying: " + e . getMessage ( ) , e ) ; retryCount ++ ; } else { log . error ( "Exception while fetching data: " + e . getMessage ( ) , e ) ; throw e ; } } } while ( ! success ) ; }
Fetch a result batch with retry for network errors
36,569
private RecordSet < JsonElement > getBulkData ( ) throws DataRecordException { log . debug ( "Processing bulk api batch..." ) ; RecordSetList < JsonElement > rs = new RecordSetList < > ( ) ; try { if ( this . bulkBufferedReader == null || ! this . bulkBufferedReader . ready ( ) ) { if ( this . bulkResultIdCount > 0 ) { log . info ( "Result set {} had {} records" , this . bulkResultIdCount , this . bulkRecordCount - this . prevBulkRecordCount ) ; } if ( this . bulkResultIdCount < this . bulkResultIdList . size ( ) ) { log . info ( "Stream resultset for resultId:" + this . bulkResultIdList . get ( this . bulkResultIdCount ) ) ; this . setNewBulkResultSet ( true ) ; if ( this . bulkBufferedReader != null ) { this . bulkBufferedReader . close ( ) ; } this . bulkBufferedReader = getBulkBufferedReader ( this . bulkResultIdCount ) ; this . bulkResultIdCount ++ ; this . prevBulkRecordCount = bulkRecordCount ; } else { log . info ( "Bulk job is finished" ) ; this . setBulkJobFinished ( true ) ; return rs ; } } fetchResultBatchWithRetry ( rs ) ; } catch ( Exception e ) { throw new DataRecordException ( "Failed to get records from salesforce; error - " + e . getMessage ( ) , e ) ; } return rs ; }
Get data from the bulk api input stream
36,570
private BatchInfo waitForPkBatches ( BatchInfoList batchInfoList , int retryInterval ) throws InterruptedException , AsyncApiException { BatchInfo batchInfo = null ; BatchInfo [ ] batchInfos = batchInfoList . getBatchInfo ( ) ; for ( int i = 1 ; i < batchInfos . length ; i ++ ) { BatchInfo bi = batchInfos [ i ] ; bi = this . bulkConnection . getBatchInfo ( this . bulkJob . getId ( ) , bi . getId ( ) ) ; while ( ( bi . getState ( ) != BatchStateEnum . Completed ) && ( bi . getState ( ) != BatchStateEnum . Failed ) ) { Thread . sleep ( retryInterval * 1000 ) ; bi = this . bulkConnection . getBatchInfo ( this . bulkJob . getId ( ) , bi . getId ( ) ) ; log . debug ( "Bulk Api Batch Info:" + bi ) ; log . info ( "Waiting for bulk resultSetIds" ) ; } batchInfo = bi ; if ( batchInfo . getState ( ) == BatchStateEnum . Failed ) { break ; } } return batchInfo ; }
Waits for the PK batches to complete . The wait will stop after all batches are complete or on the first failed batch
36,571
public static Map < String , Object > getConfigForBranch ( State taskState , int numBranches , int branch ) { String typePropertyName = ForkOperatorUtils . getPropertyNameForBranch ( ConfigurationKeys . WRITER_CODEC_TYPE , numBranches , branch ) ; String compressionType = taskState . getProp ( typePropertyName ) ; if ( compressionType == null ) { return null ; } return ImmutableMap . < String , Object > of ( COMPRESSION_TYPE_KEY , compressionType ) ; }
Retrieve configuration settings for a given branch .
36,572
public static String getCompressionType ( Map < String , Object > properties ) { return ( String ) properties . get ( COMPRESSION_TYPE_KEY ) ; }
Return compression type
36,573
public void close ( ) throws IOException { try { this . pool . returnObject ( this . object ) ; } catch ( Exception exc ) { throw new IOException ( exc ) ; } finally { this . returned = true ; } }
Return the borrowed object to the pool .
36,574
public Schema convertSchema ( Schema inputSchema , WorkUnitState workUnit ) throws SchemaConversionException { LOG . info ( "Converting schema " + inputSchema ) ; String fieldsStr = workUnit . getProp ( ConfigurationKeys . CONVERTER_AVRO_FIELD_PICK_FIELDS ) ; Preconditions . checkNotNull ( fieldsStr , ConfigurationKeys . CONVERTER_AVRO_FIELD_PICK_FIELDS + " is required for converter " + this . getClass ( ) . getSimpleName ( ) ) ; LOG . info ( "Converting schema to selected fields: " + fieldsStr ) ; try { return createSchema ( inputSchema , fieldsStr ) ; } catch ( Exception e ) { throw new SchemaConversionException ( e ) ; } }
Convert the schema to contain only specified field . This will reuse AvroSchemaFieldRemover by listing fields not specified and remove it from the schema 1 . Retrieve list of fields from property 2 . Traverse schema and get list of fields to be removed 3 . While traversing also confirm specified fields from property also exist 4 . Convert schema by using AvroSchemaFieldRemover
36,575
private static Schema createSchema ( Schema schema , String fieldsStr ) { List < String > fields = SPLITTER_ON_COMMA . splitToList ( fieldsStr ) ; TrieNode root = buildTrie ( fields ) ; return createSchemaHelper ( schema , root ) ; }
Creates Schema containing only specified fields .
36,576
private static Schema getActualRecord ( Schema inputSchema ) { if ( Type . RECORD . equals ( inputSchema . getType ( ) ) ) { return inputSchema ; } Preconditions . checkArgument ( Type . UNION . equals ( inputSchema . getType ( ) ) , "Nested schema is only support with either record or union type of null with record" ) ; Preconditions . checkArgument ( inputSchema . getTypes ( ) . size ( ) <= 2 , "For union type in nested record, it should only have NULL and Record type" ) ; for ( Schema inner : inputSchema . getTypes ( ) ) { if ( Type . NULL . equals ( inner . getType ( ) ) ) { continue ; } Preconditions . checkArgument ( Type . RECORD . equals ( inner . getType ( ) ) , "For union type in nested record, it should only have NULL and Record type" ) ; return inner ; } throw new IllegalArgumentException ( inputSchema + " is not supported." ) ; }
For the schema that is a UNION type with NULL and Record type it provides Records type .
36,577
public static String getSerializedWithNewPackage ( String serialized ) { serialized = serialized . replace ( "\"gobblin.data.management." , "\"org.apache.gobblin.data.management." ) ; log . debug ( "Serialized updated copy entity: " + serialized ) ; return serialized ; }
Converts package name in serialized string to new name . This is temporary change and should get removed after all the states are switched from old to new package name .
36,578
public static Properties mergeTemplateWithUserCustomizedFile ( Properties template , Properties userCustomized ) { Properties cleanedTemplate = new Properties ( ) ; cleanedTemplate . putAll ( template ) ; if ( cleanedTemplate . containsKey ( ConfigurationKeys . REQUIRED_ATRRIBUTES_LIST ) ) { cleanedTemplate . remove ( ConfigurationKeys . REQUIRED_ATRRIBUTES_LIST ) ; } Properties cleanedUserCustomized = new Properties ( ) ; cleanedUserCustomized . putAll ( userCustomized ) ; if ( cleanedUserCustomized . containsKey ( ConfigurationKeys . JOB_TEMPLATE_PATH ) ) { cleanedUserCustomized . remove ( ConfigurationKeys . JOB_TEMPLATE_PATH ) ; } return PropertiesUtils . combineProperties ( cleanedTemplate , cleanedUserCustomized ) ; }
create a complete property file based on the given template
36,579
/**
 * Starts the executor that waits for, and then carries out, a job cancellation request.
 *
 * <p>The submitted task blocks on {@code cancellationRequest} until {@code cancellationRequested}
 * is set (guarded-wait idiom), runs the cancellation, and finally marks the job state CANCELLED
 * and notifies anyone waiting on {@code cancellationExecution}.
 */
protected void startCancellationExecutor ( ) { this . cancellationExecutor . execute ( new Runnable ( ) { public void run ( ) { synchronized ( AbstractJobLauncher . this . cancellationRequest ) { try { // Guarded wait: loop protects against spurious wake-ups.
while ( ! AbstractJobLauncher . this . cancellationRequested ) { AbstractJobLauncher . this . cancellationRequest . wait ( ) ; } LOG . info ( "Cancellation has been requested for job " + AbstractJobLauncher . this . jobContext . getJobId ( ) ) ; executeCancellation ( ) ; LOG . info ( "Cancellation has been executed for job " + AbstractJobLauncher . this . jobContext . getJobId ( ) ) ; } catch ( InterruptedException ie ) { // Restore the interrupt flag so the executor can observe it.
Thread . currentThread ( ) . interrupt ( ) ; } } // Signal completion on a separate monitor so waiters of the execution (not the request) wake up.
synchronized ( AbstractJobLauncher . this . cancellationExecution ) { AbstractJobLauncher . this . cancellationExecuted = true ; AbstractJobLauncher . this . jobContext . getJobState ( ) . setState ( JobState . RunningState . CANCELLED ) ; AbstractJobLauncher . this . cancellationExecution . notifyAll ( ) ; } } } ) ; }
Start the scheduled executor for executing job cancellation .
36,580
/**
 * Tries to acquire the job lock, if job locking is enabled.
 *
 * @param properties job configuration; {@code job.lock.enabled} (default true) controls locking
 * @return true if locking is disabled or the lock was acquired; false if acquisition failed
 */
private boolean tryLockJob(Properties properties) {
  try {
    boolean lockEnabled = Boolean.valueOf(
        properties.getProperty(ConfigurationKeys.JOB_LOCK_ENABLED_KEY, Boolean.TRUE.toString()));
    if (lockEnabled) {
      // Losing the lock mid-run triggers job cancellation.
      this.jobLockOptional = Optional.of(getJobLock(properties, new JobLockEventListener() {
        public void onLost() {
          executeCancellation();
        }
      }));
    }
    // No lock configured counts as success; otherwise attempt the lock.
    return !this.jobLockOptional.isPresent() || this.jobLockOptional.get().tryLock();
  } catch (JobLockException e) {
    LOG.error(String.format("Failed to acquire job lock for job %s: %s", this.jobContext.getJobId(), e), e);
    return false;
  }
}
Try acquiring the job lock and return whether the lock is successfully locked .
36,581
/**
 * Unlocks and releases the job lock of a completed or failed job.
 *
 * <p>The nested try/finally guarantees a strict order: unlock, then close, then clear the
 * optional — each later step runs even if the earlier one throws, so the lock reference is
 * always dropped.
 */
private void unlockJob ( ) { if ( this . jobLockOptional . isPresent ( ) ) { try { this . jobLockOptional . get ( ) . unlock ( ) ; } catch ( JobLockException ioe ) { // Unlock failure is logged but must not prevent closing the lock.
LOG . error ( String . format ( "Failed to unlock for job %s: %s" , this . jobContext . getJobId ( ) , ioe ) , ioe ) ; } finally { try { this . jobLockOptional . get ( ) . close ( ) ; } catch ( IOException e ) { LOG . error ( String . format ( "Failed to close job lock for job %s: %s" , this . jobContext . getJobId ( ) , e ) , e ) ; } finally { // Always drop the reference so a later call is a no-op.
this . jobLockOptional = Optional . absent ( ) ; } } } }
Unlock a completed or failed job .
36,582
/**
 * Cleans up staging data possibly left over by a previous, failed run of this job.
 *
 * <p>Skipped entirely when the initializer owns cleanup, or when exactly-once commit
 * sequences are still unfinished (cleaning then could lose data). Per-task cleanup requires
 * the work-unit stream to be materializable; otherwise job-level cleanup is performed.
 * Cleanup failures are logged, not propagated — this is best-effort.
 *
 * @param workUnits the work units of this run (used only for per-task cleanup)
 * @param jobState the current job state
 * @throws JobException if checking for unfinished commit sequences fails
 */
private void cleanLeftoverStagingData ( WorkUnitStream workUnits , JobState jobState ) throws JobException { if ( jobState . getPropAsBoolean ( ConfigurationKeys . CLEANUP_STAGING_DATA_BY_INITIALIZER , false ) ) { // The initializer is responsible for cleanup; nothing to do here.
return ; } try { if ( ! canCleanStagingData ( jobState ) ) { LOG . error ( "Job " + jobState . getJobName ( ) + " has unfinished commit sequences. Will not clean up staging data." ) ; return ; } } catch ( IOException e ) { throw new JobException ( "Failed to check unfinished commit sequences" , e ) ; } try { if ( this . jobContext . shouldCleanupStagingDataPerTask ( ) ) { if ( workUnits . isSafeToMaterialize ( ) ) { // Closer collects every resource opened during cleanup and releases them in finally.
Closer closer = Closer . create ( ) ; Map < String , ParallelRunner > parallelRunners = Maps . newHashMap ( ) ; try { for ( WorkUnit workUnit : JobLauncherUtils . flattenWorkUnits ( workUnits . getMaterializedWorkUnitCollection ( ) ) ) { JobLauncherUtils . cleanTaskStagingData ( new WorkUnitState ( workUnit , jobState ) , LOG , closer , parallelRunners ) ; } } catch ( Throwable t ) { // Guava Closer idiom: record the primary failure so close() does not mask it.
throw closer . rethrow ( t ) ; } finally { closer . close ( ) ; } } else { throw new RuntimeException ( "Work unit streams do not support cleaning staging data per task." ) ; } } else { if ( jobState . getPropAsBoolean ( ConfigurationKeys . CLEANUP_OLD_JOBS_DATA , ConfigurationKeys . DEFAULT_CLEANUP_OLD_JOBS_DATA ) ) { JobLauncherUtils . cleanUpOldJobData ( jobState , LOG , jobContext . getStagingDirProvided ( ) , jobContext . getOutputDirProvided ( ) ) ; } JobLauncherUtils . cleanJobStagingData ( jobState , LOG ) ; } } catch ( Throwable t ) { // Best-effort: leftover data should not fail the current run.
LOG . error ( "Failed to clean leftover staging data" , t ) ; } }
Clean up the left-over staging data, possibly from a previous run of the job that failed without cleaning up its staging data.
36,583
/**
 * Cleans up this job's task staging data, either per task or for the entire job.
 *
 * <p>Does nothing when the initializer owns cleanup, or when exactly-once commit sequences
 * are still unfinished.
 *
 * @param jobState the current job state
 * @throws JobException if checking for unfinished commit sequences fails
 */
private void cleanupStagingData(JobState jobState) throws JobException {
  // Skip when the initializer owns staging-data cleanup.
  if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_STAGING_DATA_BY_INITIALIZER, false)) {
    return;
  }
  boolean safeToClean;
  try {
    safeToClean = canCleanStagingData(jobState);
  } catch (IOException e) {
    throw new JobException("Failed to check unfinished commit sequences", e);
  }
  if (!safeToClean) {
    LOG.error("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data.");
    return;
  }
  if (this.jobContext.shouldCleanupStagingDataPerTask()) {
    cleanupStagingDataPerTask(jobState);
  } else {
    cleanupStagingDataForEntireJob(jobState);
  }
}
Clean up the job's task staging data. This does nothing when the job succeeds and its data is successfully committed, because the staging data has already been moved to the job output directory. But when the job fails and data is not committed, we want the staging data to be cleaned up.
36,584
/**
 * Decides whether staging data may be cleaned for the given job.
 *
 * <p>With exactly-once delivery, staging data must be kept while the job still has an entry in
 * the commit sequence store (unfinished commit sequences).
 *
 * @param jobState the current job state
 * @return true if staging data can safely be cleaned
 * @throws IOException if the commit sequence store cannot be queried
 */
private boolean canCleanStagingData(JobState jobState) throws IOException {
  // Anything weaker than exactly-once never blocks cleanup.
  if (this.jobContext.getSemantics() != DeliverySemantics.EXACTLY_ONCE) {
    return true;
  }
  return !this.jobContext.getCommitSequenceStore().get().exists(jobState.getJobName());
}
Staging data cannot be cleaned if exactly once semantics is used and the job has unfinished commit sequences .
36,585
private void doRunningStateChange ( RunningState newState ) { RunningState oldState = null ; JobExecutionStateListener stateListener = null ; this . changeLock . lock ( ) ; try { if ( null == this . runningState ) { Preconditions . checkState ( RunningState . PENDING == newState ) ; } else { Preconditions . checkState ( EXPECTED_PRE_TRANSITION_STATES . get ( newState ) . contains ( this . runningState ) , "unexpected state transition " + this . runningState + " + newState ) ; } oldState = this . runningState ; this . runningState = newState ; if ( this . listener . isPresent ( ) ) { stateListener = this . listener . get ( ) ; } this . runningStateChanged . signalAll ( ) ; } finally { this . changeLock . unlock ( ) ; } if ( null != stateListener ) { stateListener . onStatusChange ( this , oldState , newState ) ; } }
This must be called only when holding changeLock
36,586
/**
 * Sets the group of the staging output file, when a group name was configured.
 *
 * @throws IOException if setting the group fails
 * @throws IllegalArgumentException if the staging file does not exist
 */
protected void setStagingFileGroup() throws IOException {
  Preconditions.checkArgument(this.fs.exists(this.stagingFile),
      String.format("Staging output file %s does not exist", this.stagingFile));
  // No configured group means nothing to change.
  if (!this.group.isPresent()) {
    return;
  }
  HadoopUtils.setGroup(this.fs, this.stagingFile, this.group.get());
}
Set the group name of the staging output file .
36,587
/**
 * Builds a work unit that materializes the result of {@code query} into the destination table
 * using the given storage format (via a staging table).
 *
 * @param query the Hive query whose result should be materialized
 * @param storageFormat storage format for the materialized data
 * @param destinationTable metadata describing the target (and staging) table
 * @return a configured {@link WorkUnit} driven by the HiveMaterializer task factory
 */
public static WorkUnit queryResultMaterializationWorkUnit(String query,
    HiveConverterUtils.StorageFormat storageFormat, StageableTableMetadata destinationTable) {
  WorkUnit unit = new WorkUnit();
  unit.setProp(MATERIALIZER_MODE_KEY, MaterializerMode.QUERY_RESULT_MATERIALIZATION.name());
  unit.setProp(STORAGE_FORMAT_KEY, storageFormat.name());
  unit.setProp(QUERY_RESULT_TO_MATERIALIZE_KEY, query);
  // The destination-table metadata travels with the work unit as JSON.
  unit.setProp(STAGEABLE_TABLE_METADATA_KEY, HiveSource.GENERICS_AWARE_GSON.toJson(destinationTable));
  TaskUtils.setTaskFactoryClass(unit, HiveMaterializerTaskFactory.class);
  // Watermarks do not apply to one-off query materialization.
  HiveTask.disableHiveWatermarker(unit);
  return unit;
}
Create a work unit to materialize a query to a target table using a staging table in between .
36,588
/**
 * Moves all task states whose work unit was SKIPPED out of the regular task-state map and
 * into the skipped task states.
 */
public void filterSkippedTaskStates() {
  // First pass: collect, so we do not mutate the map while iterating its values.
  List<TaskState> skipped = new ArrayList<>();
  for (TaskState candidate : this.taskStates.values()) {
    if (candidate.getWorkingState() == WorkUnitState.WorkingState.SKIPPED) {
      skipped.add(candidate);
    }
  }
  // Second pass: relocate each skipped task state.
  for (TaskState candidate : skipped) {
    removeTaskState(candidate);
    addSkippedTaskState(candidate);
  }
}
Filter the task states corresponding to the skipped work units and add it to the skippedTaskStates
36,589
/**
 * Returns the number of tasks that have completed.
 *
 * @return count of task states reporting completion
 */
public int getCompletedTasks() {
  int count = 0;
  for (TaskState state : this.taskStates.values()) {
    count += state.isCompleted() ? 1 : 0;
  }
  return count;
}
Get the number of completed tasks .
36,590
/**
 * Writes a summary of this job state (name, id, state, timing, task counts) to the given
 * JSON writer. Field order matches the original fluent chain.
 *
 * @param jsonWriter open JSON writer positioned inside an object
 * @throws IOException if writing fails
 */
protected void writeStateSummary(JsonWriter jsonWriter) throws IOException {
  jsonWriter.name("job name").value(this.getJobName());
  jsonWriter.name("job id").value(this.getJobId());
  jsonWriter.name("job state").value(this.getState().name());
  jsonWriter.name("start time").value(this.getStartTime());
  jsonWriter.name("end time").value(this.getEndTime());
  jsonWriter.name("duration").value(this.getDuration());
  jsonWriter.name("tasks").value(this.getTaskCount());
  jsonWriter.name("completed tasks").value(this.getCompletedTasks());
}
Write a summary to the json document
36,591
/**
 * Creates a new application ID of the form {@code app_<appName>_<millis>}.
 *
 * @param appName the application name
 * @return a new app ID; the timestamp suffix keeps IDs unique across launches
 */
public static String newAppId(String appName) {
  return String.format("app_%s_%d", appName, System.currentTimeMillis());
}
Create a new app ID .
36,592
/**
 * Determines whether the task succeeded in a prior attempt by looking for its success marker
 * in the task state store.
 *
 * <p>Fix: the IOException from the store lookup was silently swallowed; it is now logged so
 * operators can see why prior-attempt detection failed. The method still falls back to
 * {@code false} (re-run the task) on error, preserving the original best-effort behavior.
 *
 * @param taskId the task to check
 * @return true if a success marker exists for the task; false otherwise or on lookup failure
 */
private boolean taskSuccessfulInPriorAttempt(String taskId) {
  if (!this.taskStateStoreOptional.isPresent()) {
    return false;
  }
  StateStore<TaskState> taskStateStore = this.taskStateStoreOptional.get();
  try {
    if (taskStateStore.exists(jobId, taskId + TASK_STATE_STORE_SUCCESS_MARKER_SUFFIX)) {
      log.info("Skipping task {} that successfully executed in a prior attempt.", taskId);
      return true;
    }
  } catch (IOException e) {
    // Best effort: if the marker cannot be read, re-run the task rather than fail the job,
    // but surface the problem instead of swallowing it.
    log.warn("Failed to check prior-attempt success marker for task " + taskId, e);
  }
  return false;
}
Determine if the task executed successfully in a prior attempt by checking the task state store for the success marker.
36,593
/**
 * Runs the given work units as a single multi-task attempt and optionally commits them
 * according to {@code multiTaskAttemptCommitPolicy}.
 *
 * <p>FIXME: kept for backwards compatibility with the LocalJobLauncher, which does not use a
 * task state store; all task executions should eventually update the task state.
 *
 * @return the attempt after it has run (and possibly committed)
 * @throws IOException if running or committing the attempt fails
 * @throws InterruptedException if the attempt is interrupted
 */
public static GobblinMultiTaskAttempt runWorkUnits(JobContext jobContext, Iterator<WorkUnit> workUnits,
    TaskStateTracker taskStateTracker, TaskExecutor taskExecutor, CommitPolicy multiTaskAttemptCommitPolicy)
    throws IOException, InterruptedException {
  // No container id and no task state store: this path is state-store-free by design.
  GobblinMultiTaskAttempt attempt = new GobblinMultiTaskAttempt(workUnits, jobContext.getJobId(),
      jobContext.getJobState(), taskStateTracker, taskExecutor, Optional.<String>absent(),
      Optional.<StateStore<TaskState>>absent(), jobContext.getJobBroker());
  attempt.runAndOptionallyCommitTaskAttempt(multiTaskAttemptCommitPolicy);
  return attempt;
}
FIXME this method is provided for backwards compatibility in the LocalJobLauncher since it does not access the task state store . This should be addressed as all task executions should be updating the task state .
36,594
/**
 * Stores this job's execution information in the job history store, if one is configured.
 * Write failures are logged and do not propagate.
 */
void storeJobExecutionInfo() {
  if (!this.jobHistoryStoreOptional.isPresent()) {
    return;
  }
  try {
    this.logger.info("Writing job execution information to the job history store");
    this.jobHistoryStoreOptional.get().put(this.jobState.toJobExecutionInfo());
  } catch (IOException ioe) {
    // History is informational: a failed write must not fail the job.
    this.logger.error("Failed to write job execution information to the job history store: " + ioe, ioe);
  }
}
Store job execution information into the job history store .
36,595
/**
 * Commits the job's dataset states, possibly in parallel, honoring the configured delivery
 * semantics and whether the job was cancelled.
 *
 * <p>Fix: the {@code InterruptedException} handler now restores the thread's interrupt flag
 * before wrapping, so callers up the stack can still observe the interruption.
 *
 * @param isJobCancelled whether the job was cancelled (passed through to each dataset commit)
 * @throws IOException if any dataset commit fails, or the commit is interrupted
 */
void commit(final boolean isJobCancelled) throws IOException {
  this.datasetStatesByUrns = Optional.of(computeDatasetStatesByUrns());
  final boolean shouldCommitDataInJob = shouldCommitDataInJob(this.jobState);
  final DeliverySemantics deliverySemantics = DeliverySemantics.parse(this.jobState);
  final int numCommitThreads = numCommitThreads();
  if (!shouldCommitDataInJob) {
    this.logger.info("Job will not commit data since data are committed by tasks.");
  }
  try {
    if (this.datasetStatesByUrns.isPresent()) {
      this.logger.info("Persisting dataset urns.");
      this.datasetStateStore.persistDatasetURNs(this.jobName, this.datasetStatesByUrns.get().keySet());
    }
    // Fan out one SafeDatasetCommit callable per dataset and run them on a bounded pool.
    List<Either<Void, ExecutionException>> result = new IteratorExecutor<>(Iterables.transform(
        this.datasetStatesByUrns.get().entrySet(),
        new Function<Map.Entry<String, DatasetState>, Callable<Void>>() {
          public Callable<Void> apply(final Map.Entry<String, DatasetState> entry) {
            return createSafeDatasetCommit(shouldCommitDataInJob, isJobCancelled, deliverySemantics,
                entry.getKey(), entry.getValue(), numCommitThreads > 1, JobContext.this);
          }
        }).iterator(), numCommitThreads,
        ExecutorsUtils.newThreadFactory(Optional.of(this.logger), Optional.of("Commit-thread-%d")))
        .executeAndGetResults();
    IteratorExecutor.logFailures(result, LOG, 10);
    // Any single dataset failure fails the whole job commit.
    if (!IteratorExecutor.verifyAllSuccessful(result)) {
      this.jobState.setState(JobState.RunningState.FAILED);
      throw new IOException("Failed to commit dataset state for some dataset(s) of job " + this.jobId);
    }
  } catch (InterruptedException exc) {
    // Restore the interrupt flag before translating to IOException.
    Thread.currentThread().interrupt();
    throw new IOException(exc);
  }
  this.jobState.setState(JobState.RunningState.COMMITTED);
}
Commit the job based on whether the job is cancelled .
36,596
/**
 * Factory for the per-dataset commit callable.
 *
 * <p>This seam exists only so tests can verify the parallelization of commits.
 * DO NOT OVERRIDE for any other purpose.
 *
 * @return a {@link SafeDatasetCommit} callable for the given dataset
 */
protected Callable < Void > createSafeDatasetCommit ( boolean shouldCommitDataInJob , boolean isJobCancelled , DeliverySemantics deliverySemantics , String datasetUrn , JobState . DatasetState datasetState , boolean isMultithreaded , JobContext jobContext ) { return new SafeDatasetCommit ( shouldCommitDataInJob , isJobCancelled , deliverySemantics , datasetUrn , datasetState , isMultithreaded , jobContext ) ; }
The only reason for this method is so that we can test the parallelization of commits. DO NOT OVERRIDE.
36,597
/**
 * Validates the JDBC writer/publisher configuration across fork branches.
 *
 * <p>Checks: (1) no two branches publish to the same destination table; (2) no two branches
 * share a staging table; (3) the commit policy and job-level publishing agree — full-success
 * commit must be paired with job-level publishing, and any other policy must not be
 * (otherwise task-level writes would contradict job-level publishing).
 *
 * @param state job state holding branch and table configuration
 * @throws IllegalArgumentException if any of the three constraints is violated
 */
private static void validateInput ( State state ) { int branches = state . getPropAsInt ( ConfigurationKeys . FORK_BRANCHES_KEY , 1 ) ; // (1) Destination tables must be unique across branches.
Set < String > publishTables = Sets . newHashSet ( ) ; for ( int branchId = 0 ; branchId < branches ; branchId ++ ) { String publishTable = Preconditions . checkNotNull ( getProp ( state , JdbcPublisher . JDBC_PUBLISHER_FINAL_TABLE_NAME , branches , branchId ) , JdbcPublisher . JDBC_PUBLISHER_FINAL_TABLE_NAME + " should not be null." ) ; if ( publishTables . contains ( publishTable ) ) { throw new IllegalArgumentException ( "Duplicate " + JdbcPublisher . JDBC_PUBLISHER_FINAL_TABLE_NAME + " is not allowed across branches" ) ; } publishTables . add ( publishTable ) ; } // (2) Staging tables, when configured, must also be unique across branches.
Set < String > stagingTables = Sets . newHashSet ( ) ; for ( int branchId = 0 ; branchId < branches ; branchId ++ ) { String stagingTable = getProp ( state , ConfigurationKeys . WRITER_STAGING_TABLE , branches , branchId ) ; if ( ! StringUtils . isEmpty ( stagingTable ) && stagingTables . contains ( stagingTable ) ) { throw new IllegalArgumentException ( "Duplicate " + ConfigurationKeys . WRITER_STAGING_TABLE + " is not allowed across branches" ) ; } stagingTables . add ( stagingTable ) ; } // (3) XOR: COMMIT_ON_FULL_SUCCESS and job-level publishing must be both true or both false.
JobCommitPolicy policy = JobCommitPolicy . getCommitPolicy ( state ) ; boolean isPublishJobLevel = state . getPropAsBoolean ( ConfigurationKeys . PUBLISH_DATA_AT_JOB_LEVEL , ConfigurationKeys . DEFAULT_PUBLISH_DATA_AT_JOB_LEVEL ) ; if ( JobCommitPolicy . COMMIT_ON_FULL_SUCCESS . equals ( policy ) ^ isPublishJobLevel ) { throw new IllegalArgumentException ( "Job commit policy should be only " + JobCommitPolicy . COMMIT_ON_FULL_SUCCESS + " when " + ConfigurationKeys . PUBLISH_DATA_AT_JOB_LEVEL + " is true." + " Or Job commit policy should not be " + JobCommitPolicy . COMMIT_ON_FULL_SUCCESS + " and " + ConfigurationKeys . PUBLISH_DATA_AT_JOB_LEVEL + " is false." ) ; } }
1. Users should not define the same destination table across different branches. 2. Users should not define the same staging table across different branches. 3. If the commit policy is not full, Gobblin will try to write into the final table even if there is a failure — i.e. Gobblin writes at the task level. However, publishing data at the job level contradicts writing at the task level. Thus, validate that publish-data-at-job-level is false whenever the commit policy is not full.
36,598
/**
 * Checks whether {@code superUserName} can proxy as {@code userNameToProxyAs} using the
 * keytab at {@code superUserKeytabLocation}.
 *
 * @return true if the login-and-proxy attempt succeeds, false if it throws an IOException
 */
public static boolean canProxyAs(String userNameToProxyAs, String superUserName, Path superUserKeytabLocation) {
  try {
    loginAndProxyAsUser(userNameToProxyAs, superUserName, superUserKeytabLocation);
    return true;
  } catch (IOException e) {
    // A failed login/proxy attempt simply means proxying is not possible.
    return false;
  }
}
Returns true if superUserName can proxy as userNameToProxyAs using the specified superUserKeytabLocation false otherwise .
36,599
/**
 * Generates a {@link JobSpec} from the given {@link FlowSpec}, naively resolving against the
 * first available template (single-hop compilation).
 *
 * <p>After building (resolved or unresolved), the flow's schedule key is stripped, the flow
 * name/group are copied onto the job name/group when present, a flow execution id is attached,
 * and the config is mirrored into the spec's Properties form.
 *
 * @param flowSpec the flow to derive a job spec from
 * @return the generated (possibly template-resolved) job spec
 * @throws RuntimeException if the referenced template cannot be resolved from the catalog
 */
protected JobSpec jobSpecGenerator ( FlowSpec flowSpec ) { JobSpec jobSpec ; JobSpec . Builder jobSpecBuilder = JobSpec . builder ( jobSpecURIGenerator ( flowSpec ) ) . withConfig ( flowSpec . getConfig ( ) ) . withDescription ( flowSpec . getDescription ( ) ) . withVersion ( flowSpec . getVersion ( ) ) ; if ( flowSpec . getTemplateURIs ( ) . isPresent ( ) && templateCatalog . isPresent ( ) ) { // Naive: take the first template URI only.
jobSpecBuilder = jobSpecBuilder . withTemplate ( flowSpec . getTemplateURIs ( ) . get ( ) . iterator ( ) . next ( ) ) ; try { jobSpec = new ResolvedJobSpec ( jobSpecBuilder . build ( ) , templateCatalog . get ( ) ) ; log . info ( "Resolved JobSpec properties are: " + jobSpec . getConfigAsProperties ( ) ) ; } catch ( SpecNotFoundException | JobTemplate . TemplateException e ) { throw new RuntimeException ( "Could not resolve template in JobSpec from TemplateCatalog" , e ) ; } } else { // No template (or no catalog): build the spec unresolved.
jobSpec = jobSpecBuilder . build ( ) ; log . info ( "Unresolved JobSpec properties are: " + jobSpec . getConfigAsProperties ( ) ) ; } // The flow-level schedule must not trigger the individual job.
jobSpec . setConfig ( jobSpec . getConfig ( ) . withoutPath ( ConfigurationKeys . JOB_SCHEDULE_KEY ) ) ; if ( flowSpec . getConfig ( ) . hasPath ( ConfigurationKeys . FLOW_NAME_KEY ) ) { jobSpec . setConfig ( jobSpec . getConfig ( ) . withValue ( ConfigurationKeys . JOB_NAME_KEY , flowSpec . getConfig ( ) . getValue ( ConfigurationKeys . FLOW_NAME_KEY ) ) ) ; } if ( flowSpec . getConfig ( ) . hasPath ( ConfigurationKeys . FLOW_GROUP_KEY ) ) { jobSpec . setConfig ( jobSpec . getConfig ( ) . withValue ( ConfigurationKeys . JOB_GROUP_KEY , flowSpec . getConfig ( ) . getValue ( ConfigurationKeys . FLOW_GROUP_KEY ) ) ) ; } long flowExecutionId = FlowUtils . getOrCreateFlowExecutionId ( flowSpec ) ; jobSpec . setConfig ( jobSpec . getConfig ( ) . withValue ( ConfigurationKeys . FLOW_EXECUTION_ID_KEY , ConfigValueFactory . fromAnyRef ( flowExecutionId ) ) ) ; // Keep the Properties view in sync with the (now final) Config.
jobSpec . setConfigAsProperties ( ConfigUtils . configToProperties ( jobSpec . getConfig ( ) ) ) ; return jobSpec ; }
Naive implementation of generating a jobSpec, which fetches the first available template, in an exemplified single-hop FlowCompiler implementation.