idx int64 0 41.2k | question stringlengths 74 4.21k | target stringlengths 5 888 |
|---|---|---|
36,200 | private Histogram getHistogramByDayBucketing ( SalesforceConnector connector , String entity , String watermarkColumn , Partition partition ) { Histogram histogram = new Histogram ( ) ; Calendar calendar = new GregorianCalendar ( ) ; Date startDate = Utils . toDate ( partition . getLowWatermark ( ) , Partitioner . WATERMARKTIMEFORMAT ) ; calendar . setTime ( startDate ) ; int startYear = calendar . get ( Calendar . YEAR ) ; String lowWatermarkDate = Utils . dateToString ( startDate , SalesforceExtractor . SALESFORCE_TIMESTAMP_FORMAT ) ; Date endDate = Utils . toDate ( partition . getHighWatermark ( ) , Partitioner . WATERMARKTIMEFORMAT ) ; calendar . setTime ( endDate ) ; int endYear = calendar . get ( Calendar . YEAR ) ; String highWatermarkDate = Utils . dateToString ( endDate , SalesforceExtractor . SALESFORCE_TIMESTAMP_FORMAT ) ; Map < String , String > values = new HashMap < > ( ) ; values . put ( "table" , entity ) ; values . put ( "column" , watermarkColumn ) ; StrSubstitutor sub = new StrSubstitutor ( values ) ; for ( int year = startYear ; year <= endYear ; year ++ ) { if ( year == startYear ) { values . put ( "start" , lowWatermarkDate ) ; values . put ( "greater" , partition . isLowWatermarkInclusive ( ) ? ">=" : ">" ) ; } else { values . put ( "start" , getDateString ( year ) ) ; values . put ( "greater" , ">=" ) ; } if ( year == endYear ) { values . put ( "end" , highWatermarkDate ) ; values . put ( "less" , partition . isHighWatermarkInclusive ( ) ? "<=" : "<" ) ; } else { values . put ( "end" , getDateString ( year + 1 ) ) ; values . put ( "less" , "<" ) ; } String query = sub . replace ( DAY_PARTITION_QUERY_TEMPLATE ) ; log . info ( "Histogram query: " + query ) ; histogram . add ( parseDayBucketingHistogram ( getRecordsForQuery ( connector , query ) ) ) ; } return histogram ; } | Get a histogram with day granularity buckets . |
36,201 | private Histogram getHistogram ( String entity , String watermarkColumn , SourceState state , Partition partition ) { SalesforceConnector connector = getConnector ( state ) ; try { if ( ! connector . connect ( ) ) { throw new RuntimeException ( "Failed to connect." ) ; } } catch ( RestApiConnectionException e ) { throw new RuntimeException ( "Failed to connect." , e ) ; } Histogram histogram = getHistogramByDayBucketing ( connector , entity , watermarkColumn , partition ) ; HistogramGroup firstGroup = histogram . get ( 0 ) ; Date lwmDate = Utils . toDate ( partition . getLowWatermark ( ) , Partitioner . WATERMARKTIMEFORMAT ) ; histogram . getGroups ( ) . set ( 0 , new HistogramGroup ( Utils . epochToDate ( lwmDate . getTime ( ) , SECONDS_FORMAT ) , firstGroup . getCount ( ) ) ) ; if ( state . getPropAsBoolean ( ENABLE_DYNAMIC_PROBING ) ) { histogram = getRefinedHistogram ( connector , entity , watermarkColumn , state , partition , histogram ) ; } return histogram ; } | Generate the histogram |
36,202 | private Retryer < Void > buildRetryer ( State state ) { RetryerBuilder < Void > builder = null ; if ( writer instanceof Retriable ) { builder = ( ( Retriable ) writer ) . getRetryerBuilder ( ) ; } else { builder = createRetryBuilder ( state ) ; } if ( GobblinMetrics . isEnabled ( state ) ) { final Optional < Meter > retryMeter = Optional . of ( Instrumented . getMetricContext ( state , getClass ( ) ) . meter ( FAILED_RETRY_WRITES_METER ) ) ; builder . withRetryListener ( new RetryListener ( ) { public < V > void onRetry ( Attempt < V > attempt ) { if ( attempt . hasException ( ) ) { LOG . warn ( "Caught exception. This may be retried." , attempt . getExceptionCause ( ) ) ; Instrumented . markMeter ( retryMeter ) ; failedWrites ++ ; } } } ) ; } return builder . build ( ) ; } | Build Retryer . - If Writer implements Retriable it will use the RetryerBuilder from the writer . - Otherwise it will use DEFAULT writer builder . |
36,203 | public static List < Partition > getPartitions ( IMetaStoreClient client , Table table , Optional < String > filter ) throws IOException { return getPartitions ( client , table , filter , Optional . < HivePartitionExtendedFilter > absent ( ) ) ; } | For backward compatibility when PathFilter is injected as a parameter . |
36,204 | public static Set < Path > getPaths ( InputFormat < ? , ? > inputFormat , Path location ) throws IOException { JobConf jobConf = new JobConf ( getHadoopConfiguration ( ) ) ; Set < Path > paths = Sets . newHashSet ( ) ; FileInputFormat . addInputPaths ( jobConf , location . toString ( ) ) ; InputSplit [ ] splits = inputFormat . getSplits ( jobConf , 1000 ) ; for ( InputSplit split : splits ) { if ( ! ( split instanceof FileSplit ) ) { throw new IOException ( "Not a file split. Found " + split . getClass ( ) . getName ( ) ) ; } FileSplit fileSplit = ( FileSplit ) split ; paths . add ( fileSplit . getPath ( ) ) ; } return paths ; } | Get paths from a Hive location using the provided input format . |
36,205 | public static String getPathForBranch ( State state , String path , int numBranches , int branchId ) { Preconditions . checkNotNull ( state ) ; Preconditions . checkNotNull ( path ) ; Preconditions . checkArgument ( numBranches >= 0 , "The number of branches is expected to be non-negative" ) ; Preconditions . checkArgument ( branchId >= 0 , "The branch id is expected to be non-negative" ) ; return numBranches > 1 ? path + Path . SEPARATOR + state . getProp ( ConfigurationKeys . FORK_BRANCH_NAME_KEY + "." + branchId , ConfigurationKeys . DEFAULT_FORK_BRANCH_NAME + branchId ) : path ; } | Get a new path with the given branch name as a sub directory . |
36,206 | protected static long getCreateTime ( Table table ) { return TimeUnit . MILLISECONDS . convert ( table . getTTable ( ) . getCreateTime ( ) , TimeUnit . SECONDS ) ; } | Convert createTime from seconds to milliseconds |
36,207 | private void silenceHiveLoggers ( ) { List < String > loggers = ImmutableList . of ( "org.apache.hadoop.hive" , "org.apache.hive" , "hive.ql.parse" ) ; for ( String name : loggers ) { Logger logger = Logger . getLogger ( name ) ; if ( logger != null ) { logger . setLevel ( Level . WARN ) ; } } } | Hive logging is too verbose at INFO level . Currently hive does not have a way to set log level . This is a workaround to set log level to WARN for hive loggers only |
36,208 | private void commitDataset ( Collection < TaskState > taskStates , DataPublisher publisher ) { try { publisher . publish ( taskStates ) ; } catch ( Throwable t ) { log . error ( "Failed to commit dataset" , t ) ; setTaskFailureException ( taskStates , t ) ; } } | Commit the output data of a dataset . |
36,209 | private boolean canCommitDataset ( JobState . DatasetState datasetState ) { return this . jobContext . getJobCommitPolicy ( ) == JobCommitPolicy . COMMIT_ON_PARTIAL_SUCCESS || this . jobContext . getJobCommitPolicy ( ) == JobCommitPolicy . COMMIT_SUCCESSFUL_TASKS || ( this . jobContext . getJobCommitPolicy ( ) == JobCommitPolicy . COMMIT_ON_FULL_SUCCESS && datasetState . getState ( ) == JobState . RunningState . SUCCESSFUL ) ; } | Check if it is OK to commit the output data of a dataset . |
36,210 | private void persistDatasetState ( String datasetUrn , JobState . DatasetState datasetState ) throws IOException { log . info ( "Persisting dataset state for dataset " + datasetUrn ) ; this . jobContext . getDatasetStateStore ( ) . persistDatasetState ( datasetUrn , datasetState ) ; } | Persist dataset state of a given dataset identified by the dataset URN . |
36,211 | private Path getOrGenerateSchemaFile ( Schema schema ) throws IOException { Preconditions . checkNotNull ( schema , "Avro Schema should not be null" ) ; String hashedSchema = Hashing . sha256 ( ) . hashString ( schema . toString ( ) , StandardCharsets . UTF_8 ) . toString ( ) ; if ( ! this . schemaPaths . containsKey ( hashedSchema ) ) { Path schemaFilePath = new Path ( this . schemaDir , String . valueOf ( System . currentTimeMillis ( ) + ".avsc" ) ) ; AvroUtils . writeSchemaToFile ( schema , schemaFilePath , fs , true ) ; this . schemaPaths . put ( hashedSchema , schemaFilePath ) ; } return this . schemaPaths . get ( hashedSchema ) ; } | If url for schema already exists return the url . If not create a new temporary schema file and return the url . |
36,212 | public static void writeStringAsText ( DataOutput stream , String str ) throws IOException { byte [ ] utf8Encoded = str . getBytes ( StandardCharsets . UTF_8 ) ; writeVLong ( stream , utf8Encoded . length ) ; stream . write ( utf8Encoded ) ; } | Serialize a String using the same logic as a Hadoop Text object |
36,213 | public static String readTextAsString ( DataInput in ) throws IOException { int bufLen = ( int ) readVLong ( in ) ; byte [ ] buf = new byte [ bufLen ] ; in . readFully ( buf ) ; return new String ( buf , StandardCharsets . UTF_8 ) ; } | Deserialize a Hadoop Text object into a String |
36,214 | private static void writeVLong ( DataOutput stream , long i ) throws IOException { if ( i >= - 112 && i <= 127 ) { stream . writeByte ( ( byte ) i ) ; return ; } int len = - 112 ; if ( i < 0 ) { i ^= - 1L ; len = - 120 ; } long tmp = i ; while ( tmp != 0 ) { tmp = tmp >> 8 ; len -- ; } stream . writeByte ( ( byte ) len ) ; len = ( len < - 120 ) ? - ( len + 120 ) : - ( len + 112 ) ; for ( int idx = len ; idx != 0 ; idx -- ) { int shiftbits = ( idx - 1 ) * 8 ; long mask = 0xFFL << shiftbits ; stream . writeByte ( ( byte ) ( ( i & mask ) >> shiftbits ) ) ; } } | From org . apache . hadoop . io . WritableUtils |
36,215 | private static long readVLong ( DataInput stream ) throws IOException { byte firstByte = stream . readByte ( ) ; int len = decodeVIntSize ( firstByte ) ; if ( len == 1 ) { return firstByte ; } long i = 0 ; for ( int idx = 0 ; idx < len - 1 ; idx ++ ) { byte b = stream . readByte ( ) ; i = i << 8 ; i = i | ( b & 0xFF ) ; } return ( isNegativeVInt ( firstByte ) ? ( i ^ - 1L ) : i ) ; } | Reads a zero - compressed encoded long from input stream and returns it . |
36,216 | private boolean canRun ( String flowName , String flowGroup , boolean allowConcurrentExecution ) { if ( allowConcurrentExecution ) { return true ; } else { return ! flowStatusGenerator . isFlowRunning ( flowName , flowGroup ) ; } } | Check if the flow instance is allowed to run . |
36,217 | private synchronized void getAllPreviousOffsetState ( SourceState state ) { if ( this . doneGettingAllPreviousOffsets ) { return ; } this . previousOffsets . clear ( ) ; this . previousLowWatermarks . clear ( ) ; this . previousExpectedHighWatermarks . clear ( ) ; this . previousOffsetFetchEpochTimes . clear ( ) ; this . previousStartFetchEpochTimes . clear ( ) ; this . previousStopFetchEpochTimes . clear ( ) ; Map < String , Iterable < WorkUnitState > > workUnitStatesByDatasetUrns = state . getPreviousWorkUnitStatesByDatasetUrns ( ) ; if ( ! workUnitStatesByDatasetUrns . isEmpty ( ) && ! ( workUnitStatesByDatasetUrns . size ( ) == 1 && workUnitStatesByDatasetUrns . keySet ( ) . iterator ( ) . next ( ) . equals ( "" ) ) ) { this . isDatasetStateEnabled . set ( true ) ; } for ( WorkUnitState workUnitState : state . getPreviousWorkUnitStates ( ) ) { List < KafkaPartition > partitions = KafkaUtils . getPartitions ( workUnitState ) ; WorkUnit workUnit = workUnitState . getWorkunit ( ) ; MultiLongWatermark watermark = workUnitState . getActualHighWatermark ( MultiLongWatermark . class ) ; MultiLongWatermark previousLowWatermark = workUnit . getLowWatermark ( MultiLongWatermark . class ) ; MultiLongWatermark previousExpectedHighWatermark = workUnit . getExpectedHighWatermark ( MultiLongWatermark . class ) ; Preconditions . checkArgument ( partitions . size ( ) == watermark . size ( ) , String . format ( "Num of partitions doesn't match number of watermarks: partitions=%s, watermarks=%s" , partitions , watermark ) ) ; for ( int i = 0 ; i < partitions . size ( ) ; i ++ ) { KafkaPartition partition = partitions . get ( i ) ; if ( watermark . get ( i ) != ConfigurationKeys . DEFAULT_WATERMARK_VALUE ) { this . previousOffsets . put ( partition , watermark . get ( i ) ) ; } if ( previousLowWatermark . get ( i ) != ConfigurationKeys . DEFAULT_WATERMARK_VALUE ) { this . previousLowWatermarks . put ( partition , previousLowWatermark . 
get ( i ) ) ; } if ( previousExpectedHighWatermark . get ( i ) != ConfigurationKeys . DEFAULT_WATERMARK_VALUE ) { this . previousExpectedHighWatermarks . put ( partition , previousExpectedHighWatermark . get ( i ) ) ; } this . previousOffsetFetchEpochTimes . put ( partition , KafkaUtils . getPropAsLongFromSingleOrMultiWorkUnitState ( workUnitState , OFFSET_FETCH_EPOCH_TIME , i ) ) ; this . previousStartFetchEpochTimes . put ( partition , KafkaUtils . getPropAsLongFromSingleOrMultiWorkUnitState ( workUnitState , START_FETCH_EPOCH_TIME , i ) ) ; this . previousStopFetchEpochTimes . put ( partition , KafkaUtils . getPropAsLongFromSingleOrMultiWorkUnitState ( workUnitState , STOP_FETCH_EPOCH_TIME , i ) ) ; } } this . doneGettingAllPreviousOffsets = true ; } | this . previousOffsetFetchEpochTimes need to be initialized once |
36,218 | public static String getCoalesceColumnNames ( String columnOrColumnList ) { if ( Strings . isNullOrEmpty ( columnOrColumnList ) ) { return null ; } if ( columnOrColumnList . contains ( "," ) ) { return "COALESCE(" + columnOrColumnList + ")" ; } return columnOrColumnList ; } | Get coalesce of columns if there are multiple comma - separated columns |
36,219 | public static String printTiming ( long start , long end ) { long totalMillis = end - start ; long mins = TimeUnit . MILLISECONDS . toMinutes ( totalMillis ) ; long secs = TimeUnit . MILLISECONDS . toSeconds ( totalMillis ) - TimeUnit . MINUTES . toSeconds ( mins ) ; long millis = TimeUnit . MILLISECONDS . toMillis ( totalMillis ) - TimeUnit . MINUTES . toMillis ( mins ) - TimeUnit . SECONDS . toMillis ( secs ) ; return String . format ( "%d min, %d sec, %d millis" , mins , secs , millis ) ; } | Print time difference in minutes seconds and milliseconds |
36,220 | public static List < String > getColumnListFromQuery ( String query ) { if ( Strings . isNullOrEmpty ( query ) ) { return null ; } String queryLowerCase = query . toLowerCase ( ) ; int startIndex = queryLowerCase . indexOf ( "select " ) + 7 ; int endIndex = queryLowerCase . indexOf ( " from " ) ; if ( startIndex < 0 || endIndex < 0 ) { return null ; } String [ ] inputQueryColumns = query . substring ( startIndex , endIndex ) . toLowerCase ( ) . replaceAll ( " " , "" ) . split ( "," ) ; return Arrays . asList ( inputQueryColumns ) ; } | get column list from the user provided query to build schema with the respective columns |
36,221 | public static String escapeSpecialCharacters ( String columnName , String escapeChars , String character ) { if ( Strings . isNullOrEmpty ( columnName ) ) { return null ; } if ( StringUtils . isEmpty ( escapeChars ) ) { return columnName ; } List < String > specialChars = Arrays . asList ( escapeChars . split ( "," ) ) ; for ( String specialChar : specialChars ) { columnName = columnName . replace ( specialChar , character ) ; } return columnName ; } | escape characters in column name or table name |
36,222 | public static long getLongWithCurrentDate ( String value , String timezone ) { if ( Strings . isNullOrEmpty ( value ) ) { return 0 ; } DateTime time = getCurrentTime ( timezone ) ; DateTimeFormatter dtFormatter = DateTimeFormat . forPattern ( CURRENT_DATE_FORMAT ) . withZone ( time . getZone ( ) ) ; if ( value . toUpperCase ( ) . startsWith ( CURRENT_DAY ) ) { return Long . parseLong ( dtFormatter . print ( time . minusDays ( Integer . parseInt ( value . substring ( CURRENT_DAY . length ( ) + 1 ) ) ) ) ) ; } if ( value . toUpperCase ( ) . startsWith ( CURRENT_HOUR ) ) { return Long . parseLong ( dtFormatter . print ( time . minusHours ( Integer . parseInt ( value . substring ( CURRENT_HOUR . length ( ) + 1 ) ) ) ) ) ; } return Long . parseLong ( value ) ; } | Helper method for getting a value containing CURRENTDAY - 1 or CURRENTHOUR - 1 in the form yyyyMMddHHmmss |
36,223 | public static String dateTimeToString ( DateTime input , String format , String timezone ) { String tz = StringUtils . defaultString ( timezone , ConfigurationKeys . DEFAULT_SOURCE_TIMEZONE ) ; DateTimeZone dateTimeZone = getTimeZone ( tz ) ; DateTimeFormatter outputDtFormat = DateTimeFormat . forPattern ( format ) . withZone ( dateTimeZone ) ; return outputDtFormat . print ( input ) ; } | Convert joda time to a string in the given format |
36,224 | public static DateTime getCurrentTime ( String timezone ) { String tz = StringUtils . defaultString ( timezone , ConfigurationKeys . DEFAULT_SOURCE_TIMEZONE ) ; DateTimeZone dateTimeZone = getTimeZone ( tz ) ; DateTime currentTime = new DateTime ( dateTimeZone ) ; return currentTime ; } | Get current time - joda |
36,225 | public static DateTime toDateTime ( String input , String format , String timezone ) { String tz = StringUtils . defaultString ( timezone , ConfigurationKeys . DEFAULT_SOURCE_TIMEZONE ) ; DateTimeZone dateTimeZone = getTimeZone ( tz ) ; DateTimeFormatter inputDtFormat = DateTimeFormat . forPattern ( format ) . withZone ( dateTimeZone ) ; DateTime outputDateTime = inputDtFormat . parseDateTime ( input ) . withZone ( dateTimeZone ) ; return outputDateTime ; } | Convert timestamp in a string format to joda time |
36,226 | public static DateTime toDateTime ( long input , String format , String timezone ) { return toDateTime ( Long . toString ( input ) , format , timezone ) ; } | Convert timestamp in a long format to joda time |
36,227 | private static DateTimeZone getTimeZone ( String id ) { DateTimeZone zone ; try { zone = DateTimeZone . forID ( id ) ; } catch ( IllegalArgumentException e ) { throw new IllegalArgumentException ( "TimeZone " + id + " not recognized" ) ; } return zone ; } | Get time zone of time zone id |
36,228 | public synchronized void scheduleJob ( Properties jobProps , JobListener jobListener ) throws JobException { Map < String , Object > additionalJobDataMap = Maps . newHashMap ( ) ; additionalJobDataMap . put ( ServiceConfigKeys . GOBBLIN_SERVICE_FLOWSPEC , this . scheduledFlowSpecs . get ( jobProps . getProperty ( ConfigurationKeys . JOB_NAME_KEY ) ) ) ; try { scheduleJob ( jobProps , jobListener , additionalJobDataMap , GobblinServiceJob . class ) ; } catch ( Exception e ) { throw new JobException ( "Failed to schedule job " + jobProps . getProperty ( ConfigurationKeys . JOB_NAME_KEY ) , e ) ; } } | Synchronize the job scheduling because the same flowSpec can be scheduled by different threads . |
36,229 | public static ArrayList < String > groupToPages ( Triple < String , GoogleWebmasterFilter . FilterOperator , UrlTrieNode > group ) { ArrayList < String > ret = new ArrayList < > ( ) ; if ( group . getMiddle ( ) . equals ( GoogleWebmasterFilter . FilterOperator . EQUALS ) ) { if ( group . getRight ( ) . isExist ( ) ) { ret . add ( group . getLeft ( ) ) ; } } else if ( group . getMiddle ( ) . equals ( GoogleWebmasterFilter . FilterOperator . CONTAINS ) ) { UrlTrie trie = new UrlTrie ( group . getLeft ( ) , group . getRight ( ) ) ; Iterator < Pair < String , UrlTrieNode > > iterator = new UrlTriePostOrderIterator ( trie , 1 ) ; while ( iterator . hasNext ( ) ) { Pair < String , UrlTrieNode > next = iterator . next ( ) ; if ( next . getRight ( ) . isExist ( ) ) { ret . add ( next . getLeft ( ) ) ; } } } return ret ; } | Get the detailed pages under this group |
36,230 | public static void deletePathByRegex ( FileSystem fs , final Path path , final String regex ) throws IOException { FileStatus [ ] statusList = fs . listStatus ( path , path1 -> path1 . getName ( ) . matches ( regex ) ) ; for ( final FileStatus oldJobFile : statusList ) { HadoopUtils . deletePath ( fs , oldJobFile . getPath ( ) , true ) ; } } | Delete files according to the regular expression provided |
36,231 | public static void moveToTrash ( FileSystem fs , Path path ) throws IOException { Trash trash = new Trash ( fs , new Configuration ( ) ) ; trash . moveToTrash ( path ) ; } | Moves the object to the filesystem trash according to the file system policy . |
36,232 | public static boolean unsafeRenameIfNotExists ( FileSystem fs , Path from , Path to ) throws IOException { if ( ! fs . exists ( to ) ) { if ( ! fs . exists ( to . getParent ( ) ) ) { fs . mkdirs ( to . getParent ( ) ) ; } if ( ! renamePathHandleLocalFSRace ( fs , from , to ) ) { if ( ! fs . exists ( to ) ) { throw new IOException ( String . format ( "Failed to rename %s to %s." , from , to ) ) ; } return false ; } return true ; } return false ; } | Renames from to to if to doesn t exist in a non - thread - safe way . |
36,233 | public static void setGroup ( FileSystem fs , Path path , String group ) throws IOException { fs . setOwner ( path , fs . getFileStatus ( path ) . getOwner ( ) , group ) ; } | Set the group associated with a given path . |
36,234 | public static void setPermissions ( Path location , Optional < String > owner , Optional < String > group , FileSystem fs , FsPermission permission ) { try { if ( ! owner . isPresent ( ) ) { return ; } if ( ! group . isPresent ( ) ) { return ; } fs . setOwner ( location , owner . get ( ) , group . get ( ) ) ; fs . setPermission ( location , permission ) ; if ( ! fs . isDirectory ( location ) ) { return ; } for ( FileStatus fileStatus : fs . listStatus ( location ) ) { setPermissions ( fileStatus . getPath ( ) , owner , group , fs , permission ) ; } } catch ( IOException e ) { log . warn ( "Exception occurred while trying to change permissions : " + e . getMessage ( ) ) ; } } | Try to set owner and permissions for the path . Will not throw exception . |
36,235 | public List < String > generateQueries ( ) { ensureParentOfStagingPathExists ( ) ; List < String > hiveQueries = Lists . newArrayList ( ) ; hiveQueries . add ( "SET hive.exec.dynamic.partition.mode=nonstrict" ) ; Preconditions . checkNotNull ( this . workUnit , "Workunit must not be null" ) ; EventWorkunitUtils . setBeginDDLBuildTimeMetadata ( this . workUnit , System . currentTimeMillis ( ) ) ; HiveConverterUtils . createStagingDirectory ( fs , outputTableMetadata . getDestinationDataPath ( ) , conversionEntity , this . workUnitState ) ; String createStagingTableDDL = HiveConverterUtils . generateCreateDuplicateTableDDL ( inputDbName , inputTableName , stagingTableName , stagingDataLocation , Optional . of ( outputDatabaseName ) ) ; hiveQueries . add ( createStagingTableDDL ) ; log . debug ( "Create staging table DDL:\n" + createStagingTableDDL ) ; String insertInStagingTableDML = HiveConverterUtils . generateTableCopy ( inputTableName , stagingTableName , conversionEntity . getTable ( ) . getDbName ( ) , outputDatabaseName , Optional . of ( partitionsDMLInfo ) ) ; hiveQueries . add ( insertInStagingTableDML ) ; log . debug ( "Conversion staging DML: " + insertInStagingTableDML ) ; log . info ( "Conversion Queries {}\n" , hiveQueries ) ; EventWorkunitUtils . setEndDDLBuildTimeMetadata ( workUnit , System . currentTimeMillis ( ) ) ; return hiveQueries ; } | Returns hive queries to be run as a part of a hive task . This does not include publish queries . |
36,236 | public double setWorkUnitEstSizes ( Map < String , List < WorkUnit > > workUnitsByTopic ) { double totalEstDataSize = 0 ; for ( List < WorkUnit > workUnitsForTopic : workUnitsByTopic . values ( ) ) { for ( WorkUnit workUnit : workUnitsForTopic ) { setWorkUnitEstSize ( workUnit ) ; totalEstDataSize += getWorkUnitEstSize ( workUnit ) ; } } return totalEstDataSize ; } | Calculate the total size of the workUnits and set the estimated size for each workUnit |
36,237 | public FlowStatus getFlowStatus ( FlowStatusId flowStatusId ) throws RemoteInvocationException { LOG . debug ( "getFlowConfig with groupName " + flowStatusId . getFlowGroup ( ) + " flowName " + flowStatusId . getFlowName ( ) ) ; GetRequest < FlowStatus > getRequest = _flowstatusesRequestBuilders . get ( ) . id ( new ComplexResourceKey < > ( flowStatusId , new EmptyRecord ( ) ) ) . build ( ) ; Response < FlowStatus > response = _restClient . get ( ) . sendRequest ( getRequest ) . getResponse ( ) ; return response . getEntity ( ) ; } | Get a flow status |
36,238 | private void submitJobToHelix ( JobConfig . Builder jobConfigBuilder ) throws Exception { HelixUtils . submitJobToWorkFlow ( jobConfigBuilder , this . helixWorkFlowName , this . jobContext . getJobId ( ) , this . helixTaskDriver , this . helixManager , this . workFlowExpiryTimeSeconds ) ; } | Submit a job to run . |
36,239 | private static List < ? extends Tag < ? > > addAdditionalMetadataTags ( Properties jobProps , List < ? extends Tag < ? > > inputTags ) { List < Tag < ? > > metadataTags = Lists . newArrayList ( inputTags ) ; String jobId ; if ( jobProps . containsKey ( ConfigurationKeys . JOB_ID_KEY ) ) { jobId = jobProps . getProperty ( ConfigurationKeys . JOB_ID_KEY ) ; } else { jobId = JobLauncherUtils . newJobId ( JobState . getJobNameFromProps ( jobProps ) ) ; jobProps . put ( ConfigurationKeys . JOB_ID_KEY , jobId ) ; } String jobExecutionId = Long . toString ( Id . Job . parse ( jobId ) . getSequence ( ) ) ; if ( jobProps . containsKey ( ConfigurationKeys . FLOW_NAME_KEY ) ) { metadataTags . add ( new Tag < > ( TimingEvent . FlowEventConstants . FLOW_GROUP_FIELD , jobProps . getProperty ( ConfigurationKeys . FLOW_GROUP_KEY , "" ) ) ) ; metadataTags . add ( new Tag < > ( TimingEvent . FlowEventConstants . FLOW_NAME_FIELD , jobProps . getProperty ( ConfigurationKeys . FLOW_NAME_KEY ) ) ) ; metadataTags . add ( new Tag < > ( TimingEvent . FlowEventConstants . FLOW_EXECUTION_ID_FIELD , jobProps . getProperty ( ConfigurationKeys . FLOW_EXECUTION_ID_KEY , jobExecutionId ) ) ) ; } metadataTags . add ( new Tag < > ( TimingEvent . FlowEventConstants . JOB_GROUP_FIELD , jobProps . getProperty ( ConfigurationKeys . JOB_GROUP_KEY , "" ) ) ) ; metadataTags . add ( new Tag < > ( TimingEvent . FlowEventConstants . JOB_NAME_FIELD , jobProps . getProperty ( ConfigurationKeys . JOB_NAME_KEY , "" ) ) ) ; metadataTags . add ( new Tag < > ( TimingEvent . FlowEventConstants . JOB_EXECUTION_ID_FIELD , jobExecutionId ) ) ; LOGGER . debug ( "GobblinHelixJobLauncher.addAdditionalMetadataTags: metadataTags {}" , metadataTags ) ; return metadataTags ; } | Inject in some additional properties |
36,240 | public static Map < String , String > getClusterNameTags ( Configuration conf ) { ImmutableMap . Builder < String , String > tagMap = ImmutableMap . builder ( ) ; String clusterIdentifierTag = ClustersNames . getInstance ( ) . getClusterName ( conf ) ; if ( ! Strings . isNullOrEmpty ( clusterIdentifierTag ) ) { tagMap . put ( CLUSTER_IDENTIFIER_TAG_NAME , clusterIdentifierTag ) ; } return tagMap . build ( ) ; } | Gets all useful Hadoop cluster metrics . |
36,241 | public static FlowSpec . Builder builder ( URI catalogURI , Properties flowProps ) { String name = flowProps . getProperty ( ConfigurationKeys . FLOW_NAME_KEY ) ; String group = flowProps . getProperty ( ConfigurationKeys . FLOW_GROUP_KEY , "default" ) ; try { URI flowURI = new URI ( catalogURI . getScheme ( ) , catalogURI . getAuthority ( ) , "/" + group + "/" + name , null ) ; FlowSpec . Builder builder = new FlowSpec . Builder ( flowURI ) . withConfigAsProperties ( flowProps ) ; String descr = flowProps . getProperty ( ConfigurationKeys . FLOW_DESCRIPTION_KEY , null ) ; if ( null != descr ) { builder = builder . withDescription ( descr ) ; } return builder ; } catch ( URISyntaxException e ) { throw new RuntimeException ( "Unable to create a FlowSpec URI: " + e , e ) ; } } | Creates a builder for the FlowSpec based on values in a flow properties config . |
36,242 | protected boolean folderWithinAllowedPeriod ( Path inputFolder , DateTime folderTime ) { DateTime currentTime = new DateTime ( this . timeZone ) ; PeriodFormatter periodFormatter = getPeriodFormatter ( ) ; DateTime earliestAllowedFolderTime = getEarliestAllowedFolderTime ( currentTime , periodFormatter ) ; DateTime latestAllowedFolderTime = getLatestAllowedFolderTime ( currentTime , periodFormatter ) ; if ( folderTime . isBefore ( earliestAllowedFolderTime ) ) { log . info ( String . format ( "Folder time for %s is %s, earlier than the earliest allowed folder time, %s. Skipping" , inputFolder , folderTime , earliestAllowedFolderTime ) ) ; return false ; } else if ( folderTime . isAfter ( latestAllowedFolderTime ) ) { log . info ( String . format ( "Folder time for %s is %s, later than the latest allowed folder time, %s. Skipping" , inputFolder , folderTime , latestAllowedFolderTime ) ) ; return false ; } else { return true ; } } | Return true iff input folder time is between compaction . timebased . min . time . ago and compaction . timebased . max . time . ago . |
36,243 | public Result verify ( FileSystemDataset dataset ) { Map < String , Double > thresholdMap = RecompactionConditionBasedOnRatio . getDatasetRegexAndRecompactThreshold ( state . getProp ( MRCompactor . COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET , StringUtils . EMPTY ) ) ; CompactionPathParser . CompactionParserResult result = new CompactionPathParser ( state ) . parse ( dataset ) ; double threshold = RecompactionConditionBasedOnRatio . getRatioThresholdByDatasetName ( result . getDatasetName ( ) , thresholdMap ) ; log . debug ( "Threshold is {} for dataset {}" , threshold , result . getDatasetName ( ) ) ; InputRecordCountHelper helper = new InputRecordCountHelper ( state ) ; try { double newRecords = helper . calculateRecordCount ( Lists . newArrayList ( new Path ( dataset . datasetURN ( ) ) ) ) ; double oldRecords = helper . readRecordCount ( new Path ( result . getDstAbsoluteDir ( ) ) ) ; if ( oldRecords == 0 ) { return new Result ( true , "" ) ; } if ( ( newRecords - oldRecords ) / oldRecords > threshold ) { log . debug ( "Dataset {} records exceeded the threshold {}" , dataset . datasetURN ( ) , threshold ) ; return new Result ( true , "" ) ; } return new Result ( false , String . format ( "%s is failed for dataset %s. Prev=%f, Cur=%f, not reaching to threshold %f" , this . getName ( ) , result . getDatasetName ( ) , oldRecords , newRecords , threshold ) ) ; } catch ( IOException e ) { return new Result ( false , ExceptionUtils . getFullStackTrace ( e ) ) ; } } | There are two record count we are comparing here 1 ) The new record count in the input folder 2 ) The record count we compacted previously from last run Calculate two numbers difference and compare with a predefined threshold . |
36,244 | protected Collection < FileSystemDatasetVersion > listQualifiedRawFileSystemDatasetVersions ( Collection < FileSystemDatasetVersion > allVersions ) { return Lists . newArrayList ( Collections2 . filter ( allVersions , new Predicate < FileSystemDatasetVersion > ( ) { public boolean apply ( FileSystemDatasetVersion version ) { Iterable < Path > refinedDatasetPaths = getRefinedDatasetPaths ( version ) ; try { Optional < Long > latestRawDatasetModTime = getLatestModTime ( version . getPaths ( ) ) ; Optional < Long > latestRefinedDatasetModTime = getLatestModTime ( refinedDatasetPaths ) ; return latestRawDatasetModTime . isPresent ( ) && latestRefinedDatasetModTime . isPresent ( ) && latestRawDatasetModTime . get ( ) <= latestRefinedDatasetModTime . get ( ) ; } catch ( IOException e ) { throw new RuntimeException ( "Failed to get modification time" , e ) ; } } } ) ) ; } | A raw dataset version is qualified to be deleted iff the corresponding refined paths exist and the latest mod time of all files is in the raw dataset is earlier than the latest mod time of all files in the refined paths . |
36,245 | public JdbcEntrySchema convertSchema ( Schema inputSchema , WorkUnitState workUnit ) throws SchemaConversionException { LOG . info ( "Converting schema " + inputSchema ) ; Preconditions . checkArgument ( Type . RECORD . equals ( inputSchema . getType ( ) ) , "%s is expected for the first level element in Avro schema %s" , Type . RECORD , inputSchema ) ; Map < String , Type > avroColumnType = flatten ( inputSchema ) ; String jsonStr = Preconditions . checkNotNull ( workUnit . getProp ( CONVERTER_AVRO_JDBC_DATE_FIELDS ) ) ; java . lang . reflect . Type typeOfMap = new TypeToken < Map < String , JdbcType > > ( ) { } . getType ( ) ; Map < String , JdbcType > dateColumnMapping = new Gson ( ) . fromJson ( jsonStr , typeOfMap ) ; LOG . info ( "Date column mapping: " + dateColumnMapping ) ; List < JdbcEntryMetaDatum > jdbcEntryMetaData = Lists . newArrayList ( ) ; for ( Map . Entry < String , Type > avroEntry : avroColumnType . entrySet ( ) ) { String colName = tryConvertAvroColNameToJdbcColName ( avroEntry . getKey ( ) ) ; JdbcType JdbcType = dateColumnMapping . get ( colName ) ; if ( JdbcType == null ) { JdbcType = AVRO_TYPE_JDBC_TYPE_MAPPING . get ( avroEntry . getValue ( ) ) ; } Preconditions . checkNotNull ( JdbcType , "Failed to convert " + avroEntry + " AVRO_TYPE_JDBC_TYPE_MAPPING: " + AVRO_TYPE_JDBC_TYPE_MAPPING + " , dateColumnMapping: " + dateColumnMapping ) ; jdbcEntryMetaData . add ( new JdbcEntryMetaDatum ( colName , JdbcType ) ) ; } JdbcEntrySchema converted = new JdbcEntrySchema ( jdbcEntryMetaData ) ; LOG . info ( "Converted schema into " + converted ) ; return converted ; } | Converts Avro schema to JdbcEntrySchema . |
36,246 | private String tryConvertAvroColNameToJdbcColName ( String avroColName ) { if ( ! avroToJdbcColPairs . isPresent ( ) ) { String converted = avroColName . replaceAll ( AVRO_NESTED_COLUMN_DELIMITER_REGEX_COMPATIBLE , JDBC_FLATTENED_COLUMN_DELIMITER ) ; jdbcToAvroColPairs . put ( converted , avroColName ) ; return converted ; } String converted = avroToJdbcColPairs . get ( ) . get ( avroColName ) ; converted = converted != null ? converted : avroColName ; jdbcToAvroColPairs . put ( converted , avroColName ) ; return converted ; } | Convert Avro column name to JDBC column name . If name mapping is defined follow it . Otherwise just return avro column name while replacing nested column delimiter dot to underscore . This method also updates mapping from JDBC column name to Avro column name for reverse look up . |
36,247 | public RecordEnvelope < D > readRecordEnvelopeImpl ( ) throws DataRecordException , IOException { if ( ! _isStarted . get ( ) ) { throw new IOException ( "Streaming extractor has not been started." ) ; } while ( ( _records == null ) || ( ! _records . hasNext ( ) ) ) { synchronized ( _consumer ) { if ( _close . get ( ) ) { throw new ClosedChannelException ( ) ; } _records = _consumer . poll ( this . fetchTimeOut ) . iterator ( ) ; } } ConsumerRecord < S , D > record = _records . next ( ) ; _rowCount . getAndIncrement ( ) ; return new RecordEnvelope < D > ( record . value ( ) , new KafkaWatermark ( _partition , new LongWatermark ( record . offset ( ) ) ) ) ; } | Return the next record when available . Will never time out since this is a streaming source . |
36,248 | public void onFileDelete ( Path rawPath ) { URI jobSpecUri = this . converter . computeURI ( rawPath ) ; listeners . onDeleteJob ( jobSpecUri , null ) ; } | For an already deleted job configuration file , the only identifier is its path ; it doesn't make sense to loadJobConfig here .
36,249 | public Future < ? > scheduleJobImmediately ( Properties jobProps , JobListener jobListener , JobLauncher jobLauncher ) { Callable < Void > callable = new Callable < Void > ( ) { public Void call ( ) throws JobException { try { runJob ( jobProps , jobListener , jobLauncher ) ; } catch ( JobException je ) { LOG . error ( "Failed to run job " + jobProps . getProperty ( ConfigurationKeys . JOB_NAME_KEY ) , je ) ; throw je ; } return null ; } } ; final Future < ? > future = this . jobExecutor . submit ( callable ) ; return new Future ( ) { public boolean cancel ( boolean mayInterruptIfRunning ) { if ( ! cancelRequested ) { return false ; } boolean result = true ; try { jobLauncher . cancelJob ( jobListener ) ; } catch ( JobException e ) { LOG . error ( "Failed to cancel job " + jobProps . getProperty ( ConfigurationKeys . JOB_NAME_KEY ) , e ) ; result = false ; } if ( mayInterruptIfRunning ) { result &= future . cancel ( true ) ; } return result ; } public boolean isCancelled ( ) { return future . isCancelled ( ) ; } public boolean isDone ( ) { return future . isDone ( ) ; } public Object get ( ) throws InterruptedException , ExecutionException { return future . get ( ) ; } public Object get ( long timeout , TimeUnit unit ) throws InterruptedException , ExecutionException , TimeoutException { return future . get ( timeout , unit ) ; } } ; } | Schedule a job immediately . |
36,250 | public void unscheduleJob ( String jobName ) throws JobException { if ( this . scheduledJobs . containsKey ( jobName ) ) { try { this . scheduler . getScheduler ( ) . deleteJob ( this . scheduledJobs . remove ( jobName ) ) ; } catch ( SchedulerException se ) { LOG . error ( "Failed to unschedule and delete job " + jobName , se ) ; throw new JobException ( "Failed to unschedule and delete job " + jobName , se ) ; } } } | Unschedule and delete a job . |
36,251 | private void scheduleGeneralConfiguredJobs ( ) throws ConfigurationException , JobException , IOException { LOG . info ( "Scheduling configured jobs" ) ; for ( Properties jobProps : loadGeneralJobConfigs ( ) ) { if ( ! jobProps . containsKey ( ConfigurationKeys . JOB_SCHEDULE_KEY ) ) { jobProps . setProperty ( ConfigurationKeys . JOB_RUN_ONCE_KEY , "true" ) ; } boolean runOnce = Boolean . valueOf ( jobProps . getProperty ( ConfigurationKeys . JOB_RUN_ONCE_KEY , "false" ) ) ; scheduleJob ( jobProps , runOnce ? new RunOnceJobListener ( ) : new EmailNotificationJobListener ( ) ) ; this . listener . addToJobNameMap ( jobProps ) ; } } | Schedule Gobblin jobs in general position |
36,252 | private void startGeneralJobConfigFileMonitor ( ) throws Exception { SchedulerUtils . addPathAlterationObserver ( this . pathAlterationDetector , this . listener , jobConfigFileDirPath ) ; this . pathAlterationDetector . start ( ) ; this . closer . register ( new Closeable ( ) { public void close ( ) throws IOException { try { pathAlterationDetector . stop ( 1000 ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } } } ) ; } | Start the job configuration file monitor using generic file system API . |
36,253 | public Collection < DbAndTable > getTables ( ) throws IOException { List < DbAndTable > tables = Lists . newArrayList ( ) ; try ( AutoReturnableObject < IMetaStoreClient > client = this . clientPool . getClient ( ) ) { Iterable < String > databases = Iterables . filter ( client . get ( ) . getAllDatabases ( ) , new Predicate < String > ( ) { public boolean apply ( String db ) { return HiveDatasetFinder . this . whitelistBlacklist . acceptDb ( db ) ; } } ) ; for ( final String db : databases ) { Iterable < String > tableNames = Iterables . filter ( client . get ( ) . getAllTables ( db ) , new Predicate < String > ( ) { public boolean apply ( String table ) { return HiveDatasetFinder . this . whitelistBlacklist . acceptTable ( db , table ) ; } } ) ; for ( String tableName : tableNames ) { tables . add ( new DbAndTable ( db , tableName ) ) ; } } } catch ( Exception exc ) { throw new IOException ( exc ) ; } return tables ; } | Get all tables in db with given table pattern . |
36,254 | protected static Set < URI > getValidDatasetURIsHelper ( Collection < URI > allDatasetURIs , Set < URI > disabledURISet , Path datasetCommonRoot ) { if ( allDatasetURIs == null || allDatasetURIs . isEmpty ( ) ) { return ImmutableSet . of ( ) ; } Comparator < URI > pathLengthComparator = new Comparator < URI > ( ) { public int compare ( URI c1 , URI c2 ) { return c1 . getPath ( ) . length ( ) - c2 . getPath ( ) . length ( ) ; } } ; List < URI > sortedDatasetsList = new ArrayList < URI > ( allDatasetURIs ) ; Collections . sort ( sortedDatasetsList , pathLengthComparator ) ; TreeSet < URI > uriSet = new TreeSet < URI > ( ) ; Set < URI > noneLeaf = new HashSet < URI > ( ) ; for ( URI u : sortedDatasetsList ) { if ( PathUtils . isAncestor ( datasetCommonRoot , new Path ( u . getPath ( ) ) ) ) { URI floor = uriSet . floor ( u ) ; if ( floor != null && PathUtils . isAncestor ( new Path ( floor . getPath ( ) ) , new Path ( u . getPath ( ) ) ) ) { noneLeaf . add ( floor ) ; } uriSet . add ( u ) ; } } Set < URI > validURISet = new HashSet < URI > ( ) ; for ( URI u : uriSet ) { if ( ! noneLeaf . contains ( u ) ) { validURISet . add ( u ) ; } } for ( URI disable : disabledURISet ) { if ( validURISet . remove ( disable ) ) { log . info ( "skip disabled dataset " + disable ) ; } else { log . info ( "There's no URI " + disable + " available in validURISet." ) ; } } return validURISet ; } | Extended signature for testing convenience . |
36,255 | public long getRecordCount ( Path filepath ) { String filename = filepath . getName ( ) ; Preconditions . checkArgument ( filename . startsWith ( M_OUTPUT_FILE_PREFIX ) || filename . startsWith ( MR_OUTPUT_FILE_PREFIX ) , String . format ( "%s is not a supported filename, which should start with %s, or %s." , filename , M_OUTPUT_FILE_PREFIX , MR_OUTPUT_FILE_PREFIX ) ) ; String prefixWithCounts = filename . split ( Pattern . quote ( SEPARATOR ) ) [ 0 ] ; if ( filename . startsWith ( M_OUTPUT_FILE_PREFIX ) ) { return Long . parseLong ( prefixWithCounts . substring ( M_OUTPUT_FILE_PREFIX . length ( ) ) ) ; } return Long . parseLong ( prefixWithCounts . substring ( MR_OUTPUT_FILE_PREFIX . length ( ) ) ) ; } | Get the record count through filename . |
36,256 | private void findPath ( Map < Spec , SpecExecutor > specExecutorInstanceMap , Spec spec ) { inMemoryWeightGraphGenerator ( ) ; FlowSpec flowSpec = ( FlowSpec ) spec ; if ( optionalUserSpecifiedPath . isPresent ( ) ) { log . info ( "Starting to evaluate user's specified path ... " ) ; if ( userSpecifiedPathVerificator ( specExecutorInstanceMap , flowSpec ) ) { log . info ( "User specified path[ " + optionalUserSpecifiedPath . get ( ) + "] successfully verified." ) ; return ; } else { log . error ( "Will not execute user specified path[ " + optionalUserSpecifiedPath . get ( ) + "]" ) ; log . info ( "Start to execute FlowCompiler's algorithm for valid data movement path" ) ; } } ServiceNode sourceNode = new BaseServiceNodeImpl ( flowSpec . getConfig ( ) . getString ( ServiceConfigKeys . FLOW_SOURCE_IDENTIFIER_KEY ) ) ; ServiceNode targetNode = new BaseServiceNodeImpl ( flowSpec . getConfig ( ) . getString ( ServiceConfigKeys . FLOW_DESTINATION_IDENTIFIER_KEY ) ) ; List < FlowEdge > resultEdgePath = dijkstraBasedPathFindingHelper ( sourceNode , targetNode , this . weightedGraph ) ; for ( int i = 0 ; i < resultEdgePath . size ( ) ; i ++ ) { FlowEdge tmpFlowEdge = resultEdgePath . get ( i ) ; ServiceNode edgeSrcNode = ( ( LoadBasedFlowEdgeImpl ) tmpFlowEdge ) . getSourceNode ( ) ; ServiceNode edgeTgtNode = ( ( LoadBasedFlowEdgeImpl ) tmpFlowEdge ) . getTargetNode ( ) ; specExecutorInstanceMap . put ( convertHopToJobSpec ( edgeSrcNode , edgeTgtNode , flowSpec ) , ( ( LoadBasedFlowEdgeImpl ) ( resultEdgePath . get ( i ) ) ) . getSpecExecutorInstance ( ) ) ; } } | that a topologySpec not being reflected in findPath . |
36,257 | protected void populateEdgeTemplateMap ( ) { if ( templateCatalog . isPresent ( ) ) { for ( FlowEdge flowEdge : this . weightedGraph . edgeSet ( ) ) { edgeTemplateMap . put ( flowEdge . getEdgeIdentity ( ) , templateCatalog . get ( ) . getAllTemplates ( ) . stream ( ) . map ( jobTemplate -> jobTemplate . getUri ( ) ) . collect ( Collectors . toList ( ) ) ) ; } } } | As the base implementation here all templates will be considered for each edge . |
36,258 | private boolean userSpecifiedPathVerificator ( Map < Spec , SpecExecutor > specExecutorInstanceMap , FlowSpec flowSpec ) { Map < Spec , SpecExecutor > tmpSpecExecutorInstanceMap = new HashMap < > ( ) ; List < String > userSpecfiedPath = Arrays . asList ( optionalUserSpecifiedPath . get ( ) . split ( "," ) ) ; for ( int i = 0 ; i < userSpecfiedPath . size ( ) - 1 ; i ++ ) { ServiceNode sourceNode = new BaseServiceNodeImpl ( userSpecfiedPath . get ( i ) ) ; ServiceNode targetNode = new BaseServiceNodeImpl ( userSpecfiedPath . get ( i + 1 ) ) ; if ( weightedGraph . containsVertex ( sourceNode ) && weightedGraph . containsVertex ( targetNode ) && weightedGraph . containsEdge ( sourceNode , targetNode ) ) { tmpSpecExecutorInstanceMap . put ( convertHopToJobSpec ( sourceNode , targetNode , flowSpec ) , ( ( ( LoadBasedFlowEdgeImpl ) weightedGraph . getEdge ( sourceNode , targetNode ) ) . getSpecExecutorInstance ( ) ) ) ; } else { log . error ( "User Specified Path is invalid" ) ; return false ; } } specExecutorInstanceMap . putAll ( tmpSpecExecutorInstanceMap ) ; return true ; } | Verifies the user specified path ; returns false if any hop is invalid , else returns true .
36,259 | private void weightGraphGenerateHelper ( TopologySpec topologySpec ) { try { Map < ServiceNode , ServiceNode > capabilities = topologySpec . getSpecExecutor ( ) . getCapabilities ( ) . get ( ) ; for ( Map . Entry < ServiceNode , ServiceNode > capability : capabilities . entrySet ( ) ) { BaseServiceNodeImpl sourceNode = new BaseServiceNodeImpl ( capability . getKey ( ) . getNodeName ( ) ) ; BaseServiceNodeImpl targetNode = new BaseServiceNodeImpl ( capability . getValue ( ) . getNodeName ( ) ) ; if ( ! weightedGraph . containsVertex ( sourceNode ) ) { weightedGraph . addVertex ( sourceNode ) ; } if ( ! weightedGraph . containsVertex ( targetNode ) ) { weightedGraph . addVertex ( targetNode ) ; } FlowEdge flowEdge = new LoadBasedFlowEdgeImpl ( sourceNode , targetNode , defaultFlowEdgeProps , topologySpec . getSpecExecutor ( ) ) ; if ( ! weightedGraph . containsEdge ( flowEdge ) ) { weightedGraph . addEdge ( sourceNode , targetNode , flowEdge ) ; } } } catch ( InterruptedException | ExecutionException e ) { Instrumented . markMeter ( this . flowCompilationFailedMeter ) ; throw new RuntimeException ( "Cannot determine topology capabilities" , e ) ; } } | Helper function for transform TopologySpecMap into a weightedDirectedGraph . |
36,260 | private JobSpec convertHopToJobSpec ( ServiceNode sourceNode , ServiceNode targetNode , FlowSpec flowSpec ) { FlowEdge flowEdge = weightedGraph . getAllEdges ( sourceNode , targetNode ) . iterator ( ) . next ( ) ; URI templateURI = getTemplateURI ( sourceNode , targetNode , flowSpec , flowEdge ) ; return buildJobSpec ( sourceNode , targetNode , templateURI , flowSpec ) ; } | A naive implementation of resolving templates in each JobSpec among Multi - hop FlowSpec . Handle the case when edge is not specified . Always select the first available template . |
36,261 | public URI jobSpecURIGenerator ( Object ... objects ) { FlowSpec flowSpec = ( FlowSpec ) objects [ 0 ] ; ServiceNode sourceNode = ( ServiceNode ) objects [ 1 ] ; ServiceNode targetNode = ( ServiceNode ) objects [ 2 ] ; try { return new URI ( JobSpec . Builder . DEFAULT_JOB_CATALOG_SCHEME , flowSpec . getUri ( ) . getAuthority ( ) , StringUtils . appendIfMissing ( StringUtils . prependIfMissing ( flowSpec . getUri ( ) . getPath ( ) , "/" ) , "/" ) + sourceNode . getNodeName ( ) + "-" + targetNode . getNodeName ( ) , null ) ; } catch ( URISyntaxException e ) { log . error ( "URI construction failed when jobSpec from " + sourceNode . getNodeName ( ) + " to " + targetNode . getNodeName ( ) ) ; throw new RuntimeException ( ) ; } } | A naive implementation of generating a jobSpec s URI within a multi - hop logical Flow . |
36,262 | private PathFilter getFileFilter ( ) { final String extension = ( this . expectedExtension . startsWith ( "." ) ) ? this . expectedExtension : "." + this . expectedExtension ; return new PathFilter ( ) { public boolean accept ( Path path ) { return path . getName ( ) . endsWith ( extension ) && ! ( schemaInSourceDir && path . getName ( ) . equals ( schemaFile ) ) ; } } ; } | This method filters out , by extension , files that don't need to be processed .
36,263 | private static Deserializer getDeserializer ( HiveRegistrationUnit unit ) { Optional < String > serdeClass = unit . getSerDeType ( ) ; if ( ! serdeClass . isPresent ( ) ) { return null ; } String serde = serdeClass . get ( ) ; HiveConf hiveConf ; Deserializer deserializer ; try { hiveConf = SharedResourcesBrokerFactory . getImplicitBroker ( ) . getSharedResource ( new HiveConfFactory < > ( ) , SharedHiveConfKey . INSTANCE ) ; deserializer = ReflectionUtils . newInstance ( hiveConf . getClassByName ( serde ) . asSubclass ( Deserializer . class ) , hiveConf ) ; } catch ( ClassNotFoundException e ) { LOG . warn ( "Serde class " + serde + " not found!" , e ) ; return null ; } catch ( NotConfiguredException nce ) { LOG . error ( "Implicit broker is not configured properly" , nce ) ; return null ; } Properties props = new Properties ( ) ; props . putAll ( unit . getProps ( ) . getProperties ( ) ) ; props . putAll ( unit . getStorageProps ( ) . getProperties ( ) ) ; props . putAll ( unit . getSerDeProps ( ) . getProperties ( ) ) ; try { SerDeUtils . initializeSerDe ( deserializer , hiveConf , props , null ) ; if ( deserializer instanceof AvroSerDe ) { try { inVokeDetermineSchemaOrThrowExceptionMethod ( props , new Configuration ( ) ) ; } catch ( SchemaParseException | InvocationTargetException | NoSuchMethodException | IllegalAccessException e ) { LOG . warn ( "Failed to initialize AvroSerDe." ) ; throw new SerDeException ( e ) ; } } } catch ( SerDeException e ) { LOG . warn ( "Failed to initialize serde " + serde + " with properties " + props + " for table " + unit . getDbName ( ) + "." + unit . getTableName ( ) ) ; return null ; } return deserializer ; } | Returns a Deserializer from HiveRegistrationUnit if present and successfully initialized . Else returns null . |
36,264 | public synchronized List < JobSpec > getJobs ( ) { return Lists . transform ( Lists . newArrayList ( loader . loadPullFilesRecursively ( loader . getRootDirectory ( ) , this . sysConfig , shouldLoadGlobalConf ( ) ) ) , this . converter ) ; } | Fetch all the job files under the jobConfDirPath |
36,265 | public synchronized JobSpec getJobSpec ( URI uri ) throws JobSpecNotFoundException { try { Path targetJobSpecFullPath = getPathForURI ( this . jobConfDirPath , uri ) ; return this . converter . apply ( loader . loadPullFile ( targetJobSpecFullPath , this . sysConfig , shouldLoadGlobalConf ( ) ) ) ; } catch ( FileNotFoundException e ) { throw new JobSpecNotFoundException ( uri ) ; } catch ( IOException e ) { throw new RuntimeException ( "IO exception thrown on loading single job configuration file:" + e . getMessage ( ) ) ; } } | Fetch a single job file based on its URI ; throws JobSpecNotFoundException if the requested URI does not exist .
36,266 | private void readPrevAvgRecordMillis ( SourceState state ) { Map < String , List < Double > > prevAvgMillis = Maps . newHashMap ( ) ; for ( WorkUnitState workUnitState : state . getPreviousWorkUnitStates ( ) ) { List < KafkaPartition > partitions = KafkaUtils . getPartitions ( workUnitState ) ; for ( KafkaPartition partition : partitions ) { if ( KafkaUtils . containsPartitionAvgRecordMillis ( workUnitState , partition ) ) { double prevAvgMillisForPartition = KafkaUtils . getPartitionAvgRecordMillis ( workUnitState , partition ) ; if ( prevAvgMillis . containsKey ( partition . getTopicName ( ) ) ) { prevAvgMillis . get ( partition . getTopicName ( ) ) . add ( prevAvgMillisForPartition ) ; } else { prevAvgMillis . put ( partition . getTopicName ( ) , Lists . newArrayList ( prevAvgMillisForPartition ) ) ; } } } } this . estAvgMillis . clear ( ) ; if ( prevAvgMillis . isEmpty ( ) ) { this . avgEstAvgMillis = 1.0 ; } else { List < Double > allEstAvgMillis = Lists . newArrayList ( ) ; for ( Map . Entry < String , List < Double > > entry : prevAvgMillis . entrySet ( ) ) { String topic = entry . getKey ( ) ; List < Double > prevAvgMillisForPartitions = entry . getValue ( ) ; double estAvgMillisForTopic = geometricMean ( prevAvgMillisForPartitions ) ; this . estAvgMillis . put ( topic , estAvgMillisForTopic ) ; LOG . info ( String . format ( "Estimated avg time to pull a record for topic %s is %f milliseconds" , topic , estAvgMillisForTopic ) ) ; allEstAvgMillis . add ( estAvgMillisForTopic ) ; } this . avgEstAvgMillis = geometricMean ( allEstAvgMillis ) ; } LOG . info ( "For all topics not pulled in the previous run, estimated avg time to pull a record is " + this . avgEstAvgMillis + " milliseconds" ) ; } | Get avg time to pull a record in the previous run for all topics each of which is the geometric mean of the avg time to pull a record of all partitions of the topic . |
36,267 | private Optional < DataFileVersionStrategy > initDataFileVersionStrategy ( EndPoint endPoint , ReplicationConfiguration rc , Properties props ) { if ( ! ( endPoint instanceof HadoopFsEndPoint ) ) { log . warn ( "Data file version currently only handle the Hadoop Fs EndPoint replication" ) ; return Optional . absent ( ) ; } Configuration conf = HadoopUtils . newConfiguration ( ) ; try { HadoopFsEndPoint hEndpoint = ( HadoopFsEndPoint ) endPoint ; FileSystem fs = FileSystem . get ( hEndpoint . getFsURI ( ) , conf ) ; this . versionStrategyFromCS = rc . getVersionStrategyFromConfigStore ( ) ; String nonEmptyStrategy = versionStrategyFromCS . isPresent ( ) ? versionStrategyFromCS . get ( ) : props . getProperty ( DataFileVersionStrategy . DATA_FILE_VERSION_STRATEGY_KEY , DataFileVersionStrategy . DEFAULT_DATA_FILE_VERSION_STRATEGY ) ; Config versionStrategyConfig = ConfigFactory . parseMap ( ImmutableMap . of ( DataFileVersionStrategy . DATA_FILE_VERSION_STRATEGY_KEY , nonEmptyStrategy ) ) ; DataFileVersionStrategy strategy = DataFileVersionStrategy . instantiateDataFileVersionStrategy ( fs , versionStrategyConfig ) ; log . debug ( "{} has version strategy {}" , hEndpoint . getClusterName ( ) , strategy . getClass ( ) . getName ( ) ) ; return Optional . of ( strategy ) ; } catch ( IOException e ) { log . error ( "Version strategy cannot be created due to {}" , e ) ; return Optional . absent ( ) ; } } | Get the version strategy that can retrieve the data file version from the end point . |
36,268 | public void closeCurrentFile ( ) { try { this . closer . close ( ) ; } catch ( IOException e ) { if ( this . currentFile != null ) { LOG . error ( "Failed to close file: " + this . currentFile , e ) ; } } } | Closes the current file being read . |
36,269 | private boolean ensureHiveTableExistenceBeforeAlternation ( String tableName , String dbName , IMetaStoreClient client , Table table , HiveSpec spec ) throws TException { try ( AutoCloseableLock lock = this . locks . getTableLock ( dbName , tableName ) ) { try { try ( Timer . Context context = this . metricContext . timer ( CREATE_HIVE_TABLE ) . time ( ) ) { client . createTable ( getTableWithCreateTimeNow ( table ) ) ; log . info ( String . format ( "Created Hive table %s in db %s" , tableName , dbName ) ) ; return true ; } catch ( AlreadyExistsException e ) { } } catch ( TException e ) { log . error ( String . format ( "Unable to create Hive table %s in db %s: " + e . getMessage ( ) , tableName , dbName ) , e ) ; throw e ; } log . info ( "Table {} already exists in db {}." , tableName , dbName ) ; try { HiveTable existingTable ; try ( Timer . Context context = this . metricContext . timer ( GET_HIVE_TABLE ) . time ( ) ) { existingTable = HiveMetaStoreUtils . getHiveTable ( client . getTable ( dbName , tableName ) ) ; } if ( needToUpdateTable ( existingTable , spec . getTable ( ) ) ) { try ( Timer . Context context = this . metricContext . timer ( ALTER_TABLE ) . time ( ) ) { client . alter_table ( dbName , tableName , getNewTblByMergingExistingTblProps ( table , existingTable ) ) ; } log . info ( String . format ( "updated Hive table %s in db %s" , tableName , dbName ) ) ; } } catch ( TException e2 ) { log . error ( String . format ( "Unable to create or alter Hive table %s in db %s: " + e2 . getMessage ( ) , tableName , dbName ) , e2 ) ; throw e2 ; } return false ; } } | If table existed on Hive side will return false ; Or will create the table thru . RPC and return retVal from remote MetaStore . |
36,270 | private boolean ensureHiveDbExistence ( String hiveDbName , IMetaStoreClient client ) throws IOException { try ( AutoCloseableLock lock = this . locks . getDbLock ( hiveDbName ) ) { Database db = new Database ( ) ; db . setName ( hiveDbName ) ; try { try ( Timer . Context context = this . metricContext . timer ( GET_HIVE_DATABASE ) . time ( ) ) { client . getDatabase ( db . getName ( ) ) ; } return false ; } catch ( NoSuchObjectException nsoe ) { } catch ( TException te ) { throw new IOException ( te ) ; } Preconditions . checkState ( this . hiveDbRootDir . isPresent ( ) , "Missing required property " + HiveRegProps . HIVE_DB_ROOT_DIR ) ; db . setLocationUri ( new Path ( this . hiveDbRootDir . get ( ) , hiveDbName + HIVE_DB_EXTENSION ) . toString ( ) ) ; try { try ( Timer . Context context = this . metricContext . timer ( CREATE_HIVE_DATABASE ) . time ( ) ) { client . createDatabase ( db ) ; } log . info ( "Created database " + hiveDbName ) ; HiveMetaStoreEventHelper . submitSuccessfulDBCreation ( this . eventSubmitter , hiveDbName ) ; return true ; } catch ( AlreadyExistsException e ) { return false ; } catch ( TException e ) { HiveMetaStoreEventHelper . submitFailedDBCreation ( this . eventSubmitter , hiveDbName , e ) ; throw new IOException ( "Unable to create Hive database " + hiveDbName , e ) ; } } } | If databse existed on Hive side will return false ; Or will create the table thru . RPC and return retVal from remote MetaStore . |
36,271 | public boolean isCompleted ( ) { WorkingState state = getWorkingState ( ) ; return state == WorkingState . SUCCESSFUL || state == WorkingState . COMMITTED || state == WorkingState . FAILED ; } | Return whether the task has completed running or not . |
36,272 | public synchronized void updateByteMetrics ( long bytesWritten , int branchIndex ) { TaskMetrics metrics = TaskMetrics . get ( this ) ; String forkBranchId = TaskMetrics . taskInstanceRemoved ( this . taskId ) ; Counter taskByteCounter = metrics . getCounter ( MetricGroup . TASK . name ( ) , forkBranchId , BYTES ) ; long inc = bytesWritten - taskByteCounter . getCount ( ) ; taskByteCounter . inc ( inc ) ; metrics . getMeter ( MetricGroup . TASK . name ( ) , forkBranchId , BYTES_PER_SECOND ) . mark ( inc ) ; metrics . getCounter ( MetricGroup . JOB . name ( ) , this . jobId , BYTES ) . inc ( inc ) ; metrics . getMeter ( MetricGroup . JOB . name ( ) , this . jobId , BYTES_PER_SECOND ) . mark ( inc ) ; } | Collect byte - level metrics . |
36,273 | public void adjustJobMetricsOnRetry ( int branches ) { TaskMetrics metrics = TaskMetrics . get ( this ) ; for ( int i = 0 ; i < branches ; i ++ ) { String forkBranchId = ForkOperatorUtils . getForkId ( this . taskId , i ) ; long recordsWritten = metrics . getCounter ( MetricGroup . TASK . name ( ) , forkBranchId , RECORDS ) . getCount ( ) ; long bytesWritten = metrics . getCounter ( MetricGroup . TASK . name ( ) , forkBranchId , BYTES ) . getCount ( ) ; metrics . getCounter ( MetricGroup . JOB . name ( ) , this . jobId , RECORDS ) . dec ( recordsWritten ) ; metrics . getCounter ( MetricGroup . JOB . name ( ) , this . jobId , BYTES ) . dec ( bytesWritten ) ; } } | Adjust job - level metrics when the task gets retried . |
36,274 | public static String getFullEventName ( State state ) { return Joiner . on ( '.' ) . join ( LineageEventBuilder . LIENAGE_EVENT_NAMESPACE , state . getProp ( getKey ( NAME_KEY ) ) ) ; } | Get the full lineage event name from a state |
36,275 | static double [ ] addVector ( double [ ] x , double [ ] y , double c , double [ ] reuse ) { if ( reuse == null ) { reuse = new double [ x . length ] ; } for ( int i = 0 ; i < x . length ; i ++ ) { reuse [ i ] = x [ i ] + c * y [ i ] ; } return reuse ; } | Performs x + cy |
36,276 | public static long getProcessedCount ( List < TaskState > taskStates ) { long value = 0 ; for ( TaskState taskState : taskStates ) { value += taskState . getPropAsLong ( ConfigurationKeys . WRITER_RECORDS_WRITTEN , 0 ) ; } return value ; } | Get the number of records written by all the writers |
36,277 | public static String getTaskFailureExceptions ( List < TaskState > taskStates ) { StringBuffer sb = new StringBuffer ( ) ; appendTaskStateValues ( taskStates , sb , TASK_FAILURE_MESSAGE_KEY ) ; appendTaskStateValues ( taskStates , sb , ConfigurationKeys . TASK_FAILURE_EXCEPTION_KEY ) ; return sb . toString ( ) ; } | Get failure messages |
36,278 | public void commit ( ) throws IOException { if ( ! this . actualProcessedCopyableFile . isPresent ( ) ) { return ; } CopyableFile copyableFile = this . actualProcessedCopyableFile . get ( ) ; Path stagingFilePath = getStagingFilePath ( copyableFile ) ; Path outputFilePath = getSplitOutputFilePath ( copyableFile , this . outputDir , copyableFile . getDatasetAndPartition ( this . copyableDatasetMetadata ) , this . state ) ; log . info ( String . format ( "Committing data from %s to %s" , stagingFilePath , outputFilePath ) ) ; try { setFilePermissions ( copyableFile ) ; Iterator < OwnerAndPermission > ancestorOwnerAndPermissionIt = copyableFile . getAncestorsOwnerAndPermission ( ) == null ? Iterators . < OwnerAndPermission > emptyIterator ( ) : copyableFile . getAncestorsOwnerAndPermission ( ) . iterator ( ) ; ensureDirectoryExists ( this . fs , outputFilePath . getParent ( ) , ancestorOwnerAndPermissionIt ) ; this . fileContext . rename ( stagingFilePath , outputFilePath , renameOptions ) ; } catch ( IOException ioe ) { log . error ( "Could not commit file %s." , outputFilePath ) ; this . recoveryHelper . persistFile ( this . state , copyableFile , stagingFilePath ) ; throw ioe ; } finally { try { this . fs . delete ( this . stagingDir , true ) ; } catch ( IOException ioe ) { log . warn ( "Failed to delete staging path at " + this . stagingDir ) ; } } } | Moves the file from task staging to task output . Each task has its own staging directory but all the tasks share the same task output directory . |
36,279 | public long getGap ( MultiLongWatermark highWatermark ) { Preconditions . checkNotNull ( highWatermark ) ; Preconditions . checkArgument ( this . values . size ( ) == highWatermark . values . size ( ) ) ; long diff = 0 ; for ( int i = 0 ; i < this . values . size ( ) ; i ++ ) { Preconditions . checkArgument ( this . values . get ( i ) <= highWatermark . values . get ( i ) ) ; diff += highWatermark . values . get ( i ) - this . values . get ( i ) ; } return diff ; } | Get the number of records that need to be pulled given the high watermark . |
36,280 | public static KafkaWrapper create ( State state ) { Preconditions . checkNotNull ( state . getProp ( ConfigurationKeys . KAFKA_BROKERS ) , "Need to specify at least one Kafka broker." ) ; KafkaWrapper . Builder builder = new KafkaWrapper . Builder ( ) ; if ( state . getPropAsBoolean ( USE_NEW_KAFKA_API , DEFAULT_USE_NEW_KAFKA_API ) ) { builder = builder . withNewKafkaAPI ( ) ; } Config config = ConfigUtils . propertiesToConfig ( state . getProperties ( ) ) ; return builder . withBrokers ( state . getPropAsList ( ConfigurationKeys . KAFKA_BROKERS ) ) . withConfig ( config ) . build ( ) ; } | Create a KafkaWrapper based on the given type of Kafka API and list of Kafka brokers . |
36,281 | protected void addWriterOutputToExistingDir ( Path writerOutput , Path publisherOutput , WorkUnitState workUnitState , int branchId , ParallelRunner parallelRunner ) throws IOException { for ( FileStatus status : FileListUtils . listFilesRecursively ( this . writerFileSystemByBranches . get ( branchId ) , writerOutput ) ) { String filePathStr = status . getPath ( ) . toString ( ) ; String pathSuffix = filePathStr . substring ( filePathStr . indexOf ( writerOutput . toString ( ) ) + writerOutput . toString ( ) . length ( ) + 1 ) ; Path outputPath = new Path ( publisherOutput , pathSuffix ) ; WriterUtils . mkdirsWithRecursivePermissionWithRetry ( this . publisherFileSystemByBranches . get ( branchId ) , outputPath . getParent ( ) , this . permissions . get ( branchId ) , this . retrierConfig ) ; movePath ( parallelRunner , workUnitState , status . getPath ( ) , outputPath , branchId ) ; } } | This method needs to be overridden for TimePartitionedDataPublisher since the output folder structure contains timestamp we have to move the files recursively . |
36,282 | public static void sendEmail ( State state , String subject , String message ) throws EmailException { Email email = new SimpleEmail ( ) ; email . setHostName ( state . getProp ( ConfigurationKeys . EMAIL_HOST_KEY , ConfigurationKeys . DEFAULT_EMAIL_HOST ) ) ; if ( state . contains ( ConfigurationKeys . EMAIL_SMTP_PORT_KEY ) ) { email . setSmtpPort ( state . getPropAsInt ( ConfigurationKeys . EMAIL_SMTP_PORT_KEY ) ) ; } email . setFrom ( state . getProp ( ConfigurationKeys . EMAIL_FROM_KEY ) ) ; if ( state . contains ( ConfigurationKeys . EMAIL_USER_KEY ) && state . contains ( ConfigurationKeys . EMAIL_PASSWORD_KEY ) ) { email . setAuthentication ( state . getProp ( ConfigurationKeys . EMAIL_USER_KEY ) , PasswordManager . getInstance ( state ) . readPassword ( state . getProp ( ConfigurationKeys . EMAIL_PASSWORD_KEY ) ) ) ; } Iterable < String > tos = Splitter . on ( ',' ) . trimResults ( ) . omitEmptyStrings ( ) . split ( state . getProp ( ConfigurationKeys . EMAIL_TOS_KEY ) ) ; for ( String to : tos ) { email . addTo ( to ) ; } String hostName ; try { hostName = InetAddress . getLocalHost ( ) . getHostName ( ) ; } catch ( UnknownHostException uhe ) { LOGGER . error ( "Failed to get the host name" , uhe ) ; hostName = "unknown" ; } email . setSubject ( subject ) ; String fromHostLine = String . format ( "This email was sent from host: %s%n%n" , hostName ) ; email . setMsg ( fromHostLine + message ) ; email . send ( ) ; } | A general method for sending emails . |
36,283 | public static void sendJobCompletionEmail ( String jobId , String message , String state , State jobState ) throws EmailException { sendEmail ( jobState , String . format ( "Gobblin notification: job %s has completed with state %s" , jobId , state ) , message ) ; } | Send a job completion notification email . |
36,284 | public static void sendJobCancellationEmail ( String jobId , String message , State jobState ) throws EmailException { sendEmail ( jobState , String . format ( "Gobblin notification: job %s has been cancelled" , jobId ) , message ) ; } | Send a job cancellation notification email . |
36,285 | public static void sendJobFailureAlertEmail ( String jobName , String message , int failures , State jobState ) throws EmailException { sendEmail ( jobState , String . format ( "Gobblin alert: job %s has failed %d %s consecutively in the past" , jobName , failures , failures > 1 ? "times" : "time" ) , message ) ; } | Send a job failure alert email . |
36,286 | public boolean isRecompactionNeeded ( DatasetHelper helper ) { if ( recompactionConditions . isEmpty ( ) ) return false ; if ( operation == CombineOperation . OR ) { for ( RecompactionCondition c : recompactionConditions ) { if ( c . isRecompactionNeeded ( helper ) ) { return true ; } } return false ; } else { for ( RecompactionCondition c : recompactionConditions ) { if ( ! c . isRecompactionNeeded ( helper ) ) { return false ; } } return true ; } } | For OR combination return true iff one of conditions return true For AND combination return true iff all of conditions return true Other cases return false |
36,287 | public void open ( Path errFilePath ) throws IOException { this . fs . mkdirs ( errFilePath . getParent ( ) ) ; OutputStream os = this . closer . register ( this . fs . exists ( errFilePath ) ? this . fs . append ( errFilePath ) : this . fs . create ( errFilePath ) ) ; this . writer = this . closer . register ( new BufferedWriter ( new OutputStreamWriter ( os , ConfigurationKeys . DEFAULT_CHARSET_ENCODING ) ) ) ; } | Open a BufferedWriter |
36,288 | public Partition getGlobalPartition ( long previousWatermark ) { ExtractType extractType = ExtractType . valueOf ( state . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_EXTRACT_TYPE ) . toUpperCase ( ) ) ; WatermarkType watermarkType = WatermarkType . valueOf ( state . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_WATERMARK_TYPE , ConfigurationKeys . DEFAULT_WATERMARK_TYPE ) . toUpperCase ( ) ) ; WatermarkPredicate watermark = new WatermarkPredicate ( null , watermarkType ) ; int deltaForNextWatermark = watermark . getDeltaNumForNextWatermark ( ) ; long lowWatermark = getLowWatermark ( extractType , watermarkType , previousWatermark , deltaForNextWatermark ) ; long highWatermark = getHighWatermark ( extractType , watermarkType ) ; return new Partition ( lowWatermark , highWatermark , true , hasUserSpecifiedHighWatermark ) ; } | Get the global partition of the whole data set which has the global low and high watermarks |
36,289 | public HashMap < Long , Long > getPartitions ( long previousWatermark ) { HashMap < Long , Long > defaultPartition = Maps . newHashMap ( ) ; if ( ! isWatermarkExists ( ) ) { defaultPartition . put ( ConfigurationKeys . DEFAULT_WATERMARK_VALUE , ConfigurationKeys . DEFAULT_WATERMARK_VALUE ) ; LOG . info ( "Watermark column or type not found - Default partition with low watermark and high watermark as " + ConfigurationKeys . DEFAULT_WATERMARK_VALUE ) ; return defaultPartition ; } ExtractType extractType = ExtractType . valueOf ( this . state . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_EXTRACT_TYPE ) . toUpperCase ( ) ) ; WatermarkType watermarkType = WatermarkType . valueOf ( this . state . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_WATERMARK_TYPE , ConfigurationKeys . DEFAULT_WATERMARK_TYPE ) . toUpperCase ( ) ) ; int interval = getUpdatedInterval ( this . state . getPropAsInt ( ConfigurationKeys . SOURCE_QUERYBASED_PARTITION_INTERVAL , 0 ) , extractType , watermarkType ) ; int sourceMaxAllowedPartitions = this . state . getPropAsInt ( ConfigurationKeys . SOURCE_MAX_NUMBER_OF_PARTITIONS , 0 ) ; int maxPartitions = ( sourceMaxAllowedPartitions != 0 ? sourceMaxAllowedPartitions : ConfigurationKeys . DEFAULT_MAX_NUMBER_OF_PARTITIONS ) ; WatermarkPredicate watermark = new WatermarkPredicate ( null , watermarkType ) ; int deltaForNextWatermark = watermark . getDeltaNumForNextWatermark ( ) ; LOG . info ( "is watermark override: " + this . isWatermarkOverride ( ) ) ; LOG . info ( "is full extract: " + this . isFullDump ( ) ) ; long lowWatermark = this . getLowWatermark ( extractType , watermarkType , previousWatermark , deltaForNextWatermark ) ; long highWatermark = this . getHighWatermark ( extractType , watermarkType ) ; if ( lowWatermark == ConfigurationKeys . DEFAULT_WATERMARK_VALUE || highWatermark == ConfigurationKeys . DEFAULT_WATERMARK_VALUE ) { LOG . info ( "Low watermark or high water mark is not found. 
Hence cannot generate partitions - Default partition with low watermark: " + lowWatermark + " and high watermark: " + highWatermark ) ; defaultPartition . put ( lowWatermark , highWatermark ) ; return defaultPartition ; } LOG . info ( "Generate partitions with low watermark: " + lowWatermark + "; high watermark: " + highWatermark + "; partition interval in hours: " + interval + "; Maximum number of allowed partitions: " + maxPartitions ) ; return watermark . getPartitions ( lowWatermark , highWatermark , interval , maxPartitions ) ; } | Get partitions with low and high water marks |
36,290 | public List < Partition > getPartitionList ( long previousWatermark ) { if ( state . getPropAsBoolean ( HAS_USER_SPECIFIED_PARTITIONS ) ) { return createUserSpecifiedPartitions ( ) ; } List < Partition > partitions = new ArrayList < > ( ) ; HashMap < Long , Long > partitionMap = getPartitions ( previousWatermark ) ; if ( partitionMap . size ( ) == 0 ) { return partitions ; } if ( partitionMap . size ( ) == 1 ) { Map . Entry < Long , Long > entry = partitionMap . entrySet ( ) . iterator ( ) . next ( ) ; Long lwm = entry . getKey ( ) ; Long hwm = entry . getValue ( ) ; if ( lwm == hwm ) { if ( lwm != - 1 ) { boolean allowEqualBoundary = state . getPropAsBoolean ( ALLOW_EQUAL_WATERMARK_BOUNDARY , false ) ; LOG . info ( "Single partition with LWM = HWM and allowEqualBoundary=" + allowEqualBoundary ) ; if ( ! allowEqualBoundary ) { return partitions ; } } } } Long highestWatermark = Collections . max ( partitionMap . values ( ) ) ; for ( Map . Entry < Long , Long > entry : partitionMap . entrySet ( ) ) { Long partitionHighWatermark = entry . getValue ( ) ; if ( partitionHighWatermark . equals ( highestWatermark ) ) { partitions . add ( new Partition ( entry . getKey ( ) , partitionHighWatermark , true , hasUserSpecifiedHighWatermark ) ) ; } else { partitions . add ( new Partition ( entry . getKey ( ) , partitionHighWatermark , false ) ) ; } } return partitions ; } | Get an unordered list of partition with lowWatermark highWatermark and hasUserSpecifiedHighWatermark . |
/**
 * Generates partitions from the watermark points the user listed in the job config
 * (USER_SPECIFIED_PARTITIONS). Each adjacent pair of points becomes one partition.
 *
 * <p>With no points, a single default partition is returned. With exactly one point,
 * that point is the low watermark and (for time-based watermark types) the current
 * time becomes the high watermark. Otherwise the last partition's
 * hasUserSpecifiedHighWatermark flag depends on whether this is a full dump /
 * snapshot run that was not early-stopped.
 *
 * @return list of partitions derived from the user-specified points
 */
private List<Partition> createUserSpecifiedPartitions() {
  List<Partition> partitions = new ArrayList<>();
  List<String> watermarkPoints = state.getPropAsList(USER_SPECIFIED_PARTITIONS);
  boolean isEarlyStopped = state.getPropAsBoolean(IS_EARLY_STOPPED);
  if (watermarkPoints == null || watermarkPoints.size() == 0) {
    // No points supplied: fall back to a single default-watermark partition.
    LOG.info("There should be some partition points");
    long defaultWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
    partitions.add(new Partition(defaultWatermark, defaultWatermark, true, true));
    return partitions;
  }
  WatermarkType watermarkType = WatermarkType.valueOf(
      state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE,
          ConfigurationKeys.DEFAULT_WATERMARK_TYPE).toUpperCase());
  long lowWatermark = adjustWatermark(watermarkPoints.get(0), watermarkType);
  long highWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
  // A single point is an open-ended range: for time-based watermark types the
  // current time (in the source time zone) is used as the high watermark;
  // SIMPLE watermarks keep the default value.
  if (watermarkPoints.size() == 1) {
    if (watermarkType != WatermarkType.SIMPLE) {
      String timeZone = this.state.getProp(ConfigurationKeys.SOURCE_TIMEZONE);
      String currentTime = Utils.dateTimeToString(getCurrentTime(timeZone), WATERMARKTIMEFORMAT, timeZone);
      highWatermark = adjustWatermark(currentTime, watermarkType);
    }
    partitions.add(new Partition(lowWatermark, highWatermark, true, false));
    return partitions;
  }
  // Middle points: each pair [point(i-1), point(i)] forms one partition; the high
  // boundary of one partition becomes the low boundary of the next.
  int i;
  for (i = 1; i < watermarkPoints.size() - 1; i++) {
    highWatermark = adjustWatermark(watermarkPoints.get(i), watermarkType);
    partitions.add(new Partition(lowWatermark, highWatermark, true));
    lowWatermark = highWatermark;
  }
  // Last partition: whether its high watermark counts as user-specified depends on
  // the run type (full dump / snapshot, not early-stopped -> false).
  highWatermark = adjustWatermark(watermarkPoints.get(i), watermarkType);
  ExtractType extractType =
      ExtractType.valueOf(this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_EXTRACT_TYPE).toUpperCase());
  if ((isFullDump() || isSnapshot(extractType)) && !isEarlyStopped) {
    partitions.add(new Partition(lowWatermark, highWatermark, true, false));
  } else {
    partitions.add(new Partition(lowWatermark, highWatermark, true, true));
  }
  return partitions;
}
36,292 | private static long adjustWatermark ( String baseWatermark , WatermarkType watermarkType ) { long result = ConfigurationKeys . DEFAULT_WATERMARK_VALUE ; switch ( watermarkType ) { case SIMPLE : result = SimpleWatermark . adjustWatermark ( baseWatermark , 0 ) ; break ; case DATE : result = DateWatermark . adjustWatermark ( baseWatermark , 0 ) ; break ; case HOUR : result = HourWatermark . adjustWatermark ( baseWatermark , 0 ) ; break ; case TIMESTAMP : result = TimestampWatermark . adjustWatermark ( baseWatermark , 0 ) ; break ; } return result ; } | Adjust a watermark based on watermark type |
36,293 | private static int getUpdatedInterval ( int inputInterval , ExtractType extractType , WatermarkType watermarkType ) { LOG . debug ( "Getting updated interval" ) ; if ( ( extractType == ExtractType . SNAPSHOT && watermarkType == WatermarkType . DATE ) ) { return inputInterval * 24 ; } else if ( extractType == ExtractType . APPEND_DAILY ) { return ( inputInterval < 1 ? 1 : inputInterval ) * 24 ; } else { return inputInterval ; } } | Calculate interval in hours with the given interval |
36,294 | private long getSnapshotLowWatermark ( WatermarkType watermarkType , long previousWatermark , int deltaForNextWatermark ) { LOG . debug ( "Getting snapshot low water mark" ) ; String timeZone = this . state . getProp ( ConfigurationKeys . SOURCE_TIMEZONE , ConfigurationKeys . DEFAULT_SOURCE_TIMEZONE ) ; if ( isPreviousWatermarkExists ( previousWatermark ) ) { if ( isSimpleWatermark ( watermarkType ) ) { return previousWatermark + deltaForNextWatermark - this . state . getPropAsInt ( ConfigurationKeys . SOURCE_QUERYBASED_LOW_WATERMARK_BACKUP_SECS , 0 ) ; } DateTime wm = Utils . toDateTime ( previousWatermark , WATERMARKTIMEFORMAT , timeZone ) . plusSeconds ( ( deltaForNextWatermark - this . state . getPropAsInt ( ConfigurationKeys . SOURCE_QUERYBASED_LOW_WATERMARK_BACKUP_SECS , 0 ) ) ) ; return Long . parseLong ( Utils . dateTimeToString ( wm , WATERMARKTIMEFORMAT , timeZone ) ) ; } long startValue = Utils . getLongWithCurrentDate ( this . state . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_START_VALUE ) , timeZone ) ; LOG . info ( "Overriding low water mark with the given start value: " + startValue ) ; return startValue ; } | Get low water mark |
36,295 | protected long getHighWatermark ( ExtractType extractType , WatermarkType watermarkType ) { LOG . debug ( "Getting high watermark" ) ; String timeZone = this . state . getProp ( ConfigurationKeys . SOURCE_TIMEZONE ) ; long highWatermark = ConfigurationKeys . DEFAULT_WATERMARK_VALUE ; if ( this . isWatermarkOverride ( ) ) { highWatermark = this . state . getPropAsLong ( ConfigurationKeys . SOURCE_QUERYBASED_END_VALUE , 0 ) ; if ( highWatermark == 0 ) { highWatermark = Long . parseLong ( Utils . dateTimeToString ( getCurrentTime ( timeZone ) , WATERMARKTIMEFORMAT , timeZone ) ) ; } else { hasUserSpecifiedHighWatermark = true ; } LOG . info ( "Overriding high water mark with the given end value:" + highWatermark ) ; } else { if ( isSnapshot ( extractType ) ) { highWatermark = this . getSnapshotHighWatermark ( watermarkType ) ; } else { highWatermark = this . getAppendHighWatermark ( extractType ) ; } } return ( highWatermark == 0 ? ConfigurationKeys . DEFAULT_WATERMARK_VALUE : highWatermark ) ; } | Get high water mark |
36,296 | private long getSnapshotHighWatermark ( WatermarkType watermarkType ) { LOG . debug ( "Getting snapshot high water mark" ) ; if ( isSimpleWatermark ( watermarkType ) ) { return ConfigurationKeys . DEFAULT_WATERMARK_VALUE ; } String timeZone = this . state . getProp ( ConfigurationKeys . SOURCE_TIMEZONE ) ; return Long . parseLong ( Utils . dateTimeToString ( getCurrentTime ( timeZone ) , WATERMARKTIMEFORMAT , timeZone ) ) ; } | Get snapshot high water mark |
36,297 | private long getAppendHighWatermark ( ExtractType extractType ) { LOG . debug ( "Getting append high water mark" ) ; if ( this . isFullDump ( ) ) { LOG . info ( "Overriding high water mark with end value:" + ConfigurationKeys . SOURCE_QUERYBASED_END_VALUE ) ; long highWatermark = this . state . getPropAsLong ( ConfigurationKeys . SOURCE_QUERYBASED_END_VALUE , 0 ) ; if ( highWatermark != 0 ) { hasUserSpecifiedHighWatermark = true ; } return highWatermark ; } return this . getAppendWatermarkCutoff ( extractType ) ; } | Get append high water mark |
/**
 * Computes the cutoff (maximum allowed) high watermark for append extracts based
 * on the configured append-max-watermark limit.
 *
 * <p>When no limit type applies, the default watermark value is returned. With a
 * zero delta the cutoff is simply the current time. Otherwise the current time is
 * stepped back by the delta (converted to seconds), truncated to the limit's unit
 * (day/hour/minute/second), and padded to the last instant of that unit; the
 * result also marks the high watermark as user-specified.
 *
 * @param extractType configured extract type, used to derive the limit type
 * @return the cutoff high watermark in WATERMARKTIMEFORMAT
 */
private long getAppendWatermarkCutoff(ExtractType extractType) {
  LOG.debug("Getting append water mark cutoff");
  long highWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
  String timeZone = this.state.getProp(ConfigurationKeys.SOURCE_TIMEZONE);
  AppendMaxLimitType limitType = getAppendLimitType(extractType,
      this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_APPEND_MAX_WATERMARK_LIMIT));
  if (limitType == null) {
    LOG.debug("Limit type is not found");
    return highWatermark;
  }
  // Number of units (days/hours/minutes/seconds, per limitType) to back off from "now".
  int limitDelta = getAppendLimitDelta(
      this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_APPEND_MAX_WATERMARK_LIMIT));
  if (limitDelta == 0) {
    // No back-off configured: the cutoff is the current time.
    highWatermark = Long.parseLong(Utils.dateTimeToString(getCurrentTime(timeZone), WATERMARKTIMEFORMAT, timeZone));
  } else {
    // 'seconds' pads the truncated time to the last second of its unit
    // (86399 = end of day, 3599 = end of hour, 59 = end of minute).
    int seconds = 3599;
    String format = null;
    switch (limitType) {
      case CURRENTDATE:
        format = "yyyyMMdd";
        limitDelta = limitDelta * 24 * 60 * 60; // days -> seconds
        seconds = 86399;
        break;
      case CURRENTHOUR:
        format = "yyyyMMddHH";
        limitDelta = limitDelta * 60 * 60; // hours -> seconds
        seconds = 3599;
        break;
      case CURRENTMINUTE:
        format = "yyyyMMddHHmm";
        limitDelta = limitDelta * 60; // minutes -> seconds
        seconds = 59;
        break;
      case CURRENTSECOND:
        format = "yyyyMMddHHmmss";
        seconds = 0;
        break;
      default:
        break;
    }
    // Step back by the delta, truncate via the unit-sized format round-trip,
    // then pad to the end of that unit so the cutoff is inclusive.
    DateTime deltaTime = getCurrentTime(timeZone).minusSeconds(limitDelta);
    DateTime previousTime =
        Utils.toDateTime(Utils.dateTimeToString(deltaTime, format, timeZone), format, timeZone).plusSeconds(seconds);
    highWatermark = Long.parseLong(Utils.dateTimeToString(previousTime, WATERMARKTIMEFORMAT, timeZone));
    hasUserSpecifiedHighWatermark = true;
  }
  return highWatermark;
}
36,299 | private static AppendMaxLimitType getAppendLimitType ( ExtractType extractType , String maxLimit ) { LOG . debug ( "Getting append limit type" ) ; AppendMaxLimitType limitType ; switch ( extractType ) { case APPEND_DAILY : limitType = AppendMaxLimitType . CURRENTDATE ; break ; case APPEND_HOURLY : limitType = AppendMaxLimitType . CURRENTHOUR ; break ; default : limitType = null ; break ; } if ( ! Strings . isNullOrEmpty ( maxLimit ) ) { LOG . debug ( "Getting append limit type from the config" ) ; String [ ] limitParams = maxLimit . split ( "-" ) ; if ( limitParams . length >= 1 ) { limitType = AppendMaxLimitType . valueOf ( limitParams [ 0 ] ) ; } } return limitType ; } | Get append max limit type from the input |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.