idx (int64, 0–41.2k) | question (string, 74–4.21k chars) | target (string, 5–888 chars) |
|---|---|---|
36,400 | public Schema convertSchema ( SI inputSchema , WorkUnitState workUnit ) throws SchemaConversionException { Preconditions . checkArgument ( workUnit . contains ( ConfigurationKeys . CONVERTER_AVRO_SCHEMA_KEY ) ) ; this . schema = new Schema . Parser ( ) . parse ( workUnit . getProp ( ConfigurationKeys . CONVERTER_AVRO_SCHEMA_KEY ) ) ; return this . schema ; } | Ignore input schema and parse in Avro schema from config |
36,401 | public static boolean isAncestorOrSame ( URI descendant , URI ancestor ) { Preconditions . checkNotNull ( descendant , "input can not be null" ) ; Preconditions . checkNotNull ( ancestor , "input can not be null" ) ; if ( ! stringSame ( descendant . getScheme ( ) , ancestor . getScheme ( ) ) ) { return false ; } if ( ! stringSame ( descendant . getAuthority ( ) , ancestor . getAuthority ( ) ) ) { return false ; } return isAncestorOrSame ( getConfigKeyPath ( descendant . getPath ( ) ) , getConfigKeyPath ( ancestor . getPath ( ) ) ) ; } | Utility method to check whether one URI is the ancestor of the other |
36,402 | public synchronized MD5Digest register ( String name , Schema schema ) throws IOException , SchemaRegistryException { MD5Digest md5Digest = generateId ( schema ) ; if ( ! _schemaHashMap . containsKey ( md5Digest ) ) { _schemaHashMap . put ( md5Digest , schema ) ; _topicSchemaMap . put ( name , schema ) ; } return md5Digest ; } | Register this schema under the provided name |
36,403 | public Schema getById ( MD5Digest id ) throws IOException , SchemaRegistryException { if ( _schemaHashMap . containsKey ( id ) ) { return _schemaHashMap . get ( id ) ; } else { throw new SchemaRegistryException ( "Could not find schema with id : " + id . asString ( ) ) ; } } | Get a schema given an id |
36,404 | public long calculateRecordCount ( Collection < Path > paths ) throws IOException { long sum = 0 ; for ( Path path : paths ) { sum += inputRecordCountProvider . getRecordCount ( DatasetHelper . getApplicableFilePaths ( this . fs , path , Lists . newArrayList ( AVRO ) ) ) ; } return sum ; } | Calculate record count at given paths |
36,405 | public void saveState ( Path dir , State state ) throws IOException { saveState ( this . fs , dir , state ) ; } | Save compaction state file |
36,406 | public synchronized boolean offer ( Runnable runnable ) { int allWorkingThreads = this . executor . getActiveCount ( ) + super . size ( ) ; return allWorkingThreads < this . executor . getPoolSize ( ) && super . offer ( runnable ) ; } | Inserts the specified element at the tail of this queue if there is at least one available thread to run the current task . If all pool threads are actively busy it rejects the offer . |
36,407 | public FlowConfig getFlowConfig ( FlowId flowId ) throws FlowConfigLoggedException { log . info ( "[GAAS-REST] Get called with flowGroup {} flowName {}" , flowId . getFlowGroup ( ) , flowId . getFlowName ( ) ) ; try { URI flowUri = FlowUriUtils . createFlowSpecUri ( flowId ) ; FlowSpec spec = ( FlowSpec ) flowCatalog . getSpec ( flowUri ) ; FlowConfig flowConfig = new FlowConfig ( ) ; Properties flowProps = spec . getConfigAsProperties ( ) ; Schedule schedule = null ; if ( flowProps . containsKey ( ConfigurationKeys . JOB_SCHEDULE_KEY ) ) { schedule = new Schedule ( ) ; schedule . setCronSchedule ( flowProps . getProperty ( ConfigurationKeys . JOB_SCHEDULE_KEY ) ) ; } if ( flowProps . containsKey ( ConfigurationKeys . JOB_TEMPLATE_PATH ) ) { flowConfig . setTemplateUris ( flowProps . getProperty ( ConfigurationKeys . JOB_TEMPLATE_PATH ) ) ; } else if ( spec . getTemplateURIs ( ) . isPresent ( ) ) { flowConfig . setTemplateUris ( StringUtils . join ( spec . getTemplateURIs ( ) . get ( ) , "," ) ) ; } else { flowConfig . setTemplateUris ( "NA" ) ; } if ( schedule != null ) { if ( flowProps . containsKey ( ConfigurationKeys . FLOW_RUN_IMMEDIATELY ) ) { schedule . setRunImmediately ( Boolean . valueOf ( flowProps . getProperty ( ConfigurationKeys . FLOW_RUN_IMMEDIATELY ) ) ) ; } flowConfig . setSchedule ( schedule ) ; } flowProps . remove ( ConfigurationKeys . JOB_SCHEDULE_KEY ) ; flowProps . remove ( ConfigurationKeys . JOB_TEMPLATE_PATH ) ; StringMap flowPropsAsStringMap = new StringMap ( ) ; flowPropsAsStringMap . putAll ( Maps . fromProperties ( flowProps ) ) ; return flowConfig . setId ( new FlowId ( ) . setFlowGroup ( flowId . getFlowGroup ( ) ) . setFlowName ( flowId . getFlowName ( ) ) ) . setProperties ( flowPropsAsStringMap ) ; } catch ( URISyntaxException e ) { throw new FlowConfigLoggedException ( HttpStatus . S_400_BAD_REQUEST , "bad URI " + flowId . getFlowName ( ) , e ) ; } catch ( SpecNotFoundException e ) { throw new FlowConfigLoggedException ( HttpStatus . S_404_NOT_FOUND , "Flow requested does not exist: " + flowId . getFlowName ( ) , null ) ; } } | Get flow config |
36,408 | public CreateResponse createFlowConfig ( FlowConfig flowConfig , boolean triggerListener ) throws FlowConfigLoggedException { log . info ( "[GAAS-REST] Create called with flowGroup " + flowConfig . getId ( ) . getFlowGroup ( ) + " flowName " + flowConfig . getId ( ) . getFlowName ( ) ) ; if ( flowConfig . hasExplain ( ) ) { return new CreateResponse ( new RestLiServiceException ( HttpStatus . S_400_BAD_REQUEST , "FlowConfig with explain not supported." ) ) ; } FlowSpec flowSpec = createFlowSpecForConfig ( flowConfig ) ; if ( ! flowConfig . hasSchedule ( ) && this . flowCatalog . exists ( flowSpec . getUri ( ) ) ) { return new CreateResponse ( new ComplexResourceKey < > ( flowConfig . getId ( ) , new EmptyRecord ( ) ) , HttpStatus . S_409_CONFLICT ) ; } else { this . flowCatalog . put ( flowSpec , triggerListener ) ; return new CreateResponse ( new ComplexResourceKey < > ( flowConfig . getId ( ) , new EmptyRecord ( ) ) , HttpStatus . S_201_CREATED ) ; } } | Add flowConfig locally and trigger all listeners iff triggerListener is true |
36,409 | public UpdateResponse updateFlowConfig ( FlowId flowId , FlowConfig flowConfig , boolean triggerListener ) { log . info ( "[GAAS-REST] Update called with flowGroup {} flowName {}" , flowId . getFlowGroup ( ) , flowId . getFlowName ( ) ) ; if ( ! flowId . getFlowGroup ( ) . equals ( flowConfig . getId ( ) . getFlowGroup ( ) ) || ! flowId . getFlowName ( ) . equals ( flowConfig . getId ( ) . getFlowName ( ) ) ) { throw new FlowConfigLoggedException ( HttpStatus . S_400_BAD_REQUEST , "flowName and flowGroup cannot be changed in update" , null ) ; } if ( isUnscheduleRequest ( flowConfig ) ) { FlowConfig originalFlowConfig = getFlowConfig ( flowId ) ; originalFlowConfig . setSchedule ( NEVER_RUN_CRON_SCHEDULE ) ; flowConfig = originalFlowConfig ; } this . flowCatalog . put ( createFlowSpecForConfig ( flowConfig ) , triggerListener ) ; return new UpdateResponse ( HttpStatus . S_200_OK ) ; } | Update flowConfig locally and trigger all listeners iff triggerListener is true |
36,410 | public UpdateResponse updateFlowConfig ( FlowId flowId , FlowConfig flowConfig ) throws FlowConfigLoggedException { return updateFlowConfig ( flowId , flowConfig , true ) ; } | Update flowConfig locally and trigger all listeners |
36,411 | public UpdateResponse deleteFlowConfig ( FlowId flowId , Properties header , boolean triggerListener ) throws FlowConfigLoggedException { log . info ( "[GAAS-REST] Delete called with flowGroup {} flowName {}" , flowId . getFlowGroup ( ) , flowId . getFlowName ( ) ) ; URI flowUri = null ; try { flowUri = FlowUriUtils . createFlowSpecUri ( flowId ) ; this . flowCatalog . remove ( flowUri , header , triggerListener ) ; return new UpdateResponse ( HttpStatus . S_200_OK ) ; } catch ( URISyntaxException e ) { throw new FlowConfigLoggedException ( HttpStatus . S_400_BAD_REQUEST , "bad URI " + flowUri , e ) ; } } | Delete flowConfig locally and trigger all listeners iff triggerListener is true |
36,412 | public UpdateResponse deleteFlowConfig ( FlowId flowId , Properties header ) throws FlowConfigLoggedException { return deleteFlowConfig ( flowId , header , true ) ; } | Delete flowConfig locally and trigger all listeners |
36,413 | protected void createWorkUnits ( SourceState state ) throws IOException { createWorkunitsFromPreviousState ( state ) ; if ( this . datasets . isEmpty ( ) ) { return ; } for ( HivePartitionDataset dataset : this . datasets ) { Optional < String > owner = dataset . getOwner ( ) ; if ( workUnitsExceeded ( ) ) { log . info ( "Workunits exceeded" ) ; setJobWatermark ( state , dataset . datasetURN ( ) ) ; return ; } if ( ! this . policy . shouldPurge ( dataset ) ) { continue ; } WorkUnit workUnit = createNewWorkUnit ( dataset ) ; log . info ( "Created new work unit with partition " + workUnit . getProp ( ComplianceConfigurationKeys . PARTITION_NAME ) ) ; this . workUnitMap . put ( workUnit . getProp ( ComplianceConfigurationKeys . PARTITION_NAME ) , workUnit ) ; this . workUnitsCreatedCount ++ ; } if ( ! state . contains ( ComplianceConfigurationKeys . HIVE_PURGER_WATERMARK ) ) { this . setJobWatermark ( state , ComplianceConfigurationKeys . NO_PREVIOUS_WATERMARK ) ; } } | This method creates the list of all work units needed for the current execution . Fresh work units are created for each partition starting from watermark and failed work units from the previous run will be added to the list . |
36,414 | protected List < HivePartitionDataset > sortHiveDatasets ( List < HivePartitionDataset > datasets ) { Collections . sort ( datasets , new Comparator < HivePartitionDataset > ( ) { public int compare ( HivePartitionDataset o1 , HivePartitionDataset o2 ) { return o1 . datasetURN ( ) . compareTo ( o2 . datasetURN ( ) ) ; } } ) ; return datasets ; } | Sort all HiveDatasets on the basis of complete name i.e. dbName . tableName |
36,415 | protected void createWorkunitsFromPreviousState ( SourceState state ) { if ( this . lowWatermark . equalsIgnoreCase ( ComplianceConfigurationKeys . NO_PREVIOUS_WATERMARK ) ) { return ; } if ( Iterables . isEmpty ( state . getPreviousWorkUnitStates ( ) ) ) { return ; } for ( WorkUnitState workUnitState : state . getPreviousWorkUnitStates ( ) ) { if ( workUnitState . getWorkingState ( ) == WorkUnitState . WorkingState . COMMITTED ) { continue ; } WorkUnit workUnit = workUnitState . getWorkunit ( ) ; Preconditions . checkArgument ( workUnit . contains ( ComplianceConfigurationKeys . PARTITION_NAME ) , "Older WorkUnit doesn't contain property partition name." ) ; int executionAttempts = workUnit . getPropAsInt ( ComplianceConfigurationKeys . EXECUTION_ATTEMPTS , ComplianceConfigurationKeys . DEFAULT_EXECUTION_ATTEMPTS ) ; if ( executionAttempts < this . maxWorkUnitExecutionAttempts ) { Optional < WorkUnit > workUnitOptional = createNewWorkUnit ( workUnit . getProp ( ComplianceConfigurationKeys . PARTITION_NAME ) , ++ executionAttempts ) ; if ( ! workUnitOptional . isPresent ( ) ) { continue ; } workUnit = workUnitOptional . get ( ) ; log . info ( "Revived old Work Unit for partiton " + workUnit . getProp ( ComplianceConfigurationKeys . PARTITION_NAME ) + " having execution attempt " + workUnit . getProp ( ComplianceConfigurationKeys . EXECUTION_ATTEMPTS ) ) ; workUnitMap . put ( workUnit . getProp ( ComplianceConfigurationKeys . PARTITION_NAME ) , workUnit ) ; } } } | Add failed work units in a workUnitMap with partition name as Key . New work units are created using required configuration from the old work unit . |
36,416 | protected void setLowWatermark ( SourceState state ) { this . lowWatermark = getWatermarkFromPreviousWorkUnits ( state , ComplianceConfigurationKeys . HIVE_PURGER_WATERMARK ) ; log . info ( "Setting low watermark for the job: " + this . lowWatermark ) ; } | Sets the local watermark which is a class variable . Local watermark is a complete partition name which acts as the starting point for the creation of fresh work units . |
36,417 | protected void submitCycleCompletionEvent ( ) { if ( ! this . lowWatermark . equalsIgnoreCase ( ComplianceConfigurationKeys . NO_PREVIOUS_WATERMARK ) ) { return ; } if ( this . executionCount > 1 ) { Map < String , String > metadata = new HashMap < > ( ) ; metadata . put ( ComplianceConfigurationKeys . TOTAL_EXECUTIONS , Integer . toString ( ( this . executionCount - 1 ) ) ) ; this . eventSubmitter . submit ( ComplianceEvents . Purger . CYCLE_COMPLETED , metadata ) ; this . executionCount = ComplianceConfigurationKeys . DEFAULT_EXECUTION_COUNT ; } } | If low watermark is at the reset point then either the cycle is completed or it is starting for the first time . If executionCount is greater than 1 then the cycle is completed . If the cycle is completed executionCount will be reset and a cycle completion event will be submitted . |
36,418 | protected void setJobWatermark ( SourceState state , String watermark ) { state . setProp ( ComplianceConfigurationKeys . HIVE_PURGER_WATERMARK , watermark ) ; log . info ( "Setting job watermark for the job: " + watermark ) ; } | Sets Job Watermark in the SourceState which will be copied to all WorkUnitStates . Job Watermark is a complete partition name . During next run of this job fresh work units will be created starting from this partition . |
36,419 | protected static String getWatermarkFromPreviousWorkUnits ( SourceState state , String watermark ) { if ( state . getPreviousWorkUnitStates ( ) . isEmpty ( ) ) { return ComplianceConfigurationKeys . NO_PREVIOUS_WATERMARK ; } return state . getPreviousWorkUnitStates ( ) . get ( 0 ) . getProp ( watermark , ComplianceConfigurationKeys . NO_PREVIOUS_WATERMARK ) ; } | Fetches the value of a watermark given its key from the previous run . |
36,420 | public static String resolveUriPrefix ( Config config , SharedRestClientKey key ) throws URISyntaxException , NotConfiguredException { List < String > connectionPrefixes = parseConnectionPrefixes ( config , key ) ; Preconditions . checkArgument ( connectionPrefixes . size ( ) > 0 , "No uris found for service " + key . serviceName ) ; return connectionPrefixes . get ( new Random ( ) . nextInt ( connectionPrefixes . size ( ) ) ) ; } | Get a uri prefix from the input configuration . |
36,421 | public static List < String > parseConnectionPrefixes ( Config config , SharedRestClientKey key ) throws URISyntaxException , NotConfiguredException { if ( key instanceof UriRestClientKey ) { return Lists . newArrayList ( ( ( UriRestClientKey ) key ) . getUri ( ) ) ; } if ( ! config . hasPath ( SERVER_URI_KEY ) ) { throw new NotConfiguredException ( "Missing key " + SERVER_URI_KEY ) ; } List < String > uris = Lists . newArrayList ( ) ; for ( String uri : Splitter . on ( "," ) . omitEmptyStrings ( ) . trimResults ( ) . splitToList ( config . getString ( SERVER_URI_KEY ) ) ) { uris . add ( resolveUriPrefix ( new URI ( uri ) ) ) ; } return uris ; } | Parse the list of available input prefixes from the input configuration . |
36,422 | public static String resolveUriPrefix ( URI serverURI ) throws URISyntaxException { if ( RESTLI_SCHEMES . contains ( serverURI . getScheme ( ) ) ) { return new URI ( serverURI . getScheme ( ) , serverURI . getAuthority ( ) , null , null , null ) . toString ( ) + "/" ; } throw new RuntimeException ( "Unrecognized scheme for URI " + serverURI ) ; } | Convert the input URI into a correctly formatted uri prefix . In the future may also resolve d2 uris . |
36,423 | public void addAll ( GlobalMetadata other ) { throwIfImmutable ( ) ; datasetLevel . putAll ( other . datasetLevel ) ; for ( Map . Entry < String , Map < String , Object > > e : other . fileLevel . entrySet ( ) ) { Map < String , Object > val = new ConcurrentHashMap < > ( ) ; val . putAll ( e . getValue ( ) ) ; fileLevel . put ( e . getKey ( ) , val ) ; } cachedId = null ; } | Merge another GlobalMetadata object into this one . All keys from other will be placed into this object replacing any already existing keys . |
36,424 | public byte [ ] toJsonUtf8 ( ) { try { ByteArrayOutputStream bOs = new ByteArrayOutputStream ( 512 ) ; try ( JsonGenerator generator = jsonFactory . createJsonGenerator ( bOs , JsonEncoding . UTF8 ) . setCodec ( objectMapper ) ) { toJsonUtf8 ( generator ) ; } return bOs . toByteArray ( ) ; } catch ( IOException e ) { throw new RuntimeException ( "Unexpected IOException serializing to ByteArray" , e ) ; } } | Serialize as a UTF8 encoded JSON string . |
36,425 | protected void bodyToJsonUtf8 ( JsonGenerator generator ) throws IOException { generator . writeObjectField ( "dataset" , datasetLevel ) ; generator . writeObjectFieldStart ( "file" ) ; for ( Map . Entry < String , Map < String , Object > > entry : fileLevel . entrySet ( ) ) { generator . writeObjectField ( entry . getKey ( ) , entry . getValue ( ) ) ; } generator . writeEndObject ( ) ; } | Write this object out to an existing JSON stream |
36,426 | public void setDatasetMetadata ( String key , Object val ) { throwIfImmutable ( ) ; datasetLevel . put ( key , val ) ; cachedId = null ; } | Set an arbitrary dataset - level metadata key |
36,427 | public synchronized void addTransferEncoding ( String encoding ) { throwIfImmutable ( ) ; List < String > encodings = getTransferEncoding ( ) ; if ( encodings == null ) { encodings = new ArrayList < > ( ) ; } encodings . add ( encoding ) ; setDatasetMetadata ( TRANSFER_ENCODING_KEY , encodings ) ; } | Convenience method to add a new transfer - encoding to a dataset |
36,428 | public Object getFileMetadata ( String file , String key ) { Map < String , Object > fileKeys = fileLevel . get ( file ) ; if ( fileKeys == null ) { return null ; } return fileKeys . get ( key ) ; } | Get an arbitrary file - level metadata key |
36,429 | public void setFileMetadata ( String file , String key , Object val ) { throwIfImmutable ( ) ; Map < String , Object > fileKeys = fileLevel . get ( file ) ; if ( fileKeys == null ) { fileKeys = new ConcurrentHashMap < > ( ) ; fileLevel . put ( file , fileKeys ) ; } fileKeys . put ( key , val ) ; cachedId = null ; } | Set an arbitrary file - level metadata key |
36,430 | public void awaitNextRetry ( ) throws InterruptedException , NoMoreRetriesException { this . retryNumber ++ ; if ( this . retryNumber > this . maxRetries ) { throw new NoMoreRetriesException ( "Reached maximum number of retries: " + this . maxRetries ) ; } else if ( this . totalWait > this . maxWait ) { throw new NoMoreRetriesException ( "Reached maximum time to wait: " + this . maxWait ) ; } Thread . sleep ( this . nextDelay ) ; this . totalWait += this . nextDelay ; this . nextDelay = Math . min ( ( long ) ( this . alpha * this . nextDelay ) + 1 , this . maxDelay ) ; } | Block until next retry can be executed . |
36,431 | @ Builder ( builderMethodName = "awaitCondition" , buildMethodName = "await" ) private static boolean evaluateConditionUntilTrue ( Callable < Boolean > callable , Double alpha , Integer maxRetries , Long maxWait , Long maxDelay , Long initialDelay ) throws ExecutionException , InterruptedException { ExponentialBackoff exponentialBackoff = new ExponentialBackoff ( alpha , maxRetries , maxWait , maxDelay , initialDelay ) ; while ( true ) { try { if ( callable . call ( ) ) { return true ; } } catch ( Throwable t ) { throw new ExecutionException ( t ) ; } if ( ! exponentialBackoff . awaitNextRetryIfAvailable ( ) ) { return false ; } } } | Evaluate a condition until true with exponential backoff . |
36,432 | public static boolean isKeySchemaValid ( Schema keySchema , Schema topicSchema ) { return SchemaCompatibility . checkReaderWriterCompatibility ( keySchema , topicSchema ) . getType ( ) . equals ( SchemaCompatibilityType . COMPATIBLE ) ; } | keySchema is valid if a record with newestSchema can be converted to a record with keySchema . |
36,433 | public void markRecord ( BufferedRecord < D > record , int bytesWritten ) { synchronized ( this ) { thunks . add ( new Thunk < > ( record , bytesWritten ) ) ; byteSize += bytesWritten ; } } | Mark the record associated with this request |
36,434 | private void interruptGracefully ( ) throws IOException { LOG . info ( "Attempting graceful interruption of job " + this . jobContext . getJobId ( ) ) ; this . fs . createNewFile ( this . interruptPath ) ; long waitTimeStart = System . currentTimeMillis ( ) ; while ( ! this . job . isComplete ( ) && System . currentTimeMillis ( ) < waitTimeStart + 30 * 1000 ) { try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException ie ) { break ; } } if ( ! this . job . isComplete ( ) ) { LOG . info ( "Interrupted job did not shut itself down after timeout. Killing job." ) ; this . job . killJob ( ) ; } } | Attempt a graceful interruption of the running job |
36,435 | private void addDependencies ( Configuration conf ) throws IOException { TimingEvent distributedCacheSetupTimer = this . eventSubmitter . getTimingEvent ( TimingEvent . RunJobTimings . MR_DISTRIBUTED_CACHE_SETUP ) ; Path jarFileDir = this . jarsDir ; if ( this . jobProps . containsKey ( ConfigurationKeys . FRAMEWORK_JAR_FILES_KEY ) ) { addJars ( jarFileDir , this . jobProps . getProperty ( ConfigurationKeys . FRAMEWORK_JAR_FILES_KEY ) , conf ) ; } if ( this . jobProps . containsKey ( ConfigurationKeys . JOB_JAR_FILES_KEY ) ) { addJars ( jarFileDir , this . jobProps . getProperty ( ConfigurationKeys . JOB_JAR_FILES_KEY ) , conf ) ; } if ( this . jobProps . containsKey ( ConfigurationKeys . JOB_LOCAL_FILES_KEY ) ) { addLocalFiles ( new Path ( this . mrJobDir , FILES_DIR_NAME ) , this . jobProps . getProperty ( ConfigurationKeys . JOB_LOCAL_FILES_KEY ) , conf ) ; } if ( this . jobProps . containsKey ( ConfigurationKeys . JOB_HDFS_FILES_KEY ) ) { addHDFSFiles ( this . jobProps . getProperty ( ConfigurationKeys . JOB_HDFS_FILES_KEY ) , conf ) ; } if ( this . jobProps . containsKey ( ConfigurationKeys . JOB_JAR_HDFS_FILES_KEY ) ) { addHdfsJars ( this . jobProps . getProperty ( ConfigurationKeys . JOB_JAR_HDFS_FILES_KEY ) , conf ) ; } distributedCacheSetupTimer . stop ( ) ; } | Add dependent jars and files . |
36,436 | @ SuppressWarnings ( "deprecation" ) private void addJars ( Path jarFileDir , String jarFileList , Configuration conf ) throws IOException { LocalFileSystem lfs = FileSystem . getLocal ( conf ) ; for ( String jarFile : SPLITTER . split ( jarFileList ) ) { Path srcJarFile = new Path ( jarFile ) ; FileStatus [ ] fileStatusList = lfs . globStatus ( srcJarFile ) ; for ( FileStatus status : fileStatusList ) { int retryCount = 0 ; boolean shouldFileBeAddedIntoDC = true ; Path destJarFile = calculateDestJarFile ( status , jarFileDir ) ; while ( ! this . fs . exists ( destJarFile ) || fs . getFileStatus ( destJarFile ) . getLen ( ) != status . getLen ( ) ) { try { if ( this . fs . exists ( destJarFile ) && fs . getFileStatus ( destJarFile ) . getLen ( ) != status . getLen ( ) ) { Thread . sleep ( WAITING_TIME_ON_IMCOMPLETE_UPLOAD ) ; throw new IOException ( "Waiting for file to complete on uploading ... " ) ; } this . fs . copyFromLocalFile ( false , false , status . getPath ( ) , destJarFile ) ; } catch ( IOException | InterruptedException e ) { LOG . warn ( "Path:" + destJarFile + " is not copied successfully. Will require retry." ) ; retryCount += 1 ; if ( retryCount >= this . jarFileMaximumRetry ) { LOG . error ( "The jar file:" + destJarFile + "failed in being copied into hdfs" , e ) ; shouldFileBeAddedIntoDC = false ; break ; } } } if ( shouldFileBeAddedIntoDC ) { LOG . info ( String . format ( "Adding %s to classpath" , destJarFile ) ) ; DistributedCache . addFileToClassPath ( destJarFile , conf , this . fs ) ; } } } } | Add framework or job - specific jars to the classpath through DistributedCache so the mappers can use them . |
36,437 | @ SuppressWarnings ( "deprecation" ) private void addLocalFiles ( Path jobFileDir , String jobFileList , Configuration conf ) throws IOException { DistributedCache . createSymlink ( conf ) ; for ( String jobFile : SPLITTER . split ( jobFileList ) ) { Path srcJobFile = new Path ( jobFile ) ; Path destJobFile = new Path ( this . fs . makeQualified ( jobFileDir ) , srcJobFile . getName ( ) ) ; this . fs . copyFromLocalFile ( srcJobFile , destJobFile ) ; URI destFileUri = URI . create ( destJobFile . toUri ( ) . getPath ( ) + "#" + destJobFile . getName ( ) ) ; LOG . info ( String . format ( "Adding %s to DistributedCache" , destFileUri ) ) ; DistributedCache . addCacheFile ( destFileUri , conf ) ; } } | Add local non - jar files the job depends on to DistributedCache . |
36,438 | @ SuppressWarnings ( "deprecation" ) private void addHDFSFiles ( String jobFileList , Configuration conf ) { DistributedCache . createSymlink ( conf ) ; jobFileList = PasswordManager . getInstance ( this . jobProps ) . readPassword ( jobFileList ) ; for ( String jobFile : SPLITTER . split ( jobFileList ) ) { Path srcJobFile = new Path ( jobFile ) ; URI srcFileUri = URI . create ( srcJobFile . toUri ( ) . getPath ( ) + "#" + srcJobFile . getName ( ) ) ; LOG . info ( String . format ( "Adding %s to DistributedCache" , srcFileUri ) ) ; DistributedCache . addCacheFile ( srcFileUri , conf ) ; } } | Add non - jar files already on HDFS that the job depends on to DistributedCache . |
36,439 | private void prepareJobInput ( List < WorkUnit > workUnits ) throws IOException { Closer closer = Closer . create ( ) ; try { ParallelRunner parallelRunner = closer . register ( new ParallelRunner ( this . parallelRunnerThreads , this . fs ) ) ; int multiTaskIdSequence = 0 ; for ( WorkUnit workUnit : workUnits ) { String workUnitFileName ; if ( workUnit instanceof MultiWorkUnit ) { workUnitFileName = JobLauncherUtils . newMultiTaskId ( this . jobContext . getJobId ( ) , multiTaskIdSequence ++ ) + MULTI_WORK_UNIT_FILE_EXTENSION ; } else { workUnitFileName = workUnit . getProp ( ConfigurationKeys . TASK_ID_KEY ) + WORK_UNIT_FILE_EXTENSION ; } Path workUnitFile = new Path ( this . jobInputPath , workUnitFileName ) ; LOG . debug ( "Writing work unit file " + workUnitFileName ) ; parallelRunner . serializeToFile ( workUnit , workUnitFile ) ; } } catch ( Throwable t ) { throw closer . rethrow ( t ) ; } finally { closer . close ( ) ; } } | Prepare the job input . |
36,440 | private void populateComparableKeyRecord ( GenericRecord source , GenericRecord target ) { for ( Field field : target . getSchema ( ) . getFields ( ) ) { if ( field . schema ( ) . getType ( ) == Schema . Type . UNION ) { Object fieldData = source . get ( field . name ( ) ) ; Schema actualFieldSchema = GenericData . get ( ) . induce ( fieldData ) ; if ( actualFieldSchema . getType ( ) == Schema . Type . RECORD ) { for ( Schema candidateType : field . schema ( ) . getTypes ( ) ) { if ( candidateType . getFullName ( ) . equals ( actualFieldSchema . getFullName ( ) ) ) { GenericRecord record = new GenericData . Record ( candidateType ) ; target . put ( field . name ( ) , record ) ; populateComparableKeyRecord ( ( GenericRecord ) fieldData , record ) ; break ; } } } else { target . put ( field . name ( ) , source . get ( field . name ( ) ) ) ; } } else if ( field . schema ( ) . getType ( ) == Schema . Type . RECORD ) { GenericRecord record = ( GenericRecord ) target . get ( field . name ( ) ) ; if ( record == null ) { record = new GenericData . Record ( field . schema ( ) ) ; target . put ( field . name ( ) , record ) ; } populateComparableKeyRecord ( ( GenericRecord ) source . get ( field . name ( ) ) , record ) ; } else { target . put ( field . name ( ) , source . get ( field . name ( ) ) ) ; } } } | Populate the target record based on the field values in the source record . Target record's schema should be a subset of source record's schema . Target record's schema cannot have MAP , ARRAY or ENUM fields , or UNION fields that contain these fields . |
36,441 | private JsonElement convertValue ( String value , JsonObject dataType ) { if ( dataType == null || ! dataType . has ( TYPE ) ) { return new JsonPrimitive ( value ) ; } String type = dataType . get ( TYPE ) . getAsString ( ) . toUpperCase ( ) ; ValueType valueType = ValueType . valueOf ( type ) ; return valueType . convert ( value ) ; } | Convert string value to the expected type |
36,442 | private void addPropsForPublisher ( QueryBasedHiveConversionEntity hiveConversionEntity ) { if ( ! hiveConversionEntity . getPartition ( ) . isPresent ( ) ) { return ; } ConvertibleHiveDataset convertibleHiveDataset = hiveConversionEntity . getConvertibleHiveDataset ( ) ; for ( String format : convertibleHiveDataset . getDestFormats ( ) ) { Optional < ConvertibleHiveDataset . ConversionConfig > conversionConfigForFormat = convertibleHiveDataset . getConversionConfigForFormat ( format ) ; if ( ! conversionConfigForFormat . isPresent ( ) ) { continue ; } SchemaAwareHivePartition sourcePartition = hiveConversionEntity . getHivePartition ( ) . get ( ) ; String completeSourcePartitionName = StringUtils . join ( Arrays . asList ( sourcePartition . getTable ( ) . getDbName ( ) , sourcePartition . getTable ( ) . getTableName ( ) , sourcePartition . getName ( ) ) , AT_CHAR ) ; ConvertibleHiveDataset . ConversionConfig config = conversionConfigForFormat . get ( ) ; String completeDestPartitionName = StringUtils . join ( Arrays . asList ( config . getDestinationDbName ( ) , config . getDestinationTableName ( ) , sourcePartition . getName ( ) ) , AT_CHAR ) ; workUnit . setProp ( HiveConvertPublisher . COMPLETE_SOURCE_PARTITION_NAME , completeSourcePartitionName ) ; workUnit . setProp ( HiveConvertPublisher . COMPLETE_DEST_PARTITION_NAME , completeDestPartitionName ) ; } } | Method to add properties needed by publisher to preserve partition params |
36,443 | public void publishData ( Collection < ? extends WorkUnitState > states ) throws IOException { for ( WorkUnitState state : states ) { if ( state . getWorkingState ( ) == WorkUnitState . WorkingState . SUCCESSFUL ) { state . setWorkingState ( WorkUnitState . WorkingState . COMMITTED ) ; log . info ( "Marking state committed" ) ; } } } | Publish the data for the given tasks . |
36,444 | public void setFullTrue ( long extractFullRunTime ) { setProp ( ConfigurationKeys . EXTRACT_IS_FULL_KEY , true ) ; setProp ( ConfigurationKeys . EXTRACT_FULL_RUN_TIME_KEY , extractFullRunTime ) ; } | Set full drop date from the given time . |
36,445 | public void setPrimaryKeys ( String ... primaryKeyFieldName ) { setProp ( ConfigurationKeys . EXTRACT_PRIMARY_KEY_FIELDS_KEY , Joiner . on ( "," ) . join ( primaryKeyFieldName ) ) ; } | Set primary keys . |
36,446 | public void addPrimaryKey ( String ... primaryKeyFieldName ) { StringBuilder sb = new StringBuilder ( getProp ( ConfigurationKeys . EXTRACT_PRIMARY_KEY_FIELDS_KEY , "" ) ) ; Joiner . on ( "," ) . appendTo ( sb , primaryKeyFieldName ) ; setProp ( ConfigurationKeys . EXTRACT_PRIMARY_KEY_FIELDS_KEY , sb . toString ( ) ) ; } | Add more primary keys to the existing set of primary keys . |
36,447 | public void setDeltaFields ( String ... deltaFieldName ) { setProp ( ConfigurationKeys . EXTRACT_DELTA_FIELDS_KEY , Joiner . on ( "," ) . join ( deltaFieldName ) ) ; } | Set delta fields . |
36,448 | public void addDeltaField ( String ... deltaFieldName ) { StringBuilder sb = new StringBuilder ( getProp ( ConfigurationKeys . EXTRACT_DELTA_FIELDS_KEY , "" ) ) ; Joiner . on ( "," ) . appendTo ( sb , deltaFieldName ) ; setProp ( ConfigurationKeys . EXTRACT_DELTA_FIELDS_KEY , sb . toString ( ) ) ; } | Add more delta fields to the existing set of delta fields . |
36,449 | public boolean connect ( ) throws RestApiConnectionException { if ( this . autoEstablishAuthToken ) { if ( this . authTokenTimeout <= 0 ) { return false ; } else if ( ( System . currentTimeMillis ( ) - this . createdAt ) > this . authTokenTimeout ) { return false ; } } HttpEntity httpEntity = null ; try { httpEntity = getAuthentication ( ) ; if ( httpEntity != null ) { JsonElement json = GSON . fromJson ( EntityUtils . toString ( httpEntity ) , JsonObject . class ) ; if ( json == null ) { log . error ( "Http entity: " + httpEntity ) ; log . error ( "entity class: " + httpEntity . getClass ( ) . getName ( ) ) ; log . error ( "entity string size: " + EntityUtils . toString ( httpEntity ) . length ( ) ) ; log . error ( "content length: " + httpEntity . getContentLength ( ) ) ; log . error ( "content: " + IOUtils . toString ( httpEntity . getContent ( ) , Charsets . UTF_8 ) ) ; throw new RestApiConnectionException ( "JSON is NULL ! Failed on authentication with the following HTTP response received:\n" + EntityUtils . toString ( httpEntity ) ) ; } JsonObject jsonRet = json . getAsJsonObject ( ) ; log . info ( "jsonRet: " + jsonRet . toString ( ) ) ; parseAuthenticationResponse ( jsonRet ) ; } } catch ( IOException e ) { throw new RestApiConnectionException ( "Failed to get rest api connection; error - " + e . getMessage ( ) , e ) ; } finally { if ( httpEntity != null ) { try { EntityUtils . consume ( httpEntity ) ; } catch ( IOException e ) { throw new RestApiConnectionException ( "Failed to consume httpEntity; error - " + e . getMessage ( ) , e ) ; } } } return true ; } | get http connection |
36,450 | public CommandOutput < ? , ? > getResponse ( List < Command > cmds ) throws RestApiProcessingException { String url = cmds . get ( 0 ) . getParams ( ) . get ( 0 ) ; log . info ( "URL: " + url ) ; String jsonStr = null ; HttpRequestBase httpRequest = new HttpGet ( url ) ; addHeaders ( httpRequest ) ; HttpEntity httpEntity = null ; HttpResponse httpResponse = null ; try { httpResponse = this . httpClient . execute ( httpRequest ) ; StatusLine status = httpResponse . getStatusLine ( ) ; httpEntity = httpResponse . getEntity ( ) ; if ( httpEntity != null ) { jsonStr = EntityUtils . toString ( httpEntity ) ; } if ( status . getStatusCode ( ) >= 400 ) { log . info ( "Unable to get response using: " + url ) ; JsonElement jsonRet = GSON . fromJson ( jsonStr , JsonArray . class ) ; throw new RestApiProcessingException ( getFirstErrorMessage ( "Failed to retrieve response from" , jsonRet ) ) ; } } catch ( Exception e ) { throw new RestApiProcessingException ( "Failed to process rest api request; error - " + e . getMessage ( ) , e ) ; } finally { try { if ( httpEntity != null ) { EntityUtils . consume ( httpEntity ) ; } } catch ( Exception e ) { throw new RestApiProcessingException ( "Failed to consume httpEntity; error - " + e . getMessage ( ) , e ) ; } } CommandOutput < RestApiCommand , String > output = new RestApiCommandOutput ( ) ; output . put ( ( RestApiCommand ) cmds . get ( 0 ) , jsonStr ) ; return output ; } | get http response in json format using url |
36,451 | private static String getFirstErrorMessage ( String defaultMessage , JsonElement json ) { if ( json == null ) { return defaultMessage ; } JsonObject jsonObject = null ; if ( ! json . isJsonArray ( ) ) { jsonObject = json . getAsJsonObject ( ) ; } else { JsonArray jsonArray = json . getAsJsonArray ( ) ; if ( jsonArray . size ( ) != 0 ) { jsonObject = jsonArray . get ( 0 ) . getAsJsonObject ( ) ; } } if ( jsonObject != null ) { if ( jsonObject . has ( "error_description" ) ) { defaultMessage = defaultMessage + jsonObject . get ( "error_description" ) . getAsString ( ) ; } else if ( jsonObject . has ( "message" ) ) { defaultMessage = defaultMessage + jsonObject . get ( "message" ) . getAsString ( ) ; } } return defaultMessage ; } | get error message while executing http url |
36,452 | public List < HivePartitionDataset > findDatasets ( ) throws IOException { List < HivePartitionDataset > list = new ArrayList < > ( ) ; for ( HivePartitionDataset hivePartitionDataset : super . findDatasets ( ) ) { CleanableHivePartitionDataset dataset = new CleanableHivePartitionDataset ( hivePartitionDataset , this . fs , this . state ) ; list . add ( dataset ) ; } return list ; } | Will find all datasets according to whitelist except the backup and staging tables . |
36,453 | public static Config getConfig ( ConfigClient client , URI u , Optional < Config > runtimeConfig ) { try { return client . getConfig ( u , runtimeConfig ) ; } catch ( ConfigStoreFactoryDoesNotExistsException | ConfigStoreCreationException e ) { throw new Error ( e ) ; } } | Wrapper to convert Checked Exception to Unchecked Exception . Easy to use in lambda expressions |
36,454 | public static List < KafkaTopic > getTopicsFromConfigStore ( Properties properties , String configStoreUri , GobblinKafkaConsumerClient kafkaConsumerClient ) { ConfigClient configClient = ConfigClient . createConfigClient ( VersionStabilityPolicy . WEAK_LOCAL_STABILITY ) ; State state = new State ( ) ; state . setProp ( KafkaSource . TOPIC_WHITELIST , ".*" ) ; state . setProp ( KafkaSource . TOPIC_BLACKLIST , StringUtils . EMPTY ) ; List < KafkaTopic > allTopics = kafkaConsumerClient . getFilteredTopics ( DatasetFilterUtils . getPatternList ( state , KafkaSource . TOPIC_BLACKLIST ) , DatasetFilterUtils . getPatternList ( state , KafkaSource . TOPIC_WHITELIST ) ) ; Optional < Config > runtimeConfig = ConfigClientUtils . getOptionalRuntimeConfig ( properties ) ; if ( properties . containsKey ( GOBBLIN_CONFIG_TAGS_WHITELIST ) ) { Preconditions . checkArgument ( properties . containsKey ( GOBBLIN_CONFIG_FILTER ) , "Missing required property " + GOBBLIN_CONFIG_FILTER ) ; String filterString = properties . getProperty ( GOBBLIN_CONFIG_FILTER ) ; Path whiteListTagUri = PathUtils . mergePaths ( new Path ( configStoreUri ) , new Path ( properties . getProperty ( GOBBLIN_CONFIG_TAGS_WHITELIST ) ) ) ; List < String > whitelistedTopics = new ArrayList < > ( ) ; ConfigStoreUtils . getTopicsURIFromConfigStore ( configClient , whiteListTagUri , filterString , runtimeConfig ) . stream ( ) . filter ( ( URI u ) -> ConfigUtils . getBoolean ( ConfigStoreUtils . getConfig ( configClient , u , runtimeConfig ) , KafkaSource . TOPIC_WHITELIST , false ) ) . forEach ( ( ( URI u ) -> whitelistedTopics . add ( ConfigStoreUtils . getTopicNameFromURI ( u ) ) ) ) ; return allTopics . stream ( ) . filter ( ( KafkaTopic p ) -> whitelistedTopics . contains ( p . getName ( ) ) ) . collect ( Collectors . toList ( ) ) ; } else if ( properties . containsKey ( GOBBLIN_CONFIG_TAGS_BLACKLIST ) ) { Preconditions . checkArgument ( properties . containsKey ( GOBBLIN_CONFIG_FILTER ) , "Missing required property " + GOBBLIN_CONFIG_FILTER ) ; String filterString = properties . getProperty ( GOBBLIN_CONFIG_FILTER ) ; Path blackListTagUri = PathUtils . mergePaths ( new Path ( configStoreUri ) , new Path ( properties . getProperty ( GOBBLIN_CONFIG_TAGS_BLACKLIST ) ) ) ; List < String > blacklistedTopics = new ArrayList < > ( ) ; ConfigStoreUtils . getTopicsURIFromConfigStore ( configClient , blackListTagUri , filterString , runtimeConfig ) . stream ( ) . filter ( ( URI u ) -> ConfigUtils . getBoolean ( ConfigStoreUtils . getConfig ( configClient , u , runtimeConfig ) , KafkaSource . TOPIC_BLACKLIST , false ) ) . forEach ( ( ( URI u ) -> blacklistedTopics . add ( ConfigStoreUtils . getTopicNameFromURI ( u ) ) ) ) ; return allTopics . stream ( ) . filter ( ( KafkaTopic p ) -> ! blacklistedTopics . contains ( p . getName ( ) ) ) . collect ( Collectors . toList ( ) ) ; } else { log . warn ( "None of the blacklist or whitelist tags are provided" ) ; return allTopics ; } } | Get topics from config store . Topics will either be whitelisted or blacklisted using tag . After filtering out topics via tag their config property is checked . For each shortlisted topic config must contain either property topic . blacklist or topic . whitelist |
36,455 | public Path getAuditFilePath ( ) { StringBuilder auditFileNameBuilder = new StringBuilder ( ) ; auditFileNameBuilder . append ( "P=" ) . append ( auditMetadata . getPhase ( ) ) . append ( FILE_NAME_DELIMITTER ) . append ( "C=" ) . append ( auditMetadata . getCluster ( ) ) . append ( FILE_NAME_DELIMITTER ) . append ( "E=" ) . append ( auditMetadata . getExtractId ( ) ) . append ( FILE_NAME_DELIMITTER ) . append ( "S=" ) . append ( auditMetadata . getSnapshotId ( ) ) . append ( FILE_NAME_DELIMITTER ) . append ( "D=" ) . append ( auditMetadata . getDeltaId ( ) ) ; return new Path ( auditDirPath , PathUtils . combinePaths ( auditMetadata . getTableMetadata ( ) . getDatabase ( ) , auditMetadata . getTableMetadata ( ) . getTable ( ) , auditFileNameBuilder . toString ( ) , auditMetadata . getPartFileName ( ) ) ) ; } | Returns the complete path of the audit file . Generate the audit file path with format |
36,456 | public void createTrashSnapshot ( ) throws IOException { FileStatus [ ] pathsInTrash = this . fs . listStatus ( this . trashLocation , TRASH_NOT_SNAPSHOT_PATH_FILTER ) ; if ( pathsInTrash . length <= 0 ) { LOG . info ( "Nothing in trash. Will not create snapshot." ) ; return ; } Path snapshotDir = new Path ( this . trashLocation , new DateTime ( ) . toString ( TRASH_SNAPSHOT_NAME_FORMATTER ) ) ; if ( this . fs . exists ( snapshotDir ) ) { throw new IOException ( "New snapshot directory " + snapshotDir . toString ( ) + " already exists." ) ; } if ( ! safeFsMkdir ( fs , snapshotDir , PERM ) ) { throw new IOException ( "Failed to create new snapshot directory at " + snapshotDir . toString ( ) ) ; } LOG . info ( String . format ( "Moving %d paths in Trash directory to newly created snapshot at %s." , pathsInTrash . length , snapshotDir . toString ( ) ) ) ; int pathsFailedToMove = 0 ; for ( FileStatus fileStatus : pathsInTrash ) { Path pathRelativeToTrash = PathUtils . relativizePath ( fileStatus . getPath ( ) , this . trashLocation ) ; Path targetPath = new Path ( snapshotDir , pathRelativeToTrash ) ; boolean movedThisPath = true ; try { movedThisPath = this . fs . rename ( fileStatus . getPath ( ) , targetPath ) ; } catch ( IOException exception ) { LOG . error ( "Failed to move path " + fileStatus . getPath ( ) . toString ( ) + " to snapshot." , exception ) ; pathsFailedToMove += 1 ; continue ; } if ( ! movedThisPath ) { LOG . error ( "Failed to move path " + fileStatus . getPath ( ) . toString ( ) + " to snapshot." ) ; pathsFailedToMove += 1 ; } } if ( pathsFailedToMove > 0 ) { LOG . error ( String . format ( "Failed to move %d paths to the snapshot at %s." , pathsFailedToMove , snapshotDir . toString ( ) ) ) ; } } | Moves all current contents of trash directory into a snapshot directory with current timestamp . |
36,457 | private boolean safeFsMkdir ( FileSystem fs , Path f , FsPermission permission ) throws IOException { try { return fs . mkdirs ( f , permission ) ; } catch ( IOException e ) { if ( ! fs . exists ( f ) ) { throw new IOException ( "Failed to create trash folder while it is still not existed yet." ) ; } else { LOG . debug ( "Target folder %s has been created by other threads." , f . toString ( ) ) ; return true ; } } } | Safe creation of the trash folder to ensure thread - safety . |
36,458 | public void push ( String name , String value , long timestamp ) throws IOException { graphiteSender . send ( name , value , timestamp ) ; } | Pushes a single metric through the Graphite protocol to the underlying backend |
36,459 | private void handleLeadershipChange ( NotificationContext changeContext ) { if ( this . helixManager . isPresent ( ) && this . helixManager . get ( ) . isLeader ( ) ) { LOGGER . info ( "Leader notification for {} HM.isLeader {}" , this . helixManager . get ( ) . getInstanceName ( ) , this . helixManager . get ( ) . isLeader ( ) ) ; if ( this . isSchedulerEnabled ) { LOGGER . info ( "Gobblin Service is now running in master instance mode, enabling Scheduler." ) ; this . scheduler . setActive ( true ) ; } if ( this . isGitConfigMonitorEnabled ) { this . gitConfigMonitor . setActive ( true ) ; } if ( this . isDagManagerEnabled ) { if ( this . topologyCatalog . getInitComplete ( ) . getCount ( ) == 0 ) { this . dagManager . setActive ( true ) ; } } } else if ( this . helixManager . isPresent ( ) ) { LOGGER . info ( "Leader lost notification for {} HM.isLeader {}" , this . helixManager . get ( ) . getInstanceName ( ) , this . helixManager . get ( ) . isLeader ( ) ) ; if ( this . isSchedulerEnabled ) { LOGGER . info ( "Gobblin Service is now running in slave instance mode, disabling Scheduler." ) ; this . scheduler . setActive ( false ) ; } if ( this . isGitConfigMonitorEnabled ) { this . gitConfigMonitor . setActive ( false ) ; } if ( this . isDagManagerEnabled ) { this . dagManager . setActive ( false ) ; } } } | Handle leadership change . |
36,460 | public static String newJobId ( String jobName ) { return Id . Job . create ( jobName , System . currentTimeMillis ( ) ) . toString ( ) ; } | Create a new job ID . |
36,461 | public static String newTaskId ( String jobId , int sequence ) { return Id . Task . create ( Id . parse ( jobId ) . get ( Id . Parts . INSTANCE_NAME ) , sequence ) . toString ( ) ; } | Create a new task ID for the job with the given job ID . |
36,462 | public static void cleanJobStagingData ( State state , Logger logger ) throws IOException { Preconditions . checkArgument ( state . contains ( ConfigurationKeys . WRITER_STAGING_DIR ) , "Missing required property " + ConfigurationKeys . WRITER_STAGING_DIR ) ; Preconditions . checkArgument ( state . contains ( ConfigurationKeys . WRITER_OUTPUT_DIR ) , "Missing required property " + ConfigurationKeys . WRITER_OUTPUT_DIR ) ; String writerFsUri = state . getProp ( ConfigurationKeys . WRITER_FILE_SYSTEM_URI , ConfigurationKeys . LOCAL_FS_URI ) ; FileSystem fs = getFsWithProxy ( state , writerFsUri , WriterUtils . getFsConfiguration ( state ) ) ; Path jobStagingPath = new Path ( state . getProp ( ConfigurationKeys . WRITER_STAGING_DIR ) ) ; logger . info ( "Cleaning up staging directory " + jobStagingPath ) ; HadoopUtils . deletePath ( fs , jobStagingPath , true ) ; if ( fs . exists ( jobStagingPath . getParent ( ) ) && fs . listStatus ( jobStagingPath . getParent ( ) ) . length == 0 ) { logger . info ( "Deleting directory " + jobStagingPath . getParent ( ) ) ; HadoopUtils . deletePath ( fs , jobStagingPath . getParent ( ) , true ) ; } Path jobOutputPath = new Path ( state . getProp ( ConfigurationKeys . WRITER_OUTPUT_DIR ) ) ; logger . info ( "Cleaning up output directory " + jobOutputPath ) ; HadoopUtils . deletePath ( fs , jobOutputPath , true ) ; if ( fs . exists ( jobOutputPath . getParent ( ) ) && fs . listStatus ( jobOutputPath . getParent ( ) ) . length == 0 ) { logger . info ( "Deleting directory " + jobOutputPath . getParent ( ) ) ; HadoopUtils . deletePath ( fs , jobOutputPath . getParent ( ) , true ) ; } if ( state . contains ( ConfigurationKeys . ROW_LEVEL_ERR_FILE ) ) { if ( state . getPropAsBoolean ( ConfigurationKeys . CLEAN_ERR_DIR , ConfigurationKeys . DEFAULT_CLEAN_ERR_DIR ) ) { Path jobErrPath = new Path ( state . getProp ( ConfigurationKeys . ROW_LEVEL_ERR_FILE ) ) ; log . info ( "Cleaning up err directory : " + jobErrPath ) ; HadoopUtils . deleteIfExists ( fs , jobErrPath , true ) ; } } } | Cleanup staging data of all tasks of a job . |
36,463 | public static void cleanTaskStagingData ( State state , Logger logger ) throws IOException { int numBranches = state . getPropAsInt ( ConfigurationKeys . FORK_BRANCHES_KEY , 1 ) ; for ( int branchId = 0 ; branchId < numBranches ; branchId ++ ) { String writerFsUri = state . getProp ( ForkOperatorUtils . getPropertyNameForBranch ( ConfigurationKeys . WRITER_FILE_SYSTEM_URI , numBranches , branchId ) , ConfigurationKeys . LOCAL_FS_URI ) ; FileSystem fs = getFsWithProxy ( state , writerFsUri , WriterUtils . getFsConfiguration ( state ) ) ; Path stagingPath = WriterUtils . getWriterStagingDir ( state , numBranches , branchId ) ; if ( fs . exists ( stagingPath ) ) { logger . info ( "Cleaning up staging directory " + stagingPath . toUri ( ) . getPath ( ) ) ; if ( ! fs . delete ( stagingPath , true ) ) { throw new IOException ( "Clean up staging directory " + stagingPath . toUri ( ) . getPath ( ) + " failed" ) ; } } Path outputPath = WriterUtils . getWriterOutputDir ( state , numBranches , branchId ) ; if ( fs . exists ( outputPath ) ) { logger . info ( "Cleaning up output directory " + outputPath . toUri ( ) . getPath ( ) ) ; if ( ! fs . delete ( outputPath , true ) ) { throw new IOException ( "Clean up output directory " + outputPath . toUri ( ) . getPath ( ) + " failed" ) ; } } } } | Cleanup staging data of a Gobblin task . |
36,464 | public static void setEvolutionMetadata ( State state , List < String > evolutionDDLs ) { state . setProp ( EventConstants . SCHEMA_EVOLUTION_DDLS_NUM , evolutionDDLs == null ? 0 : evolutionDDLs . size ( ) ) ; } | Set number of schema evolution DDLs as Sla event metadata |
36,465 | public static void setIsFirstPublishMetadata ( WorkUnitState wus ) { if ( ! Boolean . valueOf ( wus . getPropAsBoolean ( IS_WATERMARK_WORKUNIT_KEY ) ) ) { LongWatermark previousWatermark = wus . getWorkunit ( ) . getLowWatermark ( LongWatermark . class ) ; wus . setProp ( SlaEventKeys . IS_FIRST_PUBLISH , ( null == previousWatermark || previousWatermark . getValue ( ) == 0 ) ) ; } } | Sets metadata to indicate whether this is the first time this table or partition is being published . |
36,466 | public void configure ( Map < String , ? > configs , boolean isKey ) { Preconditions . checkArgument ( isKey == false , "LiAvroDeserializer only works for value fields" ) ; _datumReader = new GenericDatumReader < > ( ) ; Properties props = new Properties ( ) ; for ( Map . Entry < String , ? > entry : configs . entrySet ( ) ) { String value = String . valueOf ( entry . getValue ( ) ) ; props . setProperty ( entry . getKey ( ) , value ) ; } _schemaRegistry = KafkaSchemaRegistryFactory . getSchemaRegistry ( props ) ; } | Configure this class . |
36,467 | public Optional < List < CopyRoute > > getPushRoutes ( ReplicationConfiguration rc , EndPoint copyFrom ) { if ( rc . getCopyMode ( ) == ReplicationCopyMode . PULL ) return Optional . absent ( ) ; DataFlowTopology topology = rc . getDataFlowToplogy ( ) ; List < DataFlowTopology . DataFlowPath > paths = topology . getDataFlowPaths ( ) ; for ( DataFlowTopology . DataFlowPath p : paths ) { List < CopyRoute > routes = p . getCopyRoutes ( ) ; if ( routes . isEmpty ( ) ) { continue ; } if ( routes . get ( 0 ) . getCopyFrom ( ) . equals ( copyFrom ) ) { return Optional . of ( routes ) ; } } return Optional . absent ( ) ; } | for push mode there is no optimization |
36,468 | private static String normalizeClusterUrl ( String clusterIdentifier ) { try { URI uri = new URI ( clusterIdentifier . trim ( ) ) ; if ( ! uri . isOpaque ( ) && null != uri . getHost ( ) ) { clusterIdentifier = uri . getHost ( ) ; } else { clusterIdentifier = uri . toString ( ) . replaceAll ( "[/:]" , " " ) . trim ( ) . replaceAll ( " " , "_" ) ; } } catch ( URISyntaxException e ) { } return clusterIdentifier ; } | Strip out the port number if it is a valid URI |
36,469 | public FlowConfig get ( ComplexResourceKey < FlowId , EmptyRecord > key ) { String flowGroup = key . getKey ( ) . getFlowGroup ( ) ; String flowName = key . getKey ( ) . getFlowName ( ) ; FlowId flowId = new FlowId ( ) . setFlowGroup ( flowGroup ) . setFlowName ( flowName ) ; return this . flowConfigsResourceHandler . getFlowConfig ( flowId ) ; } | Retrieve the flow configuration with the given key |
36,470 | public CreateResponse create ( FlowConfig flowConfig ) { List < ServiceRequester > requestorList = this . requesterService . findRequesters ( this ) ; try { String serialized = this . requesterService . serialize ( requestorList ) ; flowConfig . getProperties ( ) . put ( RequesterService . REQUESTER_LIST , serialized ) ; LOG . info ( "Rest requester list is " + serialized ) ; } catch ( IOException e ) { throw new FlowConfigLoggedException ( HttpStatus . S_401_UNAUTHORIZED , "cannot get who is the requester" , e ) ; } return this . flowConfigsResourceHandler . createFlowConfig ( flowConfig ) ; } | Create a flow configuration that the service will forward to execution instances for execution |
36,471 | public UpdateResponse update ( ComplexResourceKey < FlowId , EmptyRecord > key , FlowConfig flowConfig ) { String flowGroup = key . getKey ( ) . getFlowGroup ( ) ; String flowName = key . getKey ( ) . getFlowName ( ) ; FlowId flowId = new FlowId ( ) . setFlowGroup ( flowGroup ) . setFlowName ( flowName ) ; return this . flowConfigsResourceHandler . updateFlowConfig ( flowId , flowConfig ) ; } | Update the flow configuration with the specified key . Running flows are not affected . An error is raised if the flow configuration does not exist . |
36,472 | public UpdateResponse delete ( ComplexResourceKey < FlowId , EmptyRecord > key ) { String flowGroup = key . getKey ( ) . getFlowGroup ( ) ; String flowName = key . getKey ( ) . getFlowName ( ) ; FlowId flowId = new FlowId ( ) . setFlowGroup ( flowGroup ) . setFlowName ( flowName ) ; return this . flowConfigsResourceHandler . deleteFlowConfig ( flowId , getHeaders ( ) ) ; } | Delete a configured flow . Running flows are not affected . The schedule will be removed for scheduled flows . |
36,473 | private void initializeAppLauncherAndServices ( ) throws Exception { Properties properties = ConfigUtils . configToProperties ( this . config ) ; if ( ! properties . contains ( ServiceBasedAppLauncher . APP_STOP_TIME_SECONDS ) ) { properties . setProperty ( ServiceBasedAppLauncher . APP_STOP_TIME_SECONDS , Long . toString ( 300 ) ) ; } this . applicationLauncher = new ServiceBasedAppLauncher ( properties , this . clusterName ) ; if ( this . config . hasPath ( GobblinClusterConfigurationKeys . GOBBLIN_CLUSTER_PREFIX + ConfigurationKeys . JOB_CONFIG_FILE_GENERAL_PATH_KEY ) ) { String jobCatalogClassName = ConfigUtils . getString ( config , GobblinClusterConfigurationKeys . JOB_CATALOG_KEY , GobblinClusterConfigurationKeys . DEFAULT_JOB_CATALOG ) ; this . jobCatalog = ( MutableJobCatalog ) GobblinConstructorUtils . invokeFirstConstructor ( Class . forName ( jobCatalogClassName ) , ImmutableList . of ( config . getConfig ( StringUtils . removeEnd ( GobblinClusterConfigurationKeys . GOBBLIN_CLUSTER_PREFIX , "." ) ) . withFallback ( this . config ) ) ) ; } else { this . jobCatalog = null ; } SchedulerService schedulerService = new SchedulerService ( properties ) ; this . applicationLauncher . addService ( schedulerService ) ; this . jobScheduler = buildGobblinHelixJobScheduler ( config , this . appWorkDir , getMetadataTags ( clusterName , applicationId ) , schedulerService ) ; this . applicationLauncher . addService ( this . jobScheduler ) ; this . jobConfigurationManager = buildJobConfigurationManager ( config ) ; this . applicationLauncher . addService ( this . jobConfigurationManager ) ; } | Create the service based application launcher and other associated services |
36,474 | private void stopAppLauncherAndServices ( ) { try { this . applicationLauncher . stop ( ) ; } catch ( ApplicationException ae ) { LOGGER . error ( "Error while stopping Gobblin Cluster application launcher" , ae ) ; } if ( this . jobCatalog instanceof Service ) { ( ( Service ) this . jobCatalog ) . stopAsync ( ) . awaitTerminated ( ) ; } } | Stop the application launcher then any services that were started outside of the application launcher |
36,475 | public synchronized void start ( ) { LOGGER . info ( "Starting the Gobblin Cluster Manager" ) ; this . eventBus . register ( this ) ; this . multiManager . connect ( ) ; configureHelixQuotaBasedTaskScheduling ( ) ; if ( this . isStandaloneMode ) { this . idleProcessThread = new Thread ( new Runnable ( ) { public void run ( ) { while ( ! GobblinClusterManager . this . stopStatus . isStopInProgress ( ) && ! GobblinClusterManager . this . stopIdleProcessThread ) { try { Thread . sleep ( 300 ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; break ; } } } } ) ; this . idleProcessThread . start ( ) ; Runtime . getRuntime ( ) . addShutdownHook ( new Thread ( ) { public void run ( ) { GobblinClusterManager . this . stopIdleProcessThread = true ; } } ) ; } else { startAppLauncherAndServices ( ) ; } } | Start the Gobblin Cluster Manager . |
36,476 | public synchronized void stop ( ) { if ( this . stopStatus . isStopInProgress ( ) ) { return ; } this . stopStatus . setStopInprogress ( true ) ; LOGGER . info ( "Stopping the Gobblin Cluster Manager" ) ; if ( this . idleProcessThread != null ) { try { this . idleProcessThread . join ( ) ; } catch ( InterruptedException ie ) { Thread . currentThread ( ) . interrupt ( ) ; } } if ( ! this . isStandaloneMode ) { sendShutdownRequest ( ) ; } stopAppLauncherAndServices ( ) ; this . multiManager . disconnect ( ) ; } | Stop the Gobblin Cluster Manager . |
36,477 | public synchronized MD5Digest register ( Schema schema , PostMethod post ) throws SchemaRegistryException { if ( this . namespaceOverride . isPresent ( ) ) { schema = AvroUtils . switchNamespace ( schema , this . namespaceOverride . get ( ) ) ; } LOG . info ( "Registering schema " + schema . toString ( ) ) ; post . addParameter ( "schema" , schema . toString ( ) ) ; HttpClient httpClient = this . borrowClient ( ) ; try { LOG . debug ( "Loading: " + post . getURI ( ) ) ; int statusCode = httpClient . executeMethod ( post ) ; if ( statusCode != HttpStatus . SC_CREATED ) { throw new SchemaRegistryException ( "Error occurred while trying to register schema: " + statusCode ) ; } String response ; response = post . getResponseBodyAsString ( ) ; if ( response != null ) { LOG . info ( "Received response " + response ) ; } String schemaKey ; Header [ ] headers = post . getResponseHeaders ( SCHEMA_ID_HEADER_NAME ) ; if ( headers . length != 1 ) { throw new SchemaRegistryException ( "Error reading schema id returned by registerSchema call: headers.length = " + headers . length ) ; } else if ( ! headers [ 0 ] . getValue ( ) . startsWith ( SCHEMA_ID_HEADER_PREFIX ) ) { throw new SchemaRegistryException ( "Error parsing schema id returned by registerSchema call: header = " + headers [ 0 ] . getValue ( ) ) ; } else { LOG . info ( "Registered schema successfully" ) ; schemaKey = headers [ 0 ] . getValue ( ) . substring ( SCHEMA_ID_HEADER_PREFIX . length ( ) ) ; } MD5Digest schemaId = MD5Digest . fromString ( schemaKey ) ; return schemaId ; } catch ( Throwable t ) { throw new SchemaRegistryException ( t ) ; } finally { post . releaseConnection ( ) ; this . httpClientPool . returnObject ( httpClient ) ; } } | Register a schema to the Kafka schema registry |
36,478 | public void createFlowConfig ( FlowConfig flowConfig ) throws RemoteInvocationException { LOG . debug ( "createFlowConfig with groupName " + flowConfig . getId ( ) . getFlowGroup ( ) + " flowName " + flowConfig . getId ( ) . getFlowName ( ) ) ; CreateIdRequest < ComplexResourceKey < FlowId , EmptyRecord > , FlowConfig > request = _flowconfigsRequestBuilders . create ( ) . input ( flowConfig ) . build ( ) ; ResponseFuture < IdResponse < ComplexResourceKey < FlowId , EmptyRecord > > > flowConfigResponseFuture = _restClient . get ( ) . sendRequest ( request ) ; flowConfigResponseFuture . getResponse ( ) ; } | Create a flow configuration |
36,479 | public void updateFlowConfig ( FlowConfig flowConfig ) throws RemoteInvocationException { LOG . debug ( "updateFlowConfig with groupName " + flowConfig . getId ( ) . getFlowGroup ( ) + " flowName " + flowConfig . getId ( ) . getFlowName ( ) ) ; FlowId flowId = new FlowId ( ) . setFlowGroup ( flowConfig . getId ( ) . getFlowGroup ( ) ) . setFlowName ( flowConfig . getId ( ) . getFlowName ( ) ) ; UpdateRequest < FlowConfig > updateRequest = _flowconfigsRequestBuilders . update ( ) . id ( new ComplexResourceKey < > ( flowId , new EmptyRecord ( ) ) ) . input ( flowConfig ) . build ( ) ; ResponseFuture < EmptyRecord > response = _restClient . get ( ) . sendRequest ( updateRequest ) ; response . getResponse ( ) ; } | Update a flow configuration |
36,480 | public FlowConfig getFlowConfig ( FlowId flowId ) throws RemoteInvocationException { LOG . debug ( "getFlowConfig with groupName " + flowId . getFlowGroup ( ) + " flowName " + flowId . getFlowName ( ) ) ; GetRequest < FlowConfig > getRequest = _flowconfigsRequestBuilders . get ( ) . id ( new ComplexResourceKey < > ( flowId , new EmptyRecord ( ) ) ) . build ( ) ; Response < FlowConfig > response = _restClient . get ( ) . sendRequest ( getRequest ) . getResponse ( ) ; return response . getEntity ( ) ; } | Get a flow configuration |
36,481 | private boolean checkFileLevelRelativeToRoot ( Path filePath , int depth ) { if ( filePath == null ) { return false ; } Path path = filePath ; for ( int i = 0 ; i < depth - 1 ; i ++ ) { path = path . getParent ( ) ; } if ( ! path . getName ( ) . equals ( folderName ) ) { return false ; } return true ; } | Helper to check if a file has proper hierarchy . |
36,482 | private Config getNodeConfigWithOverrides ( Config nodeConfig , Path nodeFilePath ) { String nodeId = nodeFilePath . getParent ( ) . getName ( ) ; return nodeConfig . withValue ( FlowGraphConfigurationKeys . DATA_NODE_ID_KEY , ConfigValueFactory . fromAnyRef ( nodeId ) ) ; } | Helper that overrides the data . node . id property with name derived from the node file path |
36,483 | private Config getEdgeConfigWithOverrides ( Config edgeConfig , Path edgeFilePath ) { String source = edgeFilePath . getParent ( ) . getParent ( ) . getName ( ) ; String destination = edgeFilePath . getParent ( ) . getName ( ) ; String edgeName = Files . getNameWithoutExtension ( edgeFilePath . getName ( ) ) ; return edgeConfig . withValue ( FlowGraphConfigurationKeys . FLOW_EDGE_SOURCE_KEY , ConfigValueFactory . fromAnyRef ( source ) ) . withValue ( FlowGraphConfigurationKeys . FLOW_EDGE_DESTINATION_KEY , ConfigValueFactory . fromAnyRef ( destination ) ) . withValue ( FlowGraphConfigurationKeys . FLOW_EDGE_ID_KEY , ConfigValueFactory . fromAnyRef ( getEdgeId ( source , destination , edgeName ) ) ) ; } | Helper that overrides the flow edge properties with name derived from the edge file path |
36,484 | private Config loadNodeFileWithOverrides ( Path filePath ) throws IOException { Config nodeConfig = this . pullFileLoader . loadPullFile ( filePath , emptyConfig , false ) ; return getNodeConfigWithOverrides ( nodeConfig , filePath ) ; } | Load the node file . |
36,485 | private Config loadEdgeFileWithOverrides ( Path filePath ) throws IOException { Config edgeConfig = this . pullFileLoader . loadPullFile ( filePath , emptyConfig , false ) ; return getEdgeConfigWithOverrides ( edgeConfig , filePath ) ; } | Load the edge file . |
36,486 | private String getEdgeId ( String source , String destination , String edgeName ) { return Joiner . on ( FLOW_EDGE_LABEL_JOINER_CHAR ) . join ( source , destination , edgeName ) ; } | Get an edge label from the edge properties |
36,487 | protected void initializeBatch ( String databaseName , String table ) throws SQLException { this . insertStmtPrefix = createInsertStatementStr ( databaseName , table ) ; this . insertPstmtForFixedBatch = this . conn . prepareStatement ( createPrepareStatementStr ( this . batchSize ) ) ; LOG . info ( String . format ( "Initialized for %s insert " + this , ( this . batchSize > 1 ) ? "batch" : "" ) ) ; } | Initializes variables for batch insert and pre-computes the PreparedStatement based on the requested batch size and parameter size .
36,488 | protected String createInsertStatementStr ( String databaseName , String table ) { return String . format ( INSERT_STATEMENT_PREFIX_FORMAT , databaseName , table , JOINER_ON_COMMA . join ( this . columnNames ) ) ; } | Populates the placeholders and constructs the prefix of the batch insert statement
36,489 | public Config loadPullFile ( Path path , Config sysProps , boolean loadGlobalProperties ) throws IOException { Config fallback = loadGlobalProperties ? loadAncestorGlobalConfigs ( path , sysProps ) : sysProps ; if ( this . javaPropsPullFileFilter . accept ( path ) ) { return loadJavaPropsWithFallback ( path , fallback ) . resolve ( ) ; } else if ( this . hoconPullFileFilter . accept ( path ) ) { return loadHoconConfigAtPath ( path ) . withFallback ( fallback ) . resolve ( ) ; } else { throw new IOException ( String . format ( "Cannot load pull file %s due to unrecognized extension." , path ) ) ; } } | Load a single pull file . |
36,490 | public CloseableHttpResponse waitForResponse ( ListenableFuture < CloseableHttpResponse > responseFuture ) { try { return responseFuture . get ( ) ; } catch ( InterruptedException | ExecutionException e ) { throw new RuntimeException ( e ) ; } } | The default implementation relies on the HttpClient's socket timeout, which waits based on the elapsed time between the last packet sent by the client and the response received from the server .
36,491 | public List < String > getcurrentFsSnapshot ( State state ) { List < String > results = new ArrayList < > ( ) ; String folderId = state . getProp ( SOURCE_FILEBASED_DATA_DIRECTORY , "" ) ; try { LOG . info ( "Running ls with folderId: " + folderId ) ; List < String > fileIds = this . fsHelper . ls ( folderId ) ; for ( String fileId : fileIds ) { results . add ( fileId + splitPattern + this . fsHelper . getFileMTime ( fileId ) ) ; } } catch ( FileBasedHelperException e ) { throw new RuntimeException ( "Failed to retrieve list of file IDs for folderID: " + folderId , e ) ; } return results ; } | Provide a snapshot of files, where the snapshot consists of a list of file IDs with their modified times . Folder ID and file ID are both optional: a missing folder ID means the search starts from the root folder, and a missing file ID means all files in the current folder and its subfolders are included .
36,492 | public void onSuccess ( final WriteResponse response ) { for ( final Thunk thunk : this . thunks ) { thunk . callback . onSuccess ( new WriteResponse ( ) { public Object getRawResponse ( ) { return response . getRawResponse ( ) ; } public String getStringResponse ( ) { return response . getStringResponse ( ) ; } public long bytesWritten ( ) { return thunk . sizeInBytes ; } } ) ; } } | Invoked after the batch has been sent and acknowledged successfully
36,493 | public void onFailure ( Throwable throwable ) { for ( Thunk thunk : this . thunks ) { thunk . callback . onFailure ( throwable ) ; } } | Invoked when the batch is sent and an error is returned
36,494 | public void close ( ) { closed = true ; while ( appendsInProgress . get ( ) > 0 ) { LOG . info ( "Append is still going on, wait for a while" ) ; try { Thread . sleep ( 100 ) ; } catch ( InterruptedException e ) { LOG . error ( "close is interrupted while appending data is in progress" ) ; } } this . closeComplete . countDown ( ) ; } | When close is invoked, all newly arriving records are rejected; a busy loop ensures all ongoing appends complete before shutdown
36,495 | private void copyDependencyJarsToHdfs ( ) throws IOException { if ( ! this . state . contains ( ConfigurationKeys . JOB_JAR_FILES_KEY ) ) { return ; } LocalFileSystem lfs = FileSystem . getLocal ( this . conf ) ; Path tmpJarFileDir = new Path ( this . tmpOutputDir , "_gobblin_compaction_jars" ) ; this . state . setProp ( COMPACTION_JARS , tmpJarFileDir . toString ( ) ) ; this . fs . delete ( tmpJarFileDir , true ) ; for ( String jarFile : this . state . getPropAsList ( ConfigurationKeys . JOB_JAR_FILES_KEY ) ) { for ( FileStatus status : lfs . globStatus ( new Path ( jarFile ) ) ) { Path tmpJarFile = new Path ( this . fs . makeQualified ( tmpJarFileDir ) , status . getPath ( ) . getName ( ) ) ; this . fs . copyFromLocalFile ( status . getPath ( ) , tmpJarFile ) ; LOG . info ( String . format ( "%s will be added to classpath" , tmpJarFile ) ) ; } } } | Copy dependency jars from local fs to HDFS . |
36,496 | private void deleteDependencyJars ( ) throws IllegalArgumentException , IOException { if ( this . state . contains ( COMPACTION_JARS ) ) { this . fs . delete ( new Path ( this . state . getProp ( COMPACTION_JARS ) ) , true ) ; } } | Delete dependency jars from HDFS when job is done . |
36,497 | public static void renameSourceDirAsCompactionComplete ( FileSystem fs , Dataset dataset ) { try { for ( Path path : dataset . getRenamePaths ( ) ) { Path newPath = new Path ( path . getParent ( ) , path . getName ( ) + MRCompactor . COMPACTION_RENAME_SOURCE_DIR_SUFFIX ) ; LOG . info ( "[{}] Renaming {} to {}" , dataset . getDatasetName ( ) , path , newPath ) ; fs . rename ( path , newPath ) ; } } catch ( Exception e ) { LOG . error ( "Rename input path failed" , e ) ; } } | Rename all the source directories for a specific dataset |
36,498 | public static void addRunningHadoopJob ( Dataset dataset , Job job ) { MRCompactor . RUNNING_MR_JOBS . put ( dataset , job ) ; } | Keep track of running MR jobs so if the compaction is cancelled the MR jobs can be killed . |
36,499 | private void submitVerificationSuccessSlaEvent ( Results . Result result ) { try { CompactionSlaEventHelper . getEventSubmitterBuilder ( result . dataset ( ) , Optional . < Job > absent ( ) , this . fs ) . eventSubmitter ( this . eventSubmitter ) . eventName ( CompactionSlaEventHelper . COMPLETION_VERIFICATION_SUCCESS_EVENT_NAME ) . additionalMetadata ( Maps . transformValues ( result . verificationContext ( ) , Functions . toStringFunction ( ) ) ) . build ( ) . submit ( ) ; } catch ( Throwable t ) { LOG . warn ( "Failed to submit verification success event:" + t , t ) ; } } | Submit an event when completeness verification is successful |