idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
36,000
/**
 * Getter for proxiedFs: creates a {@link FileSystem} instance that performs all of its operations
 * as the configured proxy user.
 *
 * Depending on {@code authType}, the proxy UGI is created either after a keytab-based login of a
 * super user (KEYTAB), with a delegation token loaded from a token sequence file (TOKEN), or
 * without any authentication (default), in which case file system operations may fail.
 *
 * @param properties state that must contain {@link ConfigurationKeys#FS_PROXY_AS_USER_NAME}
 * @param authType the authentication type used to create the proxy user
 * @param authPath keytab location (KEYTAB) or token sequence file location (TOKEN)
 * @param uri the URI of the file system to access
 * @param conf the Hadoop configuration used to instantiate the file system
 * @return the proxied {@link FileSystem}
 */
public FileSystem getProxiedFileSystem(State properties, AuthType authType, String authPath, String uri,
    final Configuration conf) throws IOException, InterruptedException, URISyntaxException {
  Preconditions.checkArgument(StringUtils.isNotBlank(properties.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME)),
      "State does not contain a proper proxy user name");
  String proxyUserName = properties.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME);

  UserGroupInformation proxyUser;
  switch (authType) {
    case KEYTAB:
      // Fixed error message: this check validates the super user name, not a token file name.
      Preconditions.checkArgument(
          StringUtils.isNotBlank(properties.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS)),
          "State does not contain a proper super user name to proxy as others");
      String superUser = properties.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
      UserGroupInformation.loginUserFromKeytab(superUser, authPath);
      proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
      break;
    case TOKEN:
      proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
      Optional<Token<?>> proxyToken = getTokenFromSeqFile(authPath, proxyUserName);
      if (proxyToken.isPresent()) {
        proxyUser.addToken(proxyToken.get());
      } else {
        LOG.warn("No delegation token found for the current proxy user.");
      }
      break;
    default:
      LOG.warn("Creating a proxy user without authentication, which may not be able to perform file system operations.");
      proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
      break;
  }

  final URI fsURI = URI.create(uri);
  proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws IOException {
      LOG.debug("Now performing file system operations as :" + UserGroupInformation.getCurrentUser());
      proxiedFs = FileSystem.get(fsURI, conf);
      return null;
    }
  });
  return this.proxiedFs;
}
Getter for proxiedFs that uses the passed parameters to create and return the proxied FileSystem instance.
36,001
private static Optional < Token < ? > > getTokenFromSeqFile ( String authPath , String proxyUserName ) throws IOException { try ( Closer closer = Closer . create ( ) ) { FileSystem localFs = FileSystem . getLocal ( new Configuration ( ) ) ; SequenceFile . Reader tokenReader = closer . register ( new SequenceFile . Reader ( localFs , new Path ( authPath ) , localFs . getConf ( ) ) ) ; Text key = new Text ( ) ; Token < ? > value = new Token < > ( ) ; while ( tokenReader . next ( key , value ) ) { LOG . info ( "Found token for " + key ) ; if ( key . toString ( ) . equals ( proxyUserName ) ) { return Optional . < Token < ? > > of ( value ) ; } } } return Optional . absent ( ) ; }
Get token from the token sequence file .
36,002
public static Duration getLeadTimeDurationFromConfig ( State state ) { String leadTimeProp = state . getProp ( DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME ) ; if ( leadTimeProp == null || leadTimeProp . length ( ) == 0 ) { return DEFAULT_PARTITIONED_SOURCE_PARTITION_LEAD_TIME ; } int leadTime = Integer . parseInt ( leadTimeProp ) ; DatePartitionType leadTimeGranularity = DEFAULT_DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME_GRANULARITY ; String leadTimeGranularityProp = state . getProp ( DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME_GRANULARITY ) ; if ( leadTimeGranularityProp != null ) { leadTimeGranularity = DatePartitionType . valueOf ( leadTimeGranularityProp ) ; } return new Duration ( leadTime * leadTimeGranularity . getUnitMilliseconds ( ) ) ; }
Retrieve the lead time duration from the LEAD_TIME and LEAD_TIME granularity config settings .
36,003
public void purge ( ) throws IOException { this . datasetOwner = getOwner ( ) ; State state = new State ( this . state ) ; this . datasetOwnerFs = ProxyUtils . getOwnerFs ( state , this . datasetOwner ) ; try ( HiveProxyQueryExecutor queryExecutor = ProxyUtils . getQueryExecutor ( state , this . datasetOwner ) ) { if ( this . simulate ) { log . info ( "Simulate is set to true. Wont't run actual queries" ) ; return ; } String originalPartitionLocation = getOriginalPartitionLocation ( ) ; queryExecutor . executeQueries ( HivePurgerQueryTemplate . getCreateStagingTableQuery ( this ) , this . datasetOwner ) ; this . startTime = getLastModifiedTime ( originalPartitionLocation ) ; queryExecutor . executeQueries ( this . purgeQueries , this . datasetOwner ) ; this . endTime = getLastModifiedTime ( originalPartitionLocation ) ; queryExecutor . executeQueries ( HivePurgerQueryTemplate . getBackupQueries ( this ) , this . datasetOwner ) ; String commitPolicyString = this . state . getProp ( ComplianceConfigurationKeys . PURGER_COMMIT_POLICY_CLASS , ComplianceConfigurationKeys . DEFAULT_PURGER_COMMIT_POLICY_CLASS ) ; CommitPolicy < PurgeableHivePartitionDataset > commitPolicy = GobblinConstructorUtils . invokeConstructor ( CommitPolicy . class , commitPolicyString ) ; if ( ! commitPolicy . shouldCommit ( this ) ) { log . error ( "Last modified time before start of execution : " + this . startTime ) ; log . error ( "Last modified time after execution of purge queries : " + this . endTime ) ; throw new RuntimeException ( "Failed to commit. File modified during job run." ) ; } queryExecutor . executeQueries ( HivePurgerQueryTemplate . getAlterOriginalPartitionLocationQueries ( this ) , this . datasetOwner ) ; queryExecutor . executeQueries ( HivePurgerQueryTemplate . getDropStagingTableQuery ( this ) , this . datasetOwner ) ; } catch ( SQLException e ) { throw new IOException ( e ) ; } }
This method is responsible for the actual purging. It first creates a staging table partition with the same schema as the original table partition. The staging table partition is then populated by the original table left-outer-joined with the compliance id table.
36,004
// Launch a new Gobblin instance on Yarn:
//  1. Registers with the event bus and creates/connects the Helix cluster.
//  2. Starts the Yarn client, records the application id, and polls the application report
//     on a fixed schedule, posting report/failure events to the event bus.
//  3. Assembles supporting services (keytab-based security manager, driver log copier,
//     job execution info server, admin UI) based on configuration, and starts them via a
//     ServiceManager. The admin UI is only started when the job execution info server is
//     enabled, since it needs that server's advertised URI.
public void launch ( ) throws IOException , YarnException { this . eventBus . register ( this ) ; String clusterName = this . config . getString ( GobblinClusterConfigurationKeys . HELIX_CLUSTER_NAME_KEY ) ; HelixUtils . createGobblinHelixCluster ( this . config . getString ( GobblinClusterConfigurationKeys . ZK_CONNECTION_STRING_KEY ) , clusterName ) ; LOGGER . info ( "Created Helix cluster " + clusterName ) ; connectHelixManager ( ) ; startYarnClient ( ) ; this . applicationId = getApplicationId ( ) ; this . applicationStatusMonitor . scheduleAtFixedRate ( new Runnable ( ) { public void run ( ) { try { eventBus . post ( new ApplicationReportArrivalEvent ( yarnClient . getApplicationReport ( applicationId . get ( ) ) ) ) ; } catch ( YarnException | IOException e ) { LOGGER . error ( "Failed to get application report for Gobblin Yarn application " + applicationId . get ( ) , e ) ; eventBus . post ( new GetApplicationReportFailureEvent ( e ) ) ; } } } , 0 , this . appReportIntervalMinutes , TimeUnit . MINUTES ) ; List < Service > services = Lists . newArrayList ( ) ; if ( this . config . hasPath ( GobblinYarnConfigurationKeys . KEYTAB_FILE_PATH ) ) { LOGGER . info ( "Adding YarnAppSecurityManager since login is keytab based" ) ; services . add ( buildYarnAppSecurityManager ( ) ) ; } if ( ! this . config . hasPath ( GobblinYarnConfigurationKeys . LOG_COPIER_DISABLE_DRIVER_COPY ) || ! this . config . getBoolean ( GobblinYarnConfigurationKeys . LOG_COPIER_DISABLE_DRIVER_COPY ) ) { services . add ( buildLogCopier ( this . config , new Path ( this . sinkLogRootDir , this . applicationName + Path . SEPARATOR + this . applicationId . get ( ) . toString ( ) ) , GobblinClusterUtils . getAppWorkDirPath ( this . fs , this . applicationName , this . applicationId . get ( ) . toString ( ) ) ) ) ; } if ( config . getBoolean ( ConfigurationKeys . JOB_EXECINFO_SERVER_ENABLED_KEY ) ) { LOGGER . 
info ( "Starting the job execution info server since it is enabled" ) ; Properties properties = ConfigUtils . configToProperties ( config ) ; JobExecutionInfoServer executionInfoServer = new JobExecutionInfoServer ( properties ) ; services . add ( executionInfoServer ) ; if ( config . getBoolean ( ConfigurationKeys . ADMIN_SERVER_ENABLED_KEY ) ) { LOGGER . info ( "Starting the admin UI server since it is enabled" ) ; services . add ( ServiceBasedAppLauncher . createAdminServer ( properties , executionInfoServer . getAdvertisedServerUri ( ) ) ) ; } } else if ( config . getBoolean ( ConfigurationKeys . ADMIN_SERVER_ENABLED_KEY ) ) { LOGGER . warn ( "NOT starting the admin UI because the job execution info server is NOT enabled" ) ; } this . serviceManager = Optional . of ( new ServiceManager ( services ) ) ; this . serviceManager . get ( ) . startAsync ( ) ; }
Launch a new Gobblin instance on Yarn .
36,005
public List < HivePartitionDataset > findDatasets ( ) throws IOException { Preconditions . checkArgument ( this . state . contains ( ComplianceConfigurationKeys . RESTORE_DATASET ) , "Missing required property " + ComplianceConfigurationKeys . RESTORE_DATASET ) ; HivePartitionDataset hivePartitionDataset = HivePartitionFinder . findDataset ( this . state . getProp ( ComplianceConfigurationKeys . RESTORE_DATASET ) , this . state ) ; Preconditions . checkNotNull ( hivePartitionDataset , "No dataset to restore" ) ; return Collections . singletonList ( hivePartitionDataset ) ; }
Will return a Singleton list of HivePartitionDataset to be restored .
36,006
public void pushMessages ( List < byte [ ] > messages ) { List < KeyedMessage < String , byte [ ] > > keyedMessages = Lists . transform ( messages , new Function < byte [ ] , KeyedMessage < String , byte [ ] > > ( ) { public KeyedMessage < String , byte [ ] > apply ( byte [ ] bytes ) { return new KeyedMessage < String , byte [ ] > ( topic , bytes ) ; } } ) ; this . producer . send ( keyedMessages ) ; }
Push all byte array messages to the Kafka topic.
36,007
protected ProducerCloseable < String , byte [ ] > createProducer ( ProducerConfig config ) { return this . closer . register ( new ProducerCloseable < String , byte [ ] > ( config ) ) ; }
Actually creates the Kafka producer .
36,008
private void acquirePermits ( long permits ) throws InterruptedException { long startMs = System . currentTimeMillis ( ) ; limiter . acquirePermits ( permits ) ; long permitAcquisitionTime = System . currentTimeMillis ( ) - startMs ; if ( throttledTimer . isPresent ( ) ) { Instrumented . updateTimer ( throttledTimer , permitAcquisitionTime , TimeUnit . MILLISECONDS ) ; } this . throttledTime += permitAcquisitionTime ; }
Acquire permit along with emitting metrics if enabled .
36,009
public S getSchemaByKey ( K key ) throws SchemaRegistryException { try { return cachedSchemasByKeys . get ( key ) ; } catch ( ExecutionException e ) { throw new SchemaRegistryException ( String . format ( "Schema with key %s cannot be retrieved" , key ) , e ) ; } }
Get schema from schema registry by key .
36,010
public static void generateDumpScript ( Path dumpScript , FileSystem fs , String heapFileName , String chmod ) throws IOException { if ( fs . exists ( dumpScript ) ) { LOG . info ( "Heap dump script already exists: " + dumpScript ) ; return ; } try ( BufferedWriter scriptWriter = new BufferedWriter ( new OutputStreamWriter ( fs . create ( dumpScript ) , ConfigurationKeys . DEFAULT_CHARSET_ENCODING ) ) ) { Path dumpDir = new Path ( dumpScript . getParent ( ) , DUMP_FOLDER ) ; if ( ! fs . exists ( dumpDir ) ) { fs . mkdirs ( dumpDir ) ; } scriptWriter . write ( "#!/bin/sh\n" ) ; scriptWriter . write ( "if [ -n \"$HADOOP_PREFIX\" ]; then\n" ) ; scriptWriter . write ( " ${HADOOP_PREFIX}/bin/hadoop dfs -put " + heapFileName + " " + dumpDir + "/${PWD//\\//_}.hprof\n" ) ; scriptWriter . write ( "else\n" ) ; scriptWriter . write ( " ${HADOOP_HOME}/bin/hadoop dfs -put " + heapFileName + " " + dumpDir + "/${PWD//\\//_}.hprof\n" ) ; scriptWriter . write ( "fi\n" ) ; } catch ( IOException ioe ) { LOG . error ( "Heap dump script is not generated successfully." ) ; if ( fs . exists ( dumpScript ) ) { fs . delete ( dumpScript , true ) ; } throw ioe ; } Runtime . getRuntime ( ) . exec ( chmod + " " + dumpScript ) ; }
Generate the dump script which is used when an OOM error is thrown during task execution. The current dump script puts the .hprof files into the DUMP_FOLDER within the same directory as the dump script.
36,011
public static void submit ( Optional < EventSubmitter > submitter , String name ) { if ( submitter . isPresent ( ) ) { submitter . get ( ) . submit ( name ) ; } }
Calls submit on submitter if present .
36,012
public static String getJvmInputArguments ( ) { RuntimeMXBean runtimeMxBean = ManagementFactory . getRuntimeMXBean ( ) ; List < String > arguments = runtimeMxBean . getInputArguments ( ) ; return String . format ( "JVM Input Arguments: %s" , JOINER . join ( arguments ) ) ; }
Gets the input arguments passed to the JVM .
36,013
public static String formatJvmArguments ( Optional < String > jvmArguments ) { if ( jvmArguments . isPresent ( ) ) { return PORT_UTILS . replacePortTokens ( jvmArguments . get ( ) ) ; } return StringUtils . EMPTY ; }
Formats the specified jvm arguments such that any tokens are replaced with concrete values ;
36,014
public void initialize ( ) { String table = Preconditions . checkNotNull ( this . state . getProp ( ForkOperatorUtils . getPropertyNameForBranch ( JdbcPublisher . JDBC_PUBLISHER_FINAL_TABLE_NAME , this . branches , this . branchId ) ) ) ; String db = Preconditions . checkNotNull ( this . state . getProp ( ForkOperatorUtils . getPropertyNameForBranch ( JdbcPublisher . JDBC_PUBLISHER_DATABASE_NAME , this . branches , this . branchId ) ) ) ; try ( Connection conn = createConnection ( ) ) { JdbcWriterCommands commands = this . jdbcWriterCommandsFactory . newInstance ( this . state , conn ) ; Map < String , JdbcType > dateColumnMapping = commands . retrieveDateColumns ( db , table ) ; LOG . info ( "Date column mapping: " + dateColumnMapping ) ; final String dateFieldsKey = ForkOperatorUtils . getPropertyNameForBranch ( AvroToJdbcEntryConverter . CONVERTER_AVRO_JDBC_DATE_FIELDS , this . branches , this . branchId ) ; for ( WorkUnit wu : this . workUnits ) { wu . setProp ( dateFieldsKey , new Gson ( ) . toJson ( dateColumnMapping ) ) ; } } catch ( SQLException e ) { throw new RuntimeException ( e ) ; } }
AvroToJdbcEntryConverter needs the list of date columns existing in the table. As we don't want each converter making a connection against the database to get the same information, the ConverterInitializer retrieves it once and stores it into each WorkUnit so that AvroToJdbcEntryConverter can use it later.
36,015
protected void startUp ( ) { log . info ( "Starting the " + getClass ( ) . getSimpleName ( ) ) ; log . info ( "Polling git with interval {} " , this . pollingInterval ) ; this . scheduledExecutor . scheduleAtFixedRate ( new Runnable ( ) { public void run ( ) { try { if ( shouldPollGit ( ) ) { processGitConfigChanges ( ) ; } } catch ( GitAPIException | IOException e ) { log . error ( "Failed to process git config changes" , e ) ; } } } , 0 , this . pollingInterval , TimeUnit . SECONDS ) ; }
Start the service .
36,016
void processGitConfigChangesHelper ( List < DiffEntry > changes ) throws IOException { for ( DiffEntry change : changes ) { switch ( change . getChangeType ( ) ) { case ADD : case MODIFY : addChange ( change ) ; break ; case DELETE : removeChange ( change ) ; break ; case RENAME : removeChange ( change ) ; addChange ( change ) ; break ; default : throw new RuntimeException ( "Unsupported change type " + change . getChangeType ( ) ) ; } } this . gitRepo . moveCheckpointAndHashesForward ( ) ; }
A helper method where actual processing of the list of changes since the last refresh of the repository takes place and the changes applied .
36,017
public void addAll ( State otherState ) { Properties diffCommonProps = new Properties ( ) ; diffCommonProps . putAll ( Maps . difference ( this . commonProperties , otherState . commonProperties ) . entriesOnlyOnRight ( ) ) ; addAll ( diffCommonProps ) ; addAll ( otherState . specProperties ) ; }
Populates this instance with properties of the other instance .
36,018
public String getProp ( String key ) { if ( this . specProperties . containsKey ( key ) ) { return this . specProperties . getProperty ( key ) ; } return this . commonProperties . getProperty ( key ) ; }
Get the value of a property .
36,019
public List < String > getPropAsList ( String key , String def ) { return LIST_SPLITTER . splitToList ( getProp ( key , def ) ) ; }
Get the value of a property as a list of strings using the given default value if the property is not set .
36,020
public long getPropAsLong ( String key , long def ) { return Long . parseLong ( getProp ( key , String . valueOf ( def ) ) ) ; }
Get the value of a property as a long integer using the given default value if the property is not set .
36,021
public int getPropAsInt ( String key , int def ) { return Integer . parseInt ( getProp ( key , String . valueOf ( def ) ) ) ; }
Get the value of a property as an integer using the given default value if the property is not set .
36,022
public double getPropAsDouble ( String key , double def ) { return Double . parseDouble ( getProp ( key , String . valueOf ( def ) ) ) ; }
Get the value of a property as a double using the given default value if the property is not set .
36,023
public boolean getPropAsBoolean ( String key , boolean def ) { return Boolean . parseBoolean ( getProp ( key , String . valueOf ( def ) ) ) ; }
Get the value of a property as a boolean using the given default value if the property is not set .
36,024
public void removeProp ( String key ) { this . specProperties . remove ( key ) ; if ( this . commonProperties . containsKey ( key ) ) { Properties commonPropsCopy = new Properties ( ) ; commonPropsCopy . putAll ( this . commonProperties ) ; commonPropsCopy . remove ( key ) ; this . commonProperties = commonPropsCopy ; } }
Remove a property if it exists .
36,025
public void removePropsWithPrefix ( String prefix ) { this . specProperties . entrySet ( ) . removeIf ( entry -> ( ( String ) entry . getKey ( ) ) . startsWith ( prefix ) ) ; Properties newCommonProperties = null ; for ( Object key : this . commonProperties . keySet ( ) ) { if ( ( ( String ) key ) . startsWith ( prefix ) ) { if ( newCommonProperties == null ) { newCommonProperties = new Properties ( ) ; newCommonProperties . putAll ( this . commonProperties ) ; } newCommonProperties . remove ( key ) ; } } if ( newCommonProperties != null ) { this . commonProperties = newCommonProperties ; } }
Remove all properties whose key starts with a certain prefix.
36,026
private String getDbTableName ( String schemaName ) { Preconditions . checkArgument ( schemaName . matches ( ".+_.+_.+" ) ) ; return schemaName . replaceFirst ( "_" , "." ) . substring ( 0 , schemaName . lastIndexOf ( '_' ) ) ; }
Translate the schema name to "dbname.tablename" form for use in a path.
36,027
public static Collection < WorkUnitState > mergeAllSplitWorkUnits ( FileSystem fs , Collection < WorkUnitState > workUnits ) throws IOException { ListMultimap < CopyableFile , WorkUnitState > splitWorkUnitsMap = ArrayListMultimap . create ( ) ; for ( WorkUnitState workUnit : workUnits ) { if ( isSplitWorkUnit ( workUnit ) ) { CopyableFile copyableFile = ( CopyableFile ) CopySource . deserializeCopyEntity ( workUnit ) ; splitWorkUnitsMap . put ( copyableFile , workUnit ) ; } } for ( CopyableFile file : splitWorkUnitsMap . keySet ( ) ) { log . info ( String . format ( "Merging split file %s." , file . getDestination ( ) ) ) ; WorkUnitState oldWorkUnit = splitWorkUnitsMap . get ( file ) . get ( 0 ) ; Path outputDir = FileAwareInputStreamDataWriter . getOutputDir ( oldWorkUnit ) ; CopyEntity . DatasetAndPartition datasetAndPartition = file . getDatasetAndPartition ( CopySource . deserializeCopyableDataset ( oldWorkUnit ) ) ; Path parentPath = FileAwareInputStreamDataWriter . getOutputFilePath ( file , outputDir , datasetAndPartition ) . getParent ( ) ; WorkUnitState newWorkUnit = mergeSplits ( fs , file , splitWorkUnitsMap . get ( file ) , parentPath ) ; for ( WorkUnitState wu : splitWorkUnitsMap . get ( file ) ) { wu . setWorkingState ( WorkUnitState . WorkingState . COMMITTED ) ; workUnits . remove ( wu ) ; } workUnits . add ( newWorkUnit ) ; } return workUnits ; }
Finds all split work units in the input collection and merges the file parts into the expected output files .
36,028
private boolean shouldStickToTheSameNode ( int containerExitStatus ) { switch ( containerExitStatus ) { case ContainerExitStatus . DISKS_FAILED : return false ; case ContainerExitStatus . ABORTED : return false ; default : return this . containerHostAffinityEnabled ; } }
Check the exit status of a completed container and see if the replacement container should try to be started on the same node . Some exit status indicates a disk or node failure and in such cases the replacement container should try to be started on a different node .
36,029
// Handle the completion of a container: log its status and diagnostics, and (unless a
// shutdown is in progress) either give up on the Helix instance once its retry count
// exceeds helixInstanceMaxRetries, or return the instance name to the unused pool and
// request a replacement container. Host affinity of the replacement depends on the exit
// status (see shouldStickToTheSameNode). A HELIX_INSTANCE_COMPLETION event is submitted
// when an event submitter is configured. NOTE(review): containerMap.remove(...) is
// assumed to return a non-null entry for every reported container - a completion event
// for an untracked container would NPE here; confirm upstream guarantees.
private void handleContainerCompletion ( ContainerStatus containerStatus ) { Map . Entry < Container , String > completedContainerEntry = this . containerMap . remove ( containerStatus . getContainerId ( ) ) ; String completedInstanceName = completedContainerEntry . getValue ( ) ; LOGGER . info ( String . format ( "Container %s running Helix instance %s has completed with exit status %d" , containerStatus . getContainerId ( ) , completedInstanceName , containerStatus . getExitStatus ( ) ) ) ; if ( ! Strings . isNullOrEmpty ( containerStatus . getDiagnostics ( ) ) ) { LOGGER . info ( String . format ( "Received the following diagnostics information for container %s: %s" , containerStatus . getContainerId ( ) , containerStatus . getDiagnostics ( ) ) ) ; } if ( this . shutdownInProgress ) { return ; } this . helixInstanceRetryCount . putIfAbsent ( completedInstanceName , new AtomicInteger ( 0 ) ) ; int retryCount = this . helixInstanceRetryCount . get ( completedInstanceName ) . incrementAndGet ( ) ; Optional < ImmutableMap . Builder < String , String > > eventMetadataBuilder = Optional . absent ( ) ; if ( this . eventSubmitter . isPresent ( ) ) { eventMetadataBuilder = Optional . of ( buildContainerStatusEventMetadata ( containerStatus ) ) ; eventMetadataBuilder . get ( ) . put ( GobblinYarnEventConstants . EventMetadata . HELIX_INSTANCE_ID , completedInstanceName ) ; eventMetadataBuilder . get ( ) . put ( GobblinYarnEventConstants . EventMetadata . CONTAINER_STATUS_RETRY_ATTEMPT , retryCount + "" ) ; } if ( this . helixInstanceMaxRetries > 0 && retryCount > this . helixInstanceMaxRetries ) { if ( this . eventSubmitter . isPresent ( ) ) { this . eventSubmitter . get ( ) . submit ( GobblinYarnEventConstants . EventNames . HELIX_INSTANCE_COMPLETION , eventMetadataBuilder . get ( ) . build ( ) ) ; } LOGGER . warn ( "Maximum number of retries has been achieved for Helix instance " + completedInstanceName ) ; return ; } this . unusedHelixInstanceNames . 
offer ( completedInstanceName ) ; if ( this . eventSubmitter . isPresent ( ) ) { this . eventSubmitter . get ( ) . submit ( GobblinYarnEventConstants . EventNames . HELIX_INSTANCE_COMPLETION , eventMetadataBuilder . get ( ) . build ( ) ) ; } LOGGER . info ( String . format ( "Requesting a new container to replace %s to run Helix instance %s" , containerStatus . getContainerId ( ) , completedInstanceName ) ) ; this . eventBus . post ( new NewContainerRequest ( shouldStickToTheSameNode ( containerStatus . getExitStatus ( ) ) ? Optional . of ( completedContainerEntry . getKey ( ) ) : Optional . < Container > absent ( ) ) ) ; }
Handle the completion of a container . A new container will be requested to replace the one that just exited . Depending on the exit status and if container host affinity is enabled the new container may or may not try to be started on the same node .
36,030
public static Builder builder ( URI catalogURI , Properties jobProps ) { String name = JobState . getJobNameFromProps ( jobProps ) ; String group = JobState . getJobGroupFromProps ( jobProps ) ; if ( null == group ) { group = "default" ; } try { URI jobURI = new URI ( catalogURI . getScheme ( ) , catalogURI . getAuthority ( ) , "/" + group + "/" + name , null ) ; Builder builder = new Builder ( jobURI ) . withConfigAsProperties ( jobProps ) ; String descr = JobState . getJobDescriptionFromProps ( jobProps ) ; if ( null != descr ) { builder . withDescription ( descr ) ; } return builder ; } catch ( URISyntaxException e ) { throw new RuntimeException ( "Unable to create a JobSpec URI: " + e , e ) ; } }
Creates a builder for the JobSpec based on values in a job properties config .
36,031
public void sendNotification ( final Notification notification ) { ContextAwareTimer . Context timer = this . notificationTimer . time ( ) ; if ( ! this . notificationTargets . isEmpty ( ) ) { for ( final Map . Entry < UUID , Function < Notification , Void > > entry : this . notificationTargets . entrySet ( ) ) { try { entry . getValue ( ) . apply ( notification ) ; } catch ( RuntimeException exception ) { LOG . warn ( "RuntimeException when running notification target. Skipping." , exception ) ; } } } if ( getParent ( ) . isPresent ( ) ) { getParent ( ) . get ( ) . sendNotification ( notification ) ; } timer . stop ( ) ; }
Send a notification to all targets of this context and to the parent of this context .
36,032
public RecordWriter getRecordWriter ( TaskAttemptContext taskAttemptContext ) throws IOException { Configuration conf = taskAttemptContext . getConfiguration ( ) ; String extension = "." + conf . get ( COMPACTION_OUTPUT_EXTENSION , "orc" ) ; Path filename = getDefaultWorkFile ( taskAttemptContext , extension ) ; Writer writer = OrcFile . createWriter ( filename , org . apache . orc . mapred . OrcOutputFormat . buildOptions ( conf ) ) ; return new OrcMapreduceRecordWriter ( writer ) ; }
Required for extension since the super method hard-coded the file extension as .orc. To keep the extension name flexible we made it configuration-driven.
36,033
public static FileBasedJobLockFactory create ( Config factoryConfig , Configuration hadoopConf , Optional < Logger > log ) throws IOException { FileSystem fs = factoryConfig . hasPath ( FS_URI_CONFIG ) ? FileSystem . get ( URI . create ( factoryConfig . getString ( FS_URI_CONFIG ) ) , hadoopConf ) : getDefaultFileSystem ( hadoopConf ) ; String lockFilesDir = factoryConfig . hasPath ( LOCK_DIR_CONFIG ) ? factoryConfig . getString ( LOCK_DIR_CONFIG ) : getDefaultLockDir ( fs , log ) ; return new FileBasedJobLockFactory ( fs , lockFilesDir , log ) ; }
Create a new instance using the specified factory and hadoop configurations .
36,034
boolean isLocked ( Path lockFile ) throws JobLockException { try { return this . fs . exists ( lockFile ) ; } catch ( IOException e ) { throw new JobLockException ( e ) ; } }
Check if the lock is locked .
36,035
public long getRecordCount ( Collection < Path > paths ) { long count = 0 ; for ( Path path : paths ) { count += getRecordCount ( path ) ; } return count ; }
Get record count for a list of paths .
36,036
public void launch ( ) throws IOException , InterruptedException { this . eventBus . register ( this ) ; HelixUtils . createGobblinHelixCluster ( this . zkConnectionString , this . helixClusterName , false ) ; LOGGER . info ( "Created Helix cluster " + this . helixClusterName ) ; connectHelixManager ( ) ; this . clusterId = getClusterId ( ) ; countDownLatch . await ( ) ; }
Launch a new Gobblin cluster on AWS .
36,037
protected HttpOperation generateHttpOperation ( GenericRecord inputRecord , State state ) { Map < String , String > keyAndValue = new HashMap < > ( ) ; Optional < Iterable < String > > keys = getKeys ( state ) ; HttpOperation operation ; if ( keys . isPresent ( ) ) { for ( String key : keys . get ( ) ) { String value = inputRecord . get ( key ) . toString ( ) ; log . debug ( "Http join converter: key is {}, value is {}" , key , value ) ; keyAndValue . put ( key , value ) ; } operation = new HttpOperation ( ) ; operation . setKeys ( keyAndValue ) ; } else { operation = HttpUtils . toHttpOperation ( inputRecord ) ; } return operation ; }
Extract user-defined keys by looking at gobblin.converter.http.keys. If keys are defined, extract the key-value pairs from the inputRecord and set them on an HttpOperation; if keys are not defined, generate the HttpOperation via HttpUtils.toHttpOperation.
36,038
public static Path getJobStateFilePath ( boolean usingStateStore , Path appWorkPath , String jobId ) { final Path jobStateFilePath ; if ( usingStateStore ) { jobStateFilePath = new Path ( appWorkPath , GobblinClusterConfigurationKeys . JOB_STATE_DIR_NAME + Path . SEPARATOR + jobId + Path . SEPARATOR + jobId + "." + AbstractJobLauncher . JOB_STATE_FILE_NAME ) ; } else { jobStateFilePath = new Path ( appWorkPath , jobId + "." + AbstractJobLauncher . JOB_STATE_FILE_NAME ) ; } log . info ( "job state file path: " + jobStateFilePath ) ; return jobStateFilePath ; }
Generate the path to the job . state file
36,039
private String loadExistingMetadata ( Path metadataFilename , int branchId ) { try { FileSystem fsForBranch = writerFileSystemByBranches . get ( branchId ) ; if ( ! fsForBranch . exists ( metadataFilename ) ) { return null ; } FSDataInputStream existingMetadata = writerFileSystemByBranches . get ( branchId ) . open ( metadataFilename ) ; return IOUtils . toString ( existingMetadata , StandardCharsets . UTF_8 ) ; } catch ( IOException e ) { LOG . warn ( "IOException {} while trying to read existing metadata {} - treating as null" , e . getMessage ( ) , metadataFilename . toString ( ) ) ; return null ; } }
Read in existing metadata as a UTF8 string .
36,040
protected DatasetDescriptor createDestinationDescriptor ( WorkUnitState state , int branchId ) { Path publisherOutputDir = getPublisherOutputDir ( state , branchId ) ; FileSystem fs = this . publisherFileSystemByBranches . get ( branchId ) ; DatasetDescriptor destination = new DatasetDescriptor ( fs . getScheme ( ) , publisherOutputDir . toString ( ) ) ; destination . addMetadata ( DatasetConstants . FS_URI , fs . getUri ( ) . toString ( ) ) ; destination . addMetadata ( DatasetConstants . BRANCH , String . valueOf ( branchId ) ) ; return destination ; }
Create destination dataset descriptor
36,041
public void publishMetadata ( Collection < ? extends WorkUnitState > states ) throws IOException { Set < String > partitions = new HashSet < > ( ) ; mergeMetadataAndCollectPartitionNames ( states , partitions ) ; partitions . removeIf ( Objects :: isNull ) ; WorkUnitState anyState = states . iterator ( ) . next ( ) ; for ( int branchId = 0 ; branchId < numBranches ; branchId ++ ) { String mdOutputPath = getMetadataOutputPathFromState ( anyState , branchId ) ; String userSpecifiedPath = getUserSpecifiedOutputPathFromState ( anyState , branchId ) ; if ( partitions . isEmpty ( ) || userSpecifiedPath != null ) { publishMetadata ( getMergedMetadataForPartitionAndBranch ( null , branchId ) , branchId , getMetadataOutputFileForBranch ( anyState , branchId ) ) ; } else { String metadataFilename = getMetadataFileNameForBranch ( anyState , branchId ) ; if ( mdOutputPath == null || metadataFilename == null ) { LOG . info ( "Metadata filename not set for branch " + String . valueOf ( branchId ) + ": not publishing metadata." ) ; continue ; } for ( String partition : partitions ) { publishMetadata ( getMergedMetadataForPartitionAndBranch ( partition , branchId ) , branchId , new Path ( new Path ( mdOutputPath , partition ) , metadataFilename ) ) ; } } } }
Merge all of the metadata output from each work - unit and publish the merged record .
36,042
/**
 * Writes a single metadata value to the given path for a branch, replacing any existing file.
 * Silently returns (with an info log) when the path or value is null.
 *
 * <p>NOTE(review): the catch block swallows {@link IOException} and only logs it, even though the
 * method declares {@code throws IOException} — a write failure is invisible to callers; confirm
 * this best-effort behavior is intended.
 * <p>NOTE(review): the mkdirs call passes {@code metadataOutputPath} rather than its parent after
 * checking the parent's existence — verify the helper creates parent directories as expected.
 */
private void publishMetadata ( String metadataValue , int branchId , Path metadataOutputPath ) throws IOException { try { if ( metadataOutputPath == null ) { LOG . info ( "Metadata output path not set for branch " + String . valueOf ( branchId ) + ", not publishing." ) ; return ; } if ( metadataValue == null ) { LOG . info ( "No metadata collected for branch " + String . valueOf ( branchId ) + ", not publishing." ) ; return ; } FileSystem fs = this . metaDataWriterFileSystemByBranches . get ( branchId ) ; if ( ! fs . exists ( metadataOutputPath . getParent ( ) ) ) { WriterUtils . mkdirsWithRecursivePermissionWithRetry ( fs , metadataOutputPath , this . permissions . get ( branchId ) , retrierConfig ) ; } if ( fs . exists ( metadataOutputPath ) ) { HadoopUtils . deletePath ( fs , metadataOutputPath , false ) ; } LOG . info ( "Writing metadata for branch " + String . valueOf ( branchId ) + " to " + metadataOutputPath . toString ( ) ) ; try ( FSDataOutputStream outputStream = fs . create ( metadataOutputPath ) ) { outputStream . write ( metadataValue . getBytes ( StandardCharsets . UTF_8 ) ) ; } } catch ( IOException e ) { LOG . error ( "Metadata file is not generated: " + e , e ) ; } }
Publish metadata to a set of paths
36,043
/**
 * Gets the import links of the given config key URI, optionally resolving imports recursively.
 * Delegates to the three-argument overload with no runtime config override.
 */
public Collection<URI> getImports(URI configKeyUri, boolean recursive)
    throws ConfigStoreFactoryDoesNotExistsException, ConfigStoreCreationException, VersionDoesNotExistException {
  Optional<Config> noRuntimeConfig = Optional.<Config>absent();
  return getImports(configKeyUri, recursive, noRuntimeConfig);
}
Get the import links of the input URI .
36,044
/**
 * Looks up the {@link ConfigStoreFactory} registered for the scheme of the given config key URI.
 *
 * @throws ConfigStoreFactoryDoesNotExistsException if no factory is registered for the scheme
 */
@SuppressWarnings("unchecked")
private ConfigStoreFactory<ConfigStore> getConfigStoreFactory(URI configKeyUri)
    throws ConfigStoreFactoryDoesNotExistsException {
  String scheme = configKeyUri.getScheme();
  @SuppressWarnings("rawtypes")
  ConfigStoreFactory factory = this.configStoreFactoryRegister.getConfigStoreFactory(scheme);
  if (factory != null) {
    return factory;
  }
  throw new ConfigStoreFactoryDoesNotExistsException(scheme, "scheme name does not exists");
}
use serviceLoader to load configStoreFactories
36,045
/**
 * Seeks to the next available work unit, skipping empty {@link MultiWorkUnit}s.
 *
 * <p>No-op when no seek is pending, or when the current multi-work-unit iterator still has
 * elements. Otherwise advances through {@code workUnits}: a plain work unit is kept in
 * {@code nextWu}; a {@code MultiWorkUnit} installs its inner iterator, but is discarded
 * (and the scan continues) when that iterator is empty. Always clears {@code needSeek}.
 */
private void seekNext ( ) { if ( ! needSeek ) { return ; } if ( this . currentIterator != null && this . currentIterator . hasNext ( ) ) { needSeek = false ; return ; } nextWu = null ; this . currentIterator = null ; while ( nextWu == null && workUnits . hasNext ( ) ) { nextWu = workUnits . next ( ) ; if ( nextWu instanceof MultiWorkUnit ) { this . currentIterator = ( ( MultiWorkUnit ) nextWu ) . getWorkUnits ( ) . iterator ( ) ; if ( ! this . currentIterator . hasNext ( ) ) { nextWu = null ; } } } needSeek = false ; }
Seek to the next available work unit skipping all empty work units
36,046
/**
 * Performs the retention operations for this dataset.
 *
 * <p>Skips blacklisted datasets. For each (version finder, selection policy) pair: verifies the
 * version classes are compatible, finds all versions, sorts them newest-first, deletes the
 * policy-selected versions, then runs any configured {@code RetentionAction}s over ALL versions.
 * RetentionAction failures are collected (not rethrown immediately) so every pair still runs;
 * a single {@link RuntimeException} is thrown at the end if any action failed.
 *
 * @throws IOException if a finder's version class is incompatible with its policy's
 */
public void clean ( ) throws IOException { if ( this . isDatasetBlacklisted ) { this . log . info ( "Dataset blacklisted. Cleanup skipped for " + datasetRoot ( ) ) ; return ; } boolean atLeastOneFailureSeen = false ; for ( VersionFinderAndPolicy < T > versionFinderAndPolicy : getVersionFindersAndPolicies ( ) ) { VersionSelectionPolicy < T > selectionPolicy = versionFinderAndPolicy . getVersionSelectionPolicy ( ) ; VersionFinder < ? extends T > versionFinder = versionFinderAndPolicy . getVersionFinder ( ) ; if ( ! selectionPolicy . versionClass ( ) . isAssignableFrom ( versionFinder . versionClass ( ) ) ) { throw new IOException ( "Incompatible dataset version classes." ) ; } this . log . info ( String . format ( "Cleaning dataset %s. Using version finder %s and policy %s" , this , versionFinder . getClass ( ) . getName ( ) , selectionPolicy ) ) ; List < T > versions = Lists . newArrayList ( versionFinder . findDatasetVersions ( this ) ) ; if ( versions . isEmpty ( ) ) { this . log . warn ( "No dataset version can be found. Ignoring." ) ; continue ; } Collections . sort ( versions , Collections . reverseOrder ( ) ) ; Collection < T > deletableVersions = selectionPolicy . listSelectedVersions ( versions ) ; cleanImpl ( deletableVersions ) ; List < DatasetVersion > allVersions = Lists . newArrayList ( ) ; for ( T ver : versions ) { allVersions . add ( ver ) ; } for ( RetentionAction retentionAction : versionFinderAndPolicy . getRetentionActions ( ) ) { try { retentionAction . execute ( allVersions ) ; } catch ( Throwable t ) { atLeastOneFailureSeen = true ; log . error ( String . format ( "RetentionAction %s failed for dataset %s" , retentionAction . getClass ( ) . getName ( ) , this . datasetRoot ( ) ) , t ) ; } } } if ( atLeastOneFailureSeen ) { throw new RuntimeException ( String . format ( "At least one failure happened while processing %s. Look for previous logs for failures" , datasetRoot ( ) ) ) ; } }
Method to perform the Retention operations for this dataset .
36,047
/**
 * Splits a delimited String record according to the configured delimiter/enclosing char and maps
 * each field to the corresponding column of {@code outputSchema}, producing a single JsonObject.
 * Missing, null, empty, or literal-"null" fields become {@link JsonNull}.
 *
 * <p>Fixes/improvements: the column-name lookup ({@code outputSchema.get(i)...getAsString()}) is
 * hoisted instead of being recomputed up to three times per column, and the null-literal test uses
 * {@code equalsIgnoreCase} instead of default-locale {@code toLowerCase().equals(...)}, which can
 * misbehave under locales such as Turkish.
 *
 * @throws DataConversionException wrapping any failure (including a missing delimiter)
 */
public Iterable<JsonObject> convertRecord(JsonArray outputSchema, String inputRecord, WorkUnitState workUnit)
    throws DataConversionException {
  try {
    String strDelimiter = workUnit.getProp(ConfigurationKeys.CONVERTER_CSV_TO_JSON_DELIMITER);
    if (Strings.isNullOrEmpty(strDelimiter)) {
      throw new IllegalArgumentException("Delimiter cannot be empty");
    }
    char enclosedChar = workUnit.getProp(ConfigurationKeys.CONVERTER_CSV_TO_JSON_ENCLOSEDCHAR,
        ConfigurationKeys.DEFAULT_CONVERTER_CSV_TO_JSON_ENCLOSEDCHAR).charAt(0);
    InputStreamCSVReader reader = new InputStreamCSVReader(inputRecord, strDelimiter.charAt(0), enclosedChar);
    List<String> recordSplit = Lists.newArrayList(reader.splitRecord());
    JsonObject outputRecord = new JsonObject();
    for (int i = 0; i < outputSchema.size(); i++) {
      String columnName = outputSchema.get(i).getAsJsonObject().get("columnName").getAsString();
      if (i >= recordSplit.size()) {
        // Schema has more columns than the record: pad with nulls.
        outputRecord.add(columnName, JsonNull.INSTANCE);
        continue;
      }
      String value = recordSplit.get(i);
      if (value == null || value.isEmpty() || value.equalsIgnoreCase(NULL)) {
        outputRecord.add(columnName, JsonNull.INSTANCE);
      } else {
        outputRecord.addProperty(columnName, value);
      }
    }
    return new SingleRecordIterable<>(outputRecord);
  } catch (Exception e) {
    throw new DataConversionException(e);
  }
}
Takes in a record in String format and splits the data based on SOURCE_SCHEMA_DELIMITER. Uses the input schema and the split record to convert the record to a JsonObject.
36,048
/**
 * Runs every individual comparison in this comparator chain (input/output format, compression,
 * sub-dirs, buckets, bucket columns, raw location, parameters) and returns {@code this} for
 * further fluent use. The chained calls rely on each compare method returning the same instance.
 */
@ SuppressWarnings ( "unchecked" ) public T compareAll ( ) { this . compareInputFormat ( ) . compareOutputFormat ( ) . compareIsCompressed ( ) . compareIsStoredAsSubDirs ( ) . compareNumBuckets ( ) . compareBucketCols ( ) . compareRawLocation ( ) . compareParameters ( ) ; return ( T ) this ; }
Compare all parameters .
36,049
/**
 * Increments the counter associated with the given enum value by {@code n}.
 * No-op when the counter map is absent or has no entry for the value.
 */
public void inc(E e, long n) {
  if (counters == null) {
    return;
  }
  if (counters.containsKey(e)) {
    counters.get(e).inc(n);
  }
}
Increment the counter associated with enum value passed .
36,050
/**
 * Returns the count of the counter associated with the given enum value.
 *
 * <p>Fixed: now guards against a null counter map, for consistency with {@code inc(E, long)}
 * (which tolerates {@code counters == null}); previously this method would NPE in that case.
 * Also uses the {@code 0L} literal instead of the easily-misread lowercase {@code 0l}.
 *
 * @param e enum value whose counter to read
 * @return the counter's current count, or 0 when the map is absent or has no entry
 */
public long getCount(E e) {
  if (counters != null && counters.containsKey(e)) {
    return counters.get(e).getCount();
  }
  return 0L;
}
Get count for counter associated with enum value passed .
36,051
/**
 * Adds the partition to the table when it does not yet exist; otherwise alters the
 * existing partition in place.
 */
public void addOrAlterPartition(HiveTable table, HivePartition partition) throws IOException {
  boolean added = addPartitionIfNotExists(table, partition);
  if (!added) {
    alterPartition(table, partition);
  }
}
Add a partition to a table if not exists or alter a partition if exists .
36,052
/**
 * Returns the Avro schema for this source: the explicitly configured SOURCE_SCHEMA when present,
 * otherwise the schema of the first file to pull (all files are assumed to share one schema).
 * Returns null when there are no files to pull.
 */
public Schema getSchema() {
  if (this.workUnit.contains(ConfigurationKeys.SOURCE_SCHEMA)) {
    String schemaJson = this.workUnit.getProp(ConfigurationKeys.SOURCE_SCHEMA);
    return new Schema.Parser().parse(schemaJson);
  }
  AvroFsHelper avroHelper = (AvroFsHelper) this.fsHelper;
  if (this.filesToPull.isEmpty()) {
    return null;
  }
  try {
    return avroHelper.getAvroSchema(this.filesToPull.get(0));
  } catch (FileBasedHelperException e) {
    Throwables.propagate(e);
    return null;
  }
}
Assumption is that all files in the input directory have the same schema
36,053
/**
 * Finds the lookback time for a dataset from a ';'-separated config string whose entries are
 * either "regex:lookback" pairs or a bare default lookback. The first regex matching
 * {@code datasetName} wins; otherwise the last bare default (or {@code sysDefaultLookback}) is
 * returned. Malformed entries are logged and skipped.
 */
public static String getMachedLookbackTime(String datasetName, String datasetsAndLookBacks, String sysDefaultLookback) {
  String fallback = sysDefaultLookback;
  for (String entry : Splitter.on(";").trimResults().omitEmptyStrings().splitToList(datasetsAndLookBacks)) {
    List<String> parts = Splitter.on(":").trimResults().omitEmptyStrings().splitToList(entry);
    switch (parts.size()) {
      case 1:
        // Bare entry: acts as the default lookback for anything not matched later.
        fallback = parts.get(0);
        break;
      case 2:
        if (Pattern.compile(parts.get(0)).matcher(datasetName).find()) {
          return parts.get(1);
        }
        break;
      default:
        log.error("Invalid format in {}, {} cannot find its lookback time", datasetsAndLookBacks, datasetName);
    }
  }
  return fallback;
}
Find the correct lookback time for a given dataset .
36,054
/**
 * Determines whether a jobTemplate/FlowTemplate URI is valid: the template catalog path must be
 * configured and the URI scheme must be the supported FS scheme. Failures are logged.
 */
private boolean validateTemplateURI(URI flowURI) {
  boolean catalogConfigured = this.sysConfig.hasPath(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY);
  if (!catalogConfigured) {
    log.error("Missing config " + ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY);
    return false;
  }
  String scheme = flowURI.getScheme();
  if (!scheme.equals(FS_SCHEME)) {
    log.error("Expected scheme " + FS_SCHEME + " got unsupported scheme " + scheme);
    return false;
  }
  return true;
}
Determine if an URI of a jobTemplate or a FlowTemplate is valid .
36,055
/**
 * Builds a {@link QueryBasedHivePublishEntity} containing the publish-level DDL queries, the
 * directory moves to perform, and the cleanup queries/directories for this conversion.
 *
 * <p>Always emits a create-final-table DDL. Unpartitioned (or partitioning-unsupported) output:
 * moves the whole staging directory to the output location. Partitioned output: computes the
 * final partition location (honoring any pre-existing destination partition location), moves the
 * staging partition directory, drops/re-creates the partition in the final table, and also drops
 * any partitions flagged for removal by {@code getDropPartitionsDDLInfo}. In both cases the
 * staging table and its directory are scheduled for cleanup.
 */
public QueryBasedHivePublishEntity generatePublishQueries ( ) throws DataConversionException { QueryBasedHivePublishEntity publishEntity = new QueryBasedHivePublishEntity ( ) ; List < String > publishQueries = publishEntity . getPublishQueries ( ) ; Map < String , String > publishDirectories = publishEntity . getPublishDirectories ( ) ; List < String > cleanupQueries = publishEntity . getCleanupQueries ( ) ; List < String > cleanupDirectories = publishEntity . getCleanupDirectories ( ) ; String createFinalTableDDL = HiveConverterUtils . generateCreateDuplicateTableDDL ( outputDatabaseName , stagingTableName , outputTableName , outputDataLocation , Optional . of ( outputDatabaseName ) ) ; publishQueries . add ( createFinalTableDDL ) ; log . debug ( "Create final table DDL:\n" + createFinalTableDDL ) ; if ( ! this . supportTargetPartitioning || partitionsDDLInfo . size ( ) == 0 ) { log . debug ( "Snapshot directory to move: " + stagingDataLocation + " to: " + outputDataLocation ) ; publishDirectories . put ( stagingDataLocation , outputDataLocation ) ; String dropStagingTableDDL = HiveAvroORCQueryGenerator . generateDropTableDDL ( outputDatabaseName , stagingTableName ) ; log . debug ( "Drop staging table DDL: " + dropStagingTableDDL ) ; cleanupQueries . add ( dropStagingTableDDL ) ; log . debug ( "Staging table directory to delete: " + stagingDataLocation ) ; cleanupDirectories . add ( stagingDataLocation ) ; } else { String finalDataPartitionLocation = outputDataLocation + Path . SEPARATOR + stagingDataPartitionDirName ; Optional < Path > destPartitionLocation = HiveConverterUtils . getDestinationPartitionLocation ( destinationTableMeta , this . workUnitState , conversionEntity . getPartition ( ) . get ( ) . getName ( ) ) ; finalDataPartitionLocation = HiveConverterUtils . updatePartitionLocation ( finalDataPartitionLocation , this . workUnitState , destPartitionLocation ) ; log . 
debug ( "Partition directory to move: " + stagingDataPartitionLocation + " to: " + finalDataPartitionLocation ) ; publishDirectories . put ( stagingDataPartitionLocation , finalDataPartitionLocation ) ; List < String > dropPartitionsDDL = HiveAvroORCQueryGenerator . generateDropPartitionsDDL ( outputDatabaseName , outputTableName , partitionsDMLInfo ) ; log . debug ( "Drop partitions if exist in final table: " + dropPartitionsDDL ) ; publishQueries . addAll ( dropPartitionsDDL ) ; List < String > createFinalPartitionDDL = HiveAvroORCQueryGenerator . generateCreatePartitionDDL ( outputDatabaseName , outputTableName , finalDataPartitionLocation , partitionsDMLInfo , Optional . < String > absent ( ) ) ; log . debug ( "Create final partition DDL: " + createFinalPartitionDDL ) ; publishQueries . addAll ( createFinalPartitionDDL ) ; String dropStagingTableDDL = HiveAvroORCQueryGenerator . generateDropTableDDL ( outputDatabaseName , stagingTableName ) ; log . debug ( "Drop staging table DDL: " + dropStagingTableDDL ) ; cleanupQueries . add ( dropStagingTableDDL ) ; log . debug ( "Staging table directory to delete: " + stagingDataLocation ) ; cleanupDirectories . add ( stagingDataLocation ) ; publishQueries . addAll ( HiveAvroORCQueryGenerator . generateDropPartitionsDDL ( outputDatabaseName , outputTableName , AbstractAvroToOrcConverter . getDropPartitionsDDLInfo ( conversionEntity ) ) ) ; } log . info ( "Publish partition entity: " + publishEntity ) ; return publishEntity ; }
Returns a QueryBasedHivePublishEntity which includes publish level queries and cleanup commands .
36,056
/**
 * Builds an HTTP write request from a single buffered record: resolves the URI from the template
 * plus the record's keys/query params, copies headers, and attaches the payload.
 *
 * <p>Returns null when the record is null or the URI cannot be built (record is skipped).
 * Throws {@link RuntimeException} when the payload cannot be written ({@code addPayload}
 * signals failure with -1).
 */
private ApacheHttpRequest < GenericRecord > buildWriteRequest ( BufferedRecord < GenericRecord > record ) { if ( record == null ) { return null ; } ApacheHttpRequest < GenericRecord > request = new ApacheHttpRequest < > ( ) ; HttpOperation httpOperation = HttpUtils . toHttpOperation ( record . getRecord ( ) ) ; URI uri = HttpUtils . buildURI ( urlTemplate , httpOperation . getKeys ( ) , httpOperation . getQueryParams ( ) ) ; if ( uri == null ) { return null ; } RequestBuilder builder = RequestBuilder . create ( verb . toUpperCase ( ) ) ; builder . setUri ( uri ) ; Map < String , String > headers = httpOperation . getHeaders ( ) ; if ( headers != null && headers . size ( ) != 0 ) { for ( Map . Entry < String , String > header : headers . entrySet ( ) ) { builder . setHeader ( header . getKey ( ) , header . getValue ( ) ) ; } } int bytesWritten = addPayload ( builder , httpOperation . getBody ( ) ) ; if ( bytesWritten == - 1 ) { throw new RuntimeException ( "Fail to write payload into request" ) ; } request . setRawRequest ( build ( builder ) ) ; request . markRecord ( record , bytesWritten ) ; return request ; }
Build a write request from a single record
36,057
/**
 * Enables update mode: files in the target are overwritten when they differ from the source.
 * Equivalent to Hadoop distcp's -update option.
 */
@CliObjectOption(description = "Specifies files should be updated if they're different in the source.")
public EmbeddedGobblinDistcp update() {
  String enabled = Boolean.toString(true);
  this.setConfiguration(RecursiveCopyableDataset.UPDATE_KEY, enabled);
  return this;
}
Specifies that files in the target should be updated if they have changed in the source . Equivalent to - update option in Hadoop distcp .
36,058
/**
 * Enables delete mode: files present in the target but absent from the source are removed.
 * Equivalent to Hadoop distcp's -delete option.
 */
@CliObjectOption(description = "Delete files in target that don't exist on source.")
public EmbeddedGobblinDistcp delete() {
  String enabled = Boolean.toString(true);
  this.setConfiguration(RecursiveCopyableDataset.DELETE_KEY, enabled);
  return this;
}
Specifies that files in the target that don't exist in the source should be deleted. Equivalent to the -delete option in Hadoop distcp.
36,059
/**
 * Pure delegation to the superclass implementation.
 *
 * <p>NOTE(review): per the surrounding docs this override exists to re-declare the method
 * WITHOUT the CLI option annotation, removing "template" from the command-line interface —
 * confirm against the superclass declaration.
 */
public EmbeddedGobblin setTemplate ( String templateURI ) throws URISyntaxException , SpecNotFoundException , JobTemplate . TemplateException { return super . setTemplate ( templateURI ) ; }
Remove template from CLI
36,060
/**
 * Registers a file to add to the Hive session before the task runs, appending it to the
 * comma-separated {@code ADD_FILES} list in the state.
 *
 * <p>Fixed: previously the value was always {@code existing + "," + file}, which produced a
 * leading comma (an empty first entry) when the list was empty.
 *
 * @param state job state carrying the accumulated list
 * @param file  path of the file to add
 */
public static void addFile(State state, String file) {
  String existing = state.getProp(ADD_FILES, "");
  state.setProp(ADD_FILES, existing.isEmpty() ? file : existing + "," + file);
}
Add the input file to the Hive session before running the task .
36,061
/**
 * Registers a jar to add to the Hive session before the task runs, appending it to the
 * comma-separated {@code ADD_JARS} list in the state.
 *
 * <p>Fixed: previously the value was always {@code existing + "," + jar}, which produced a
 * leading comma (an empty first entry) when the list was empty.
 *
 * @param state job state carrying the accumulated list
 * @param jar   path of the jar to add
 */
public static void addJar(State state, String jar) {
  String existing = state.getProp(ADD_JARS, "");
  state.setProp(ADD_JARS, existing.isEmpty() ? jar : existing + "," + jar);
}
Add the input jar to the Hive session before running the task .
36,062
/**
 * Registers a setup query to run on the Hive session before the task, appending it to the
 * semicolon-separated {@code SETUP_QUERIES} list in the state.
 *
 * <p>Fixed: previously the value was always {@code existing + ";" + query}, which produced a
 * leading semicolon (an empty first entry) when the list was empty.
 *
 * @param state job state carrying the accumulated list
 * @param query the setup query to add
 */
public static void addSetupQuery(State state, String query) {
  String existing = state.getProp(SETUP_QUERIES, "");
  state.setProp(SETUP_QUERIES, existing.isEmpty() ? query : existing + ";" + query);
}
Run the specified setup query on the Hive session before running the task .
36,063
/**
 * Wraps the string in single quotes, adding a quote only on the side(s) that lack one.
 * For example, TestString becomes 'TestString'.
 *
 * @throws NullPointerException if {@code st} is null
 */
public static String getQuotedString(String st) {
  Preconditions.checkNotNull(st);
  StringBuilder quoted = new StringBuilder();
  if (!st.startsWith(SINGLE_QUOTE)) {
    quoted.append(SINGLE_QUOTE);
  }
  quoted.append(st);
  if (!st.endsWith(SINGLE_QUOTE)) {
    quoted.append(SINGLE_QUOTE);
  }
  return quoted.toString();
}
Add single quotes to the string if not already present. For example, TestString will be converted to 'TestString'.
36,064
/**
 * Checks whether the given string is a valid unix timestamp: exactly the expected length
 * and parseable as a long.
 */
public static boolean isUnixTimeStamp(String timeStamp) {
  if (timeStamp.length() != ComplianceConfigurationKeys.TIME_STAMP_LENGTH) {
    return false;
  }
  try {
    Long.parseLong(timeStamp);
  } catch (NumberFormatException e) {
    return false;
  }
  return true;
}
Check if a given string is a valid unixTimeStamp
36,065
/**
 * Stops all metric reporting for this GobblinMetrics instance.
 *
 * <p>Shutdown order matters: stop the JMX reporter, stop root-context reporting, flush each
 * codahale scheduled reporter with a final {@code report()}, then close all reporters via the
 * closer. IOExceptions on close are logged and swallowed; other exceptions are logged and
 * rethrown. Finally clears the started flag and deregisters this instance by id.
 * No-op (with a warning) if reporting was never started.
 */
public void stopMetricsReporting ( ) { LOGGER . info ( "Metrics reporting will be stopped: GobblinMetrics {}" , this . toString ( ) ) ; if ( ! this . metricsReportingStarted ) { LOGGER . warn ( "Metric reporting has not started yet" ) ; return ; } if ( this . jmxReporter . isPresent ( ) ) { this . jmxReporter . get ( ) . stop ( ) ; } RootMetricContext . get ( ) . stopReporting ( ) ; for ( com . codahale . metrics . ScheduledReporter scheduledReporter : this . codahaleScheduledReporters ) { scheduledReporter . report ( ) ; } try { this . codahaleReportersCloser . close ( ) ; } catch ( IOException ioe ) { LOGGER . error ( "Failed to close metric output stream for job " + this . id , ioe ) ; } catch ( Exception e ) { LOGGER . error ( "Failed to close metric output stream for job {} due to {}" , this . id , ExceptionUtils . getFullStackTrace ( e ) ) ; throw e ; } this . metricsReportingStarted = false ; GobblinMetrics . remove ( id ) ; LOGGER . info ( "Metrics reporting stopped successfully" ) ; }
Stop metric reporting .
36,066
/**
 * Creates an in-memory SpecExecutor configured only with the given URI (used for uniqueness).
 */
public static SpecExecutor createDummySpecExecutor(URI uri) {
  Properties props = new Properties();
  props.setProperty(ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY, uri.toString());
  return new InMemorySpecExecutor(ConfigFactory.parseProperties(props));
}
A creator that creates a SpecExecutor, specifying only a URI for uniqueness.
36,067
/**
 * Returns the partition-column value as a timestamp when it is present and a Long;
 * falls back to the current system time otherwise.
 */
private static long getRecordTimestamp(Optional<Object> writerPartitionColumnValue) {
  Object value = writerPartitionColumnValue.orNull();
  if (value instanceof Long) {
    return (Long) value;
  }
  return System.currentTimeMillis();
}
Check if the partition column value is present and is a Long object . Otherwise use current system time .
36,068
/**
 * Retrieves the value of the first configured partition column that is present in the record.
 * Returns absent when no partition columns are configured or none of them has a value.
 */
private Optional<Object> getWriterPartitionColumnValue(GenericRecord record) {
  if (!this.partitionColumns.isPresent()) {
    return Optional.absent();
  }
  for (String column : this.partitionColumns.get()) {
    Optional<Object> value = AvroUtils.getFieldValue(record, column);
    if (value.isPresent()) {
      return value;
    }
  }
  return Optional.absent();
}
Retrieve the value of the partition column field specified by this . partitionColumns
36,069
/**
 * Registers a weakly-referenced listener. Only a weak reference is stored, so the listener is
 * removed from the dispatcher automatically once it is garbage-collected; weak listeners
 * cannot be removed explicitly.
 *
 * @throws NullPointerException if {@code listener} is null
 */
public synchronized void addWeakListener ( L listener ) { Preconditions . checkNotNull ( listener ) ; _log . info ( "Adding a weak listener " + listener ) ; _autoListeners . put ( listener , null ) ; }
Only weak references are stored for weak listeners . They will be removed from the dispatcher automatically once the listener objects are GCed . Note that weak listeners cannot be removed explicitly .
36,070
/**
 * Reads the comma-separated job dependencies from the config; returns an empty (mutable)
 * list when the key is absent.
 */
private static List<String> getDependencies(Config config) {
  if (!config.hasPath(ConfigurationKeys.JOB_DEPENDENCIES)) {
    return new ArrayList<>();
  }
  String[] dependencies = config.getString(ConfigurationKeys.JOB_DEPENDENCIES).split(",");
  return Arrays.asList(dependencies);
}
Get job dependencies of a given job from its config .
36,071
/**
 * Finds the decorator lineage of the given object: unwraps nested {@link Decorator}s and
 * returns the chain ordered from the innermost (undecorated) object to the given object.
 */
public static List<Object> getDecoratorLineage(Object obj) {
  List<Object> lineage = Lists.newArrayList();
  Object current = obj;
  lineage.add(current);
  while (current instanceof Decorator) {
    current = ((Decorator) current).getDecoratedObject();
    lineage.add(current);
  }
  return Lists.reverse(lineage);
}
Finds the decorator lineage of the given object .
36,072
/**
 * Computes the compaction timestamp in the configured timezone. For regular compactions this is
 * "now"; for re-compactions from destination paths it is the latest modification time across all
 * input files (falling back to "now" when no files are found).
 */
private DateTime getCompactionTimestamp() throws IOException {
  String timeZoneId = this.dataset.jobProps().getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE);
  DateTimeZone timeZone = DateTimeZone.forID(timeZoneId);
  if (!this.recompactFromDestPaths) {
    return new DateTime(timeZone);
  }
  long maxTimestamp = Long.MIN_VALUE;
  for (FileStatus status : FileListUtils.listFilesRecursively(this.fs, getInputPaths())) {
    maxTimestamp = Math.max(maxTimestamp, status.getModificationTime());
  }
  if (maxTimestamp == Long.MIN_VALUE) {
    // No input files found: fall back to the current time.
    return new DateTime(timeZone);
  }
  return new DateTime(maxTimestamp, timeZone);
}
For regular compactions, the compaction timestamp is the time the compaction job starts.
36,073
/**
 * Submits a compaction-completed SLA event for the given MR job, including late and regular
 * output record counts and whether the dataset needs recompaction.
 *
 * <p>Best-effort: any failure (including Errors) is caught and logged so event submission
 * never breaks the compaction flow.
 */
private void submitSlaEvent ( Job job ) { try { CompactionSlaEventHelper . getEventSubmitterBuilder ( this . dataset , Optional . of ( job ) , this . fs ) . eventSubmitter ( this . eventSubmitter ) . eventName ( CompactionSlaEventHelper . COMPACTION_COMPLETED_EVENT_NAME ) . additionalMetadata ( CompactionSlaEventHelper . LATE_RECORD_COUNT , Long . toString ( this . lateOutputRecordCountProvider . getRecordCount ( this . getApplicableFilePaths ( this . dataset . outputLatePath ( ) , this . fs ) ) ) ) . additionalMetadata ( CompactionSlaEventHelper . REGULAR_RECORD_COUNT , Long . toString ( this . outputRecordCountProvider . getRecordCount ( this . getApplicableFilePaths ( this . dataset . outputPath ( ) , this . fs ) ) ) ) . additionalMetadata ( CompactionSlaEventHelper . RECOMPATED_METADATA_NAME , Boolean . toString ( this . dataset . needToRecompact ( ) ) ) . build ( ) . submit ( ) ; } catch ( Throwable e ) { LOG . warn ( "Failed to submit compaction completed event:" + e , e ) ; } }
Submit an event when compaction MR job completes
36,074
/**
 * Submits an event reporting the dataset's late and non-late output record counts, its output
 * path, and whether recompaction is needed.
 *
 * <p>Best-effort: any failure (including Errors) is caught and logged so event submission
 * never breaks the compaction flow.
 */
private void submitRecordsCountsEvent ( ) { long lateOutputRecordCount = this . datasetHelper . getLateOutputRecordCount ( ) ; long outputRecordCount = this . datasetHelper . getOutputRecordCount ( ) ; try { CompactionSlaEventHelper . getEventSubmitterBuilder ( this . dataset , Optional . < Job > absent ( ) , this . fs ) . eventSubmitter ( this . eventSubmitter ) . eventName ( CompactionSlaEventHelper . COMPACTION_RECORD_COUNT_EVENT ) . additionalMetadata ( CompactionSlaEventHelper . DATASET_OUTPUT_PATH , this . dataset . outputPath ( ) . toString ( ) ) . additionalMetadata ( CompactionSlaEventHelper . LATE_RECORD_COUNT , Long . toString ( lateOutputRecordCount ) ) . additionalMetadata ( CompactionSlaEventHelper . REGULAR_RECORD_COUNT , Long . toString ( outputRecordCount ) ) . additionalMetadata ( CompactionSlaEventHelper . NEED_RECOMPACT , Boolean . toString ( this . dataset . needToRecompact ( ) ) ) . build ( ) . submit ( ) ; } catch ( Throwable e ) { LOG . warn ( "Failed to submit late event count:" + e , e ) ; } }
Submit an event reporting late record counts and non - late record counts .
36,075
/**
 * Filters the record stream through the quality-check policies, dropping records that fail.
 *
 * <p>Control messages are handed to the message handler and always passed through. Record
 * envelopes are kept only when they pass {@code executePolicies}; rejected records are acked
 * before being dropped so upstream accounting stays correct. Any other stream entity passes
 * through untouched. {@code close} runs when the stream terminates for any reason (doFinally).
 */
public RecordStreamWithMetadata < D , S > processStream ( RecordStreamWithMetadata < D , S > inputStream , WorkUnitState state ) { Flowable < StreamEntity < D > > filteredStream = inputStream . getRecordStream ( ) . filter ( r -> { if ( r instanceof ControlMessage ) { getMessageHandler ( ) . handleMessage ( ( ControlMessage ) r ) ; return true ; } else if ( r instanceof RecordEnvelope ) { boolean accept = executePolicies ( ( ( RecordEnvelope ) r ) . getRecord ( ) , this . results ) ; if ( ! accept ) { r . ack ( ) ; } return accept ; } else { return true ; } } ) ; filteredStream = filteredStream . doFinally ( this :: close ) ; return inputStream . withRecordStream ( filteredStream ) ; }
Process the stream and drop any records that fail the quality check .
36,076
/**
 * Records metrics after a policy check: marks the failed or passed meter according to the
 * result and updates the policy timer with the elapsed time.
 */
public void afterCheck(Result result, long startTimeNanos) {
  if (result == Result.FAILED) {
    Instrumented.markMeter(this.failedRecordsMeter);
  } else if (result == Result.PASSED) {
    Instrumented.markMeter(this.passedRecordsMeter);
  }
  long elapsedNanos = System.nanoTime() - startTimeNanos;
  Instrumented.updateTimer(this.policyTimer, elapsedNanos, TimeUnit.NANOSECONDS);
}
Called after check is run .
36,077
/**
 * Populates the registration unit with this SerDe's class name and input/output formats,
 * then adds the schema properties derived from the given path.
 */
public void addSerDeProperties(Path path, HiveRegistrationUnit hiveUnit) throws IOException {
  String serDeClassName = this.serDeWrapper.getSerDe().getClass().getName();
  hiveUnit.setSerDeType(serDeClassName);
  hiveUnit.setInputFormat(this.serDeWrapper.getInputFormatClassName());
  hiveUnit.setOutputFormat(this.serDeWrapper.getOutputFormatClassName());
  addSchemaProperties(path, hiveUnit);
}
Add ORC SerDe attributes into HiveUnit
36,078
/**
 * Derives SerDe schema properties from the latest file under {@code path}: the schema literal,
 * the comma-joined column names, and the comma-joined column type names.
 *
 * @throws IllegalStateException if the discovered schema is not a struct (valid ORC schemas are)
 */
protected void addSchemaPropertiesHelper(Path path, HiveRegistrationUnit hiveUnit) throws IOException {
  TypeInfo schema = getSchemaFromLatestFile(path, this.fs);
  if (!(schema instanceof StructTypeInfo)) {
    throw new IllegalStateException("A valid ORC schema should be an instance of struct");
  }
  StructTypeInfo structSchema = (StructTypeInfo) schema;
  hiveUnit.setSerDeProp(SCHEMA_LITERAL, schema);
  hiveUnit.setSerDeProp(serdeConstants.LIST_COLUMNS, Joiner.on(",").join(structSchema.getAllStructFieldNames()));
  List<String> typeNames = structSchema.getAllStructFieldTypeInfos().stream()
      .map(TypeInfo::getTypeName)
      .collect(Collectors.toList());
  hiveUnit.setSerDeProp(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(",").join(typeNames));
}
Extensible in case there is another source of truth for fetching the schema instead of interacting with HDFS.
36,079
/**
 * Creates a new Retryer from the config (falling back to DEFAULTS for missing keys),
 * dispatching on the configured retry type.
 *
 * @throws IllegalArgumentException for retry types with no factory
 */
public static <T> Retryer<T> newInstance(Config config) {
  Config effective = config.withFallback(DEFAULTS);
  RetryType type = RetryType.valueOf(effective.getString(RETRY_TYPE).toUpperCase());
  if (type == RetryType.EXPONENTIAL) {
    return newExponentialRetryer(effective);
  }
  if (type == RetryType.FIXED) {
    return newFixedRetryer(effective);
  }
  throw new IllegalArgumentException(type + " is not supported");
}
Creates new instance of retryer based on the config . Accepted config keys are defined in RetryerFactory as static member variable . You can use State along with ConfigBuilder and config prefix to build config .
36,080
/**
 * Gets a (cached) PasswordManager instance configured from the given state; the master
 * password file location is read from {@code encrypt.key.loc}.
 *
 * @throws RuntimeException wrapping any failure to build the cached instance
 */
public static PasswordManager getInstance ( State state ) { try { return CACHED_INSTANCES . get ( new CachedInstanceKey ( state ) ) ; } catch ( ExecutionException e ) { throw new RuntimeException ( "Unable to get an instance of PasswordManager" , e ) ; } }
Get an instance . The location of the master password file is provided via encrypt . key . loc .
36,081
/**
 * Gets a (cached) PasswordManager instance whose master password file is {@code masterPwdLoc}.
 *
 * @throws RuntimeException wrapping any failure to build the cached instance
 */
public static PasswordManager getInstance(Path masterPwdLoc) {
  State state = new State();
  state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, masterPwdLoc.toString());
  state.setProp(ConfigurationKeys.ENCRYPT_KEY_FS_URI, masterPwdLoc.toUri());
  CachedInstanceKey cacheKey = new CachedInstanceKey(state);
  try {
    return CACHED_INSTANCES.get(cacheKey);
  } catch (ExecutionException e) {
    throw new RuntimeException("Unable to get an instance of PasswordManager", e);
  }
}
Get an instance . The master password file is given by masterPwdLoc .
36,082
/**
 * Encrypts a plaintext password using the primary (first) encryptor.
 *
 * @throws IllegalArgumentException if no master password was provided (no encryptors)
 * @throws RuntimeException wrapping any encryption failure
 */
public String encryptPassword(String plain) {
  Preconditions.checkArgument(!this.encryptors.isEmpty(),
      "A master password needs to be provided for encrypting passwords.");
  TextEncryptor primary = this.encryptors.get(0);
  try {
    return primary.encrypt(plain);
  } catch (Exception e) {
    throw new RuntimeException("Failed to encrypt password", e);
  }
}
Encrypt a password . A master password must have been provided in the constructor .
36,083
/**
 * Decrypts an encrypted password, trying each configured encryptor in order and returning the
 * first successful result. Individual failures are logged at warn level.
 *
 * @throws IllegalArgumentException if no master password was provided (no encryptors)
 * @throws RuntimeException when every encryptor fails to decrypt
 */
public String decryptPassword(String encrypted) {
  Preconditions.checkArgument(!this.encryptors.isEmpty(),
      "A master password needs to be provided for decrypting passwords.");
  for (TextEncryptor encryptor : this.encryptors) {
    try {
      return encryptor.decrypt(encrypted);
    } catch (Exception e) {
      LOG.warn("Failed attempt to decrypt secret {}", encrypted, e);
    }
  }
  LOG.error("All {} decrypt attempt(s) failed.", this.encryptors.size());
  throw new RuntimeException("Failed to decrypt password ENC(" + encrypted + ")");
}
Decrypt an encrypted password . A master password file must have been provided in the constructor .
36,084
/**
 * Creates a Helix JobConfig.Builder holding a single task that wraps the original job
 * properties (each prefixed with {@code JOB_PROPS_PREFIX}).
 *
 * <p>The single task is keyed by the planning job id; task success is marked optional so the
 * workflow does not fail the whole job. Max one attempt per task, a generous timeout
 * (default * 24 * 30), optional instance-group tag and job type from the props, configured
 * per-instance concurrency, failure threshold of 1, and the Gobblin job factory command.
 */
private JobConfig . Builder createJobBuilder ( Properties jobProps ) { String planningId = getPlanningJobId ( jobProps ) ; Map < String , TaskConfig > taskConfigMap = Maps . newHashMap ( ) ; Map < String , String > rawConfigMap = Maps . newHashMap ( ) ; for ( String key : jobProps . stringPropertyNames ( ) ) { rawConfigMap . put ( JOB_PROPS_PREFIX + key , ( String ) jobProps . get ( key ) ) ; } rawConfigMap . put ( GobblinClusterConfigurationKeys . TASK_SUCCESS_OPTIONAL_KEY , "true" ) ; taskConfigMap . put ( planningId , TaskConfig . Builder . from ( rawConfigMap ) ) ; JobConfig . Builder jobConfigBuilder = new JobConfig . Builder ( ) ; jobConfigBuilder . setMaxAttemptsPerTask ( 1 ) ; jobConfigBuilder . setTimeoutPerTask ( JobConfig . DEFAULT_TIMEOUT_PER_TASK * 24 * 30 ) ; if ( jobProps . containsKey ( GobblinClusterConfigurationKeys . HELIX_PLANNING_JOB_TAG_KEY ) ) { String jobPlanningTag = jobProps . getProperty ( GobblinClusterConfigurationKeys . HELIX_PLANNING_JOB_TAG_KEY ) ; log . info ( "PlanningJob {} has tags associated : {}" , planningId , jobPlanningTag ) ; jobConfigBuilder . setInstanceGroupTag ( jobPlanningTag ) ; } if ( jobProps . containsKey ( GobblinClusterConfigurationKeys . HELIX_PLANNING_JOB_TYPE_KEY ) ) { String jobType = jobProps . getProperty ( GobblinClusterConfigurationKeys . HELIX_PLANNING_JOB_TYPE_KEY ) ; log . info ( "PlanningJob {} has types associated : {}" , planningId , jobType ) ; jobConfigBuilder . setJobType ( jobType ) ; } jobConfigBuilder . setNumConcurrentTasksPerInstance ( PropertiesUtils . getPropAsInt ( jobProps , GobblinClusterConfigurationKeys . HELIX_CLUSTER_TASK_CONCURRENCY , GobblinClusterConfigurationKeys . HELIX_CLUSTER_TASK_CONCURRENCY_DEFAULT ) ) ; jobConfigBuilder . setFailureThreshold ( 1 ) ; jobConfigBuilder . addTaskConfigMap ( taskConfigMap ) . setCommand ( GobblinTaskRunner . GOBBLIN_JOB_FACTORY_NAME ) ; return jobConfigBuilder ; }
Create a job config builder which has a single task that wraps the original jobProps .
36,085
/**
 * Submits a planning job to Helix so it can be launched from a remote node, and records
 * that the submission happened.
 */
private void submitJobToHelix(String jobName, String jobId, JobConfig.Builder jobConfigBuilder) throws Exception {
  TaskDriver driver = new TaskDriver(this.planningJobHelixManager);
  HelixUtils.submitJobToWorkFlow(jobConfigBuilder, jobName, jobId, driver,
      this.planningJobHelixManager, this.workFlowExpiryTimeSeconds);
  this.jobSubmitted = true;
}
Submit a planning job to helix so that it can launched from a remote node .
36,086
/**
 * Lists all files (or a directory itself when it contains no files) under the given path.
 * Convenience overload that applies no path filtering.
 */
public static List < FileStatus > listMostNestedPathRecursively ( FileSystem fs , Path path ) throws IOException { return listMostNestedPathRecursively ( fs , path , NO_OP_PATH_FILTER ) ; }
Method to list out all files or directory if no file exists under a specified path .
36,087
/**
 * Returns any non-hidden file (not a directory) found under the given path, or null if none
 * exists. Uses an explicit stack for iterative depth-first traversal, returning on the first
 * file encountered.
 *
 * <p>The empty catch on {@link FileNotFoundException} deliberately tolerates directories
 * deleted concurrently between listing and traversal — the search simply continues.
 */
public static FileStatus getAnyNonHiddenFile ( FileSystem fs , Path path ) throws IOException { HiddenFilter hiddenFilter = new HiddenFilter ( ) ; FileStatus root = fs . getFileStatus ( path ) ; if ( ! root . isDirectory ( ) ) { return hiddenFilter . accept ( path ) ? root : null ; } Stack < FileStatus > folders = new Stack < > ( ) ; folders . push ( root ) ; while ( ! folders . empty ( ) ) { FileStatus curFolder = folders . pop ( ) ; try { for ( FileStatus status : fs . listStatus ( curFolder . getPath ( ) , hiddenFilter ) ) { if ( status . isDirectory ( ) ) { folders . push ( status ) ; } else { return status ; } } } catch ( FileNotFoundException exc ) { } } return null ; }
Get any data file which is not hidden or a directory from the given path
36,088
/**
 * Retrieves the {@link FlowStatus} identified by the given compound key.
 *
 * @param key composite key holding flow group, flow name and flow execution id
 * @return the converted flow status for that execution
 */
public FlowStatus get(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
  FlowStatusId id = key.getKey();
  String flowGroup = id.getFlowGroup();
  String flowName = id.getFlowName();
  long flowExecutionId = id.getFlowExecutionId();

  LOG.info("Get called with flowGroup " + flowGroup + " flowName " + flowName
      + " flowExecutionId " + flowExecutionId);

  org.apache.gobblin.service.monitoring.FlowStatus flowStatus =
      _flowStatusGenerator.getFlowStatus(flowName, flowGroup, flowExecutionId);
  return convertFlowStatus(flowStatus);
}
Retrieve the FlowStatus with the given key
36,089
/**
 * Computes the new flow status from the current flow status and an incoming job status.
 * Precedence: FAILED dominates everything, then CANCELLED, then any in-flight signal
 * yields RUNNING; otherwise the current flow status is kept.
 *
 * @param jobExecutionStatus         status reported by a job of the flow
 * @param currentFlowExecutionStatus the flow's current aggregate status
 * @return the updated flow-level {@link ExecutionStatus}
 */
static ExecutionStatus updatedFlowExecutionStatus(ExecutionStatus jobExecutionStatus,
    ExecutionStatus currentFlowExecutionStatus) {
  // Terminal failure on either side dominates all other states.
  if (jobExecutionStatus == ExecutionStatus.FAILED
      || currentFlowExecutionStatus == ExecutionStatus.FAILED) {
    return ExecutionStatus.FAILED;
  }
  // Cancellation dominates any non-failed state.
  if (jobExecutionStatus == ExecutionStatus.CANCELLED
      || currentFlowExecutionStatus == ExecutionStatus.CANCELLED) {
    return ExecutionStatus.CANCELLED;
  }
  // Any in-flight signal keeps (or moves) the flow to RUNNING.
  boolean inFlight = currentFlowExecutionStatus == ExecutionStatus.RUNNING
      || jobExecutionStatus == ExecutionStatus.RUNNING
      || jobExecutionStatus == ExecutionStatus.ORCHESTRATED
      || jobExecutionStatus == ExecutionStatus.COMPILED;
  return inFlight ? ExecutionStatus.RUNNING : currentFlowExecutionStatus;
}
Determines the new flow status based on the current flow status and new job status
36,090
/**
 * A naive edge-identity calculation: {@code "<sourceNode>-<executorUri>-<targetNode>"}.
 *
 * @param sourceNode           the edge's source node
 * @param targetNode           the edge's target node
 * @param specExecutorInstance the executor whose URI disambiguates parallel edges
 * @return the edge identity string
 */
public static String calculateEdgeIdentity(ServiceNode sourceNode, ServiceNode targetNode,
    SpecExecutor specExecutorInstance) {
  // String.valueOf mirrors the null-safe behavior of string concatenation.
  return String.join("-",
      sourceNode.getNodeName(),
      String.valueOf(specExecutorInstance.getUri()),
      targetNode.getNodeName());
}
A naive implementation of edge identity calculation .
36,091
/**
 * Returns the base URI for a config store. The base should be the root of the zip
 * file, so the path component of the given URI is replaced with {@code null}.
 *
 * @param configKey a config key URI
 * @return the same URI with its path component removed
 * @throws URISyntaxException if the resulting URI is malformed
 */
private URI getBaseURI(URI configKey) throws URISyntaxException {
  final String rootPath = null; // drop the path — the store root is the zip root
  return new URI(configKey.getScheme(), configKey.getAuthority(), rootPath,
      configKey.getQuery(), configKey.getFragment());
}
Base URI for a config store should be root of the zip file so change path part of URI to be null
36,092
/**
 * Stops the timer and submits the timing event together with the supplied metadata.
 * Calling this after the timer has already been stopped is a no-op.
 *
 * @param additionalMetadata extra key/value pairs to attach to the emitted event
 */
public void stop(Map<String, String> additionalMetadata) {
  if (this.stopped) {
    return; // already stopped — do nothing
  }
  this.stopped = true;

  final long endTime = System.currentTimeMillis();
  final long duration = endTime - this.startTime;

  // Merge caller metadata with the standard timing fields.
  Map<String, String> finalMetadata = Maps.newHashMap();
  finalMetadata.putAll(additionalMetadata);
  finalMetadata.put(EventSubmitter.EVENT_TYPE, METADATA_TIMING_EVENT);
  finalMetadata.put(METADATA_START_TIME, Long.toString(this.startTime));
  finalMetadata.put(METADATA_END_TIME, Long.toString(endTime));
  finalMetadata.put(METADATA_DURATION, Long.toString(duration));

  this.submitter.submit(this.name, finalMetadata);
}
Stop the timer and submit the event along with the additional metadata specified . If the timer was already stopped before this is a no - op .
36,093
/**
 * Returns a {@link RecordAccessor} for the given object, asking each registered
 * provider in turn; the first provider that recognizes the object wins.
 *
 * @param obj the record object to wrap
 * @return an accessor for {@code obj}
 * @throws IllegalArgumentException if no provider can build an accessor
 */
public synchronized static RecordAccessor getRecordAccessorForObject(Object obj) {
  for (RecordAccessorProvider provider : recordAccessorProviders) {
    RecordAccessor accessor = provider.recordAccessorForObject(obj);
    if (accessor != null) {
      return accessor;
    }
  }
  throw new IllegalArgumentException("Can't build accessor for object " + obj.toString() + "!");
}
Get a RecordAccessor for a given object . Throws IllegalArgumentException if none can be built .
36,094
/**
 * Returns the files under {@code inputFolder} whose modification time is more recent
 * than the last compaction timestamp recorded inside {@code outputFolder}. An empty
 * set is returned when either folder is missing or no file is newer.
 *
 * @param inputFolder  folder to scan for new data
 * @param outputFolder folder holding the previous compaction timestamp
 * @return paths of files modified after the last compaction
 * @throws IOException on file system errors
 */
private Set<Path> getNewDataInFolder(Path inputFolder, Path outputFolder) throws IOException {
  Set<Path> newFiles = Sets.newHashSet();
  // Nothing to compare when either side does not exist yet.
  if (!this.fs.exists(inputFolder) || !this.fs.exists(outputFolder)) {
    return newFiles;
  }

  DateTime lastCompactionTime =
      new DateTime(MRCompactor.readCompactionTimestamp(this.fs, outputFolder));
  for (FileStatus status : FileListUtils.listFilesRecursively(this.fs, inputFolder)) {
    DateTime modTime = new DateTime(status.getModificationTime());
    if (modTime.isAfter(lastCompactionTime)) {
      LOG.info("[" + modTime.getMillis() + "] " + status.getPath() + " is after "
          + lastCompactionTime.getMillis());
      newFiles.add(status.getPath());
    }
  }

  if (!newFiles.isEmpty()) {
    LOG.info(String.format("Found %d new files within folder %s which are more recent than the previous "
        + "compaction start time of %s.", newFiles.size(), inputFolder, lastCompactionTime));
  }
  return newFiles;
}
Check if inputFolder contains any files which have modification times which are more recent than the last compaction time as stored within outputFolder ; return any files which do . An empty set will be returned if all files are older than the last compaction time .
36,095
/**
 * Generates a CTAS statement that dumps the results of {@code sourceQuery} into a new
 * temporary staging table at the given location.
 *
 * @param outputDbAndTable    destination database and table names
 * @param sourceQuery         the SELECT query whose results populate the table
 * @param storageFormat       Hive storage format for the staging table
 * @param outputTableLocation file system location backing the staging table
 * @return the complete CREATE TEMPORARY TABLE ... AS statement
 */
public static String generateStagingCTASStatement(HiveDatasetFinder.DbAndTable outputDbAndTable,
    String sourceQuery, StorageFormat storageFormat, String outputTableLocation) {
  String db = outputDbAndTable.getDb();
  String table = outputDbAndTable.getTable();
  Preconditions.checkArgument(!Strings.isNullOrEmpty(db) && !Strings.isNullOrEmpty(table),
      "Invalid output db and table " + outputDbAndTable);
  return String.format("CREATE TEMPORARY TABLE `%s`.`%s` STORED AS %s LOCATION '%s' AS %s",
      db, table, storageFormat.getHiveName(), outputTableLocation, sourceQuery);
}
Generates a CTAS statement to dump the results of a query into a new table .
36,096
/**
 * Builds an INSERT OVERWRITE statement that copies data from the input table into the
 * output table, optionally restricted to (and partitioned by) the given partition
 * key/value pairs.
 *
 * @param inputTblName             source table name
 * @param outputTblName            destination table name
 * @param inputDbName              source database name
 * @param outputDbName             destination database name
 * @param optionalPartitionDMLInfo optional partition key/value pairs to copy
 * @return the complete DML statement
 */
public static String generateTableCopy(String inputTblName, String outputTblName,
    String inputDbName, String outputDbName,
    Optional<Map<String, String>> optionalPartitionDMLInfo) {
  Preconditions.checkArgument(StringUtils.isNotBlank(inputTblName));
  Preconditions.checkArgument(StringUtils.isNotBlank(outputTblName));
  Preconditions.checkArgument(StringUtils.isNotBlank(inputDbName));
  Preconditions.checkArgument(StringUtils.isNotBlank(outputDbName));

  boolean hasPartitions =
      optionalPartitionDMLInfo.isPresent() && optionalPartitionDMLInfo.get().size() > 0;

  StringBuilder dmlQuery = new StringBuilder();
  dmlQuery.append(String.format("INSERT OVERWRITE TABLE `%s`.`%s` %n", outputDbName, outputTblName));
  if (hasPartitions) {
    dmlQuery.append(partitionKeyValues(optionalPartitionDMLInfo));
  }
  dmlQuery.append(String.format("SELECT * FROM `%s`.`%s`", inputDbName, inputTblName));
  if (hasPartitions) {
    dmlQuery.append(" WHERE ");
    // NOTE(review): partition values are embedded with bare single-quote wrapping; a value
    // containing a quote would break the statement — assumed to come from trusted metadata.
    String partitionsAndValues = optionalPartitionDMLInfo.get().entrySet().stream()
        .map(e -> "`" + e.getKey() + "`='" + e.getValue() + "'")
        .collect(joining(" AND "));
    dmlQuery.append(partitionsAndValues);
  }
  return dmlQuery.toString();
}
Fills data from input table into output table .
36,097
/**
 * Fills {@code partitionsDDLInfo} (partition name -> type) and {@code partitionsDMLInfo}
 * (partition name -> value) from the partition attached to the conversion entity, if any.
 *
 * @param conversionEntity  entity whose (optional) partition supplies the info
 * @param partitionsDDLInfo out-param: partition name to partition column type
 * @param partitionsDMLInfo out-param: partition name to partition value
 * @throws IllegalArgumentException if the partition info/type strings are inconsistent
 *         or an entry is not of the form {@code name=value}
 */
public static void populatePartitionInfo(HiveProcessingEntity conversionEntity,
    Map<String, String> partitionsDDLInfo, Map<String, String> partitionsDMLInfo) {
  String partitionsInfoString = null;
  String partitionsTypeString = null;

  if (conversionEntity.getPartition().isPresent()) {
    partitionsInfoString = conversionEntity.getPartition().get().getName();
    partitionsTypeString =
        conversionEntity.getPartition().get().getSchema().getProperty("partition_columns.types");
  }

  if (StringUtils.isNotBlank(partitionsInfoString) || StringUtils.isNotBlank(partitionsTypeString)) {
    // Either both strings are present or neither; a lone one is a malformed partition.
    if (StringUtils.isBlank(partitionsInfoString) || StringUtils.isBlank(partitionsTypeString)) {
      throw new IllegalArgumentException(
          "Both partitions info and partitions type must be present, if one is specified");
    }
    List<String> pInfo =
        Splitter.on(HIVE_PARTITIONS_INFO).omitEmptyStrings().trimResults().splitToList(partitionsInfoString);
    List<String> pType =
        Splitter.on(HIVE_PARTITIONS_TYPE).omitEmptyStrings().trimResults().splitToList(partitionsTypeString);
    log.debug("PartitionsInfoString: " + partitionsInfoString);
    log.debug("PartitionsTypeString: " + partitionsTypeString);

    if (pInfo.size() != pType.size()) {
      throw new IllegalArgumentException("partitions info and partitions type list should be of same size");
    }

    for (int i = 0; i < pInfo.size(); i++) {
      // Each entry must be of the form name=value.
      List<String> partitionInfoParts =
          Splitter.on("=").omitEmptyStrings().trimResults().splitToList(pInfo.get(i));
      String partitionType = pType.get(i);
      if (partitionInfoParts.size() != 2) {
        throw new IllegalArgumentException(String.format(
            "Partition details should be of the format partitionName=partitionValue. Received: %s",
            pInfo.get(i)));
      }
      partitionsDDLInfo.put(partitionInfoParts.get(0), partitionType);
      partitionsDMLInfo.put(partitionInfoParts.get(0), partitionInfoParts.get(1));
    }
  }
}
It fills partitionsDDLInfo and partitionsDMLInfo with the partition information
36,098
/**
 * Creates a staging directory, mirroring the permissions (and, unless disabled, the
 * group) of the source table's data location when one exists.
 *
 * @param fs               file system to create the directory on
 * @param destination      path of the staging directory to create
 * @param conversionEntity entity whose table data location supplies permission/group
 * @param workUnit         work unit state (consulted for the skip-setgroup flag)
 * @throws RuntimeException wrapping any {@link IOException} from the file system
 */
public static void createStagingDirectory(FileSystem fs, String destination,
    HiveProcessingEntity conversionEntity, WorkUnitState workUnit) {
  Path destinationPath = new Path(destination);
  try {
    FsPermission permission;
    String group = null;
    if (conversionEntity.getTable().getDataLocation() != null) {
      // Mirror the source data's permission and group on the staging directory.
      FileStatus sourceDataFileStatus = fs.getFileStatus(conversionEntity.getTable().getDataLocation());
      permission = sourceDataFileStatus.getPermission();
      group = sourceDataFileStatus.getGroup();
    } else {
      permission = FsPermission.getDefault();
    }

    if (!fs.mkdirs(destinationPath, permission)) {
      throw new RuntimeException(
          String.format("Failed to create path %s with permissions %s", destinationPath, permission));
    } else {
      // mkdirs applies the umask; set the permission explicitly afterwards.
      fs.setPermission(destinationPath, permission);
      if (group != null && !workUnit.getPropAsBoolean(HIVE_DATASET_DESTINATION_SKIP_SETGROUP,
          DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP)) {
        fs.setOwner(destinationPath, null, group);
      }
      log.info(String.format("Created %s with permissions %s and group %s", destinationPath, permission, group));
    }
  } catch (IOException e) {
    // Replaces the deprecated Throwables.propagate(e); only IOException is caught here,
    // so wrapping it in a RuntimeException is exactly what propagate did.
    throw new RuntimeException(e);
  }
}
Creates a staging directory with the permission as in source directory .
36,099
/**
 * Returns a pair of the destination Hive table and, when the table is partitioned,
 * its partitions. Both sides are absent when the table does not exist.
 *
 * @param dbName    destination database name
 * @param tableName destination table name
 * @param props     properties used to obtain a metastore client pool
 * @return pair of (optional table, optional partition list)
 * @throws RuntimeException wrapping any {@link IOException} or {@link TException}
 */
public static Pair<Optional<Table>, Optional<List<Partition>>> getDestinationTableMeta(
    String dbName, String tableName, Properties props) {
  Optional<Table> table = Optional.<Table>absent();
  Optional<List<Partition>> partitions = Optional.<List<Partition>>absent();

  try {
    HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(props,
        Optional.fromNullable(props.getProperty(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
    try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
      table = Optional.of(client.get().getTable(dbName, tableName));
      // Optional.of() is never absent, so the previous isPresent() check was dead code.
      org.apache.hadoop.hive.ql.metadata.Table qlTable =
          new org.apache.hadoop.hive.ql.metadata.Table(table.get());
      if (HiveUtils.isPartitioned(qlTable)) {
        partitions = Optional.of(HiveUtils.getPartitions(client.get(), qlTable, Optional.<String>absent()));
      }
    }
  } catch (NoSuchObjectException e) {
    // Missing table is an expected outcome: report it as (absent, absent).
    return ImmutablePair.of(table, partitions);
  } catch (IOException | TException e) {
    throw new RuntimeException("Could not fetch destination table metadata", e);
  }

  return ImmutablePair.of(table, partitions);
}
Returns a pair of Hive table and its partitions