idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
36,100
/**
 * Builds a mapping from 1-based parameter position to its JDBC type code
 * (java.sql.Types) for the fixed-batch insert statement.
 *
 * @return map of parameter position to SQL type code
 * @throws RuntimeException if the parameter metadata cannot be read
 */
private Map<Integer, Integer> getColumnPosSqlTypes() {
  try {
    ParameterMetaData metaData = this.insertPstmtForFixedBatch.getParameterMetaData();
    int paramCount = metaData.getParameterCount();
    final Map<Integer, Integer> sqlTypesByPos = Maps.newHashMap();
    for (int pos = 1; pos <= paramCount; pos++) {
      sqlTypesByPos.put(pos, metaData.getParameterType(pos));
    }
    return sqlTypesByPos;
  } catch (SQLException e) {
    throw new RuntimeException("Cannot retrieve columns types for batch insert", e);
  }
}
Creates a mapping between column positions and their data types
36,101
/**
 * Checks the outcome of a batch request: fails fast on an HTTP-level error, then inspects
 * each per-record sub-result and throws if any sub-result failed.
 *
 * A sub-result with status 400 is tolerated only when the operation is
 * INSERT_ONLY_NOT_EXIST and the result payload identifies a duplicate record.
 *
 * @param response the HTTP response of the batch request (entity is consumed)
 * @throws RuntimeException if the overall request or any non-duplicate sub-result failed
 */
private void processBatchRequestResponse(CloseableHttpResponse response) throws IOException, UnexpectedResponseException {
  String entityStr = EntityUtils.toString(response.getEntity());
  int statusCode = response.getStatusLine().getStatusCode();
  // Transport-level failure: fail the whole batch immediately.
  if (statusCode >= 400) {
    throw new RuntimeException("Failed due to " + entityStr + " (Detail: " + ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE) + " )");
  }
  JsonObject jsonBody = new JsonParser().parse(entityStr).getAsJsonObject();
  // Fast path: server reports no per-record errors.
  if (!jsonBody.get("hasErrors").getAsBoolean()) {
    return;
  }
  JsonArray results = jsonBody.get("results").getAsJsonArray();
  for (JsonElement jsonElem : results) {
    JsonObject json = jsonElem.getAsJsonObject();
    int subStatusCode = json.get("statusCode").getAsInt();
    if (subStatusCode < 400) {
      continue; // this record succeeded
    } else if (subStatusCode == 400 && Operation.INSERT_ONLY_NOT_EXIST.equals(operation)) {
      // For insert-if-not-exists, a 400 caused by a duplicate is expected and ignored.
      JsonElement resultJsonElem = json.get("result");
      Preconditions.checkNotNull(resultJsonElem, "Error response should contain result property");
      JsonObject resultJsonObject = resultJsonElem.getAsJsonArray().get(0).getAsJsonObject();
      if (isDuplicate(resultJsonObject, subStatusCode)) {
        continue;
      }
    }
    // Any other failing sub-result fails the whole batch.
    throw new RuntimeException("Failed due to " + jsonBody + " (Detail: " + ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE) + " )");
  }
}
Checks the results from the batch response; if any of the results indicates a failure, throws an exception.
36,102
/**
 * Builds an MD5Digest from its human-readable hex string representation.
 *
 * @param md5String hex-encoded MD5 digest
 * @return the corresponding MD5Digest
 * @throws IllegalArgumentException if the string is not valid hex
 */
public static MD5Digest fromString(String md5String) {
  try {
    final byte[] decoded = Hex.decodeHex(md5String.toCharArray());
    return new MD5Digest(md5String, decoded);
  } catch (DecoderException e) {
    throw new IllegalArgumentException("Unable to convert md5string", e);
  }
}
Static method to get an MD5Digest from a human - readable string representation
36,103
/**
 * Builds an MD5Digest from its binary byte representation.
 *
 * @param md5Bytes raw digest bytes; must be exactly MD5_BYTES_LENGTH long
 * @return the corresponding MD5Digest
 * @throws IllegalArgumentException if the array has the wrong length
 */
public static MD5Digest fromBytes(byte[] md5Bytes) {
  final int actualLength = md5Bytes.length;
  Preconditions.checkArgument(actualLength == MD5_BYTES_LENGTH,
      "md5 bytes must be " + MD5_BYTES_LENGTH + " bytes in length, found " + actualLength + " bytes.");
  return new MD5Digest(Hex.encodeHexString(md5Bytes), md5Bytes);
}
Static method to get an MD5Digest from a binary byte representation
36,104
/**
 * Builds an MD5Digest from MD5_BYTES_LENGTH bytes starting at the given offset.
 *
 * Validates that enough bytes are available: without this check,
 * Arrays.copyOfRange would silently zero-pad a too-short input and produce a
 * corrupt digest (and it brings this overload in line with the validation done
 * by the no-offset fromBytes overload).
 *
 * @param md5Bytes source array containing the digest bytes
 * @param offset   index of the first digest byte
 * @return the corresponding MD5Digest
 * @throws IllegalArgumentException if fewer than MD5_BYTES_LENGTH bytes are available
 */
public static MD5Digest fromBytes(byte[] md5Bytes, int offset) {
  Preconditions.checkArgument(offset >= 0 && md5Bytes.length - offset >= MD5_BYTES_LENGTH,
      "md5 bytes must contain " + MD5_BYTES_LENGTH + " bytes from offset " + offset
          + ", found " + (md5Bytes.length - offset) + " bytes.");
  byte[] md5BytesCopy = Arrays.copyOfRange(md5Bytes, offset, offset + MD5_BYTES_LENGTH);
  String md5String = Hex.encodeHexString(md5BytesCopy);
  return new MD5Digest(md5String, md5BytesCopy);
}
Static method to get an MD5Digest from a binary byte representation .
36,105
/**
 * Offers an element to the container after estimating its resource requirement.
 * Sets the rejected-element flag when the element is not accepted.
 *
 * @param t element to offer
 * @return true if the element was accepted
 * @throws RuntimeException if the container has been closed
 */
public boolean add(T t) {
  if (this.closed) {
    throw new RuntimeException(ConcurrentBoundedPriorityIterable.class.getSimpleName() + " is no longer accepting requests!");
  }
  AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T> candidate =
      new AllocatedRequestsIteratorBase.RequestWithResourceRequirement<>(
          t, this.estimator.estimateRequirement(t, this.resourcePool));
  final boolean accepted = addImpl(candidate);
  if (!accepted) {
    this.rejectedElement = true;
  }
  return accepted;
}
Offer an element to the container .
36,106
/**
 * Sorts the given partitions in place by complete name (dbName.tableName.partitionName).
 *
 * @param partitions list to sort (mutated)
 * @return the same list, sorted
 */
public static List<Partition> sortPartitions(List<Partition> partitions) {
  Comparator<Partition> byCompleteName = new Comparator<Partition>() {
    public int compare(Partition left, Partition right) {
      return left.getCompleteName().compareTo(right.getCompleteName());
    }
  };
  Collections.sort(partitions, byCompleteName);
  return partitions;
}
Sort all partitions inplace on the basis of complete name ie dbName . tableName . partitionName
36,107
/**
 * Fetches all partitions of this dataset's table from the metastore and returns them
 * sorted by complete name. The metastore client is returned to the pool automatically.
 *
 * @return sorted list of partitions
 * @throws IOException if the metastore cannot be queried
 */
public List<Partition> getPartitionsFromDataset() throws IOException {
  try (AutoReturnableObject<IMetaStoreClient> client = getClientPool().getClient()) {
    return sortPartitions(HiveUtils.getPartitions(client.get(), getTable(), Optional.<String>absent()));
  }
}
This method returns a sorted list of partitions .
36,108
/**
 * Adds a file set and its work units to the container.
 * Sets the rejected-file-set flag when the set is not accepted.
 *
 * @param fileSet   file set to add
 * @param workUnits work units belonging to the file set
 * @return true if the file set was accepted
 */
public boolean addFileSet(FileSet<CopyEntity> fileSet, List<WorkUnit> workUnits) {
  final boolean accepted = addFileSetImpl(fileSet, workUnits);
  if (!accepted) {
    this.rejectedFileSet = true;
  }
  return accepted;
}
Add a file set to the container .
36,109
/**
 * Cleans a single Hive partition version, acting as the version's owner.
 *
 * Decision order: if the data location is missing, only the metadata is dropped; if the
 * version location equals the dataset location or is listed as non-deletable, the data is
 * kept and only metadata is dropped; otherwise, if the location has content, the data is
 * deleted (unless simulating, in which case nothing is done at all) and then the metadata
 * is dropped.
 *
 * @throws IOException on filesystem failures
 */
public void clean() throws IOException {
  Path versionLocation = ((HivePartitionRetentionVersion) this.datasetVersion).getLocation();
  Path datasetLocation = ((CleanableHivePartitionDataset) this.cleanableDataset).getLocation();
  String completeName = ((HivePartitionRetentionVersion) this.datasetVersion).datasetURN();
  State state = new State(this.state);
  // Operate on the filesystem as the owner of this version.
  this.fs = ProxyUtils.getOwnerFs(state, this.versionOwner);
  try (HiveProxyQueryExecutor queryExecutor = ProxyUtils.getQueryExecutor(state, this.versionOwner)) {
    log.info("Trying to clean version " + completeName);
    if (!this.fs.exists(versionLocation)) {
      // No data at the version location: only a metadata drop is needed.
      log.info("Data versionLocation doesn't exist. Metadata will be dropped for the version " + completeName);
    } else if (datasetLocation.toString().equalsIgnoreCase(versionLocation.toString())) {
      // Version shares the dataset's location: deleting data would destroy the dataset itself.
      log.info("Dataset location is same as version location. Won't delete the data but metadata will be dropped for the version " + completeName);
    } else if (this.nonDeletableVersionLocations.contains(versionLocation.toString())) {
      // Location is explicitly protected from deletion.
      log.info("This version corresponds to the non deletable version. Won't delete the data but metadata will be dropped for the version " + completeName);
    } else if (HadoopUtils.hasContent(this.fs, versionLocation)) {
      if (this.simulate) {
        // Simulation mode: skip both the data delete and the metadata drop.
        log.info("Simulate is set to true. Won't delete the partition " + completeName);
        return;
      }
      log.info("Deleting data from the version " + completeName);
      this.fs.delete(versionLocation, true);
    }
    // All non-simulated paths drop the version's metadata.
    executeDropVersionQueries(queryExecutor);
  }
}
If simulate is set to true this will simply return . If version is pointing to an empty location drop the partition and close the jdbc connection . If version is pointing to the same location as of the dataset then drop the partition and close the jdbc connection . If version is pointing to the non deletable version locations then drop the partition and close the jdbc connection . Otherwise delete the data underneath drop the partition and close the jdbc connection .
36,110
/**
 * Helper method to add the directory containing the hive-site.xml file to the classpath,
 * by reflectively invoking the protected {@code URLClassLoader.addURL} on the system
 * class loader.
 *
 * NOTE(review): casting the system class loader to URLClassLoader fails on Java 9+ —
 * confirm the target JVM version before relying on this.
 *
 * @param hiveSiteDir directory expected to contain hive-site.xml
 * @throws RuntimeException if the URL cannot be formed or reflection fails
 */
private static void addHiveSiteDirToClasspath(String hiveSiteDir) {
  LOG.info("Adding " + hiveSiteDir + " to CLASSPATH");
  File f = new File(hiveSiteDir);
  try {
    URL u = f.toURI().toURL();
    // addURL is protected, so it must be made accessible before invocation.
    URLClassLoader urlClassLoader = (URLClassLoader) ClassLoader.getSystemClassLoader();
    Class<URLClassLoader> urlClass = URLClassLoader.class;
    Method method = urlClass.getDeclaredMethod("addURL", new Class[] { URL.class });
    method.setAccessible(true);
    method.invoke(urlClassLoader, new Object[] { u });
  } catch (ReflectiveOperationException | IOException e) {
    throw new RuntimeException("Unable to add hive.site.dir to CLASSPATH", e);
  }
}
Helper method to add the directory containing the hive - site . xml file to the classpath
36,111
/**
 * Flattens a statement onto a single line (replacing all line breaks with spaces) and
 * truncates it to MAX_OUTPUT_STMT_LENGTH characters, appending a note with the number
 * of omitted characters. Used to keep log output compact.
 *
 * @param statement SQL statement to chop
 * @return flattened, possibly truncated statement
 */
static String choppedStatementNoLineChange(String statement) {
  final String flattened = statement.replaceAll("\\r\\n|\\r|\\n", " ");
  if (flattened.length() <= MAX_OUTPUT_STMT_LENGTH) {
    return flattened;
  }
  final int omitted = flattened.length() - MAX_OUTPUT_STMT_LENGTH;
  return flattened.substring(0, MAX_OUTPUT_STMT_LENGTH) + "...... (" + omitted + " characters omitted)";
}
Chopped statements with all line - changing character being removed for saving space of log .
36,112
/**
 * Registers a JVM shutdown hook that stops the service manager, waiting up to
 * five seconds for services to terminate.
 */
private void addShutdownHook() {
  final ServiceManager manager = this.serviceManager;
  Runtime.getRuntime().addShutdownHook(new Thread() {
    public void run() {
      try {
        manager.stopAsync().awaitStopped(5, TimeUnit.SECONDS);
      } catch (TimeoutException ignored) {
        // Best-effort stop during JVM shutdown; nothing useful can be done on timeout.
      }
    }
  });
}
Registers a JVM shutdown hook that stops the service manager, waiting up to five seconds for it to stop.
36,113
/**
 * Returns PASSED if the record is a GenericRecord whose "header" sub-record has a
 * non-null "time" or "timestamp" field; FAILED otherwise.
 *
 * @param record record to evaluate
 * @return PASSED or FAILED
 */
public Result executePolicy(Object record) {
  if (!(record instanceof GenericRecord)) {
    return RowLevelPolicy.Result.FAILED;
  }
  GenericRecord header = (GenericRecord) ((GenericRecord) record).get("header");
  if (header == null) {
    return RowLevelPolicy.Result.FAILED;
  }
  boolean hasTimeField = header.get("time") != null || header.get("timestamp") != null;
  return hasTimeField ? RowLevelPolicy.Result.PASSED : RowLevelPolicy.Result.FAILED;
}
Return PASS if the record has either header . time or header . timestamp field .
36,114
/**
 * Opens and connects a fresh SFTP channel on the current session. A new channel is
 * created per command so multiple commands can run in parallel; channels are cleaned
 * up when the session closes.
 *
 * @return a connected ChannelSftp
 * @throws SftpException if the channel cannot be opened
 */
public ChannelSftp getSftpChannel() throws SftpException {
  try {
    final ChannelSftp sftp = (ChannelSftp) this.session.openChannel("sftp");
    sftp.connect();
    return sftp;
  } catch (JSchException e) {
    throw new SftpException(0, "Cannot open a channel to SFTP server", e);
  }
}
Create new channel every time a command needs to be executed . This is required to support execution of multiple commands in parallel . All created channels are cleaned up when the session is closed .
36,115
/**
 * Opens and connects an exec channel with the given command set on it.
 *
 * @param command command to run on the remote host
 * @return a connected ChannelExec
 * @throws SftpException if the channel cannot be opened
 */
public ChannelExec getExecChannel(String command) throws SftpException {
  try {
    final ChannelExec exec = (ChannelExec) this.session.openChannel("exec");
    exec.setCommand(command);
    exec.connect();
    return exec;
  } catch (JSchException e) {
    throw new SftpException(0, "Cannot open a channel to SFTP server", e);
  }
}
Creates a new exec channel to run the given command on the remote server.
36,116
/**
 * Opens an SSH session to the configured host, preferring public-key authentication
 * (falling back to password if one is configured), with optional HTTP proxy support.
 *
 * NOTE(review): the log line below prints the value returned by readPassword for the
 * private key property — if that value is a decrypted secret rather than a file path,
 * this leaks it into the logs; confirm what the property holds.
 *
 * @throws FileBasedHelperException if the SSH connection cannot be established
 */
public void connect() throws FileBasedHelperException {
  String privateKey = PasswordManager.getInstance(this.state).readPassword(this.state.getProp(ConfigurationKeys.SOURCE_CONN_PRIVATE_KEY));
  String password = PasswordManager.getInstance(this.state).readPassword(this.state.getProp(ConfigurationKeys.SOURCE_CONN_PASSWORD));
  String knownHosts = this.state.getProp(ConfigurationKeys.SOURCE_CONN_KNOWN_HOSTS);
  String userName = this.state.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME);
  String hostName = this.state.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
  int port = this.state.getPropAsInt(ConfigurationKeys.SOURCE_CONN_PORT, ConfigurationKeys.SOURCE_CONN_DEFAULT_PORT);
  String proxyHost = this.state.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL);
  int proxyPort = this.state.getPropAsInt(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT, -1);
  JSch.setLogger(new JSchLogger());
  JSch jsch = new JSch();
  log.info("Attempting to connect to source via SFTP with" + " privateKey: " + privateKey
      + " knownHosts: " + knownHosts + " userName: " + userName + " hostName: " + hostName
      + " port: " + port + " proxyHost: " + proxyHost + " proxyPort: " + proxyPort);
  try {
    if (!Strings.isNullOrEmpty(privateKey)) {
      // Try each identity-loading strategy in order; the first that succeeds wins.
      List<IdentityStrategy> identityStrategies = ImmutableList.of(new LocalFileIdentityStrategy(), new DistributedCacheIdentityStrategy(), new HDFSIdentityStrategy());
      for (IdentityStrategy identityStrategy : identityStrategies) {
        if (identityStrategy.setIdentity(privateKey, jsch)) {
          break;
        }
      }
    }
    this.session = jsch.getSession(userName, hostName, port);
    this.session.setConfig("PreferredAuthentications", "publickey,password");
    if (Strings.isNullOrEmpty(knownHosts)) {
      // Without a known-hosts file, host key verification must be disabled.
      log.info("Known hosts path is not set, StrictHostKeyChecking will be turned off");
      this.session.setConfig("StrictHostKeyChecking", "no");
    } else {
      jsch.setKnownHosts(knownHosts);
    }
    if (!Strings.isNullOrEmpty(password)) {
      this.session.setPassword(password);
    }
    if (proxyHost != null && proxyPort >= 0) {
      this.session.setProxy(new ProxyHTTP(proxyHost, proxyPort));
    }
    UserInfo ui = new MyUserInfo();
    this.session.setUserInfo(ui);
    // Daemon thread so the session does not keep the JVM alive.
    this.session.setDaemonThread(true);
    this.session.connect();
    log.info("Finished connecting to source");
  } catch (JSchException e) {
    // Clean up a half-open session before surfacing the failure.
    if (this.session != null) {
      this.session.disconnect();
    }
    log.error(e.getMessage(), e);
    throw new FileBasedHelperException("Cannot connect to SFTP source", e);
  }
}
Opens up a connection to specified host using the username . Connects to the source using a private key without prompting for a password . This method does not support connecting to a source using a password only by private key
36,117
/**
 * Executes a get SftpCommand and returns an input stream to the file. The returned
 * stream owns the channel and disconnects it when closed.
 *
 * Fix: if {@code channel.get} fails, the freshly opened channel is now disconnected
 * instead of being leaked (previously nothing closed it on the error path).
 *
 * @param file remote file path
 * @return stream over the remote file's contents
 * @throws FileBasedHelperException if the channel cannot be opened or the get fails
 */
public InputStream getFileStream(String file) throws FileBasedHelperException {
  SftpGetMonitor monitor = new SftpGetMonitor();
  try {
    ChannelSftp channel = getSftpChannel();
    try {
      return new SftpFsFileInputStream(channel.get(file, monitor), channel);
    } catch (SftpException e) {
      // Don't leak the connected channel when the download cannot start.
      channel.disconnect();
      throw e;
    }
  } catch (SftpException e) {
    throw new FileBasedHelperException("Cannot download file " + file + " due to " + e.getMessage(), e);
  }
}
Executes a get SftpCommand and returns an input stream to the file
36,118
/**
 * Serializes this object into a string map containing the platform, the name, and all
 * metadata entries.
 *
 * @return a new mutable map of the serialized fields
 */
public Map<String, String> toDataMap() {
  final Map<String, String> dataMap = Maps.newHashMap();
  dataMap.put(PLATFORM_KEY, platform);
  dataMap.put(NAME_KEY, getName());
  dataMap.putAll(metadata);
  return dataMap;
}
Serialize to a string map
36,119
/**
 * Renews the existing delegation token, persists it to the token file, and — except on
 * first login — notifies controller and participant instances of the updated file.
 *
 * @throws IOException          on renewal or file-write failure
 * @throws InterruptedException if the renewal is interrupted
 */
private synchronized void renewDelegationToken() throws IOException, InterruptedException {
  this.token.renew(this.fs.getConf());
  writeDelegationTokenToFile();
  if (this.firstLogin) {
    return; // no listeners to notify yet
  }
  sendTokenFileUpdatedMessage(InstanceType.CONTROLLER);
  sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT);
}
Renew the existing delegation token .
36,120
/**
 * Logs the user in via Kerberos from a configured keytab file, then obtains a fresh
 * delegation token for the login user and writes it to the token file. On re-login
 * (not the first), controller and participant instances are notified of the new file.
 *
 * @throws IOException if the keytab path is missing/invalid or the login fails
 */
private void loginFromKeytab() throws IOException {
  String keyTabFilePath = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_FILE_PATH);
  if (Strings.isNullOrEmpty(keyTabFilePath)) {
    throw new IOException("Keytab file path is not defined for Kerberos login");
  }
  if (!new File(keyTabFilePath).exists()) {
    throw new IOException("Keytab file not found at: " + keyTabFilePath);
  }
  String principal = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_PRINCIPAL_NAME);
  if (Strings.isNullOrEmpty(principal)) {
    // No explicit principal configured: derive one from the current login user.
    principal = this.loginUser.getShortUserName() + "/localhost@LOCALHOST";
  }
  // Force Kerberos authentication for this login.
  Configuration conf = new Configuration();
  conf.set("hadoop.security.authentication", UserGroupInformation.AuthenticationMethod.KERBEROS.toString().toLowerCase());
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation.loginUserFromKeytab(principal, keyTabFilePath);
  LOGGER.info(String.format("Logged in from keytab file %s using principal %s", keyTabFilePath, principal));
  this.loginUser = UserGroupInformation.getLoginUser();
  getNewDelegationTokenForLoginUser();
  writeDelegationTokenToFile();
  // Skip notification on the very first login; there is nothing listening yet.
  if (!this.firstLogin) {
    sendTokenFileUpdatedMessage(InstanceType.CONTROLLER);
    sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT);
  }
}
Login the user from a given keytab file .
36,121
/**
 * Writes the current delegation token to the token file, replacing any existing file,
 * and restricts the file's permissions to owner read/write only.
 *
 * @throws IOException on filesystem failure
 */
synchronized void writeDelegationTokenToFile() throws IOException {
  if (this.fs.exists(this.tokenFilePath)) {
    LOGGER.info("Deleting existing token file " + this.tokenFilePath);
    this.fs.delete(this.tokenFilePath, false);
  }
  LOGGER.info("Writing new or renewed token to token file " + this.tokenFilePath);
  YarnHelixUtils.writeTokenToFile(this.token, this.tokenFilePath, this.fs.getConf());
  // The token is a credential: owner-only read/write.
  FsPermission ownerOnly = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  this.fs.setPermission(this.tokenFilePath, ownerOnly);
}
Write the current delegation token to the token file .
36,122
/**
 * Converts a cipher name to the integer tag used by GPG, by looking up the constant of
 * that name on PGPEncryptedData via reflection. An empty name falls back to CAST5.
 *
 * @param cipherName cipher name matching a PGPEncryptedData constant, or empty for the default
 * @return the GPG integer tag for the cipher
 * @throws RuntimeException if no matching constant exists or it cannot be read
 */
private static int symmetricKeyAlgorithmNameToTag(String cipherName) {
  if (StringUtils.isEmpty(cipherName)) {
    return PGPEncryptedData.CAST5; // default cipher when none is specified
  }
  Set<Field> matches = ReflectionUtils.getAllFields(PGPEncryptedData.class, ReflectionUtils.withName(cipherName));
  if (matches.isEmpty()) {
    throw new RuntimeException("Could not find tag for cipher name " + cipherName);
  }
  try {
    return matches.iterator().next().getInt(null);
  } catch (IllegalAccessException e) {
    throw new RuntimeException("Could not access field " + cipherName, e);
  }
}
Convert a string cipher name to the integer tag used by GPG
36,123
/**
 * Creates one work unit per configured cleaner configuration, or a single work unit
 * named "DummyConfig" when none are configured. Each work unit's properties are the
 * configuration-specific config with fallbacks to the source config and the full config.
 *
 * @param state source state carrying the job properties
 * @return the created work units
 */
public List<WorkUnit> getWorkunits(SourceState state) {
  Config config = ConfigUtils.propertiesToConfig(state.getProperties());
  Config sourceConfig = ConfigUtils.getConfigOrEmpty(config, DATASET_CLEANER_SOURCE_PREFIX);
  List<String> configurationNames = ConfigUtils.getStringList(config, DATASET_CLEANER_CONFIGURATIONS);
  if (configurationNames.isEmpty()) {
    // Ensure at least one work unit is produced.
    configurationNames = ImmutableList.of("DummyConfig");
  }
  List<WorkUnit> workUnits = Lists.newArrayList();
  for (String configurationName : configurationNames) {
    Config wuConfig = ConfigUtils.getConfigOrEmpty(sourceConfig, configurationName)
        .withFallback(sourceConfig)
        .withFallback(config);
    WorkUnit workUnit = WorkUnit.createEmpty();
    workUnit.setProps(ConfigUtils.configToProperties(wuConfig), new Properties());
    TaskUtils.setTaskFactoryClass(workUnit, DatasetCleanerTaskFactory.class);
    workUnits.add(workUnit);
  }
  return workUnits;
}
Create a work unit for each configuration defined or a single work unit if no configurations are defined
36,124
/**
 * Writes a whole batch to Eventhub, invoking the callback with success or failure and
 * updating the write metrics.
 *
 * Fix: the write-timer context is now closed in a finally block, so the timer is not
 * leaked if an exception escapes the catch handler (e.g. thrown by callback.onFailure).
 *
 * @param batch    records to dispatch
 * @param callback invoked with the outcome
 * @return a future wrapping the HTTP return code
 */
public Future<WriteResponse> write(Batch<String> batch, WriteCallback callback) {
  Timer.Context context = writeTimer.time();
  int returnCode = 0;
  try {
    LOG.info("Dispatching batch " + batch.getId());
    recordsAttempted.mark(batch.getRecords().size());
    try {
      String encoded = encodeBatch(batch);
      returnCode = request(encoded);
      WriteResponse<Integer> response = WRITE_RESPONSE_WRAPPER.wrap(returnCode);
      callback.onSuccess(response);
      bytesWritten.mark(encoded.length());
      recordsSuccess.mark(batch.getRecords().size());
    } catch (Exception e) {
      LOG.error("Dispatching batch " + batch.getId() + " failed :" + e.toString());
      callback.onFailure(e);
      recordsFailed.mark(batch.getRecords().size());
    }
  } finally {
    // Always stop the timer, even if the callback itself throws.
    context.close();
  }
  Future<Integer> future = Futures.immediateFuture(returnCode);
  return new WriteResponseFuture<>(future, WRITE_RESPONSE_WRAPPER);
}
Write a whole batch to eventhub
36,125
/**
 * Writes a single record to Eventhub, updating the attempt/success/bytes metrics.
 *
 * @param record record to send
 * @return the wrapped HTTP return code
 * @throws IOException if the request fails
 */
public WriteResponse write(String record) throws IOException {
  recordsAttempted.mark();
  final String payload = encodeRecord(record);
  final int returnCode = request(payload);
  recordsSuccess.mark();
  bytesWritten.mark(payload.length());
  return WRITE_RESPONSE_WRAPPER.wrap(returnCode);
}
Write a single record to eventhub
36,126
/**
 * Regenerates the shared access signature if it has never been created or its validity
 * window (sigExpireInMinute) has elapsed.
 *
 * Fix: no longer logs the signature value itself — it is a bearer credential, and
 * writing it to the log leaks it to anyone with log access.
 *
 * @throws RuntimeException if signature generation fails
 */
public void refreshSignature() {
  if (postStartTimestamp == 0 || (System.nanoTime() - postStartTimestamp) > Duration.ofMinutes(sigExpireInMinute).toNanos()) {
    try {
      signature = SharedAccessSignatureTokenProvider.generateSharedAccessSignature(sasKeyName, sasKey, namespaceName, Duration.ofMinutes(sigExpireInMinute));
      postStartTimestamp = System.nanoTime();
      LOG.info("Shared access signature refreshed");
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
}
Refreshes the shared access signature when needed. The signature embeds a validity duration; once that duration has elapsed the signature becomes invalid and must be regenerated.
36,127
/**
 * Sends an encoded payload to Eventhub via HTTP POST, refreshing the SAS signature
 * first. The response entity is fully consumed so the connection can be reused.
 *
 * Fix: removed the trailing space that was appended to the Host header value
 * (".servicebus.windows.net ") — a malformed header value.
 *
 * NOTE(review): StringEntity(encoded) uses the default (ISO-8859-1) charset — confirm
 * payloads are ASCII-only or pass an explicit charset.
 *
 * @param encoded JSON payload to post
 * @return the HTTP status code (expected SC_CREATED)
 * @throws IOException if the request fails or a non-201 status is returned
 */
private int request(String encoded) throws IOException {
  refreshSignature();
  HttpPost httpPost = new HttpPost(targetURI);
  httpPost.setHeader("Content-type", "application/vnd.microsoft.servicebus.json");
  httpPost.setHeader("Authorization", signature);
  httpPost.setHeader("Host", namespaceName + ".servicebus.windows.net");
  StringEntity entity = new StringEntity(encoded);
  httpPost.setEntity(entity);
  HttpResponse response = httpclient.execute(httpPost);
  StatusLine status = response.getStatusLine();
  // Consume the body so the underlying connection is released.
  HttpEntity responseEntity = response.getEntity();
  EntityUtils.consume(responseEntity);
  int returnCode = status.getStatusCode();
  if (returnCode != HttpStatus.SC_CREATED) {
    LOG.error(new IOException(status.getReasonPhrase()).toString());
    throw new IOException(status.getReasonPhrase());
  }
  return returnCode;
}
Send an encoded string to the Eventhub using post method
36,128
/**
 * Wraps each record of the batch in an EventhubRequest (a "Body" JSON object), collects
 * them into an array, and JSON-encodes the whole array.
 *
 * @param batch records to encode
 * @return JSON array string of wrapped records
 * @throws IOException if serialization fails
 */
private String encodeBatch(Batch<String> batch) throws IOException {
  final List<EventhubRequest> wrapped = new ArrayList<>();
  for (String record : batch.getRecords()) {
    wrapped.add(new EventhubRequest(record));
  }
  return mapper.writeValueAsString(wrapped);
}
Each record of batch is wrapped by a Body json object put this new object into an array encode the whole array
36,129
/**
 * Wraps a single record in an EventhubRequest (a "Body" JSON object) and JSON-encodes
 * it as a one-element array.
 *
 * @param record record to encode
 * @return JSON array string containing the wrapped record
 * @throws IOException if serialization fails
 */
private String encodeRecord(String record) throws IOException {
  final ArrayList<EventhubRequest> singleton = new ArrayList<>();
  singleton.add(new EventhubRequest(record));
  return mapper.writeValueAsString(singleton);
}
A single record is wrapped by a Body json object encode this json object
36,130
/**
 * Sets the abort flag when the number of inserted-but-not-yet-swept watermarks exceeds
 * the configured lag threshold, guarding the tracker's memory footprint.
 */
private void checkStability() {
  final long lag = _watermarksInserted.getCount() - _watermarksSwept.getCount();
  if (lag > _watermarkLagThreshold) {
    log.error("Setting abort flag for Watermark tracking because the lag between the "
        + "watermarksInserted: {} and watermarksSwept: {} is greater than the threshold: {}",
        _watermarksInserted.getCount(), _watermarksSwept.getCount(), _watermarkLagThreshold);
    _abort.set(true);
  }
}
Check if the memory footprint of the data structure is within bounds
36,131
/**
 * Starts the tracker: schedules the sweeper and the stability checker on a fresh
 * single-thread scheduled executor. Subsequent calls are no-ops.
 */
public synchronized void start() {
  if (_started.get()) {
    return; // already running
  }
  _executorService = new ScheduledThreadPoolExecutor(1,
      ExecutorsUtils.newThreadFactory(Optional.of(LoggerFactory.getLogger(FineGrainedWatermarkTracker.class))));
  _executorService.scheduleAtFixedRate(_sweeper, 0, _sweepIntervalMillis, TimeUnit.MILLISECONDS);
  _executorService.scheduleAtFixedRate(_stabilityChecker, 0, _stabilityCheckIntervalMillis, TimeUnit.MILLISECONDS);
  _started.set(true);
}
Schedule the sweeper and stability checkers
36,132
/**
 * Garbage-collects acknowledged watermarks from each per-source deque.
 *
 * For every deque, repeatedly pops the head while BOTH the head and the element after
 * it are acked — the last acked watermark is deliberately kept as the current commit
 * point, so a fully-acked deque retains one element.
 *
 * @return the number of watermarks removed in this pass
 */
synchronized int sweep() {
  long startTime = System.nanoTime();
  int swept = 0;
  for (Map.Entry<String, Deque<AcknowledgableWatermark>> entry : _watermarksMap.entrySet()) {
    Deque<AcknowledgableWatermark> watermarks = entry.getValue();
    boolean continueIteration = true;
    while (continueIteration) {
      // A fresh iterator each pass: the deque head may have just been popped.
      Iterator<AcknowledgableWatermark> iter = watermarks.iterator();
      if (!iter.hasNext()) {
        continueIteration = false;
        continue; // deque is empty
      }
      AcknowledgableWatermark first = iter.next();
      if (first.isAcked()) {
        if (!iter.hasNext()) {
          // Only one element left; keep it as the latest acked watermark.
          continueIteration = false;
          continue;
        }
        AcknowledgableWatermark second = iter.next();
        if ((second != null) && second.isAcked()) {
          // Head and successor both acked: the head is safe to discard.
          watermarks.pop();
          swept++;
        } else {
          continueIteration = false;
        }
      } else {
        // Unacked head blocks any further sweeping of this deque.
        continueIteration = false;
      }
    }
  }
  long duration = (System.nanoTime() - startTime) / MILLIS_TO_NANOS;
  log.debug("Swept {} watermarks in {} millis", swept, duration);
  _watermarksSwept.mark(swept);
  return swept;
}
A helper method to garbage collect acknowledged watermarks
36,133
/**
 * Serializes a list of partition descriptors to its JSON string form.
 *
 * @param descriptors descriptors to serialize
 * @return JSON array string
 */
public static String toPartitionJsonList(List<PartitionDescriptor> descriptors) {
  final String json = Descriptor.GSON.toJson(descriptors, DESCRIPTOR_LIST_TYPE);
  return json;
}
Serialize a list of partition descriptors as json string
36,134
/**
 * Customized MR job creation for Avro compaction: enables output compression and
 * delegation-token retention unless explicitly configured, wires input/output paths,
 * mapper, reducer, and schema, and records the created job on this instance.
 *
 * When the input is empty, or deduplication is disabled, the reduce phase is skipped
 * entirely (zero reducers).
 *
 * @param dataset dataset whose paths drive the job's input/output configuration
 * @return the configured (not yet submitted) Hadoop job
 * @throws IOException on configuration or filesystem failure
 */
public Job createJob(FileSystemDataset dataset) throws IOException {
  Configuration conf = HadoopUtils.getConfFromState(state);
  // Turn on output compression by default, honoring either the new or the old key if set.
  if (conf.get("mapreduce.output.fileoutputformat.compress") == null && conf.get("mapred.output.compress") == null) {
    conf.setBoolean("mapreduce.output.fileoutputformat.compress", true);
  }
  // Keep delegation tokens alive after job completion unless configured otherwise.
  if (conf.get("mapreduce.job.complete.cancel.delegation.tokens") == null) {
    conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
  }
  addJars(conf, this.state, fs);
  Job job = Job.getInstance(conf);
  job.setJobName(MRCompactorJobRunner.HADOOP_JOB_NAME);
  boolean emptyDirectoryFlag = this.configureInputAndOutputPaths(job, dataset);
  if (emptyDirectoryFlag) {
    // Record emptiness so downstream Hive registration can react.
    this.state.setProp(HiveRegistrationPolicy.MAPREDUCE_JOB_INPUT_PATH_EMPTY_KEY, true);
  }
  this.configureMapper(job);
  this.configureReducer(job);
  // Empty input or no dedup required: skip the reduce phase entirely.
  if (emptyDirectoryFlag || !this.shouldDeduplicate) {
    job.setNumReduceTasks(0);
  }
  this.configureSchema(job);
  this.isJobCreated = true;
  this.configuredJob = job;
  return job;
}
Customized MR job creation for Avro .
36,135
/**
 * Concatenates multiple directory or file names into one path.
 *
 * @param names path components in order; may be null or empty
 * @return the combined path, or null when no names are given
 */
private Path concatPaths(String... names) {
  if (names == null || names.length == 0) {
    return null;
  }
  Path result = new Path(names[0]);
  for (int idx = 1; idx < names.length; idx++) {
    result = new Path(result, new Path(names[idx]));
  }
  return result;
}
Concatenate multiple directory or file names into one path
36,136
/**
 * Converts a top-level input path into the set of deepest-level containing folders of
 * its files — the finest granularity of sub-paths to use as MR inputs. Pre-declaring
 * sub-paths this way lets later runs tell which newly arrived sub-paths a previous job
 * already processed.
 *
 * When rename-source-dir mode is enabled, folders already carrying the compaction
 * rename suffix are excluded from the returned (uncompacted) set, and a record count
 * over ALL folders is computed from file names for bookkeeping.
 *
 * @param path top-level input path to expand
 * @return containing folders of files not yet compacted
 * @throws IOException on filesystem failure
 */
protected Collection<Path> getGranularInputPaths(Path path) throws IOException {
  boolean appendDelta = this.state.getPropAsBoolean(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_ENABLED, MRCompactor.DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED);
  Set<Path> uncompacted = Sets.newHashSet();
  Set<Path> total = Sets.newHashSet();
  for (FileStatus fileStatus : FileListUtils.listFilesRecursively(fs, path)) {
    if (appendDelta) {
      // Folders already renamed with the compaction suffix were compacted previously.
      if (!fileStatus.getPath().getParent().toString().endsWith(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_SUFFIX)) {
        uncompacted.add(fileStatus.getPath().getParent());
      }
      total.add(fileStatus.getPath().getParent());
    } else {
      uncompacted.add(fileStatus.getPath().getParent());
    }
  }
  if (appendDelta) {
    // Record count derived from file names across all folders, for later verification.
    this.fileNameRecordCount = new InputRecordCountHelper(this.state).calculateRecordCount(total);
    log.info("{} has total input record count (based on file name) {}", path, this.fileNameRecordCount);
  }
  return uncompacted;
}
Converts a top-level input path into a group of sub-paths according to a user-defined granularity. This may be required because, if the upstream application generates many sub-paths but the map-reduce job only keeps track of the top-level path, then after the job is done we won't be able to tell whether newly arriving sub-paths were processed by the previous map-reduce job. Hence, a better approach is to pre-define those sub-paths as input paths before the MR job starts. The implementation of this method should depend on the data-generation granularity controlled by upstream; here we simply list the deepest-level containing folder as the smallest granularity.
36,137
/**
 * Checks whether the object is one of the immutable types supported for copying
 * (null, String, Integer, or Long). Only types worth supporting as record types
 * are included.
 *
 * @param thing object to test (may be null)
 * @return true if the object is immutable and safe to share without copying
 */
private static boolean isImmutableType(Object thing) {
  return thing == null
      || thing instanceof String
      || thing instanceof Integer
      || thing instanceof Long;
}
Checks whether the object is one of the supported immutable types for copying. Only the types that are worth supporting as record types are included.
36,138
/**
 * Copies the object if needed: Copyable objects are copied via their own copy(), byte
 * arrays are cloned, and immutable objects are returned as-is.
 *
 * @param thing object to copy
 * @return a safe-to-share copy (or the same instance when immutable)
 * @throws CopyNotSupportedException if the object's type is not copyable
 */
public static Object copy(Object thing) throws CopyNotSupportedException {
  if (!isCopyable(thing)) {
    throw new CopyNotSupportedException(thing.getClass().getName() + " cannot be copied. See Copyable");
  }
  if (thing instanceof Copyable) {
    return ((Copyable) thing).copy();
  }
  if (thing instanceof byte[]) {
    return ((byte[]) thing).clone();
  }
  // Immutable type: sharing the instance is safe.
  return thing;
}
Copy this object if needed .
36,139
/**
 * Prints a table describing individual job executions: id, state, schedule, task
 * counts, start/end times, and (for committed jobs only) the duration in seconds.
 *
 * Exits the process with status 1 when the input list is null.
 *
 * @param jobExecutionInfos executions to print; null aborts the process
 */
public static void printJobRuns(List<JobExecutionInfo> jobExecutionInfos) {
  if (jobExecutionInfos == null) {
    System.err.println("No job executions found.");
    System.exit(1);
  }
  List<String> labels = Arrays.asList("Job Id", "State", "Schedule", "Completed Tasks", "Launched Tasks", "Start Time", "End Time", "Duration (s)");
  // "-" flags left-align the corresponding column.
  List<String> flags = Arrays.asList("-", "-", "-", "", "", "-", "-", "-");
  List<List<String>> data = new ArrayList<>();
  for (JobExecutionInfo jobInfo : jobExecutionInfos) {
    List<String> entry = new ArrayList<>();
    entry.add(jobInfo.getJobId());
    entry.add(jobInfo.getState().toString());
    entry.add(extractJobSchedule(jobInfo));
    entry.add(jobInfo.getCompletedTasks().toString());
    entry.add(jobInfo.getLaunchedTasks().toString());
    entry.add(dateTimeFormatter.print(jobInfo.getStartTime()));
    entry.add(dateTimeFormatter.print(jobInfo.getEndTime()));
    // Duration only makes sense for committed jobs; others show "-".
    entry.add(jobInfo.getState() == JobStateEnum.COMMITTED ? decimalFormatter.format(jobInfo.getDuration() / 1000.0) : "-");
    data.add(entry);
  }
  new CliTablePrinter.Builder().labels(labels).data(data).flags(flags).delimiterWidth(2).build().printTable();
}
Print a table describing a bunch of individual job executions .
36,140
public static void printAllJobs ( List < JobExecutionInfo > jobExecutionInfos , int resultsLimit ) { if ( jobExecutionInfos == null ) { System . err . println ( "No jobs found." ) ; System . exit ( 1 ) ; } List < String > labels = Arrays . asList ( "Job Name" , "State" , "Last Run Started" , "Last Run Completed" , "Schedule" , "Last Run Records Processed" , "Last Run Records Failed" ) ; List < String > flags = Arrays . asList ( "-" , "-" , "-" , "-" , "-" , "" , "" ) ; List < List < String > > data = new ArrayList < > ( ) ; for ( JobExecutionInfo jobInfo : jobExecutionInfos ) { List < String > entry = new ArrayList < > ( ) ; entry . add ( jobInfo . getJobName ( ) ) ; entry . add ( jobInfo . getState ( ) . toString ( ) ) ; entry . add ( dateTimeFormatter . print ( jobInfo . getStartTime ( ) ) ) ; entry . add ( dateTimeFormatter . print ( jobInfo . getEndTime ( ) ) ) ; entry . add ( extractJobSchedule ( jobInfo ) ) ; MetricArray metrics = jobInfo . getMetrics ( ) ; Double recordsProcessed = null ; Double recordsFailed = null ; try { for ( Metric metric : metrics ) { if ( metric . getName ( ) . equals ( MetricNames . ExtractorMetrics . RECORDS_READ_METER ) ) { recordsProcessed = Double . parseDouble ( metric . getValue ( ) ) ; } else if ( metric . getName ( ) . equals ( MetricNames . ExtractorMetrics . RECORDS_FAILED_METER ) ) { recordsFailed = Double . parseDouble ( metric . getValue ( ) ) ; } } if ( recordsProcessed != null && recordsFailed != null ) { entry . add ( recordsProcessed . toString ( ) ) ; entry . add ( recordsFailed . toString ( ) ) ; } } catch ( NumberFormatException ex ) { System . err . println ( "Failed to process metrics" ) ; } if ( recordsProcessed == null || recordsFailed == null ) { entry . add ( "-" ) ; entry . add ( "-" ) ; } data . add ( entry ) ; } new CliTablePrinter . Builder ( ) . labels ( labels ) . data ( data ) . flags ( flags ) . delimiterWidth ( 2 ) . build ( ) . printTable ( ) ; if ( jobExecutionInfos . 
size ( ) == resultsLimit ) { System . out . println ( "\nWARNING: There may be more jobs (# of results is equal to the limit)" ) ; } }
Print summary information about a bunch of jobs in the system
36,141
public static void printJob ( Optional < JobExecutionInfo > jobExecutionInfoOptional ) { if ( ! jobExecutionInfoOptional . isPresent ( ) ) { System . err . println ( "Job id not found." ) ; return ; } JobExecutionInfo jobExecutionInfo = jobExecutionInfoOptional . get ( ) ; List < List < String > > data = new ArrayList < > ( ) ; List < String > flags = Arrays . asList ( "" , "-" ) ; data . add ( Arrays . asList ( "Job Name" , jobExecutionInfo . getJobName ( ) ) ) ; data . add ( Arrays . asList ( "Job Id" , jobExecutionInfo . getJobId ( ) ) ) ; data . add ( Arrays . asList ( "State" , jobExecutionInfo . getState ( ) . toString ( ) ) ) ; data . add ( Arrays . asList ( "Completed/Launched Tasks" , String . format ( "%d/%d" , jobExecutionInfo . getCompletedTasks ( ) , jobExecutionInfo . getLaunchedTasks ( ) ) ) ) ; data . add ( Arrays . asList ( "Start Time" , dateTimeFormatter . print ( jobExecutionInfo . getStartTime ( ) ) ) ) ; data . add ( Arrays . asList ( "End Time" , dateTimeFormatter . print ( jobExecutionInfo . getEndTime ( ) ) ) ) ; data . add ( Arrays . asList ( "Duration" , jobExecutionInfo . getState ( ) == JobStateEnum . COMMITTED ? periodFormatter . print ( new Period ( jobExecutionInfo . getDuration ( ) . longValue ( ) ) ) : "-" ) ) ; data . add ( Arrays . asList ( "Tracking URL" , jobExecutionInfo . getTrackingUrl ( ) ) ) ; data . add ( Arrays . asList ( "Launcher Type" , jobExecutionInfo . getLauncherType ( ) . name ( ) ) ) ; new CliTablePrinter . Builder ( ) . data ( data ) . flags ( flags ) . delimiterWidth ( 2 ) . build ( ) . printTable ( ) ; JobInfoPrintUtils . printMetrics ( jobExecutionInfo . getMetrics ( ) ) ; }
Print information about one specific job .
36,142
public static void printJobProperties ( Optional < JobExecutionInfo > jobExecutionInfoOptional ) { if ( ! jobExecutionInfoOptional . isPresent ( ) ) { System . err . println ( "Job not found." ) ; return ; } List < List < String > > data = new ArrayList < > ( ) ; List < String > flags = Arrays . asList ( "" , "-" ) ; List < String > labels = Arrays . asList ( "Property Key" , "Property Value" ) ; for ( Map . Entry < String , String > entry : jobExecutionInfoOptional . get ( ) . getJobProperties ( ) . entrySet ( ) ) { data . add ( Arrays . asList ( entry . getKey ( ) , entry . getValue ( ) ) ) ; } new CliTablePrinter . Builder ( ) . labels ( labels ) . data ( data ) . flags ( flags ) . delimiterWidth ( 2 ) . build ( ) . printTable ( ) ; }
Print properties of a specific job
36,143
private static void printMetrics ( MetricArray metrics ) { System . out . println ( ) ; if ( metrics . size ( ) == 0 ) { System . out . println ( "No metrics found." ) ; return ; } List < List < String > > data = new ArrayList < > ( ) ; List < String > flags = Arrays . asList ( "" , "-" ) ; for ( Metric metric : metrics ) { data . add ( Arrays . asList ( metric . getName ( ) , metric . getValue ( ) ) ) ; } new CliTablePrinter . Builder ( ) . data ( data ) . flags ( flags ) . delimiterWidth ( 2 ) . build ( ) . printTable ( ) ; }
Print out various metrics
36,144
@ CliObjectOption ( description = "Sets the period for which articles should be pulled in ISO time format (e.g. P2D, PT1H)" ) public EmbeddedWikipediaExample lookback ( String isoLookback ) { this . setConfiguration ( WikipediaExtractor . BOOTSTRAP_PERIOD , isoLookback ) ; return this ; }
Set bootstrap lookback i . e . oldest revision to pull .
36,145
@ CliObjectOption ( description = "Write output to Avro files. Specify the output directory as argument." ) public EmbeddedWikipediaExample avroOutput ( String outputPath ) { this . setConfiguration ( ConfigurationKeys . WRITER_BUILDER_CLASS , AvroDataWriterBuilder . class . getName ( ) ) ; this . setConfiguration ( ConfigurationKeys . WRITER_DESTINATION_TYPE_KEY , Destination . DestinationType . HDFS . name ( ) ) ; this . setConfiguration ( ConfigurationKeys . WRITER_OUTPUT_FORMAT_KEY , WriterOutputFormat . AVRO . name ( ) ) ; this . setConfiguration ( ConfigurationKeys . WRITER_PARTITIONER_CLASS , WikipediaPartitioner . class . getName ( ) ) ; this . setConfiguration ( ConfigurationKeys . JOB_DATA_PUBLISHER_TYPE , BaseDataPublisher . class . getName ( ) ) ; this . setConfiguration ( ConfigurationKeys . CONVERTER_CLASSES_KEY , WikipediaConverter . class . getName ( ) ) ; this . setConfiguration ( ConfigurationKeys . DATA_PUBLISHER_FINAL_DIR , outputPath ) ; return this ; }
Write output to avro files at the given input location .
36,146
protected OrcKey convertOrcStructToOrcKey ( OrcStruct struct ) { OrcKey orcKey = new OrcKey ( ) ; orcKey . key = struct ; return orcKey ; }
The output key of mapper needs to be comparable . In the scenarios that we need the orc record itself to be the output key this conversion will be necessary .
36,147
public void afterRead ( D record , long startTime ) { Instrumented . updateTimer ( this . extractorTimer , System . nanoTime ( ) - startTime , TimeUnit . NANOSECONDS ) ; if ( record != null ) { Instrumented . markMeter ( this . readRecordsMeter ) ; } }
Called after each record is read .
36,148
public void nextStage ( String name ) throws IOException { endStage ( ) ; this . currentStage = name ; this . currentStageStart = System . currentTimeMillis ( ) ; }
End the previous stage record the time spent in that stage and start the timer for a new stage .
36,149
public void endStage ( ) { if ( this . currentStage != null ) { long time = System . currentTimeMillis ( ) - this . currentStageStart ; this . timings . add ( new Stage ( this . currentStage , time ) ) ; if ( reportAsMetrics && submitter . getMetricContext ( ) . isPresent ( ) ) { String timerName = submitter . getNamespace ( ) + "." + name + "." + this . currentStage ; submitter . getMetricContext ( ) . get ( ) . timer ( timerName ) . update ( time , TimeUnit . MILLISECONDS ) ; } } this . currentStage = null ; }
End the previous stage and record the time spent in that stage .
36,150
public void submit ( Map < String , String > additionalMetadata ) throws IOException { if ( this . submitted ) { throw new IOException ( "MultiTimingEvent has already been submitted." ) ; } this . submitted = true ; endStage ( ) ; Map < String , String > finalMetadata = Maps . newHashMap ( ) ; finalMetadata . putAll ( additionalMetadata ) ; finalMetadata . put ( EventSubmitter . EVENT_TYPE , MULTI_TIMING_EVENT ) ; for ( Stage timing : this . timings ) { finalMetadata . put ( timing . getName ( ) , Long . toString ( timing . getDuration ( ) ) ) ; } this . submitter . submit ( this . name , finalMetadata ) ; }
Ends the current stage and submits the event containing the timings of each event .
36,151
public Optional < JobExecutionInfo > queryByJobId ( String id ) throws RemoteInvocationException { JobExecutionQuery query = new JobExecutionQuery ( ) ; query . setIdType ( QueryIdTypeEnum . JOB_ID ) ; query . setId ( JobExecutionQuery . Id . create ( id ) ) ; query . setLimit ( 1 ) ; List < JobExecutionInfo > results = executeQuery ( query ) ; return getFirstFromQueryResults ( results ) ; }
Retrieve a Gobblin job by its id .
36,152
public List < JobExecutionInfo > queryAllJobs ( QueryListType lookupType , int resultsLimit ) throws RemoteInvocationException { JobExecutionQuery query = new JobExecutionQuery ( ) ; query . setIdType ( QueryIdTypeEnum . LIST_TYPE ) ; query . setId ( JobExecutionQuery . Id . create ( lookupType ) ) ; query . setJobProperties ( ConfigurationKeys . JOB_RUN_ONCE_KEY + "," + ConfigurationKeys . JOB_SCHEDULE_KEY ) ; query . setIncludeTaskExecutions ( false ) ; query . setLimit ( resultsLimit ) ; return executeQuery ( query ) ; }
Retrieve all jobs
36,153
public List < JobExecutionInfo > queryByJobName ( String name , int resultsLimit ) throws RemoteInvocationException { JobExecutionQuery query = new JobExecutionQuery ( ) ; query . setIdType ( QueryIdTypeEnum . JOB_NAME ) ; query . setId ( JobExecutionQuery . Id . create ( name ) ) ; query . setIncludeTaskExecutions ( false ) ; query . setLimit ( resultsLimit ) ; return executeQuery ( query ) ; }
Query jobs by name
36,154
private List < JobExecutionInfo > executeQuery ( JobExecutionQuery query ) throws RemoteInvocationException { JobExecutionQueryResult result = this . client . get ( query ) ; if ( result != null && result . hasJobExecutions ( ) ) { return result . getJobExecutions ( ) ; } return Collections . emptyList ( ) ; }
Execute a query and coerce the result into a java List
36,155
private boolean shouldPublishDataInTask ( ) { boolean publishDataAtJobLevel = this . taskState . getPropAsBoolean ( ConfigurationKeys . PUBLISH_DATA_AT_JOB_LEVEL , ConfigurationKeys . DEFAULT_PUBLISH_DATA_AT_JOB_LEVEL ) ; if ( publishDataAtJobLevel ) { LOG . info ( String . format ( "%s is true. Will publish data at the job level." , ConfigurationKeys . PUBLISH_DATA_AT_JOB_LEVEL ) ) ; return false ; } JobCommitPolicy jobCommitPolicy = JobCommitPolicy . getCommitPolicy ( this . taskState ) ; if ( jobCommitPolicy == JobCommitPolicy . COMMIT_SUCCESSFUL_TASKS ) { return this . taskState . getWorkingState ( ) == WorkUnitState . WorkingState . SUCCESSFUL ; } if ( jobCommitPolicy == JobCommitPolicy . COMMIT_ON_PARTIAL_SUCCESS ) { return true ; } LOG . info ( "Will publish data at the job level with job commit policy: " + jobCommitPolicy ) ; return false ; }
Whether the task should directly publish its output data to the final publisher output directory .
36,156
private static boolean inMultipleBranches ( List < Boolean > branches ) { int inBranches = 0 ; for ( Boolean bool : branches ) { if ( bool && ++ inBranches > 1 ) { break ; } } return inBranches > 1 ; }
Check if a schema or data record is being passed to more than one branches .
36,157
public synchronized boolean cancel ( ) { if ( this . taskFuture != null && this . taskFuture . cancel ( true ) ) { this . taskStateTracker . onTaskRunCompletion ( this ) ; this . completeShutdown ( ) ; return true ; } else { return false ; } }
return true if the task is successfully cancelled .
36,158
protected GenericRecord convertRecord ( GenericRecord record ) throws IOException { return AvroUtils . convertRecordSchema ( record , this . schema . get ( ) ) ; }
Convert the record to the output schema of this extractor
36,159
private Path getDatasetDirForKey ( ConfigKeyPath configKey ) throws VersionDoesNotExistException { return this . fs . getPath ( this . storePrefix , configKey . getAbsolutePathString ( ) ) ; }
Get path object using zipped file system and relative path
36,160
public Future < ? extends List < Pair < SpecExecutor . Verb , Spec > > > changedSpecs ( ) { List < Pair < SpecExecutor . Verb , Spec > > changesSpecs = new ArrayList < > ( ) ; try { Pair < SpecExecutor . Verb , Spec > specPair = _jobSpecQueue . take ( ) ; _metrics . jobSpecDeqCount . incrementAndGet ( ) ; do { changesSpecs . add ( specPair ) ; specPair = _jobSpecQueue . poll ( ) ; } while ( specPair != null ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; } return new CompletedFuture ( changesSpecs , null ) ; }
This method returns job specs receive from Kafka . It will block if there are no job specs .
36,161
public synchronized boolean remove ( String name ) { MetricContext metricContext = this . metricContext . get ( ) ; if ( metricContext != null ) { metricContext . removeFromMetrics ( this . contextAwareMetrics . get ( name ) . getContextAwareMetric ( ) ) ; } return this . contextAwareMetrics . remove ( name ) != null && removeChildrenMetrics ( name ) ; }
Remove a metric with a given name .
36,162
private static long getInterval ( long lowWatermarkValue , long highWatermarkValue , long partitionInterval , int maxIntervals ) { if ( lowWatermarkValue > highWatermarkValue ) { LOG . info ( "lowWatermarkValue: " + lowWatermarkValue + " is greater than highWatermarkValue: " + highWatermarkValue ) ; return 0 ; } long outputInterval = partitionInterval ; boolean longOverflow = false ; long totalIntervals = Long . MAX_VALUE ; try { totalIntervals = DoubleMath . roundToLong ( ( double ) highWatermarkValue / partitionInterval - ( double ) lowWatermarkValue / partitionInterval , RoundingMode . CEILING ) ; } catch ( java . lang . ArithmeticException e ) { longOverflow = true ; } if ( longOverflow || totalIntervals > maxIntervals ) { outputInterval = DoubleMath . roundToLong ( ( double ) highWatermarkValue / maxIntervals - ( double ) lowWatermarkValue / maxIntervals , RoundingMode . CEILING ) ; } return outputInterval ; }
recalculate interval if total number of partitions greater than maximum number of allowed partitions
36,163
protected void report ( SortedMap < String , Gauge > gauges , SortedMap < String , Counter > counters , SortedMap < String , Histogram > histograms , SortedMap < String , Meter > meters , SortedMap < String , Timer > timers , Map < String , Object > tags , boolean isFinal ) { report ( gauges , counters , histograms , meters , timers , tags ) ; }
Report the input metrics . The input tags apply to all input metrics .
36,164
public boolean getPermits ( long permits ) throws InterruptedException { if ( permits <= 0 ) { return true ; } long startTimeNanos = System . nanoTime ( ) ; this . permitsOutstanding . addEntryWithWeight ( permits ) ; this . lock . lock ( ) ; try { while ( true ) { if ( permits >= this . knownUnsatisfiablePermits ) { break ; } if ( elapsedMillis ( startTimeNanos ) > this . maxTimeout ) { break ; } if ( this . permitBatchContainer . tryTake ( permits ) ) { this . permitsOutstanding . removeEntryWithWeight ( permits ) ; return true ; } if ( this . retryStatus . canRetryWithinMillis ( remainingTime ( startTimeNanos , this . maxTimeout ) ) ) { long callbackCounterSnap = this . callbackCounter . get ( ) ; maybeSendNewPermitRequest ( ) ; if ( this . callbackCounter . get ( ) == callbackCounterSnap ) { boolean ignore = this . newPermitsAvailable . await ( remainingTime ( startTimeNanos , this . maxTimeout ) , TimeUnit . MILLISECONDS ) ; } } else { break ; } } } finally { this . lock . unlock ( ) ; } this . permitsOutstanding . removeEntryWithWeight ( permits ) ; return false ; }
Try to get a number of permits from this requester .
36,165
private void maybeSendNewPermitRequest ( ) { if ( ! this . requestSemaphore . tryAcquire ( ) ) { return ; } if ( ! this . retryStatus . canRetryNow ( ) ) { this . requestSemaphore . release ( ) ; return ; } try { long permits = computeNextPermitRequest ( ) ; if ( permits <= 0 ) { this . requestSemaphore . release ( ) ; return ; } PermitRequest permitRequest = this . basePermitRequest . copy ( ) ; permitRequest . setPermits ( permits ) ; permitRequest . setMinPermits ( ( long ) this . permitsOutstanding . getAverageWeightOrZero ( ) ) ; permitRequest . setVersion ( ThrottlingProtocolVersion . WAIT_ON_CLIENT . ordinal ( ) ) ; if ( BatchedPermitsRequester . this . restRequestHistogram != null ) { BatchedPermitsRequester . this . restRequestHistogram . update ( permits ) ; } log . debug ( "Sending permit request " + permitRequest ) ; this . requestSender . sendRequest ( permitRequest , new AllocationCallback ( BatchedPermitsRequester . this . restRequestTimer == null ? NoopCloseable . INSTANCE : BatchedPermitsRequester . this . restRequestTimer . time ( ) , new Sleeper ( ) ) ) ; } catch ( CloneNotSupportedException cnse ) { this . requestSemaphore . release ( ) ; throw new RuntimeException ( cnse ) ; } }
Send a new permit request to the server .
36,166
public static Optional < Long > getRateIfRateControlled ( FileSystem fs ) { if ( fs instanceof Decorator ) { List < Object > lineage = DecoratorUtils . getDecoratorLineage ( fs ) ; for ( Object obj : lineage ) { if ( obj instanceof RateControlledFileSystem ) { return Optional . of ( ( ( RateControlledFileSystem ) obj ) . limitPerSecond ) ; } } return Optional . absent ( ) ; } return Optional . absent ( ) ; }
Determines whether the file system is rate controlled and if so returns the allowed rate in operations per second .
36,167
public static DefaultJobSpecScheduleImpl createImmediateSchedule ( JobSpec jobSpec , Runnable jobRunnable ) { return new DefaultJobSpecScheduleImpl ( jobSpec , jobRunnable , Optional . of ( System . currentTimeMillis ( ) ) ) ; }
Creates a schedule denoting that the job is to be executed immediately
36,168
public static DefaultJobSpecScheduleImpl createNoSchedule ( JobSpec jobSpec , Runnable jobRunnable ) { return new DefaultJobSpecScheduleImpl ( jobSpec , jobRunnable , Optional . < Long > absent ( ) ) ; }
Creates a schedule denoting that the job is not to be executed
36,169
Path getTargetLocation ( FileSystem sourceFs , FileSystem targetFs , Path path , Optional < Partition > partition ) throws IOException { return getTargetPathHelper ( ) . getTargetPath ( path , targetFs , partition , false ) ; }
Compute the target location for a Hive location .
36,170
public static File getIvySettingsFile ( ) throws IOException { URL settingsUrl = Thread . currentThread ( ) . getContextClassLoader ( ) . getResource ( IVY_SETTINGS_FILE_NAME ) ; if ( settingsUrl == null ) { throw new IOException ( "Failed to find " + IVY_SETTINGS_FILE_NAME + " from class path" ) ; } File ivySettingsFile = new File ( settingsUrl . getFile ( ) ) ; if ( ivySettingsFile . exists ( ) ) { return ivySettingsFile ; } ivySettingsFile = File . createTempFile ( "ivy.settings" , ".xml" ) ; ivySettingsFile . deleteOnExit ( ) ; try ( OutputStream os = new BufferedOutputStream ( new FileOutputStream ( ivySettingsFile ) ) ) { Resources . copy ( settingsUrl , os ) ; } return ivySettingsFile ; }
Get ivy settings file from classpath
36,171
public DataWriterBuilder < S , D > writeTo ( Destination destination ) { this . destination = destination ; log . debug ( "For destination: {}" , destination ) ; return this ; }
Tell the writer the destination to write to .
36,172
public DataWriterBuilder < S , D > withWriterId ( String writerId ) { this . writerId = writerId ; log . debug ( "withWriterId : {}" , this . writerId ) ; return this ; }
Give the writer a unique ID .
36,173
public DataWriterBuilder < S , D > withSchema ( S schema ) { this . schema = schema ; log . debug ( "withSchema : {}" , this . schema ) ; return this ; }
Tell the writer the data schema .
36,174
public DataWriterBuilder < S , D > withBranches ( int branches ) { this . branches = branches ; log . debug ( "With branches: {}" , this . branches ) ; return this ; }
Tell the writer how many branches are being used .
36,175
public DataWriterBuilder < S , D > forBranch ( int branch ) { this . branch = branch ; log . debug ( "For branch: {}" , this . branch ) ; return this ; }
Tell the writer which branch it is associated with .
36,176
public static boolean isHiveTableAvroType ( Table targetTable ) throws IOException { String serializationLib = targetTable . getTTable ( ) . getSd ( ) . getSerdeInfo ( ) . getSerializationLib ( ) ; String inputFormat = targetTable . getTTable ( ) . getSd ( ) . getInputFormat ( ) ; String outputFormat = targetTable . getTTable ( ) . getSd ( ) . getOutputFormat ( ) ; return inputFormat . endsWith ( "AvroContainerInputFormat" ) || outputFormat . endsWith ( "AvroContainerOutputFormat" ) || serializationLib . endsWith ( "AvroSerDe" ) ; }
Tell whether a hive table is actually an Avro table
36,177
public static String getCreateTableQuery ( String completeNewTableName , String likeTableDbName , String likeTableName , String location ) { return getCreateTableQuery ( completeNewTableName , likeTableDbName , likeTableName ) + " LOCATION " + PartitionUtils . getQuotedString ( location ) ; }
If staging table doesn t exist it will create a staging table .
36,178
public static String getInsertQuery ( PurgeableHivePartitionDataset dataset ) { return "INSERT OVERWRITE" + " TABLE " + dataset . getCompleteStagingTableName ( ) + " PARTITION (" + PartitionUtils . getPartitionSpecString ( dataset . getSpec ( ) ) + ")" + " SELECT /*+MAPJOIN(b) */ " + getCommaSeparatedColumnNames ( dataset . getCols ( ) , "a." ) + " FROM " + dataset . getDbName ( ) + "." + dataset . getTableName ( ) + " a LEFT JOIN " + dataset . getComplianceIdTable ( ) + " b" + " ON a." + dataset . getComplianceField ( ) + "=b." + dataset . getComplianceId ( ) + " WHERE b." + dataset . getComplianceId ( ) + " IS NULL AND " + getWhereClauseForPartition ( dataset . getSpec ( ) , "a." ) ; }
This query will create a partition in staging table and insert the datasets whose compliance id is not contained in the compliance id table .
36,179
public static List < String > getPurgeQueries ( PurgeableHivePartitionDataset dataset ) { List < String > queries = new ArrayList < > ( ) ; queries . add ( getUseDbQuery ( dataset . getStagingDb ( ) ) ) ; queries . add ( getInsertQuery ( dataset ) ) ; return queries ; }
Will return all the queries needed to populate the staging table partition . This won t include alter table partition location query .
36,180
public static List < String > getBackupQueries ( PurgeableHivePartitionDataset dataset ) { List < String > queries = new ArrayList < > ( ) ; queries . add ( getUseDbQuery ( dataset . getDbName ( ) ) ) ; queries . add ( getCreateTableQuery ( dataset . getCompleteBackupTableName ( ) , dataset . getDbName ( ) , dataset . getTableName ( ) , dataset . getBackupTableLocation ( ) ) ) ; Optional < String > fileFormat = Optional . absent ( ) ; if ( dataset . getSpecifyPartitionFormat ( ) ) { fileFormat = dataset . getFileFormat ( ) ; } queries . add ( getAddPartitionQuery ( dataset . getBackupTableName ( ) , PartitionUtils . getPartitionSpecString ( dataset . getSpec ( ) ) , fileFormat , Optional . fromNullable ( dataset . getOriginalPartitionLocation ( ) ) ) ) ; return queries ; }
Will return all the queries needed to have a backup table partition pointing to the original partition data location
36,181
public static List < String > getAlterOriginalPartitionLocationQueries ( PurgeableHivePartitionDataset dataset ) { List < String > queries = new ArrayList < > ( ) ; queries . add ( getUseDbQuery ( dataset . getDbName ( ) ) ) ; String partitionSpecString = PartitionUtils . getPartitionSpecString ( dataset . getSpec ( ) ) ; queries . add ( getAlterTableLocationQuery ( dataset . getTableName ( ) , partitionSpecString , dataset . getStagingPartitionLocation ( ) ) ) ; queries . add ( getUpdatePartitionMetadataQuery ( dataset . getDbName ( ) , dataset . getTableName ( ) , partitionSpecString ) ) ; return queries ; }
Will return all the queries needed to alter the location of the table partition . Alter table partition query doesn t work with syntax dbName . tableName
36,182
protected Field convertFieldSchema ( Schema inputSchema , Field field , WorkUnitState workUnit ) throws SchemaConversionException { if ( field . name ( ) . equals ( payloadField ) ) { return createLatestPayloadField ( field ) ; } return new Field ( field . name ( ) , field . schema ( ) , field . doc ( ) , field . defaultValue ( ) , field . order ( ) ) ; }
Convert to the output schema of a field
36,183
protected Object convertFieldValue ( Schema outputSchema , Field field , GenericRecord inputRecord , WorkUnitState workUnit ) throws DataConversionException { if ( field . name ( ) . equals ( payloadField ) ) { return upConvertPayload ( inputRecord ) ; } return inputRecord . get ( field . name ( ) ) ; }
Convert to the output value of a field
36,184
private String convertFormat ( long watermark ) { Preconditions . checkArgument ( watermark > 0 , "Watermark should be positive number." ) ; return googleAnalyticsFormatter . print ( watermarkFormatter . parseDateTime ( Long . toString ( watermark ) ) ) ; }
Converts date format from watermark format to Google analytics format
36,185
public void submitCallable ( Callable < Void > callable , String name ) { this . futures . add ( new NamedFuture ( this . executor . submit ( callable ) , name ) ) ; }
Submit a callable to the thread pool
36,186
public void write ( byte [ ] record ) throws IOException { Preconditions . checkNotNull ( record ) ; byte [ ] toWrite = record ; if ( this . recordDelimiter . isPresent ( ) ) { toWrite = Arrays . copyOf ( record , record . length + 1 ) ; toWrite [ toWrite . length - 1 ] = this . recordDelimiter . get ( ) ; } if ( this . prependSize ) { long recordSize = toWrite . length ; ByteBuffer buf = ByteBuffer . allocate ( Longs . BYTES ) ; buf . putLong ( recordSize ) ; toWrite = ArrayUtils . addAll ( buf . array ( ) , toWrite ) ; } this . stagingFileOutputStream . write ( toWrite ) ; this . bytesWritten += toWrite . length ; this . recordsWritten ++ ; }
Write a source record to the staging file
36,187
public boolean moveToTrashAsUser ( Path path , final String user ) throws IOException { return getUserTrash ( user ) . moveToTrash ( path ) ; }
Move the path to trash as specified user .
36,188
public boolean moveToTrashAsOwner ( Path path ) throws IOException { String owner = this . fs . getFileStatus ( path ) . getOwner ( ) ; return moveToTrashAsUser ( path , owner ) ; }
Move the path to trash as the owner of the path .
36,189
public void run ( ) throws Exception { try { CountDownLatch countDownLatch = new CountDownLatch ( this . tasks ) ; for ( int i = 0 ; i < this . tasks ; i ++ ) { addTask ( i , countDownLatch ) ; } countDownLatch . await ( ) ; } finally { try { this . context . close ( ) ; } finally { this . executor . shutdownNow ( ) ; } } }
Run the example .
36,190
private void checkSrcLogFiles ( ) throws IOException { List < FileStatus > srcLogFiles = new ArrayList < > ( ) ; for ( Path logDirPath : this . srcLogDirs ) { srcLogFiles . addAll ( FileListUtils . listFilesRecursively ( this . srcFs , logDirPath , new PathFilter ( ) { public boolean accept ( Path path ) { return LogCopier . this . logFileExtensions . contains ( Files . getFileExtension ( path . getName ( ) ) ) ; } } ) ) ; } if ( srcLogFiles . isEmpty ( ) ) { LOGGER . warn ( "No log file found under directories " + this . srcLogDirs ) ; return ; } Set < Path > newLogFiles = Sets . newHashSet ( ) ; for ( FileStatus srcLogFile : srcLogFiles ) { newLogFiles . add ( srcLogFile . getPath ( ) ) ; } HashSet < Path > deletedLogFiles = Sets . newHashSet ( getSourceFiles ( ) ) ; deletedLogFiles . removeAll ( newLogFiles ) ; newLogFiles . removeAll ( getSourceFiles ( ) ) ; for ( final Path srcLogFile : newLogFiles ) { String destLogFileName = this . logFileNamePrefix . isPresent ( ) ? this . logFileNamePrefix . get ( ) + "." + srcLogFile . getName ( ) : srcLogFile . getName ( ) ; final Path destLogFile = new Path ( this . destLogDir , destLogFileName ) ; this . scheduler . schedule ( new LogCopyTask ( srcLogFile , destLogFile ) , this . copyInterval , this . timeUnit ) ; } for ( Path deletedLogFile : deletedLogFiles ) { Optional < LogCopyTask > logCopyTask = this . scheduler . getScheduledTask ( deletedLogFile ) ; if ( logCopyTask . isPresent ( ) ) { this . scheduler . cancel ( logCopyTask . get ( ) ) ; } } }
Perform a check on new source log files and submit copy tasks for new log files .
36,191
public static void addToHadoopConfiguration ( Configuration conf ) { final String SERIALIZATION_KEY = "io.serializations" ; String existingSerializers = conf . get ( SERIALIZATION_KEY ) ; if ( existingSerializers != null ) { conf . set ( SERIALIZATION_KEY , existingSerializers + "," + WritableShimSerialization . class . getName ( ) ) ; } else { conf . set ( SERIALIZATION_KEY , "org.apache.hadoop.io.serializer.WritableSerialization," + WritableShimSerialization . class . getName ( ) ) ; } }
Helper method to add this serializer to an existing Hadoop config .
36,192
public List < ? extends ProducerJob > partitionJobs ( ) { UrlTrieNode root = _jobNode . getRight ( ) ; if ( isOperatorEquals ( ) || root . getSize ( ) == 1 ) { return super . partitionJobs ( ) ; } else { if ( _groupSize <= 1 ) { throw new RuntimeException ( "This is impossible. When group size is 1, the operator must be equals" ) ; } UrlTrie trie = new UrlTrie ( getPage ( ) , root ) ; int gs = Math . min ( root . getSize ( ) , _groupSize ) ; UrlTriePrefixGrouper grouper = new UrlTriePrefixGrouper ( trie , ( int ) Math . ceil ( gs / 2.0 ) ) ; List < TrieBasedProducerJob > jobs = new ArrayList < > ( ) ; while ( grouper . hasNext ( ) ) { jobs . add ( new TrieBasedProducerJob ( _startDate , _endDate , grouper . next ( ) , grouper . getGroupSize ( ) ) ) ; } return jobs ; } }
The implementation here will first partition the job by pages and then by dates .
36,193
public void push ( Point point ) { BatchPoints . Builder batchPointsBuilder = BatchPoints . database ( database ) . retentionPolicy ( DEFAULT_RETENTION_POLICY ) ; batchPointsBuilder . point ( point ) ; influxDB . write ( batchPointsBuilder . build ( ) ) ; }
Push a single Point
36,194
public static long getPropAsLongFromSingleOrMultiWorkUnitState ( WorkUnitState workUnitState , String key , int partitionId ) { return Long . parseLong ( workUnitState . contains ( key ) ? workUnitState . getProp ( key ) : workUnitState . getProp ( KafkaUtils . getPartitionPropName ( key , partitionId ) , "0" ) ) ; }
Get a property as long from a work unit that may or may not be a multiworkunit . This method is needed because the SingleLevelWorkUnitPacker does not squeeze work units into a multiworkunit and thus does not append the partitionId to property keys while the BiLevelWorkUnitPacker does . Return 0 as default if key not found in either form .
36,195
private int computeTargetPartitionSize ( Histogram histogram , int minTargetPartitionSize , int maxPartitions ) { return Math . max ( minTargetPartitionSize , DoubleMath . roundToInt ( ( double ) histogram . totalRecordCount / maxPartitions , RoundingMode . CEILING ) ) ; }
Compute the target partition size .
36,196
/**
 * Issues a Salesforce count query for records whose watermark falls between
 * {@code startTime} and {@code endTime}, and returns the reported count.
 * Increments the probing context's probe counter as a side effect.
 */
private int getCountForRange(TableCountProbingContext probingContext, StrSubstitutor sub, Map<String, String> subValues, long startTime, long endTime) {
  subValues.put("start", Utils.dateToString(new Date(startTime), SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT));
  subValues.put("end", Utils.dateToString(new Date(endTime), SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT));
  String countQuery = sub.replace(PROBE_PARTITION_QUERY_TEMPLATE);
  log.debug("Count query: " + countQuery);
  probingContext.probeCount++;
  JsonArray records = getRecordsForQuery(probingContext.connector, countQuery);
  // The count query yields a single row; read its "cnt" field.
  JsonObject firstRecord = records.iterator().next().getAsJsonObject();
  return firstRecord.get("cnt").getAsInt();
}
Get the row count for a time range.
36,197
/**
 * Recursively splits the [startEpoch, endEpoch) bucket at its midpoint until each piece
 * fits under the probing bucket-size limit, appending the resulting groups to the
 * histogram in time order. Recursion stops when the bucket is already small enough,
 * the probe budget is exhausted, or the interval can no longer be meaningfully split.
 */
private void getHistogramRecursively(TableCountProbingContext probingContext, Histogram histogram, StrSubstitutor sub, Map<String, String> values, int count, long startEpoch, long endEpoch) {
  long mid = startEpoch + (endEpoch - startEpoch) / 2;
  boolean smallEnough = count <= probingContext.bucketSizeLimit;
  boolean budgetExhausted = probingContext.probeCount > probingContext.probeLimit;
  boolean tooNarrowToSplit = mid - startEpoch < MIN_SPLIT_TIME_MILLIS;
  if (smallEnough || budgetExhausted || tooNarrowToSplit) {
    histogram.add(new HistogramGroup(Utils.epochToDate(startEpoch, SECONDS_FORMAT), count));
    return;
  }
  // Probe the left half; the right half's count follows by subtraction (no extra query).
  int leftCount = getCountForRange(probingContext, sub, values, startEpoch, mid);
  getHistogramRecursively(probingContext, histogram, sub, values, leftCount, startEpoch, mid);
  log.debug("Count {} for left partition {} to {}", leftCount, startEpoch, mid);
  int rightCount = count - leftCount;
  getHistogramRecursively(probingContext, histogram, sub, values, rightCount, mid, endEpoch);
  log.debug("Count {} for right partition {} to {}", rightCount, mid, endEpoch);
}
Split a histogram bucket along the midpoint if it is larger than the bucket size limit.
36,198
/**
 * Builds a histogram for [startEpoch, endEpoch) by recursively probing the table so
 * that no bucket exceeds the probing context's bucket size limit. The supplied count
 * is taken as the record total for the whole range.
 */
private Histogram getHistogramByProbing(TableCountProbingContext probingContext, int count, long startEpoch, long endEpoch) {
  Map<String, String> templateValues = new HashMap<>();
  templateValues.put("table", probingContext.entity);
  templateValues.put("column", probingContext.watermarkColumn);
  templateValues.put("greater", ">=");
  templateValues.put("less", "<");
  Histogram result = new Histogram();
  getHistogramRecursively(probingContext, result, new StrSubstitutor(templateValues), templateValues, count, startEpoch, endEpoch);
  return result;
}
Get a histogram for the time range by probing to break down large buckets. The supplied count is used as the total for the range instead of issuing an initial count query.
36,199
/**
 * Refines a histogram by probing Salesforce to split buckets whose record count exceeds
 * the computed bucket size limit; buckets at or below the limit pass through unchanged.
 *
 * @param connector       connector used for the probe queries
 * @param entity          Salesforce table being scanned
 * @param watermarkColumn column used to bound each time range
 * @param state           source state holding partitioning and probing configuration
 * @param partition       partition whose high watermark closes the last bucket
 * @param histogram       the coarse histogram to refine
 * @return a new histogram in which oversized buckets have been split by probing
 */
private Histogram getRefinedHistogram(SalesforceConnector connector, String entity, String watermarkColumn, SourceState state, Partition partition, Histogram histogram) {
  final int maxPartitions = state.getPropAsInt(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS, ConfigurationKeys.DEFAULT_MAX_NUMBER_OF_PARTITIONS);
  final int probeLimit = state.getPropAsInt(DYNAMIC_PROBING_LIMIT, DEFAULT_DYNAMIC_PROBING_LIMIT);
  final int minTargetPartitionSize = state.getPropAsInt(MIN_TARGET_PARTITION_SIZE, DEFAULT_MIN_TARGET_PARTITION_SIZE);
  final double probeTargetRatio = state.getPropAsDouble(PROBE_TARGET_RATIO, DEFAULT_PROBE_TARGET_RATIO);
  // A bucket holding more than this many records gets split by probing.
  final int bucketSizeLimit = (int) (probeTargetRatio * computeTargetPartitionSize(histogram, minTargetPartitionSize, maxPartitions));
  log.info("Refining histogram with bucket size limit {}.", bucketSizeLimit);

  final Histogram outputHistogram = new Histogram();
  final TableCountProbingContext probingContext =
      new TableCountProbingContext(connector, entity, watermarkColumn, bucketSizeLimit, probeLimit);

  if (histogram.getGroups().isEmpty()) {
    return outputHistogram;
  }

  // Append a zero-count sentinel at the partition's high watermark so every real
  // group has a successor marking the end of its time range.
  List<HistogramGroup> groups = new ArrayList<>(histogram.getGroups());
  Date hwmDate = Utils.toDate(partition.getHighWatermark(), Partitioner.WATERMARKTIMEFORMAT);
  groups.add(new HistogramGroup(Utils.epochToDate(hwmDate.getTime(), SECONDS_FORMAT), 0));

  for (int i = 0; i < groups.size() - 1; i++) {
    HistogramGroup currentGroup = groups.get(i);
    HistogramGroup nextGroup = groups.get(i + 1);
    if (currentGroup.count > bucketSizeLimit) {
      long startEpoch = Utils.toDate(currentGroup.getKey(), SECONDS_FORMAT).getTime();
      long endEpoch = Utils.toDate(nextGroup.getKey(), SECONDS_FORMAT).getTime();
      outputHistogram.add(getHistogramByProbing(probingContext, currentGroup.count, startEpoch, endEpoch));
    } else {
      outputHistogram.add(currentGroup);
    }
  }

  log.info("Executed {} probes for refining the histogram.", probingContext.probeCount);
  if (probingContext.probeCount >= probingContext.probeLimit) {
    log.warn("Reached the probe limit");
  }
  return outputHistogram;
}
Refine the histogram by probing to split large buckets.