idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
36,100
private Map < Integer , Integer > getColumnPosSqlTypes ( ) { try { final Map < Integer , Integer > columnPosSqlTypes = Maps . newHashMap ( ) ; ParameterMetaData pMetaData = this . insertPstmtForFixedBatch . getParameterMetaData ( ) ; for ( int i = 1 ; i <= pMetaData . getParameterCount ( ) ; i ++ ) { columnPosSqlTypes ...
Creates a mapping between column positions and their data types
36,101
private void processBatchRequestResponse ( CloseableHttpResponse response ) throws IOException , UnexpectedResponseException { String entityStr = EntityUtils . toString ( response . getEntity ( ) ) ; int statusCode = response . getStatusLine ( ) . getStatusCode ( ) ; if ( statusCode >= 400 ) { throw new RuntimeExceptio...
Check results from batch response if any of the results is failure throw exception .
36,102
public static MD5Digest fromString ( String md5String ) { byte [ ] bytes ; try { bytes = Hex . decodeHex ( md5String . toCharArray ( ) ) ; return new MD5Digest ( md5String , bytes ) ; } catch ( DecoderException e ) { throw new IllegalArgumentException ( "Unable to convert md5string" , e ) ; } }
Static method to get an MD5Digest from a human - readable string representation
36,103
public static MD5Digest fromBytes ( byte [ ] md5Bytes ) { Preconditions . checkArgument ( md5Bytes . length == MD5_BYTES_LENGTH , "md5 bytes must be " + MD5_BYTES_LENGTH + " bytes in length, found " + md5Bytes . length + " bytes." ) ; String md5String = Hex . encodeHexString ( md5Bytes ) ; return new MD5Digest ( md5Str...
Static method to get an MD5Digest from a binary byte representation
36,104
public static MD5Digest fromBytes ( byte [ ] md5Bytes , int offset ) { byte [ ] md5BytesCopy = Arrays . copyOfRange ( md5Bytes , offset , offset + MD5_BYTES_LENGTH ) ; String md5String = Hex . encodeHexString ( md5BytesCopy ) ; return new MD5Digest ( md5String , md5BytesCopy ) ; }
Static method to get an MD5Digest from a binary byte representation .
36,105
public boolean add ( T t ) { if ( this . closed ) { throw new RuntimeException ( ConcurrentBoundedPriorityIterable . class . getSimpleName ( ) + " is no longer accepting requests!" ) ; } AllocatedRequestsIteratorBase . RequestWithResourceRequirement < T > newElement = new AllocatedRequestsIteratorBase . RequestWithReso...
Offer an element to the container .
36,106
public static List < Partition > sortPartitions ( List < Partition > partitions ) { Collections . sort ( partitions , new Comparator < Partition > ( ) { public int compare ( Partition o1 , Partition o2 ) { return o1 . getCompleteName ( ) . compareTo ( o2 . getCompleteName ( ) ) ; } } ) ; return partitions ; }
Sort all partitions inplace on the basis of complete name ie dbName . tableName . partitionName
36,107
public List < Partition > getPartitionsFromDataset ( ) throws IOException { try ( AutoReturnableObject < IMetaStoreClient > client = getClientPool ( ) . getClient ( ) ) { List < Partition > partitions = HiveUtils . getPartitions ( client . get ( ) , getTable ( ) , Optional . < String > absent ( ) ) ; return sortPartiti...
This method returns a sorted list of partitions .
36,108
public boolean addFileSet ( FileSet < CopyEntity > fileSet , List < WorkUnit > workUnits ) { boolean addedWorkunits = addFileSetImpl ( fileSet , workUnits ) ; if ( ! addedWorkunits ) { this . rejectedFileSet = true ; } return addedWorkunits ; }
Add a file set to the container .
36,109
public void clean ( ) throws IOException { Path versionLocation = ( ( HivePartitionRetentionVersion ) this . datasetVersion ) . getLocation ( ) ; Path datasetLocation = ( ( CleanableHivePartitionDataset ) this . cleanableDataset ) . getLocation ( ) ; String completeName = ( ( HivePartitionRetentionVersion ) this . data...
If simulate is set to true this will simply return . If version is pointing to an empty location drop the partition and close the jdbc connection . If version is pointing to the same location as of the dataset then drop the partition and close the jdbc connection . If version is pointing to the non deletable version lo...
36,110
private static void addHiveSiteDirToClasspath ( String hiveSiteDir ) { LOG . info ( "Adding " + hiveSiteDir + " to CLASSPATH" ) ; File f = new File ( hiveSiteDir ) ; try { URL u = f . toURI ( ) . toURL ( ) ; URLClassLoader urlClassLoader = ( URLClassLoader ) ClassLoader . getSystemClassLoader ( ) ; Class < URLClassLoad...
Helper method to add the directory containing the hive - site . xml file to the classpath
36,111
static String choppedStatementNoLineChange ( String statement ) { statement = statement . replaceAll ( "\\r\\n|\\r|\\n" , " " ) ; if ( statement . length ( ) <= MAX_OUTPUT_STMT_LENGTH ) { return statement ; } return statement . substring ( 0 , MAX_OUTPUT_STMT_LENGTH ) + "...... (" + ( statement . length ( ) - MAX_OUTPU...
Chopped statements with all line - changing character being removed for saving space of log .
36,112
private void addShutdownHook ( ) { ServiceManager manager = this . serviceManager ; Runtime . getRuntime ( ) . addShutdownHook ( new Thread ( ) { public void run ( ) { try { manager . stopAsync ( ) . awaitStopped ( 5 , TimeUnit . SECONDS ) ; } catch ( TimeoutException timeout ) { } } } ) ; }
Register a shutdown hook for this thread .
36,113
public Result executePolicy ( Object record ) { if ( ! ( record instanceof GenericRecord ) ) { return RowLevelPolicy . Result . FAILED ; } GenericRecord header = ( GenericRecord ) ( ( GenericRecord ) record ) . get ( "header" ) ; if ( header == null ) { return RowLevelPolicy . Result . FAILED ; } if ( header . get ( "t...
Return PASS if the record has either header . time or header . timestamp field .
36,114
public ChannelSftp getSftpChannel ( ) throws SftpException { try { ChannelSftp channelSftp = ( ChannelSftp ) this . session . openChannel ( "sftp" ) ; channelSftp . connect ( ) ; return channelSftp ; } catch ( JSchException e ) { throw new SftpException ( 0 , "Cannot open a channel to SFTP server" , e ) ; } }
Create new channel every time a command needs to be executed . This is required to support execution of multiple commands in parallel . All created channels are cleaned up when the session is closed .
36,115
public ChannelExec getExecChannel ( String command ) throws SftpException { ChannelExec channelExec ; try { channelExec = ( ChannelExec ) this . session . openChannel ( "exec" ) ; channelExec . setCommand ( command ) ; channelExec . connect ( ) ; return channelExec ; } catch ( JSchException e ) { throw new SftpExceptio...
Create a new sftp channel to execute commands .
36,116
public void connect ( ) throws FileBasedHelperException { String privateKey = PasswordManager . getInstance ( this . state ) . readPassword ( this . state . getProp ( ConfigurationKeys . SOURCE_CONN_PRIVATE_KEY ) ) ; String password = PasswordManager . getInstance ( this . state ) . readPassword ( this . state . getPro...
Opens up a connection to specified host using the username . Connects to the source using a private key without prompting for a password . This method does not support connecting to a source using a password only by private key
36,117
public InputStream getFileStream ( String file ) throws FileBasedHelperException { SftpGetMonitor monitor = new SftpGetMonitor ( ) ; try { ChannelSftp channel = getSftpChannel ( ) ; return new SftpFsFileInputStream ( channel . get ( file , monitor ) , channel ) ; } catch ( SftpException e ) { throw new FileBasedHelperE...
Executes a get SftpCommand and returns an input stream to the file
36,118
public Map < String , String > toDataMap ( ) { Map < String , String > map = Maps . newHashMap ( ) ; map . put ( PLATFORM_KEY , platform ) ; map . put ( NAME_KEY , getName ( ) ) ; map . putAll ( metadata ) ; return map ; }
Serialize to a string map
36,119
private synchronized void renewDelegationToken ( ) throws IOException , InterruptedException { this . token . renew ( this . fs . getConf ( ) ) ; writeDelegationTokenToFile ( ) ; if ( ! this . firstLogin ) { sendTokenFileUpdatedMessage ( InstanceType . CONTROLLER ) ; sendTokenFileUpdatedMessage ( InstanceType . PARTICI...
Renew the existing delegation token .
36,120
private void loginFromKeytab ( ) throws IOException { String keyTabFilePath = this . config . getString ( GobblinYarnConfigurationKeys . KEYTAB_FILE_PATH ) ; if ( Strings . isNullOrEmpty ( keyTabFilePath ) ) { throw new IOException ( "Keytab file path is not defined for Kerberos login" ) ; } if ( ! new File ( keyTabFil...
Login the user from a given keytab file .
36,121
synchronized void writeDelegationTokenToFile ( ) throws IOException { if ( this . fs . exists ( this . tokenFilePath ) ) { LOGGER . info ( "Deleting existing token file " + this . tokenFilePath ) ; this . fs . delete ( this . tokenFilePath , false ) ; } LOGGER . info ( "Writing new or renewed token to token file " + th...
Write the current delegation token to the token file .
36,122
private static int symmetricKeyAlgorithmNameToTag ( String cipherName ) { if ( StringUtils . isEmpty ( cipherName ) ) { return PGPEncryptedData . CAST5 ; } Set < Field > fields = ReflectionUtils . getAllFields ( PGPEncryptedData . class , ReflectionUtils . withName ( cipherName ) ) ; if ( fields . isEmpty ( ) ) { throw...
Convert a string cipher name to the integer tag used by GPG
36,123
public List < WorkUnit > getWorkunits ( SourceState state ) { List < WorkUnit > workUnits = Lists . newArrayList ( ) ; Config config = ConfigUtils . propertiesToConfig ( state . getProperties ( ) ) ; Config sourceConfig = ConfigUtils . getConfigOrEmpty ( config , DATASET_CLEANER_SOURCE_PREFIX ) ; List < String > config...
Create a work unit for each configuration defined or a single work unit if no configurations are defined
36,124
public Future < WriteResponse > write ( Batch < String > batch , WriteCallback callback ) { Timer . Context context = writeTimer . time ( ) ; int returnCode = 0 ; LOG . info ( "Dispatching batch " + batch . getId ( ) ) ; recordsAttempted . mark ( batch . getRecords ( ) . size ( ) ) ; try { String encoded = encodeBatch ...
Write a whole batch to eventhub
36,125
public WriteResponse write ( String record ) throws IOException { recordsAttempted . mark ( ) ; String encoded = encodeRecord ( record ) ; int returnCode = request ( encoded ) ; recordsSuccess . mark ( ) ; bytesWritten . mark ( encoded . length ( ) ) ; return WRITE_RESPONSE_WRAPPER . wrap ( returnCode ) ; }
Write a single record to eventhub
36,126
public void refreshSignature ( ) { if ( postStartTimestamp == 0 || ( System . nanoTime ( ) - postStartTimestamp ) > Duration . ofMinutes ( sigExpireInMinute ) . toNanos ( ) ) { try { signature = SharedAccessSignatureTokenProvider . generateSharedAccessSignature ( sasKeyName , sasKey , namespaceName , Duration . ofMinut...
A signature which contains the duration . After the duration is expired the signature becomes invalid
36,127
private int request ( String encoded ) throws IOException { refreshSignature ( ) ; HttpPost httpPost = new HttpPost ( targetURI ) ; httpPost . setHeader ( "Content-type" , "application/vnd.microsoft.servicebus.json" ) ; httpPost . setHeader ( "Authorization" , signature ) ; httpPost . setHeader ( "Host" , namespaceName...
Send an encoded string to the Eventhub using post method
36,128
private String encodeBatch ( Batch < String > batch ) throws IOException { List < String > records = batch . getRecords ( ) ; ArrayList < EventhubRequest > arrayList = new ArrayList < > ( ) ; for ( String record : records ) { arrayList . add ( new EventhubRequest ( record ) ) ; } return mapper . writeValueAsString ( ar...
Each record of batch is wrapped by a Body json object put this new object into an array encode the whole array
36,129
private String encodeRecord ( String record ) throws IOException { ArrayList < EventhubRequest > arrayList = new ArrayList < > ( ) ; arrayList . add ( new EventhubRequest ( record ) ) ; return mapper . writeValueAsString ( arrayList ) ; }
A single record is wrapped by a Body json object encode this json object
36,130
private void checkStability ( ) { if ( ( _watermarksInserted . getCount ( ) - _watermarksSwept . getCount ( ) ) > _watermarkLagThreshold ) { log . error ( "Setting abort flag for Watermark tracking because the lag between the " + "watermarksInserted: {} and watermarksSwept: {} is greater than the threshold: {}" , _wate...
Check if the memory footprint of the data structure is within bounds
36,131
public synchronized void start ( ) { if ( ! _started . get ( ) ) { _executorService = new ScheduledThreadPoolExecutor ( 1 , ExecutorsUtils . newThreadFactory ( Optional . of ( LoggerFactory . getLogger ( FineGrainedWatermarkTracker . class ) ) ) ) ; _executorService . scheduleAtFixedRate ( _sweeper , 0 , _sweepInterval...
Schedule the sweeper and stability checkers
36,132
synchronized int sweep ( ) { long startTime = System . nanoTime ( ) ; int swept = 0 ; for ( Map . Entry < String , Deque < AcknowledgableWatermark > > entry : _watermarksMap . entrySet ( ) ) { Deque < AcknowledgableWatermark > watermarks = entry . getValue ( ) ; boolean continueIteration = true ; while ( continueIterat...
A helper method to garbage collect acknowledged watermarks
36,133
public static String toPartitionJsonList ( List < PartitionDescriptor > descriptors ) { return Descriptor . GSON . toJson ( descriptors , DESCRIPTOR_LIST_TYPE ) ; }
Serialize a list of partition descriptors as json string
36,134
public Job createJob ( FileSystemDataset dataset ) throws IOException { Configuration conf = HadoopUtils . getConfFromState ( state ) ; if ( conf . get ( "mapreduce.output.fileoutputformat.compress" ) == null && conf . get ( "mapred.output.compress" ) == null ) { conf . setBoolean ( "mapreduce.output.fileoutputformat.c...
Customized MR job creation for Avro .
36,135
private Path concatPaths ( String ... names ) { if ( names == null || names . length == 0 ) { return null ; } Path cur = new Path ( names [ 0 ] ) ; for ( int i = 1 ; i < names . length ; ++ i ) { cur = new Path ( cur , new Path ( names [ i ] ) ) ; } return cur ; }
Concatenate multiple directory or file names into one path
36,136
protected Collection < Path > getGranularInputPaths ( Path path ) throws IOException { boolean appendDelta = this . state . getPropAsBoolean ( MRCompactor . COMPACTION_RENAME_SOURCE_DIR_ENABLED , MRCompactor . DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED ) ; Set < Path > uncompacted = Sets . newHashSet ( ) ; Set < Path...
Converts a top level input path to a group of sub - paths according to user defined granularity . This may be required because if upstream application generates many sub - paths but the map - reduce job only keeps track of the top level path after the job is done we won t be able to tell if those new arriving sub - pat...
36,137
private static boolean isImmutableType ( Object thing ) { return ( ( thing == null ) || ( thing instanceof String ) || ( thing instanceof Integer ) || ( thing instanceof Long ) ) ; }
Contains a collection of supported immutable types for copying . Only keep the types that are worth supporting as record types .
36,138
public static Object copy ( Object thing ) throws CopyNotSupportedException { if ( ! isCopyable ( thing ) ) { throw new CopyNotSupportedException ( thing . getClass ( ) . getName ( ) + " cannot be copied. See Copyable" ) ; } if ( thing instanceof Copyable ) { return ( ( Copyable ) thing ) . copy ( ) ; } if ( thing inst...
Copy this object if needed .
36,139
public static void printJobRuns ( List < JobExecutionInfo > jobExecutionInfos ) { if ( jobExecutionInfos == null ) { System . err . println ( "No job executions found." ) ; System . exit ( 1 ) ; } List < String > labels = Arrays . asList ( "Job Id" , "State" , "Schedule" , "Completed Tasks" , "Launched Tasks" , "Start ...
Print a table describing a bunch of individual job executions .
36,140
public static void printAllJobs ( List < JobExecutionInfo > jobExecutionInfos , int resultsLimit ) { if ( jobExecutionInfos == null ) { System . err . println ( "No jobs found." ) ; System . exit ( 1 ) ; } List < String > labels = Arrays . asList ( "Job Name" , "State" , "Last Run Started" , "Last Run Completed" , "Sch...
Print summary information about a bunch of jobs in the system
36,141
public static void printJob ( Optional < JobExecutionInfo > jobExecutionInfoOptional ) { if ( ! jobExecutionInfoOptional . isPresent ( ) ) { System . err . println ( "Job id not found." ) ; return ; } JobExecutionInfo jobExecutionInfo = jobExecutionInfoOptional . get ( ) ; List < List < String > > data = new ArrayList ...
Print information about one specific job .
36,142
public static void printJobProperties ( Optional < JobExecutionInfo > jobExecutionInfoOptional ) { if ( ! jobExecutionInfoOptional . isPresent ( ) ) { System . err . println ( "Job not found." ) ; return ; } List < List < String > > data = new ArrayList < > ( ) ; List < String > flags = Arrays . asList ( "" , "-" ) ; L...
Print properties of a specific job
36,143
private static void printMetrics ( MetricArray metrics ) { System . out . println ( ) ; if ( metrics . size ( ) == 0 ) { System . out . println ( "No metrics found." ) ; return ; } List < List < String > > data = new ArrayList < > ( ) ; List < String > flags = Arrays . asList ( "" , "-" ) ; for ( Metric metric : metric...
Print out various metrics
36,144
@ CliObjectOption ( description = "Sets the period for which articles should be pulled in ISO time format (e.g. P2D, PT1H)" ) public EmbeddedWikipediaExample lookback ( String isoLookback ) { this . setConfiguration ( WikipediaExtractor . BOOTSTRAP_PERIOD , isoLookback ) ; return this ; }
Set bootstrap lookback i . e . oldest revision to pull .
36,145
@ CliObjectOption ( description = "Write output to Avro files. Specify the output directory as argument." ) public EmbeddedWikipediaExample avroOutput ( String outputPath ) { this . setConfiguration ( ConfigurationKeys . WRITER_BUILDER_CLASS , AvroDataWriterBuilder . class . getName ( ) ) ; this . setConfiguration ( Co...
Write output to avro files at the given input location .
36,146
protected OrcKey convertOrcStructToOrcKey ( OrcStruct struct ) { OrcKey orcKey = new OrcKey ( ) ; orcKey . key = struct ; return orcKey ; }
The output key of mapper needs to be comparable . In the scenarios that we need the orc record itself to be the output key this conversion will be necessary .
36,147
public void afterRead ( D record , long startTime ) { Instrumented . updateTimer ( this . extractorTimer , System . nanoTime ( ) - startTime , TimeUnit . NANOSECONDS ) ; if ( record != null ) { Instrumented . markMeter ( this . readRecordsMeter ) ; } }
Called after each record is read .
36,148
public void nextStage ( String name ) throws IOException { endStage ( ) ; this . currentStage = name ; this . currentStageStart = System . currentTimeMillis ( ) ; }
End the previous stage record the time spent in that stage and start the timer for a new stage .
36,149
public void endStage ( ) { if ( this . currentStage != null ) { long time = System . currentTimeMillis ( ) - this . currentStageStart ; this . timings . add ( new Stage ( this . currentStage , time ) ) ; if ( reportAsMetrics && submitter . getMetricContext ( ) . isPresent ( ) ) { String timerName = submitter . getNames...
End the previous stage and record the time spent in that stage .
36,150
public void submit ( Map < String , String > additionalMetadata ) throws IOException { if ( this . submitted ) { throw new IOException ( "MultiTimingEvent has already been submitted." ) ; } this . submitted = true ; endStage ( ) ; Map < String , String > finalMetadata = Maps . newHashMap ( ) ; finalMetadata . putAll ( ...
Ends the current stage and submits the event containing the timings of each event .
36,151
public Optional < JobExecutionInfo > queryByJobId ( String id ) throws RemoteInvocationException { JobExecutionQuery query = new JobExecutionQuery ( ) ; query . setIdType ( QueryIdTypeEnum . JOB_ID ) ; query . setId ( JobExecutionQuery . Id . create ( id ) ) ; query . setLimit ( 1 ) ; List < JobExecutionInfo > results ...
Retrieve a Gobblin job by its id .
36,152
public List < JobExecutionInfo > queryAllJobs ( QueryListType lookupType , int resultsLimit ) throws RemoteInvocationException { JobExecutionQuery query = new JobExecutionQuery ( ) ; query . setIdType ( QueryIdTypeEnum . LIST_TYPE ) ; query . setId ( JobExecutionQuery . Id . create ( lookupType ) ) ; query . setJobProp...
Retrieve all jobs
36,153
public List < JobExecutionInfo > queryByJobName ( String name , int resultsLimit ) throws RemoteInvocationException { JobExecutionQuery query = new JobExecutionQuery ( ) ; query . setIdType ( QueryIdTypeEnum . JOB_NAME ) ; query . setId ( JobExecutionQuery . Id . create ( name ) ) ; query . setIncludeTaskExecutions ( f...
Query jobs by name
36,154
private List < JobExecutionInfo > executeQuery ( JobExecutionQuery query ) throws RemoteInvocationException { JobExecutionQueryResult result = this . client . get ( query ) ; if ( result != null && result . hasJobExecutions ( ) ) { return result . getJobExecutions ( ) ; } return Collections . emptyList ( ) ; }
Execute a query and coerce the result into a java List
36,155
private boolean shouldPublishDataInTask ( ) { boolean publishDataAtJobLevel = this . taskState . getPropAsBoolean ( ConfigurationKeys . PUBLISH_DATA_AT_JOB_LEVEL , ConfigurationKeys . DEFAULT_PUBLISH_DATA_AT_JOB_LEVEL ) ; if ( publishDataAtJobLevel ) { LOG . info ( String . format ( "%s is true. Will publish data at th...
Whether the task should directly publish its output data to the final publisher output directory .
36,156
private static boolean inMultipleBranches ( List < Boolean > branches ) { int inBranches = 0 ; for ( Boolean bool : branches ) { if ( bool && ++ inBranches > 1 ) { break ; } } return inBranches > 1 ; }
Check if a schema or data record is being passed to more than one branches .
36,157
public synchronized boolean cancel ( ) { if ( this . taskFuture != null && this . taskFuture . cancel ( true ) ) { this . taskStateTracker . onTaskRunCompletion ( this ) ; this . completeShutdown ( ) ; return true ; } else { return false ; } }
return true if the task is successfully cancelled .
36,158
protected GenericRecord convertRecord ( GenericRecord record ) throws IOException { return AvroUtils . convertRecordSchema ( record , this . schema . get ( ) ) ; }
Convert the record to the output schema of this extractor
36,159
private Path getDatasetDirForKey ( ConfigKeyPath configKey ) throws VersionDoesNotExistException { return this . fs . getPath ( this . storePrefix , configKey . getAbsolutePathString ( ) ) ; }
Get path object using zipped file system and relative path
36,160
public Future < ? extends List < Pair < SpecExecutor . Verb , Spec > > > changedSpecs ( ) { List < Pair < SpecExecutor . Verb , Spec > > changesSpecs = new ArrayList < > ( ) ; try { Pair < SpecExecutor . Verb , Spec > specPair = _jobSpecQueue . take ( ) ; _metrics . jobSpecDeqCount . incrementAndGet ( ) ; do { changesS...
This method returns job specs receive from Kafka . It will block if there are no job specs .
36,161
public synchronized boolean remove ( String name ) { MetricContext metricContext = this . metricContext . get ( ) ; if ( metricContext != null ) { metricContext . removeFromMetrics ( this . contextAwareMetrics . get ( name ) . getContextAwareMetric ( ) ) ; } return this . contextAwareMetrics . remove ( name ) != null &...
Remove a metric with a given name .
36,162
private static long getInterval ( long lowWatermarkValue , long highWatermarkValue , long partitionInterval , int maxIntervals ) { if ( lowWatermarkValue > highWatermarkValue ) { LOG . info ( "lowWatermarkValue: " + lowWatermarkValue + " is greater than highWatermarkValue: " + highWatermarkValue ) ; return 0 ; } long o...
recalculate interval if total number of partitions greater than maximum number of allowed partitions
36,163
protected void report ( SortedMap < String , Gauge > gauges , SortedMap < String , Counter > counters , SortedMap < String , Histogram > histograms , SortedMap < String , Meter > meters , SortedMap < String , Timer > timers , Map < String , Object > tags , boolean isFinal ) { report ( gauges , counters , histograms , m...
Report the input metrics . The input tags apply to all input metrics .
36,164
public boolean getPermits ( long permits ) throws InterruptedException { if ( permits <= 0 ) { return true ; } long startTimeNanos = System . nanoTime ( ) ; this . permitsOutstanding . addEntryWithWeight ( permits ) ; this . lock . lock ( ) ; try { while ( true ) { if ( permits >= this . knownUnsatisfiablePermits ) { b...
Try to get a number of permits from this requester .
36,165
private void maybeSendNewPermitRequest ( ) { if ( ! this . requestSemaphore . tryAcquire ( ) ) { return ; } if ( ! this . retryStatus . canRetryNow ( ) ) { this . requestSemaphore . release ( ) ; return ; } try { long permits = computeNextPermitRequest ( ) ; if ( permits <= 0 ) { this . requestSemaphore . release ( ) ;...
Send a new permit request to the server .
36,166
public static Optional < Long > getRateIfRateControlled ( FileSystem fs ) { if ( fs instanceof Decorator ) { List < Object > lineage = DecoratorUtils . getDecoratorLineage ( fs ) ; for ( Object obj : lineage ) { if ( obj instanceof RateControlledFileSystem ) { return Optional . of ( ( ( RateControlledFileSystem ) obj )...
Determines whether the file system is rate controlled and if so returns the allowed rate in operations per second .
36,167
public static DefaultJobSpecScheduleImpl createImmediateSchedule ( JobSpec jobSpec , Runnable jobRunnable ) { return new DefaultJobSpecScheduleImpl ( jobSpec , jobRunnable , Optional . of ( System . currentTimeMillis ( ) ) ) ; }
Creates a schedule denoting that the job is to be executed immediately
36,168
public static DefaultJobSpecScheduleImpl createNoSchedule ( JobSpec jobSpec , Runnable jobRunnable ) { return new DefaultJobSpecScheduleImpl ( jobSpec , jobRunnable , Optional . < Long > absent ( ) ) ; }
Creates a schedule denoting that the job is not to be executed
36,169
Path getTargetLocation ( FileSystem sourceFs , FileSystem targetFs , Path path , Optional < Partition > partition ) throws IOException { return getTargetPathHelper ( ) . getTargetPath ( path , targetFs , partition , false ) ; }
Compute the target location for a Hive location .
36,170
public static File getIvySettingsFile ( ) throws IOException { URL settingsUrl = Thread . currentThread ( ) . getContextClassLoader ( ) . getResource ( IVY_SETTINGS_FILE_NAME ) ; if ( settingsUrl == null ) { throw new IOException ( "Failed to find " + IVY_SETTINGS_FILE_NAME + " from class path" ) ; } File ivySettingsFi...
Get ivy settings file from classpath
36,171
public DataWriterBuilder < S , D > writeTo ( Destination destination ) { this . destination = destination ; log . debug ( "For destination: {}" , destination ) ; return this ; }
Tell the writer the destination to write to .
36,172
public DataWriterBuilder < S , D > withWriterId ( String writerId ) { this . writerId = writerId ; log . debug ( "withWriterId : {}" , this . writerId ) ; return this ; }
Give the writer a unique ID .
36,173
public DataWriterBuilder < S , D > withSchema ( S schema ) { this . schema = schema ; log . debug ( "withSchema : {}" , this . schema ) ; return this ; }
Tell the writer the data schema .
36,174
public DataWriterBuilder < S , D > withBranches ( int branches ) { this . branches = branches ; log . debug ( "With branches: {}" , this . branches ) ; return this ; }
Tell the writer how many branches are being used .
36,175
public DataWriterBuilder < S , D > forBranch ( int branch ) { this . branch = branch ; log . debug ( "For branch: {}" , this . branch ) ; return this ; }
Tell the writer which branch it is associated with .
36,176
public static boolean isHiveTableAvroType ( Table targetTable ) throws IOException { String serializationLib = targetTable . getTTable ( ) . getSd ( ) . getSerdeInfo ( ) . getSerializationLib ( ) ; String inputFormat = targetTable . getTTable ( ) . getSd ( ) . getInputFormat ( ) ; String outputFormat = targetTable . ge...
Tell whether a hive table is actually an Avro table
36,177
public static String getCreateTableQuery ( String completeNewTableName , String likeTableDbName , String likeTableName , String location ) { return getCreateTableQuery ( completeNewTableName , likeTableDbName , likeTableName ) + " LOCATION " + PartitionUtils . getQuotedString ( location ) ; }
If staging table doesn t exist it will create a staging table .
36,178
public static String getInsertQuery ( PurgeableHivePartitionDataset dataset ) { return "INSERT OVERWRITE" + " TABLE " + dataset . getCompleteStagingTableName ( ) + " PARTITION (" + PartitionUtils . getPartitionSpecString ( dataset . getSpec ( ) ) + ")" + " SELECT /*+MAPJOIN(b) */ " + getCommaSeparatedColumnNames ( data...
This query will create a partition in staging table and insert the datasets whose compliance id is not contained in the compliance id table .
36,179
public static List < String > getPurgeQueries ( PurgeableHivePartitionDataset dataset ) { List < String > queries = new ArrayList < > ( ) ; queries . add ( getUseDbQuery ( dataset . getStagingDb ( ) ) ) ; queries . add ( getInsertQuery ( dataset ) ) ; return queries ; }
Will return all the queries needed to populate the staging table partition . This won t include alter table partition location query .
36,180
public static List < String > getBackupQueries ( PurgeableHivePartitionDataset dataset ) { List < String > queries = new ArrayList < > ( ) ; queries . add ( getUseDbQuery ( dataset . getDbName ( ) ) ) ; queries . add ( getCreateTableQuery ( dataset . getCompleteBackupTableName ( ) , dataset . getDbName ( ) , dataset . ...
Will return all the queries needed to have a backup table partition pointing to the original partition data location
36,181
public static List < String > getAlterOriginalPartitionLocationQueries ( PurgeableHivePartitionDataset dataset ) { List < String > queries = new ArrayList < > ( ) ; queries . add ( getUseDbQuery ( dataset . getDbName ( ) ) ) ; String partitionSpecString = PartitionUtils . getPartitionSpecString ( dataset . getSpec ( ) ...
Will return all the queries needed to alter the location of the table partition . Alter table partition query doesn t work with syntax dbName . tableName
36,182
protected Field convertFieldSchema ( Schema inputSchema , Field field , WorkUnitState workUnit ) throws SchemaConversionException { if ( field . name ( ) . equals ( payloadField ) ) { return createLatestPayloadField ( field ) ; } return new Field ( field . name ( ) , field . schema ( ) , field . doc ( ) , field . defau...
Convert to the output schema of a field
36,183
protected Object convertFieldValue ( Schema outputSchema , Field field , GenericRecord inputRecord , WorkUnitState workUnit ) throws DataConversionException { if ( field . name ( ) . equals ( payloadField ) ) { return upConvertPayload ( inputRecord ) ; } return inputRecord . get ( field . name ( ) ) ; }
Convert to the output value of a field
36,184
private String convertFormat ( long watermark ) { Preconditions . checkArgument ( watermark > 0 , "Watermark should be positive number." ) ; return googleAnalyticsFormatter . print ( watermarkFormatter . parseDateTime ( Long . toString ( watermark ) ) ) ; }
Converts date format from watermark format to Google analytics format
36,185
public void submitCallable ( Callable < Void > callable , String name ) { this . futures . add ( new NamedFuture ( this . executor . submit ( callable ) , name ) ) ; }
Submit a callable to the thread pool
36,186
public void write ( byte [ ] record ) throws IOException { Preconditions . checkNotNull ( record ) ; byte [ ] toWrite = record ; if ( this . recordDelimiter . isPresent ( ) ) { toWrite = Arrays . copyOf ( record , record . length + 1 ) ; toWrite [ toWrite . length - 1 ] = this . recordDelimiter . get ( ) ; } if ( this ...
Write a source record to the staging file
36,187
/**
 * Moves the given path to the trash, acting as the specified user.
 *
 * @param path the path to move to the trash
 * @param user the user on whose behalf the move is performed
 * @return whether the move succeeded
 * @throws IOException if the trash operation fails
 */
public boolean moveToTrashAsUser(Path path, final String user) throws IOException {
  // Delegate to the per-user trash instance for the given user.
  boolean moved = getUserTrash(user).moveToTrash(path);
  return moved;
}
Moves the path to the trash as the specified user.
36,188
/**
 * Moves the given path to the trash, acting as the owner of that path.
 *
 * <p>Looks up the path's owner via the file system, then delegates to
 * {@link #moveToTrashAsUser}.
 *
 * @param path the path to move to the trash
 * @return whether the move succeeded
 * @throws IOException if the owner lookup or the trash operation fails
 */
public boolean moveToTrashAsOwner(Path path) throws IOException {
  return moveToTrashAsUser(path, this.fs.getFileStatus(path).getOwner());
}
Move the path to trash as the owner of the path .
36,189
public void run ( ) throws Exception { try { CountDownLatch countDownLatch = new CountDownLatch ( this . tasks ) ; for ( int i = 0 ; i < this . tasks ; i ++ ) { addTask ( i , countDownLatch ) ; } countDownLatch . await ( ) ; } finally { try { this . context . close ( ) ; } finally { this . executor . shutdownNow ( ) ; ...
Run the example .
36,190
private void checkSrcLogFiles ( ) throws IOException { List < FileStatus > srcLogFiles = new ArrayList < > ( ) ; for ( Path logDirPath : this . srcLogDirs ) { srcLogFiles . addAll ( FileListUtils . listFilesRecursively ( this . srcFs , logDirPath , new PathFilter ( ) { public boolean accept ( Path path ) { return LogCo...
Perform a check on new source log files and submit copy tasks for new log files .
36,191
public static void addToHadoopConfiguration ( Configuration conf ) { final String SERIALIZATION_KEY = "io.serializations" ; String existingSerializers = conf . get ( SERIALIZATION_KEY ) ; if ( existingSerializers != null ) { conf . set ( SERIALIZATION_KEY , existingSerializers + "," + WritableShimSerialization . class ...
Helper method to add this serializer to an existing Hadoop config .
36,192
public List < ? extends ProducerJob > partitionJobs ( ) { UrlTrieNode root = _jobNode . getRight ( ) ; if ( isOperatorEquals ( ) || root . getSize ( ) == 1 ) { return super . partitionJobs ( ) ; } else { if ( _groupSize <= 1 ) { throw new RuntimeException ( "This is impossible. When group size is 1, the operator must b...
The implementation here will first partition the job by pages and then by dates .
36,193
/**
 * Pushes a single point to InfluxDB.
 *
 * <p>The point is wrapped in a one-element batch targeting the configured
 * database and the default retention policy.
 *
 * @param point the point to write
 */
public void push(Point point) {
  BatchPoints singlePointBatch = BatchPoints.database(database)
      .retentionPolicy(DEFAULT_RETENTION_POLICY)
      .point(point)
      .build();
  influxDB.write(singlePointBatch);
}
Push a single Point
36,194
/**
 * Reads a property as a long from a work unit that may or may not have come
 * out of a multi-work-unit.
 *
 * <p>The {@code SingleLevelWorkUnitPacker} does not squeeze work units into a
 * multi-work-unit, so the plain key is present; the {@code BiLevelWorkUnitPacker}
 * appends the partition id to property keys. This method checks the plain key
 * first and falls back to the partition-suffixed key, defaulting to 0.
 *
 * @param workUnitState the work unit state to read from
 * @param key the base property key
 * @param partitionId the partition id used to build the suffixed key
 * @return the property value as a long, or 0 if neither key is present
 */
public static long getPropAsLongFromSingleOrMultiWorkUnitState(WorkUnitState workUnitState, String key, int partitionId) {
  String rawValue;
  if (workUnitState.contains(key)) {
    rawValue = workUnitState.getProp(key);
  } else {
    rawValue = workUnitState.getProp(KafkaUtils.getPartitionPropName(key, partitionId), "0");
  }
  return Long.parseLong(rawValue);
}
Get a property as long from a work unit that may or may not be a multiworkunit . This method is needed because the SingleLevelWorkUnitPacker does not squeeze work units into a multiworkunit and thus does not append the partitionId to property keys while the BiLevelWorkUnitPacker does . Return 0 as default if key not fo...
36,195
/**
 * Computes the target partition size.
 *
 * <p>Divides the histogram's total record count evenly across the maximum
 * number of partitions (rounding up), but never goes below the configured
 * minimum target partition size.
 *
 * @param histogram the histogram holding the total record count
 * @param minTargetPartitionSize the lower bound for the partition size
 * @param maxPartitions the maximum number of partitions allowed
 * @return the target number of records per partition
 */
private int computeTargetPartitionSize(Histogram histogram, int minTargetPartitionSize, int maxPartitions) {
  double recordsPerPartition = (double) histogram.totalRecordCount / maxPartitions;
  int ceilingSize = DoubleMath.roundToInt(recordsPerPartition, RoundingMode.CEILING);
  return Math.max(minTargetPartitionSize, ceilingSize);
}
Compute the target partition size .
36,196
private int getCountForRange ( TableCountProbingContext probingContext , StrSubstitutor sub , Map < String , String > subValues , long startTime , long endTime ) { String startTimeStr = Utils . dateToString ( new Date ( startTime ) , SalesforceExtractor . SALESFORCE_TIMESTAMP_FORMAT ) ; String endTimeStr = Utils . date...
Get the row count for a time range
36,197
private void getHistogramRecursively ( TableCountProbingContext probingContext , Histogram histogram , StrSubstitutor sub , Map < String , String > values , int count , long startEpoch , long endEpoch ) { long midpointEpoch = startEpoch + ( endEpoch - startEpoch ) / 2 ; if ( count <= probingContext . bucketSizeLimit ||...
Split a histogram bucket along the midpoint if it is larger than the bucket size limit .
36,198
private Histogram getHistogramByProbing ( TableCountProbingContext probingContext , int count , long startEpoch , long endEpoch ) { Histogram histogram = new Histogram ( ) ; Map < String , String > values = new HashMap < > ( ) ; values . put ( "table" , probingContext . entity ) ; values . put ( "column" , probingConte...
Get a histogram for the time range by probing to break down large buckets . Use count instead of querying if it is non - negative .
36,199
private Histogram getRefinedHistogram ( SalesforceConnector connector , String entity , String watermarkColumn , SourceState state , Partition partition , Histogram histogram ) { final int maxPartitions = state . getPropAsInt ( ConfigurationKeys . SOURCE_MAX_NUMBER_OF_PARTITIONS , ConfigurationKeys . DEFAULT_MAX_NUMBER...
Refine the histogram by probing to split large buckets