idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
36,300
/**
 * Parses the numeric delta portion out of a max-limit expression of the form
 * "<limitType>-<delta>".
 *
 * @param maxLimit the raw max-limit string; may be null or empty
 * @return the integer following the '-' separator, or 0 when the string is
 *         empty or has no delta part
 */
private static int getAppendLimitDelta(String maxLimit) {
  LOG.debug("Getting append limit delta");
  if (Strings.isNullOrEmpty(maxLimit)) {
    return 0;
  }
  String[] parts = maxLimit.split("-");
  return parts.length >= 2 ? Integer.parseInt(parts[1]) : 0;
}
Gets the append limit delta: the number after the '-' separator in the max-limit string, or 0 when absent.
36,301
/**
 * Determines whether watermark configuration is present in the state.
 *
 * @return true when both the delta (watermark) columns and the watermark type
 *         are configured, false otherwise
 */
private boolean isWatermarkExists() {
  String deltaFields = this.state.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
  if (Strings.isNullOrEmpty(deltaFields)) {
    return false;
  }
  // Only consult the watermark type once the delta fields are known to exist.
  return !Strings.isNullOrEmpty(this.state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE));
}
Returns true if both watermark columns and the watermark type are provided.
36,302
/**
 * Retrieves the encryption configuration for the branch represented by the
 * given {@code WorkUnitState}. Convenience overload that delegates to the
 * three-argument variant with a null task state.
 *
 * @param entityType    entity whose config prefix should be used
 * @param workUnitState state of the work unit identifying the branch
 * @return encryption parameters for the branch
 */
public static Map<String, Object> getConfigForBranch(EntityType entityType, WorkUnitState workUnitState) {
  return getConfigForBranch(entityType, null, workUnitState);
}
Retrieve encryption configuration for the branch the WorkUnitState represents
36,303
/**
 * Retrieves the encryption configuration for a specific branch of a task.
 * Builds the branch-specific property-name suffix and delegates to the
 * prefix/suffix-based lookup.
 *
 * @param entityType  entity whose config prefix should be used
 * @param taskState   task-level state holding the properties
 * @param numBranches total number of branches for the task
 * @param branch      index of the branch to read config for
 * @return encryption parameters for the branch
 */
public static Map<String, Object> getConfigForBranch(EntityType entityType, State taskState, int numBranches, int branch) {
  return getConfigForBranch(taskState, entityType.getConfigPrefix(),
      ForkOperatorUtils.getPropertyNameForBranch("", numBranches, branch));
}
Retrieve encryption config for a given branch of a task
36,304
/**
 * Looks up the keystore type from the given parameter map.
 *
 * @param parameters encryption parameter map
 * @return the configured keystore type, or the default when the key is absent
 */
public static String getKeystoreType(Map<String, Object> parameters) {
  Object configured = parameters.get(ENCRYPTION_KEYSTORE_TYPE_KEY);
  return configured == null ? ENCRYPTION_KEYSTORE_TYPE_KEY_DEFAULT : (String) configured;
}
Get the type of keystore to instantiate
36,305
/**
 * Looks up the underlying cipher name from the given parameter map.
 *
 * @param parameters encryption parameter map
 * @return the cipher name, or null when not configured
 */
public static String getCipher(Map<String, Object> parameters) {
  Object cipherName = parameters.get(ENCRYPTION_CIPHER_KEY);
  return (String) cipherName;
}
Get the underlying cipher name
36,306
/**
 * Extracts the properties for a given branch, stripping off the common prefix
 * and the branch suffix from each matching key. E.g. with prefix
 * "writer.encrypt" and branchSuffix ".1", the key "writer.encrypt.algorithm.1"
 * yields the stripped key "algorithm".
 *
 * @param properties   source properties to scan
 * @param prefix       required key prefix (without a trailing dot)
 * @param branchSuffix required key suffix; the empty string matches every key
 * @return map from stripped key to the original property value
 */
private static Map<String, Object> extractPropertiesForBranch(Properties properties, String prefix, String branchSuffix) {
  Map<String, Object> ret = new HashMap<>();
  for (Map.Entry<Object, Object> prop : properties.entrySet()) {
    String key = (String) prop.getKey();
    if (key.startsWith(prefix) && (branchSuffix.length() == 0 || key.endsWith(branchSuffix))) {
      // Skip past the prefix plus the dot that follows it; clamped so a key
      // that is exactly the prefix maps to the empty stripped key.
      int strippedKeyStart = Math.min(key.length(), prefix.length() + 1);
      // Reject keys like "prefixfoo" where the prefix is not followed by '.'.
      if (strippedKeyStart != key.length() && key.charAt(strippedKeyStart - 1) != '.') {
        continue;
      }
      // Drop the branch suffix from the end; max() guards against the suffix
      // overlapping the prefix region.
      int strippedKeyEnd = Math.max(strippedKeyStart, key.length() - branchSuffix.length());
      String strippedKey = key.substring(strippedKeyStart, strippedKeyEnd);
      ret.put(strippedKey, prop.getValue());
    }
  }
  return ret;
}
Extract a set of properties for a given branch stripping out the prefix and branch suffix .
36,307
/**
 * Decides whether a version can be used to restore the dataset: it must live
 * at a different location than the dataset itself and still have data on disk.
 *
 * @param dataset dataset to restore
 * @param version candidate restore version
 * @return true when the version is usable for a restore
 * @throws IOException on filesystem access failure
 */
private boolean isRestorable(HivePartitionDataset dataset, HivePartitionVersion version) throws IOException {
  String versionLocation = version.getLocation().toString();
  String datasetLocation = dataset.getLocation().toString();
  // A version at the dataset's own location cannot restore it.
  if (versionLocation.equalsIgnoreCase(datasetLocation)) {
    return false;
  }
  // Access the version's data as its owner.
  FileSystem fs = ProxyUtils.getOwnerFs(new State(this.state), version.getOwner());
  // Only versions that still hold data are usable.
  return HadoopUtils.hasContent(fs, version.getLocation());
}
A version is called restorable if it can be used to restore dataset .
36,308
/**
 * Creates a Helix cluster for the Gobblin Cluster application and enables
 * participant auto-join so workers need not be pre-registered.
 *
 * @param zkConnectionString ZooKeeper connection string
 * @param clusterName        name of the Helix cluster to create
 * @param overwrite          whether to overwrite an existing cluster
 */
public static void createGobblinHelixCluster(String zkConnectionString, String clusterName, boolean overwrite) {
  ClusterSetup setupTool = new ClusterSetup(zkConnectionString);
  setupTool.addCluster(clusterName, overwrite);
  // Allow participants to auto-join the cluster on connect.
  setupTool.setConfig(HelixConfigScope.ConfigScopeProperty.CLUSTER, clusterName,
      ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN + "=true");
}
Create a Helix cluster for the Gobblin Cluster application .
36,309
/**
 * Submits a job for execution. We have switched from Helix JobQueue to
 * WorkFlow-based job execution, so despite the name this simply delegates to
 * {@code submitJobToWorkFlow}; the queue name is passed through as the
 * workflow name.
 *
 * @throws Exception if the workflow submission fails
 */
public static void submitJobToQueue(JobConfig.Builder jobConfigBuilder, String queueName, String jobName,
    TaskDriver helixTaskDriver, HelixManager helixManager, long jobQueueDeleteTimeoutSeconds) throws Exception {
  submitJobToWorkFlow(jobConfigBuilder, queueName, jobName, helixTaskDriver, helixManager, jobQueueDeleteTimeoutSeconds);
}
We have switched from Helix JobQueue to WorkFlow based job execution .
36,310
/**
 * Deletes a stopped Helix workflow. The caller must already have requested the
 * workflow to stop; this method polls until the given job reports STOPPED and
 * then deletes the whole workflow.
 *
 * NOTE(review): TaskDriver.getWorkflowContext may return null for a workflow
 * whose context does not exist yet, which would NPE on the first loop check —
 * confirm callers only invoke this for existing workflows.
 *
 * @param helixManager connected Helix manager
 * @param workFlowName name of the workflow to delete
 * @param jobName      job whose STOPPED state gates the deletion
 * @throws InterruptedException if interrupted while polling
 */
private static void deleteStoppedHelixJob(HelixManager helixManager, String workFlowName, String jobName) throws InterruptedException {
  WorkflowContext workflowContext = TaskDriver.getWorkflowContext(helixManager, workFlowName);
  // Poll once per second until the namespaced job reaches STOPPED.
  while (workflowContext.getJobState(TaskUtil.getNamespacedJobName(workFlowName, jobName)) != STOPPED) {
    log.info("Waiting for job {} to stop...", jobName);
    workflowContext = TaskDriver.getWorkflowContext(helixManager, workFlowName);
    Thread.sleep(1000);
  }
  // Block up to 10 seconds for the deletion to complete.
  new TaskDriver(helixManager).deleteAndWaitForCompletion(workFlowName, 10000L);
  log.info("Workflow deleted.");
}
Deletes the stopped Helix Workflow . Caller should stop the Workflow before calling this method .
36,311
/**
 * Copies partition parameters from a source partition to a destination
 * partition, filtered by whitelist/blacklist on parameter names. The
 * destination partition is dropped and re-added so the new parameters take
 * effect.
 *
 * @param completeSourcePartitionName fully qualified name of the source partition
 * @param completeDestPartitionName   fully qualified name of the destination partition
 * @param whitelist parameter names allowed to be copied
 * @param blacklist parameter names excluded from copying
 * @return true when both partitions exist and the destination is successfully
 *         dropped and re-added; false otherwise
 */
public boolean copyPartitionParams(String completeSourcePartitionName, String completeDestPartitionName, List<String> whitelist, List<String> blacklist) {
  Optional<Partition> sourcePartitionOptional = getPartitionObject(completeSourcePartitionName);
  Optional<Partition> destPartitionOptional = getPartitionObject(completeDestPartitionName);
  if ((!sourcePartitionOptional.isPresent()) || (!destPartitionOptional.isPresent())) {
    return false;
  }
  Map<String, String> sourceParams = sourcePartitionOptional.get().getParameters();
  Map<String, String> destParams = destPartitionOptional.get().getParameters();
  // Copy only the parameters passing the whitelist/blacklist filter.
  for (Map.Entry<String, String> param : sourceParams.entrySet()) {
    if (!matched(whitelist, blacklist, param.getKey())) {
      continue;
    }
    destParams.put(param.getKey(), param.getValue());
  }
  destPartitionOptional.get().setParameters(destParams);
  // Drop and re-add the destination partition to persist the new parameters.
  if (!dropPartition(completeDestPartitionName)) {
    return false;
  }
  if (!addPartition(destPartitionOptional.get(), completeDestPartitionName)) {
    return false;
  }
  return true;
}
Method to copy partition parameters from source partition to destination partition
36,312
/**
 * Finds the property keys that are complete prefixes of other keys (e.g. "a.b"
 * when "a.b.c" also exists). Used when converting Properties to typesafe
 * Config, which cannot represent a key that is both a value and a parent.
 *
 * @param properties properties to scan
 * @param keyPrefix  optional prefix limiting which keys are considered
 * @return the set of keys that fully prefix at least one other key
 */
public static Set<String> findFullPrefixKeys(Properties properties, Optional<String> keyPrefix) {
  String prefix = keyPrefix.or(StringUtils.EMPTY);
  TreeSet<String> sortedNames = new TreeSet<>();
  for (Map.Entry<Object, Object> entry : properties.entrySet()) {
    String name = entry.getKey().toString();
    if (StringUtils.startsWith(name, prefix)) {
      sortedNames.add(name);
    }
  }
  Set<String> fullPrefixes = new HashSet<>();
  String previous = null;
  // In sorted order "a" immediately precedes "a.b", so comparing each key with
  // its predecessor is sufficient to detect full-prefix keys.
  for (String name : sortedNames) {
    if (previous != null && name.startsWith(previous + ".")) {
      fullPrefixes.add(previous);
    }
    previous = name;
  }
  return fullPrefixes;
}
Finds the set of properties whose keys are complete prefixes of other keys. This function is meant to be used during conversion from Properties to typesafe Config, as the latter does not support this scenario.
36,313
/**
 * Attempts to infer richer types for string property values: longs first, then
 * doubles, then booleans ("true"/"yes"/"false"/"no", case-insensitive). Values
 * matching none of these, non-strings, nulls and empty strings pass through
 * unchanged. All keys are converted to strings.
 *
 * @param srcProperties source key/value pairs
 * @return a new map with string keys and possibly-narrowed values
 */
private static Map<String, Object> guessPropertiesTypes(Map<Object, Object> srcProperties) {
  Map<String, Object> res = new HashMap<>();
  for (Map.Entry<Object, Object> prop : srcProperties.entrySet()) {
    Object value = prop.getValue();
    // instanceof is false for null, so the former explicit null check was redundant.
    if (value instanceof String && !Strings.isNullOrEmpty(value.toString())) {
      try {
        value = Long.parseLong(value.toString());
      } catch (NumberFormatException e) {
        try {
          value = Double.parseDouble(value.toString());
        } catch (NumberFormatException e2) {
          if (value.toString().equalsIgnoreCase("true") || value.toString().equalsIgnoreCase("yes")) {
            value = Boolean.TRUE;
          } else if (value.toString().equalsIgnoreCase("false") || value.toString().equalsIgnoreCase("no")) {
            value = Boolean.FALSE;
          }
          // Otherwise keep the original string value (previously an empty else block).
        }
      }
    }
    res.put(prop.getKey().toString(), value);
  }
  return res;
}
Attempts to guess the types of a Properties object's values. By default typesafe will make all property values Strings; this implementation tries to recognize booleans and numbers. All keys are treated as strings.
36,314
/**
 * Checks that subConfig is a subset of superConfig: every key present in
 * subConfig must exist in superConfig with an equal (unwrapped) value.
 *
 * @param superConfig config expected to contain every sub entry
 * @param subConfig   config whose entries are verified
 * @return true when every sub entry matches; false on the first mismatch
 */
public static boolean verifySubset(Config superConfig, Config subConfig) {
  for (Map.Entry<String, ConfigValue> entry : subConfig.entrySet()) {
    // Compare unwrapped (plain Java) values so wrapper metadata is ignored.
    if (!superConfig.hasPath(entry.getKey())
        || !superConfig.getValue(entry.getKey()).unwrapped().equals(entry.getValue().unwrapped())) {
      return false;
    }
  }
  return true;
}
Check that every key-value pair in subConfig is present in superConfig.
36,315
/**
 * Validates that data written with writerSchema can be decoded using
 * readerSchema. When ignoreNamespace is true, the reader schema is rebuilt
 * under the writer's name, doc and namespace (keeping deep-copied reader
 * fields) so that a record name/namespace mismatch alone does not fail the
 * check.
 *
 * @param readerSchema    schema used for reading
 * @param writerSchema    schema the data was written with
 * @param ignoreNamespace whether record name/namespace differences are ignored
 * @return true when the schemas are compatible
 */
public static boolean checkReaderWriterCompatibility(Schema readerSchema, Schema writerSchema, boolean ignoreNamespace) {
  if (ignoreNamespace) {
    // Fields are deep-copied because an Avro Field cannot be attached to two schemas.
    List<Schema.Field> fields = deepCopySchemaFields(readerSchema);
    // Note: the parameter is rebound to the renamed copy of the reader schema.
    readerSchema = Schema.createRecord(writerSchema.getName(), writerSchema.getDoc(), writerSchema.getNamespace(), readerSchema.isError());
    readerSchema.setFields(fields);
  }
  return SchemaCompatibility.checkReaderWriterCompatibility(readerSchema, writerSchema).getType().equals(SchemaCompatibility.SchemaCompatibilityType.COMPATIBLE);
}
Validates that the provided reader schema can be used to decode avro data written with the provided writer schema .
36,316
/**
 * Returns the (possibly nested) schema field at the given dot-separated
 * location, e.g. "field1.nestedField1" navigates into field1 and returns
 * nestedField1.
 *
 * @param schema        schema to search; must not be null
 * @param fieldLocation dot-separated field path; must be non-empty
 * @return the field, or absent when the path resolves to nothing
 */
public static Optional<Field> getField(Schema schema, String fieldLocation) {
  Preconditions.checkNotNull(schema);
  Preconditions.checkArgument(!Strings.isNullOrEmpty(fieldLocation));
  List<String> path = Lists.newArrayList(
      Splitter.on(FIELD_LOCATION_DELIMITER).omitEmptyStrings().trimResults().split(fieldLocation));
  if (path.isEmpty()) {
    return Optional.absent();
  }
  return AvroUtils.getFieldHelper(schema, path, 0);
}
Given a GenericRecord this method will return the field specified by the path parameter . The fieldLocation parameter is an ordered string specifying the location of the nested field to retrieve . For example field1 . nestedField1 takes field field1 and retrieves nestedField1 from it .
36,317
/**
 * Returns the value of the (possibly nested) field at fieldLocation within the
 * record, e.g. "field1.nestedField1". Absent when the field does not exist or
 * holds null.
 *
 * @param record        record to read from
 * @param fieldLocation dot-separated path of the field
 * @return the field value, or absent
 */
public static Optional<Object> getFieldValue(GenericRecord record, String fieldLocation) {
  Map<String, Object> ret = getMultiFieldValue(record, fieldLocation);
  return Optional.fromNullable(ret.get(fieldLocation));
}
Given a GenericRecord this method will return the field specified by the path parameter. The fieldLocation parameter is an ordered string specifying the location of the nested field to retrieve. For example field1.nestedField1 takes the value of the field field1 and retrieves the field nestedField1 from it.
36,318
/**
 * Looks up a value in an Avro map by a plain string key. Avro persists strings
 * as Utf8, so the key must be wrapped before the lookup.
 *
 * @param map Avro map with Utf8 keys
 * @param key plain string key
 * @return the mapped value, or null when absent
 */
private static Object getObjectFromMap(Map map, String key) {
  return map.get(new Utf8(key));
}
This method is to get object from map given a key as string . Avro persists string as Utf8
36,319
/**
 * Re-encodes an Avro record under a new (compatible) schema. If the record
 * already uses {@code newSchema} it is returned unchanged; otherwise it is
 * serialized with its own schema and re-read with {@code newSchema} as the
 * reader schema.
 *
 * @param record    record to convert
 * @param newSchema target reader schema
 * @return a record conforming to {@code newSchema}
 * @throws IOException if the record cannot be decoded under the new schema
 */
public static GenericRecord convertRecordSchema(GenericRecord record, Schema newSchema) throws IOException {
  if (record.getSchema().equals(newSchema)) {
    return record;
  }
  try {
    // Use the shared factory instead of the deprecated DecoderFactory constructor.
    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(recordToByteArray(record), null);
    DatumReader<GenericRecord> reader = new GenericDatumReader<>(record.getSchema(), newSchema);
    return reader.read(null, decoder);
  } catch (IOException e) {
    // Fixed typo in the error message ("Origianl" -> "Original").
    throw new IOException(String.format("Cannot convert avro record to new schema. Original schema = %s, new schema = %s",
        record.getSchema(), newSchema), e);
  }
}
Change the schema of an Avro record .
36,320
/**
 * Serializes a GenericRecord to its Avro binary form.
 *
 * @param record record to serialize
 * @return the binary-encoded record bytes
 * @throws IOException on serialization failure
 */
public static byte[] recordToByteArray(GenericRecord record) throws IOException {
  try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
    DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(record.getSchema());
    datumWriter.write(record, EncoderFactory.get().directBinaryEncoder(out, null));
    return out.toByteArray();
  }
}
Convert a GenericRecord to a byte array .
36,321
/**
 * Reads the embedded Avro schema from an Avro data file.
 *
 * @param dataFile path of the Avro data file
 * @param fs       filesystem holding the file
 * @return the schema recorded in the file header
 * @throws IOException on read failure
 */
public static Schema getSchemaFromDataFile(Path dataFile, FileSystem fs) throws IOException {
  try (SeekableInput input = new FsInput(dataFile, fs.getConf());
      DataFileReader<GenericRecord> dataFileReader = new DataFileReader<>(input, new GenericDatumReader<GenericRecord>())) {
    return dataFileReader.getSchema();
  }
}
Get Avro schema from an Avro data file .
36,322
/**
 * Parses an Avro schema from a schema (.avsc) file.
 *
 * @param filePath path of the schema file; must exist
 * @param fs       filesystem holding the file
 * @return the parsed schema
 * @throws IOException on read failure
 */
public static Schema parseSchemaFromFile(Path filePath, FileSystem fs) throws IOException {
  Preconditions.checkArgument(fs.exists(filePath), filePath + " does not exist");
  try (FSDataInputStream schemaStream = fs.open(filePath)) {
    Schema.Parser parser = new Schema.Parser();
    return parser.parse(schemaStream);
  }
}
Parse Avro schema from a schema file .
36,323
/**
 * Writes an Avro schema to a file.
 *
 * When overwrite is false, the target must not already exist. When overwriting
 * without a temp path, the existing file is deleted up front and rewritten in
 * place; with a temp path, the schema is staged there and then moved over the
 * target (delete + move).
 *
 * NOTE(review): DataOutputStream.writeChars emits UTF-16 (two bytes per char),
 * not UTF-8 — confirm consumers of these schema files expect that encoding.
 *
 * @param schema       schema to persist
 * @param filePath     final destination path
 * @param tempFilePath optional staging path used when overwriting; may be null
 * @param fs           filesystem to write to
 * @param overwrite    whether an existing file may be replaced
 * @param perm         permissions to set on the written file
 * @throws IOException on any filesystem failure
 */
public static void writeSchemaToFile(Schema schema, Path filePath, Path tempFilePath, FileSystem fs, boolean overwrite, FsPermission perm) throws IOException {
  boolean fileExists = fs.exists(filePath);
  if (!overwrite) {
    Preconditions.checkState(!fileExists, filePath + " already exists");
  } else {
    // Without a temp path we cannot stage the write, so delete up front.
    if (fileExists && null == tempFilePath) {
      HadoopUtils.deletePath(fs, filePath, true);
      fileExists = false;
    }
  }
  // Stage into the temp path only when the target still exists.
  Path writeFilePath = fileExists ? tempFilePath : filePath;
  try (DataOutputStream dos = fs.create(writeFilePath)) {
    dos.writeChars(schema.toString());
  }
  fs.setPermission(writeFilePath, perm);
  if (fileExists) {
    // Replace the old file with the freshly staged copy.
    if (!fs.delete(filePath, true)) {
      throw new IOException(String.format("Failed to delete %s while renaming %s to %s", filePath, tempFilePath, filePath));
    }
    HadoopUtils.movePath(fs, tempFilePath, fs, filePath, true, fs.getConf());
  }
}
Write a schema to a file
36,324
/**
 * Merges an old and a new record schema: fields present only in the old schema
 * are carried over with their type wrapped in a union with null, so records
 * lacking those fields remain representable. Fields of the new schema are kept
 * as-is.
 *
 * Falls back to returning newSchema unchanged when there is no old schema or
 * when either schema is not a record.
 *
 * @param oldSchema previous record schema; may be null
 * @param newSchema current record schema
 * @return the merged record schema
 */
public static Schema nullifyFieldsForSchemaMerge(Schema oldSchema, Schema newSchema) {
  if (oldSchema == null) {
    LOG.warn("No previous schema available, use the new schema instead.");
    return newSchema;
  }
  if (!(oldSchema.getType().equals(Type.RECORD) && newSchema.getType().equals(Type.RECORD))) {
    LOG.warn("Both previous schema and new schema need to be record type. Quit merging schema.");
    return newSchema;
  }
  List<Field> combinedFields = Lists.newArrayList();
  // Copy every field of the new schema (Field instances cannot be reused across schemas).
  for (Field newFld : newSchema.getFields()) {
    combinedFields.add(new Field(newFld.name(), newFld.schema(), newFld.doc(), newFld.defaultValue()));
  }
  // Carry over old-only fields, making each type nullable.
  for (Field oldFld : oldSchema.getFields()) {
    if (newSchema.getField(oldFld.name()) == null) {
      List<Schema> union = Lists.newArrayList();
      Schema oldFldSchema = oldFld.schema();
      if (oldFldSchema.getType().equals(Type.UNION)) {
        // Already a union: rebuild it with null first, dropping any existing null branch.
        union.add(Schema.create(Type.NULL));
        for (Schema itemInUion : oldFldSchema.getTypes()) {
          if (!itemInUion.getType().equals(Type.NULL)) {
            union.add(itemInUion);
          }
        }
        Schema newFldSchema = Schema.createUnion(union);
        combinedFields.add(new Field(oldFld.name(), newFldSchema, oldFld.doc(), oldFld.defaultValue()));
      } else {
        // Non-union type: wrap as [null, type].
        union.add(Schema.create(Type.NULL));
        union.add(oldFldSchema);
        Schema newFldSchema = Schema.createUnion(union);
        combinedFields.add(new Field(oldFld.name(), newFldSchema, oldFld.doc(), oldFld.defaultValue()));
      }
    }
  }
  // The merged schema keeps the new schema's identity.
  Schema mergedSchema = Schema.createRecord(newSchema.getName(), newSchema.getDoc(), newSchema.getNamespace(), newSchema.isError());
  mergedSchema.setFields(combinedFields);
  return mergedSchema;
}
Merge oldSchema and newSchema. A field that exists in the old schema but not in the new one is carried over with its type made nullable (unioned with null).
36,325
/**
 * Removes map, array and enum fields — as well as union fields containing map,
 * array or enum — from an Avro schema. A schema with these fields cannot be
 * used as a Mapper key in a MapReduce job. Delegates with a fresh visited-set
 * for the recursive traversal.
 *
 * @param schema schema to sanitize
 * @return the sanitized schema wrapped in Optional
 */
public static Optional<Schema> removeUncomparableFields(Schema schema) {
  return removeUncomparableFields(schema, Sets.<Schema>newHashSet());
}
Remove map array enum fields as well as union fields that contain map array or enum from an Avro schema . A schema with these fields cannot be used as Mapper key in a MapReduce job .
36,326
/**
 * Returns all versions of the spec identified by specUri. Currently multiple
 * versions are not supported, so this resolves the path for the default
 * flow-spec version and should yield exactly one spec.
 *
 * @param specUri URI of the spec; must not be null
 * @return all stored versions of the spec
 */
public Collection<Spec> getAllVersionsOfSpec(URI specUri) {
  Preconditions.checkArgument(null != specUri, "Spec URI should not be null");
  Path specPath = getPathForURI(this.fsSpecStoreDirPath, specUri, FlowSpec.Builder.DEFAULT_VERSION);
  return getAllVersionsOfSpec(specPath);
}
Returns all versions of the spec defined by specUri . Currently multiple versions are not supported so this should return exactly one spec .
36,327
/**
 * Constructs the on-disk path for a spec: the store root merged with the
 * spec's URI path, with the version appended as a file extension.
 *
 * @param fsSpecStoreDirPath root directory of the spec store
 * @param uri                spec URI
 * @param version            spec version, used as the file extension
 * @return resolved file path for the spec version
 */
protected Path getPathForURI(Path fsSpecStoreDirPath, URI uri, String version) {
  return PathUtils.addExtension(PathUtils.mergePaths(fsSpecStoreDirPath, new Path(uri)), version);
}
Construct a file path given URI and version of a spec .
36,328
/**
 * Creates a JsonElementConverter for a single field of the given schema.
 *
 * @param schema   field schema carrying the input type
 * @param repeated whether the field occurs in a repeated context; passed only
 *                 to the scalar converters
 * @return a converter matching the field's InputType
 * @throws UnsupportedOperationException for input types with no converter
 */
public static JsonElementConverter getConverter(JsonSchema schema, boolean repeated) {
  InputType fieldType = schema.getInputType();
  switch (fieldType) {
    case INT:
      return new IntConverter(schema, repeated);
    case LONG:
      return new LongConverter(schema, repeated);
    case FLOAT:
      return new FloatConverter(schema, repeated);
    case DOUBLE:
      return new DoubleConverter(schema, repeated);
    case BOOLEAN:
      return new BooleanConverter(schema, repeated);
    case STRING:
      return new StringConverter(schema, repeated);
    case ARRAY:
      return new ArrayConverter(schema);
    case ENUM:
      return new EnumConverter(schema);
    case RECORD:
      return new RecordConverter(schema);
    case MAP:
      return new MapConverter(schema);
    case DATE:
    case TIMESTAMP:
      // Dates and timestamps are carried through as strings.
      return new StringConverter(schema, repeated);
    default:
      throw new UnsupportedOperationException(fieldType + " is unsupported");
  }
}
Use to create a converter for a single field from a parquetSchema .
36,329
/**
 * Loads a flow config pull file and overrides its flow name and flow group
 * with values derived from the file path, making the file location the source
 * of truth for flow identity.
 *
 * @param configFilePath path of the pull file to load
 * @return loaded config with the flow name and group overridden
 * @throws IOException if the pull file cannot be loaded
 */
private Config loadConfigFileWithFlowNameOverrides(Path configFilePath) throws IOException {
  Config flowConfig = this.pullFileLoader.loadPullFile(configFilePath, emptyConfig, false);
  String flowName = FSSpecStore.getSpecName(configFilePath);
  String flowGroup = FSSpecStore.getSpecGroup(configFilePath);
  return flowConfig.withValue(ConfigurationKeys.FLOW_NAME_KEY, ConfigValueFactory.fromAnyRef(flowName))
      .withValue(ConfigurationKeys.FLOW_GROUP_KEY, ConfigValueFactory.fromAnyRef(flowGroup));
}
Load the config file and override the flow name and flow path properties with the names from the file path
36,330
/**
 * Builds a URI from a URL template: interpolates the template with the given
 * key substitutions, then appends the query parameters.
 *
 * @param urlTemplate URL template, possibly containing ${key} placeholders
 * @param keys        placeholder substitutions; may be null or empty
 * @param queryParams query parameters to append; may be null or empty
 * @return the constructed URI
 * @throws RuntimeException when the resulting string is not a valid URI
 */
public static URI buildURI(String urlTemplate, Map<String, String> keys, Map<String, String> queryParams) {
  // Interpolate placeholders only when substitutions were supplied.
  String resolvedUrl = (keys == null || keys.isEmpty()) ? urlTemplate : StrSubstitutor.replace(urlTemplate, keys);
  try {
    URIBuilder uriBuilder = new URIBuilder(resolvedUrl);
    if (queryParams != null && !queryParams.isEmpty()) {
      for (Map.Entry<String, String> queryParam : queryParams.entrySet()) {
        uriBuilder.addParameter(queryParam.getKey(), queryParam.getValue());
      }
    }
    return uriBuilder.build();
  } catch (URISyntaxException e) {
    throw new RuntimeException("Fail to build uri", e);
  }
}
Given a url template interpolate with keys and build the URI after adding query parameters
36,331
/**
 * Reads the error-code whitelist from config, normalizing each entry to lower
 * case.
 *
 * @param config source config; must contain the error-code whitelist key
 * @return the set of lower-cased whitelist entries
 */
public static Set<String> getErrorCodeWhitelist(Config config) {
  // Locale.ROOT keeps the lower-casing locale-independent (avoids e.g. the
  // Turkish dotless-i problem under non-English default locales).
  String list = config.getString(HttpConstants.ERROR_CODE_WHITELIST).toLowerCase(java.util.Locale.ROOT);
  return new HashSet<>(getStringList(list));
}
Get the error code whitelist from a config
36,332
/**
 * Deserializes a JSON object string into a {@code Map<String, Object>}.
 *
 * @param jsonString a JSON object string, e.g. {@code {"a": 1}}
 * @return the parsed map (nested objects/numbers use Gson's default types)
 */
public static Map<String, Object> toMap(String jsonString) {
  // A TypeToken preserves the generic Map<String, Object> target type; the
  // previous code passed a raw HashMap class obtained from a throwaway
  // instance, discarding the type parameters (unchecked conversion).
  return GSON.fromJson(jsonString,
      new com.google.gson.reflect.TypeToken<Map<String, Object>>() {}.getType());
}
Convert a json encoded string to a Map
36,333
/**
 * Derives a throttling-limiter key from a (possibly D2) URL template. The key
 * has the form "scheme/host", plus "/port" when an explicit port is present.
 * The template is URL-encoded first so placeholder characters do not break URI
 * parsing.
 *
 * @param config config holding the URL template
 * @return limiter key of the form scheme/host[/port]
 * @throws RuntimeException when the template has no host or cannot be parsed
 */
public static String createR2ClientLimiterKey(Config config) {
  String urlTemplate = config.getString(HttpConstants.URL_TEMPLATE);
  try {
    String escaped = URIUtil.encodeQuery(urlTemplate);
    URI uri = new URI(escaped);
    if (uri.getHost() == null)
      throw new RuntimeException("Cannot get host part from uri" + urlTemplate);
    String key = uri.getScheme() + "/" + uri.getHost();
    // getPort() returns -1 when no explicit port is present.
    if (uri.getPort() > 0) {
      key = key + "/" + uri.getPort();
    }
    log.info("Get limiter key [" + key + "]");
    return key;
  } catch (Exception e) {
    throw new RuntimeException("Cannot create R2 limiter key", e);
  }
}
Convert D2 URL template into a string used for throttling limiter
36,334
/**
 * Hook invoked after a record is successfully written: records the write
 * latency and bumps the successful-write meter.
 *
 * @param startTimeNanos System.nanoTime() captured when the write began
 */
public void onSuccessfulWrite(long startTimeNanos) {
  Instrumented.updateTimer(this.dataWriterTimer, System.nanoTime() - startTimeNanos, TimeUnit.NANOSECONDS);
  Instrumented.markMeter(this.successfulWritesMeter);
}
Called after a successful write of a record .
36,335
/**
 * No-op schema conversion: the real payload schema is resolved per record from
 * the schema registry, so this returns only a placeholder string (the
 * converter's class name).
 */
public String convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
  return EnvelopeSchemaConverter.class.getName();
}
Do nothing; the actual schema must be obtained from records.
36,336
/**
 * Converts an envelope record: resolves the payload schema from the registry
 * via the record's schema-id field, deserializes the payload bytes, optionally
 * removes configured fields from the schema, and re-projects the record onto
 * the (possibly reduced) schema.
 *
 * @throws DataConversionException wrapping any registry or deserialization failure
 */
public Iterable<GenericRecord> convertRecord(String outputSchema, GenericRecord inputRecord, WorkUnitState workUnit) throws DataConversionException {
  try {
    // Field names are configurable per work unit, with defaults.
    String schemaIdField = workUnit.contains(PAYLOAD_SCHEMA_ID_FIELD) ? workUnit.getProp(PAYLOAD_SCHEMA_ID_FIELD) : DEFAULT_PAYLOAD_SCHEMA_ID_FIELD;
    String payloadField = workUnit.contains(PAYLOAD_FIELD) ? workUnit.getProp(PAYLOAD_FIELD) : DEFAULT_PAYLOAD_FIELD;
    String schemaKey = String.valueOf(inputRecord.get(schemaIdField));
    Schema payloadSchema = (Schema) this.registry.getSchemaByKey(schemaKey);
    byte[] payload = getPayload(inputRecord, payloadField);
    GenericRecord outputRecord = deserializePayload(payload, payloadSchema);
    // Field removal happens after deserialization; convertRecordSchema then
    // projects the deserialized record onto the reduced schema.
    if (this.fieldRemover.isPresent()) {
      payloadSchema = this.fieldRemover.get().removeFields(payloadSchema);
    }
    return new SingleRecordIterable<>(AvroUtils.convertRecordSchema(outputRecord, payloadSchema));
  } catch (IOException | SchemaRegistryException | ExecutionException e) {
    throw new DataConversionException(e);
  }
}
Get actual schema from registry and deserialize payload using it .
36,337
/**
 * Extracts the payload field from the record and decodes it. The field holds a
 * hex-encoded string stored in a {@link ByteBuffer}; the hex text is decoded
 * to the raw payload bytes.
 *
 * NOTE(review): javax.xml.bind.DatatypeConverter was removed from the JDK in
 * Java 11 — confirm the target runtime or an explicit jaxb dependency.
 *
 * @param inputRecord      record containing the payload field
 * @param payloadFieldName name of the ByteBuffer-typed payload field
 * @return the decoded payload bytes
 */
public byte[] getPayload(GenericRecord inputRecord, String payloadFieldName) {
  ByteBuffer bb = (ByteBuffer) inputRecord.get(payloadFieldName);
  byte[] payloadBytes;
  if (bb.hasArray()) {
    // Copy only the live region: bb.array() returns the entire backing array,
    // which can be larger than the buffer's contents when the buffer has a
    // non-zero arrayOffset/position or a smaller limit (the previous code
    // returned the whole backing array in that case).
    payloadBytes = java.util.Arrays.copyOfRange(bb.array(),
        bb.arrayOffset() + bb.position(), bb.arrayOffset() + bb.limit());
  } else {
    payloadBytes = new byte[bb.remaining()];
    bb.get(payloadBytes);
  }
  String hexString = new String(payloadBytes, StandardCharsets.UTF_8);
  return DatatypeConverter.parseHexBinary(hexString);
}
Get payload field from GenericRecord and convert to byte array
36,338
/**
 * Deserializes payload bytes into a GenericRecord using the given schema. The
 * datum reader is obtained from a per-schema cache.
 *
 * @param payload       Avro-binary payload bytes
 * @param payloadSchema schema to decode with
 * @return the deserialized record
 * @throws IOException        on decode failure
 * @throws ExecutionException if the reader-cache lookup fails
 */
public GenericRecord deserializePayload(byte[] payload, Schema payloadSchema) throws IOException, ExecutionException {
  Decoder decoder = this.decoderFactory.binaryDecoder(payload, null);
  GenericDatumReader<GenericRecord> reader = this.readers.get(payloadSchema);
  return reader.read(null, decoder);
}
Deserialize payload using payload schema
36,339
/**
 * Deletes the state-store entry for the job named by the given job-spec URI.
 * The job name is taken as the last of the three expected URI path segments.
 * Logs and returns without deleting when the store is uninitialized or the
 * URI is malformed.
 *
 * @param jobSpecUri URI whose path encodes the job name
 * @throws IOException if the state-store deletion fails
 */
private void deleteStateStore(URI jobSpecUri) throws IOException {
  final int expectedNumUriTokens = 3;
  String[] uriTokens = jobSpecUri.getPath().split("/");
  if (null == this.datasetStateStore) {
    log.warn("Job state store deletion failed as datasetstore is not initialized.");
    return;
  }
  if (uriTokens.length != expectedNumUriTokens) {
    log.error("Invalid URI {}.", jobSpecUri);
    return;
  }
  // The job name is the final path segment.
  String jobName = uriTokens[expectedNumUriTokens - 1];
  this.datasetStateStore.delete(jobName);
  log.info("JobSpec {} deleted with statestore.", jobSpecUri);
}
It fetches the job name from the given jobSpecUri and deletes its corresponding state store
36,340
/**
 * Decides whether the data-pull upper bound may be dropped so the last work
 * unit can fetch data generated after the query was planned.
 *
 * @return true when removal is allowed by config (default true), this is the
 *         last partition, and no user-specified or previously-recorded high
 *         watermark exists
 */
private boolean shouldRemoveDataPullUpperBounds() {
  // Feature flag, on by default.
  boolean removalAllowed = this.workUnitState.getPropAsBoolean(
      ConfigurationKeys.SOURCE_QUERYBASED_ALLOW_REMOVE_UPPER_BOUNDS, true);
  if (!removalAllowed) {
    return false;
  }
  // Only the last partition may pull open-ended.
  if (!partition.isLastPartition()) {
    return false;
  }
  // Respect an explicit user bound or an already-recorded actual high watermark.
  boolean explicitlyBounded = partition.getHasUserSpecifiedHighWatermark()
      || this.workUnitState.getProp(ConfigurationKeys.WORK_UNIT_STATE_ACTUAL_HIGH_WATER_MARK_KEY) != null;
  return !explicitlyBounded;
}
Check if it's appropriate to remove data-pull upper bounds in the last work unit, fetching as much data as possible from the source. Between the time the data query was created and the time it was executed, new data may have been generated in the source; removing the upper bounds helps us grab that new data.
36,341
/**
 * Removes every high-watermark (upper-bound) predicate from the predicate
 * list, logging each removed condition.
 */
private void removeDataPullUpperBounds() {
  log.info("Removing data pull upper bound for last work unit");
  for (Iterator<Predicate> iterator = predicateList.iterator(); iterator.hasNext(); ) {
    Predicate candidate = iterator.next();
    if (candidate.getType() == Predicate.PredicateType.HWM) {
      log.info("Remove predicate: " + candidate.condition);
      // Remove via the iterator to avoid ConcurrentModificationException.
      iterator.remove();
    }
  }
}
Remove all upper bounds in the predicateList used for pulling data
36,342
/**
 * Obtains the record iterator: from the source-specific API when the
 * specific-API flag is enabled, otherwise from the protocol-specific API.
 *
 * @return iterator over the pulled records
 * @throws DataRecordException on record retrieval failure
 * @throws IOException         on I/O failure
 */
private Iterator<D> getIterator() throws DataRecordException, IOException {
  boolean specificApiActive = Boolean.valueOf(
      this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_IS_SPECIFIC_API_ACTIVE));
  if (specificApiActive) {
    return this.getRecordSetFromSourceApi(this.schema, this.entity, this.workUnit, this.predicateList);
  }
  return this.getRecordSet(this.schema, this.entity, this.workUnit, this.predicateList);
}
Get the iterator from the protocol-specific API when is.specific.api.active is false; get it from the source-specific API when it is true.
36,343
/**
 * Closes the extractor: records the high watermark reached by this run on the
 * work unit state, then closes the underlying connection. Close failures are
 * logged rather than propagated so the watermark update always takes effect.
 */
public void close() {
  log.info("Updating the current state high water mark with " + this.highWatermark);
  this.workUnitState.setActualHighWatermark(new LongWatermark(this.highWatermark));
  try {
    this.closeConnection();
  } catch (Exception e) {
    log.error("Failed to close the extractor", e);
  }
}
Closes the extractor: updates the actual high watermark on the work unit state and closes the read connection.
36,344
/**
 * Builds the extractor: extracts metadata/schema, determines the high
 * watermark for this run, sets the watermark range predicates, and computes
 * the source record count (skipping readRecord when the count is zero).
 *
 * @return this extractor, fully prepared
 * @throws ExtractPrepareException wrapping any failure during preparation
 */
public Extractor<S, D> build() throws ExtractPrepareException {
  String watermarkColumn = this.workUnitState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
  long lwm = partition.getLowWatermark();
  long hwm = partition.getHighWatermark();
  log.info("Low water mark: " + lwm + "; and High water mark: " + hwm);
  WatermarkType watermarkType;
  if (StringUtils.isBlank(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE))) {
    watermarkType = null;
  } else {
    watermarkType = WatermarkType.valueOf(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE).toUpperCase());
  }
  log.info("Source Entity is " + this.entity);
  try {
    this.setTimeOut(this.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_CONN_TIMEOUT, ConfigurationKeys.DEFAULT_CONN_TIMEOUT));
    this.extractMetadata(this.schema, this.entity, this.workUnit);
    if (StringUtils.isNotBlank(watermarkColumn)) {
      if (partition.isLastPartition()) {
        // Last partition: ask the source for its latest watermark so new data
        // generated since planning can be picked up.
        long adjustedHighWatermark = this.getLatestWatermark(watermarkColumn, watermarkType, lwm, hwm);
        log.info("High water mark from source: " + adjustedHighWatermark);
        // If the source reports no watermark, fall back to the low watermark
        // without its delta.
        if (adjustedHighWatermark == ConfigurationKeys.DEFAULT_WATERMARK_VALUE) {
          adjustedHighWatermark = getLowWatermarkWithNoDelta(lwm);
        }
        this.highWatermark = adjustedHighWatermark;
      } else {
        this.highWatermark = hwm;
      }
      log.info("High water mark for the current run: " + highWatermark);
      this.setRangePredicates(watermarkColumn, watermarkType, lwm, highWatermark);
    }
    // Record-count calculation can be skipped via configuration; -1 marks "unknown".
    if (!Boolean.valueOf(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_SKIP_COUNT_CALC))) {
      this.sourceRecordCount = this.getSourceCount(this.schema, this.entity, this.workUnit, this.predicateList);
    } else {
      log.info("Skip count calculation");
      this.sourceRecordCount = -1;
    }
    if (this.sourceRecordCount == 0) {
      log.info("Record count is 0; Setting fetch status to false to skip readRecord()");
      this.setFetchStatus(false);
    }
  } catch (SchemaException e) {
    throw new ExtractPrepareException("Failed to get schema for this object; error - " + e.getMessage(), e);
  } catch (HighWatermarkException e) {
    throw new ExtractPrepareException("Failed to get high watermark; error - " + e.getMessage(), e);
  } catch (RecordCountException e) {
    throw new ExtractPrepareException("Failed to get record count; error - " + e.getMessage(), e);
  } catch (Exception e) {
    throw new ExtractPrepareException("Failed to prepare the extract build; error - " + e.getMessage(), e);
  }
  return this;
}
Build the extractor: extract schema/metadata, compute the record count, and set the high watermark.
36,345
/**
 * Adds low/high range predicates for the watermark column and, when hourly
 * extraction is enabled, for the configured hour column as well.
 *
 * Operator choice: the low bound uses ">=" when the partition's low watermark
 * is inclusive, else ">"; the high bound uses "<=" for the last partition or
 * an inclusive high watermark, else "<".
 *
 * @param watermarkColumn column the watermark applies to
 * @param watermarkType   type of the watermark
 * @param lwmValue        low watermark value
 * @param hwmValue        high watermark value
 */
private void setRangePredicates(String watermarkColumn, WatermarkType watermarkType, long lwmValue, long hwmValue) {
  log.debug("Getting range predicates");
  String lwmOperator = partition.isLowWatermarkInclusive() ? ">=" : ">";
  String hwmOperator = (partition.isLastPartition() || partition.isHighWatermarkInclusive()) ? "<=" : "<";
  WatermarkPredicate watermark = new WatermarkPredicate(watermarkColumn, watermarkType);
  this.addPredicates(watermark.getPredicate(this, lwmValue, lwmOperator, Predicate.PredicateType.LWM));
  this.addPredicates(watermark.getPredicate(this, hwmValue, hwmOperator, Predicate.PredicateType.HWM));
  // Hourly extracts additionally bound the configured hour column with the same operators.
  if (Boolean.valueOf(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_IS_HOURLY_EXTRACT))) {
    String hourColumn = this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_HOUR_COLUMN);
    if (StringUtils.isNotBlank(hourColumn)) {
      WatermarkPredicate hourlyWatermark = new WatermarkPredicate(hourColumn, WatermarkType.HOUR);
      this.addPredicates(hourlyWatermark.getPredicate(this, lwmValue, lwmOperator, Predicate.PredicateType.LWM));
      this.addPredicates(hourlyWatermark.getPredicate(this, hwmValue, hwmOperator, Predicate.PredicateType.HWM));
    }
  }
}
range predicates for watermark column and transaction columns .
36,346
/**
 * Reconfigures an existing dataset so it recompacts from its output path:
 * either everything (output plus late data) or only the late-arrival output,
 * depending on the recompact-all-data setting.
 *
 * @param recompactState job properties to apply to the dataset
 */
public void modifyDatasetForRecompact(State recompactState) {
  boolean recompactAllData = this.jobProps().getPropAsBoolean(
      MRCompactor.COMPACTION_RECOMPACT_ALL_DATA, MRCompactor.DEFAULT_COMPACTION_RECOMPACT_ALL_DATA);
  if (recompactAllData) {
    // Recompact the full output together with the late-data output.
    this.overwriteInputPath(this.outputPath);
    this.overwriteInputLatePath(this.outputLatePath);
    this.addAdditionalInputPath(this.outputLatePath);
  } else {
    // Recompact only the late-arrival output.
    this.overwriteInputPath(this.outputLatePath);
    this.cleanAdditionalInputPath();
  }
  this.setJobProps(recompactState);
  this.resetNeedToRecompact();
}
Modify an existing dataset to recompact from its output path.
36,347
/**
 * Hook invoked before each record conversion; counts the incoming record on
 * the records-in meter.
 */
public void beforeConvert(SO outputSchema, DI inputRecord, WorkUnitState workUnit) {
  Instrumented.markMeter(this.recordsInMeter);
}
Called before conversion .
36,348
/**
 * Hook invoked after a record conversion; records the conversion latency.
 *
 * @param iterable       the conversion result (unused here)
 * @param startTimeNanos System.nanoTime() captured before conversion began
 */
public void afterConvert(Iterable<DO> iterable, long startTimeNanos) {
  Instrumented.updateTimer(this.converterTimer, System.nanoTime() - startTimeNanos, TimeUnit.NANOSECONDS);
}
Called after conversion .
36,349
/**
 * Extension point allowing subclasses to wrap the callback and intercept
 * permit-allocation responses. The base implementation returns the callback
 * unchanged.
 *
 * @param request  the permit request the callback belongs to
 * @param callback callback to (optionally) decorate
 * @return the callback to use, possibly wrapped
 */
protected Callback<Response<PermitAllocation>> decorateCallback(PermitRequest request, Callback<Response<PermitAllocation>> callback) {
  return callback;
}
Decorate the callback to intercept some responses .
36,350
/**
 * Compiles each regex string into a {@link Pattern}.
 *
 * @param strings regex source strings; each must be a valid regular expression
 * @return compiled patterns in the same order as the input
 */
public static List < Pattern > getPatternsFromStrings ( List < String > strings ) {
  List<Pattern> compiled = Lists.newArrayList();
  for (String regex : strings) {
    compiled.add(Pattern.compile(regex));
  }
  return compiled;
}
Convert a list of Strings to a list of Patterns .
36,351
/**
 * Checks whether {@code s} fully matches at least one of the given patterns.
 *
 * @param s the string to test
 * @param patterns candidate patterns; an empty list yields {@code false}
 * @return true iff some pattern fully matches {@code s}
 */
public static boolean stringInPatterns ( String s , List < Pattern > patterns ) {
  return patterns.stream().anyMatch(candidate -> candidate.matcher(s).matches());
}
Determines whether a string matches one of the regex patterns .
36,352
/**
 * Collects every partition of every configured Hive dataset as a
 * {@link HivePartitionDataset}, then filters the list through the configured
 * dataset selection policy (DATASET_SELECTION_POLICY_CLASS, reflectively constructed).
 *
 * @return the partitions accepted by the selection policy
 * @throws IOException on errors while listing partitions
 */
public List < HivePartitionDataset > findDatasets ( ) throws IOException { List < HivePartitionDataset > list = new ArrayList < > ( ) ; for ( HiveDataset hiveDataset : this . hiveDatasets ) { for ( Partition partition : hiveDataset . getPartitionsFromDataset ( ) ) { list . add ( new HivePartitionDataset ( partition ) ) ; } } String selectionPolicyString = this . state . getProp ( ComplianceConfigurationKeys . DATASET_SELECTION_POLICY_CLASS , ComplianceConfigurationKeys . DEFAULT_DATASET_SELECTION_POLICY_CLASS ) ; Policy < HivePartitionDataset > selectionPolicy = GobblinConstructorUtils . invokeConstructor ( Policy . class , selectionPolicyString ) ; return selectionPolicy . selectedList ( list ) ; }
Will find all datasets according to whitelist except the backup trash and staging tables .
36,353
/**
 * Builds the DAG structure from {@code this.nodes}: startNodes are nodes with no
 * parents, parentChildMap maps each parent to its child nodes, and endNodes are
 * nodes that never appear as a parent (i.e. have no children).
 */
private void build ( ) { this . startNodes = new ArrayList < > ( ) ; this . endNodes = new ArrayList < > ( ) ; this . parentChildMap = new HashMap < > ( ) ; for ( DagNode node : this . nodes ) { if ( node . getParentNodes ( ) == null ) { this . startNodes . add ( node ) ; } else { List < DagNode > parentNodeList = node . getParentNodes ( ) ; for ( DagNode parentNode : parentNodeList ) { if ( parentChildMap . containsKey ( parentNode ) ) { parentChildMap . get ( parentNode ) . add ( node ) ; } else { parentChildMap . put ( parentNode , Lists . newArrayList ( node ) ) ; } } } } for ( DagNode node : this . nodes ) { if ( ! parentChildMap . containsKey ( node ) ) { this . endNodes . add ( node ) ; } } }
Constructs the dag from the Node list .
36,354
/**
 * Decrypts a passphrase-protected (PBE) PGP stream. Reads the first encrypted data
 * object, opens its clear-data stream with a JCE PBE decryptor built on the
 * BouncyCastle provider, and returns a lazily-materializing decrypted stream.
 *
 * @throws IOException wrapping any {@link PGPException} from the PGP layer
 */
public InputStream decryptFile ( InputStream inputStream , String passPhrase ) throws IOException { PGPEncryptedDataList enc = getPGPEncryptedDataList ( inputStream ) ; PGPPBEEncryptedData pbe = ( PGPPBEEncryptedData ) enc . get ( 0 ) ; InputStream clear ; try { clear = pbe . getDataStream ( new JcePBEDataDecryptorFactoryBuilder ( new JcaPGPDigestCalculatorProviderBuilder ( ) . setProvider ( BouncyCastleProvider . PROVIDER_NAME ) . build ( ) ) . setProvider ( BouncyCastleProvider . PROVIDER_NAME ) . build ( passPhrase . toCharArray ( ) ) ) ; JcaPGPObjectFactory pgpFact = new JcaPGPObjectFactory ( clear ) ; return new LazyMaterializeDecryptorInputStream ( pgpFact ) ; } catch ( PGPException e ) { throw new IOException ( e ) ; } }
Taking in a file inputstream and a passPhrase generate a decrypted file inputstream .
36,355
/**
 * Decrypts a public-key-encrypted PGP stream using a secret keyring and passphrase.
 * Iterates the encrypted data objects until a matching secret key is found in the
 * keyring, then opens the clear-data stream and returns a lazily-materializing
 * decrypted stream.
 *
 * @throws IllegalArgumentException if no matching secret key exists in the keyring
 * @throws IOException wrapping any {@link PGPException} from the PGP layer
 */
public InputStream decryptFile ( InputStream inputStream , InputStream keyIn , String passPhrase ) throws IOException { try { PGPEncryptedDataList enc = getPGPEncryptedDataList ( inputStream ) ; Iterator it = enc . getEncryptedDataObjects ( ) ; PGPPrivateKey sKey = null ; PGPPublicKeyEncryptedData pbe = null ; PGPSecretKeyRingCollection pgpSec = new PGPSecretKeyRingCollection ( PGPUtil . getDecoderStream ( keyIn ) , new BcKeyFingerprintCalculator ( ) ) ; while ( sKey == null && it . hasNext ( ) ) { pbe = ( PGPPublicKeyEncryptedData ) it . next ( ) ; sKey = findSecretKey ( pgpSec , pbe . getKeyID ( ) , passPhrase ) ; } if ( sKey == null ) { throw new IllegalArgumentException ( "secret key for message not found." ) ; } InputStream clear = pbe . getDataStream ( new JcePublicKeyDataDecryptorFactoryBuilder ( ) . setProvider ( BouncyCastleProvider . PROVIDER_NAME ) . build ( sKey ) ) ; JcaPGPObjectFactory pgpFact = new JcaPGPObjectFactory ( clear ) ; return new LazyMaterializeDecryptorInputStream ( pgpFact ) ; } catch ( PGPException e ) { throw new IOException ( e ) ; } }
Taking in a file inputstream keyring inputstream and a passPhrase generate a decrypted file inputstream .
36,356
/**
 * Looks up the secret key for {@code keyID} in the keyring collection and extracts
 * its private key using the passphrase.
 *
 * @return the extracted private key, or null if the keyring has no key for {@code keyID}
 * @throws PGPException if the private key cannot be extracted (e.g. wrong passphrase)
 */
private PGPPrivateKey findSecretKey ( PGPSecretKeyRingCollection pgpSec , long keyID , String passPhrase ) throws PGPException { PGPSecretKey pgpSecKey = pgpSec . getSecretKey ( keyID ) ; if ( pgpSecKey == null ) { return null ; } return pgpSecKey . extractPrivateKey ( new JcePBESecretKeyDecryptorBuilder ( ) . setProvider ( BouncyCastleProvider . PROVIDER_NAME ) . build ( passPhrase . toCharArray ( ) ) ) ; }
Private util function that finds the private key from keyring collection based on keyId and passPhrase
36,357
/**
 * Parses a PGP stream into its {@link PGPEncryptedDataList}. Registers the
 * BouncyCastle provider if it is not yet installed. If the first parsed object is
 * not the encrypted data list (presumably a leading marker packet — TODO confirm),
 * the second object is used instead.
 */
private PGPEncryptedDataList getPGPEncryptedDataList ( InputStream inputStream ) throws IOException { if ( Security . getProvider ( BouncyCastleProvider . PROVIDER_NAME ) == null ) { Security . addProvider ( new BouncyCastleProvider ( ) ) ; } inputStream = PGPUtil . getDecoderStream ( inputStream ) ; JcaPGPObjectFactory pgpF = new JcaPGPObjectFactory ( inputStream ) ; PGPEncryptedDataList enc ; Object pgpfObject = pgpF . nextObject ( ) ; if ( pgpfObject instanceof PGPEncryptedDataList ) { enc = ( PGPEncryptedDataList ) pgpfObject ; } else { enc = ( PGPEncryptedDataList ) pgpF . nextObject ( ) ; } return enc ; }
Generate a PGPEncryptedDataList from an inputstream
36,358
/**
 * Opens the given file through the file-system helper and returns a CSV row iterator.
 * Uses the configured single-character DELIMITER when present. If SKIP_TOP_ROWS_REGEX
 * is set, leading rows whose first cell matches the regex are consumed (stopping at the
 * first non-matching or empty row). Finally skips one more record when the extractor's
 * shouldSkipFirstRecord flag is set (e.g. a header row).
 *
 * @throws IOException wrapping any {@link FileBasedHelperException} from opening the stream
 */
@ SuppressWarnings ( "unchecked" ) public Iterator < String [ ] > downloadFile ( String file ) throws IOException { log . info ( "Beginning to download file: " + file ) ; final State state = fileBasedExtractor . workUnitState ; CSVReader reader ; try { if ( state . contains ( DELIMITER ) ) { String delimiterStr = state . getProp ( DELIMITER ) . trim ( ) ; Preconditions . checkArgument ( delimiterStr . length ( ) == 1 , "Delimiter should be a character." ) ; char delimiter = delimiterStr . charAt ( 0 ) ; log . info ( "Using " + delimiter + " as a delimiter." ) ; reader = this . fileBasedExtractor . getCloser ( ) . register ( new CSVReader ( new InputStreamReader ( this . fileBasedExtractor . getFsHelper ( ) . getFileStream ( file ) , ConfigurationKeys . DEFAULT_CHARSET_ENCODING ) , delimiter ) ) ; } else { reader = this . fileBasedExtractor . getCloser ( ) . register ( new CSVReader ( new InputStreamReader ( this . fileBasedExtractor . getFsHelper ( ) . getFileStream ( file ) , ConfigurationKeys . DEFAULT_CHARSET_ENCODING ) ) ) ; } } catch ( FileBasedHelperException e ) { throw new IOException ( e ) ; } PeekingIterator < String [ ] > iterator = Iterators . peekingIterator ( reader . iterator ( ) ) ; if ( state . contains ( SKIP_TOP_ROWS_REGEX ) ) { String regex = state . getProp ( SKIP_TOP_ROWS_REGEX ) ; log . info ( "Trying to skip with regex: " + regex ) ; while ( iterator . hasNext ( ) ) { String [ ] row = iterator . peek ( ) ; if ( row . length == 0 ) { break ; } if ( ! row [ 0 ] . matches ( regex ) ) { break ; } iterator . next ( ) ; } } if ( this . fileBasedExtractor . isShouldSkipFirstRecord ( ) && iterator . hasNext ( ) ) { log . info ( "Skipping first record" ) ; iterator . next ( ) ; } return iterator ; }
Provides an iterator via OpenCSV's CSVReader. Offers a way to skip top rows by providing a regex (useful when a CSV file starts with a variable number of comment rows). It also validates the schema by matching header names between the property's schema and the header names in the CSV file.
36,359
/**
 * Applies a per-WorkUnit transformation to this stream. For a lazy stream the
 * iterator is transformed lazily; for a materialized stream the transformed list is
 * eagerly copied into a new list.
 */
public WorkUnitStream transform ( Function < WorkUnit , WorkUnit > function ) { if ( this . materializedWorkUnits == null ) { return new BasicWorkUnitStream ( this , Iterators . transform ( this . workUnits , function ) , null ) ; } else { return new BasicWorkUnitStream ( this , null , Lists . newArrayList ( Lists . transform ( this . materializedWorkUnits , function ) ) ) ; } }
Apply a transformation function to this stream .
36,360
/**
 * Applies a per-WorkUnit filter to this stream. For a lazy stream the iterator is
 * filtered lazily; for a materialized stream the filtered result is eagerly copied
 * into a new list.
 */
public WorkUnitStream filter ( Predicate < WorkUnit > predicate ) { if ( this . materializedWorkUnits == null ) { return new BasicWorkUnitStream ( this , Iterators . filter ( this . workUnits , predicate ) , null ) ; } else { return new BasicWorkUnitStream ( this , null , Lists . newArrayList ( Iterables . filter ( this . materializedWorkUnits , predicate ) ) ) ; } }
Apply a filtering function to this stream .
36,361
/**
 * Prints the table to stdout using the precomputed printf row format: the optional
 * label row first, then each data row.
 */
public void printTable ( ) { if ( this . labels != null ) { System . out . printf ( this . rowFormat , this . labels . toArray ( ) ) ; } for ( List < String > row : this . data ) { System . out . printf ( this . rowFormat , row . toArray ( ) ) ; } }
Prints the table of data
36,362
/**
 * Computes the maximum display width of each column, accounting for both the
 * optional label row and every data row. A null cell counts as width 4 (the
 * printed length of the string "null").
 *
 * @return per-column maximum widths, left to right
 */
private List < Integer > getColumnMaxWidths ( ) {
  // Fix: the original unconditionally called data.get(0), which throws
  // IndexOutOfBoundsException for an empty table. Fall back to the label row
  // (or zero columns) when there is no data.
  int numCols = this.data.isEmpty()
      ? (this.labels == null ? 0 : this.labels.size())
      : this.data.get(0).size();
  int[] widths = new int[numCols];
  if (this.labels != null) {
    for (int i = 0; i < numCols; i++) {
      widths[i] = this.labels.get(i).length();
    }
  }
  for (List<String> row : this.data) {
    for (int i = 0; i < row.size(); i++) {
      if (row.get(i) == null) {
        widths[i] = Math.max(widths[i], 4);
      } else {
        widths[i] = Math.max(widths[i], row.get(i).length());
      }
    }
  }
  return Ints.asList(widths);
}
A function for determining the max widths of columns accounting for labels and data .
36,363
/**
 * Builds a printf-style row format string: one "%&lt;flag&gt;&lt;width&gt;s" specifier per
 * column (flag taken from {@code this.flags} when present), each followed by the
 * configured delimiter spacing, the whole row prefixed by the indentation spacing
 * and terminated with a newline.
 */
private String getRowFormat ( List < Integer > widths ) {
  StringBuilder format = new StringBuilder(spaces(this.indentation));
  for (int col = 0; col < widths.size(); col++) {
    String flag = this.flags != null ? this.flags.get(col) : "";
    format.append("%").append(flag).append(widths.get(col).toString()).append("s");
    format.append(spaces(this.delimiterWidth));
  }
  return format.append("\n").toString();
}
Generates a simple row format string given a set of widths
36,364
/**
 * Copies the job's dependent jars (JOB_JAR_FILES_KEY globs, resolved on the local
 * file system) into the temporary job directory on the target file system, and
 * records that directory under COMPACTION_JARS. No-op if the key is absent.
 *
 * @throws RuntimeException if the temporary job directory has not been created
 * @throws IOException on file-system errors
 */
private void copyJarDependencies ( State state ) throws IOException { if ( this . tmpJobDir == null ) { throw new RuntimeException ( "Job directory is not created" ) ; } if ( ! state . contains ( ConfigurationKeys . JOB_JAR_FILES_KEY ) ) { return ; } LocalFileSystem lfs = FileSystem . getLocal ( HadoopUtils . getConfFromState ( state ) ) ; Path tmpJarFileDir = new Path ( this . tmpJobDir , MRCompactor . COMPACTION_JAR_SUBDIR ) ; this . fs . mkdirs ( tmpJarFileDir ) ; state . setProp ( MRCompactor . COMPACTION_JARS , tmpJarFileDir . toString ( ) ) ; for ( String jarFile : state . getPropAsList ( ConfigurationKeys . JOB_JAR_FILES_KEY ) ) { for ( FileStatus status : lfs . globStatus ( new Path ( jarFile ) ) ) { Path tmpJarFile = new Path ( this . fs . makeQualified ( tmpJarFileDir ) , status . getPath ( ) . getName ( ) ) ; this . fs . copyFromLocalFile ( status . getPath ( ) , tmpJarFile ) ; log . info ( String . format ( "%s will be added to classpath" , tmpJarFile ) ) ; } } }
Copy dependent jars to a temporary job directory on HDFS
36,365
/**
 * Executes the queries on the Hive statement associated with the given proxy user.
 * When no proxy is given, exactly one connection must exist and it is used.
 *
 * @throws IllegalArgumentException if no connection exists, or several exist and no proxy was specified
 * @throws SQLException on query execution failure
 */
public void executeQueries ( List < String > queries , Optional < String > proxy ) throws SQLException { Preconditions . checkArgument ( ! this . statementMap . isEmpty ( ) , "No hive connection. Unable to execute queries" ) ; if ( ! proxy . isPresent ( ) ) { Preconditions . checkArgument ( this . statementMap . size ( ) == 1 , "Multiple Hive connections. Please specify a user" ) ; proxy = Optional . fromNullable ( this . statementMap . keySet ( ) . iterator ( ) . next ( ) ) ; } Statement statement = this . statementMap . get ( proxy . get ( ) ) ; for ( String query : queries ) { statement . execute ( query ) ; } }
Execute queries .
36,366
/**
 * Extracts the record count from a file name of the form FILENAME.RECORDCOUNT.EXTENSION:
 * the numeric component immediately before the extension.
 *
 * @throws IllegalArgumentException if the name does not follow the expected pattern
 */
public long getRecordCount ( Path filepath ) { String [ ] components = filepath . getName ( ) . split ( Pattern . quote ( SEPARATOR ) ) ; Preconditions . checkArgument ( components . length >= 2 && StringUtils . isNumeric ( components [ components . length - 2 ] ) , String . format ( "Filename %s does not follow the pattern: FILENAME.RECORDCOUNT.EXTENSION" , filepath ) ) ; return Long . parseLong ( components [ components . length - 2 ] ) ; }
The record count should be the last component before the filename extension .
36,367
/**
 * Builds an R2 rest request from a single buffered record: converts the record to an
 * HttpOperation, expands the URL template with its keys/query params, copies headers,
 * sets the rest.li protocol-version and request-method headers, and attaches the
 * payload. Returns null for a null record or an unresolvable URI.
 *
 * @throws RuntimeException if the payload cannot be written into the request
 */
private R2Request < GenericRecord > buildWriteRequest ( BufferedRecord < GenericRecord > record ) { if ( record == null ) { return null ; } R2Request < GenericRecord > request = new R2Request < > ( ) ; HttpOperation httpOperation = HttpUtils . toHttpOperation ( record . getRecord ( ) ) ; URI uri = HttpUtils . buildURI ( urlTemplate , httpOperation . getKeys ( ) , httpOperation . getQueryParams ( ) ) ; if ( uri == null ) { return null ; } RestRequestBuilder builder = new RestRequestBuilder ( uri ) . setMethod ( method . getHttpMethod ( ) . toString ( ) ) ; Map < String , String > headers = httpOperation . getHeaders ( ) ; if ( headers != null && headers . size ( ) != 0 ) { builder . setHeaders ( headers ) ; } builder . setHeader ( RestConstants . HEADER_RESTLI_PROTOCOL_VERSION , protocolVersion ) ; builder . setHeader ( RestConstants . HEADER_RESTLI_REQUEST_METHOD , method . toString ( ) ) ; int bytesWritten = addPayload ( builder , httpOperation . getBody ( ) ) ; if ( bytesWritten == - 1 ) { throw new RuntimeException ( "Fail to write payload into request" ) ; } request . markRecord ( record , bytesWritten ) ; request . setRawRequest ( build ( builder ) ) ; return request ; }
Build a request from a single record
36,368
/** Creates an Azkaban project with the given name and description, retrying via runWithRetry. */
public AzkabanClientStatus createProject ( String projectName , String description ) throws AzkabanClientException { AzkabanMultiCallables . CreateProjectCallable callable = AzkabanMultiCallables . CreateProjectCallable . builder ( ) . client ( this ) . projectName ( projectName ) . description ( description ) . build ( ) ; return runWithRetry ( callable , AzkabanClientStatus . class ) ; }
Creates a project .
36,369
/** Deletes an Azkaban project by name, retrying via runWithRetry. */
public AzkabanClientStatus deleteProject ( String projectName ) throws AzkabanClientException { AzkabanMultiCallables . DeleteProjectCallable callable = AzkabanMultiCallables . DeleteProjectCallable . builder ( ) . client ( this ) . projectName ( projectName ) . build ( ) ; return runWithRetry ( callable , AzkabanClientStatus . class ) ; }
Deletes a project . Currently no response message will be returned after finishing the delete operation . Thus success status is always expected .
36,370
/**
 * Uploads a zip file to an existing Azkaban project (the project must be created
 * first), retrying via runWithRetry.
 */
public AzkabanClientStatus uploadProjectZip ( String projectName , File zipFile ) throws AzkabanClientException { AzkabanMultiCallables . UploadProjectCallable callable = AzkabanMultiCallables . UploadProjectCallable . builder ( ) . client ( this ) . projectName ( projectName ) . zipFile ( zipFile ) . build ( ) ; return runWithRetry ( callable , AzkabanClientStatus . class ) ; }
Updates a project by uploading a new zip file . Before uploading any project zip files the project should be created first .
36,371
/**
 * Executes a flow of an existing project with the given flow options and flow
 * parameters, retrying via runWithRetry.
 */
public AzkabanExecuteFlowStatus executeFlowWithOptions ( String projectName , String flowName , Map < String , String > flowOptions , Map < String , String > flowParameters ) throws AzkabanClientException { AzkabanMultiCallables . ExecuteFlowCallable callable = AzkabanMultiCallables . ExecuteFlowCallable . builder ( ) . client ( this ) . projectName ( projectName ) . flowName ( flowName ) . flowOptions ( flowOptions ) . flowParameters ( flowParameters ) . build ( ) ; return runWithRetry ( callable , AzkabanExecuteFlowStatus . class ) ; }
Execute a flow by providing flow parameters and options . The project and flow should be created first .
36,372
/** Convenience overload: executes a flow with parameters and no flow options. */
public AzkabanExecuteFlowStatus executeFlow ( String projectName , String flowName , Map < String , String > flowParameters ) throws AzkabanClientException { return executeFlowWithOptions ( projectName , flowName , null , flowParameters ) ; }
Execute a flow with flow parameters . The project and flow should be created first .
36,373
/** Cancels a running flow by execution id, retrying via runWithRetry. */
public AzkabanClientStatus cancelFlow ( String execId ) throws AzkabanClientException { AzkabanMultiCallables . CancelFlowCallable callable = AzkabanMultiCallables . CancelFlowCallable . builder ( ) . client ( this ) . execId ( execId ) . build ( ) ; return runWithRetry ( callable , AzkabanClientStatus . class ) ; }
Cancel a flow by execution id .
36,374
/**
 * Fetches a slice (offset/length) of the execution log for a given execution and
 * job id into the provided output file, retrying via runWithRetry.
 */
public AzkabanClientStatus fetchExecutionLog ( String execId , String jobId , String offset , String length , File ouf ) throws AzkabanClientException { AzkabanMultiCallables . FetchExecLogCallable callable = AzkabanMultiCallables . FetchExecLogCallable . builder ( ) . client ( this ) . execId ( execId ) . jobId ( jobId ) . offset ( offset ) . length ( length ) . output ( ouf ) . build ( ) ; return runWithRetry ( callable , AzkabanClientStatus . class ) ; }
Fetch an execution log .
36,375
/** Fetches the detailed status of a flow execution by execution id, retrying via runWithRetry. */
public AzkabanFetchExecuteFlowStatus fetchFlowExecution ( String execId ) throws AzkabanClientException { AzkabanMultiCallables . FetchFlowExecCallable callable = AzkabanMultiCallables . FetchFlowExecCallable . builder ( ) . client ( this ) . execId ( execId ) . build ( ) ; return runWithRetry ( callable , AzkabanFetchExecuteFlowStatus . class ) ; }
Given an execution id fetches all the detailed information of that execution including a list of all the job executions .
36,376
/**
 * Creates an Eventhub data writer wrapped in a batching async writer: records are
 * accumulated by an EventhubBatchAccumulator and flushed through the underlying
 * EventhubDataWriter.
 */
public AsyncDataWriter getAsyncDataWriter ( Properties properties ) { EventhubDataWriter eventhubDataWriter = new EventhubDataWriter ( properties ) ; EventhubBatchAccumulator accumulator = new EventhubBatchAccumulator ( properties ) ; BatchedEventhubDataWriter batchedEventhubDataWriter = new BatchedEventhubDataWriter ( accumulator , eventhubDataWriter ) ; return batchedEventhubDataWriter ; }
Create an eventhub data writer wrapped into a buffered async data writer
36,377
/**
 * Attempts to reserve permits from the token bucket.
 *
 * Strategy: (1) eagerly grant half of the currently stored tokens when that already
 * exceeds the request; (2) refuse (non-retryable) when even {@code minPermits} cannot
 * be produced within {@code timeoutMillis}; (3) otherwise repeatedly halve the request
 * until a reservation succeeds or the minimum is reached.
 *
 * @param requestedPermits desired number of permits
 * @param minPermits smallest acceptable grant
 * @param timeoutMillis maximum time the caller is willing to wait
 * @return the granted permits and wait, or a zero grant (retryable flag set accordingly)
 */
public PermitsAndDelay getPermitsAndDelay ( long requestedPermits , long minPermits , long timeoutMillis ) {
  try {
    // Eager path: if half the stored tokens already exceed the request, take them all at once.
    long storedTokens = this.tokenBucket.getStoredTokens();
    long eagerTokens = storedTokens / 2;
    if (eagerTokens > requestedPermits && this.tokenBucket.getTokens(eagerTokens, 0, TimeUnit.MILLISECONDS)) {
      return new PermitsAndDelay(eagerTokens, 0, true);
    }
    // If producing even the minimum takes longer than the caller's timeout, give up (not retryable).
    long millisToSatisfyMinPermits = (long) (minPermits / this.tokenBucket.getTokensPerMilli());
    if (millisToSatisfyMinPermits > timeoutMillis) {
      return new PermitsAndDelay(0, 0, false);
    }
    long allowedTimeout = Math.min(millisToSatisfyMinPermits + this.baseTimeout, timeoutMillis);
    // Back off by halving the request until a reservation succeeds or we hit the minimum.
    while (requestedPermits > minPermits) {
      long wait = this.tokenBucket.tryReserveTokens(requestedPermits, allowedTimeout);
      if (wait >= 0) {
        return new PermitsAndDelay(requestedPermits, wait, true);
      }
      requestedPermits /= 2;
    }
    long wait = this.tokenBucket.tryReserveTokens(minPermits, allowedTimeout);
    if (wait >= 0) {
      // NOTE(review): minPermits tokens are reserved here but requestedPermits (now <= minPermits)
      // is reported in the result — confirm whether this should report minPermits instead.
      return new PermitsAndDelay(requestedPermits, wait, true);
    }
  } catch (InterruptedException ie) {
    // Fix: restore the interrupt status instead of silently swallowing the exception.
    Thread.currentThread().interrupt();
  }
  return new PermitsAndDelay(0, 0, true);
}
Request tokens .
36,378
/**
 * Builds a StreamCodec for the encryption type named in the parameters.
 *
 * @throws IllegalArgumentException if the parameters contain no encryption type
 */
public StreamCodec buildStreamEncryptor ( Map < String , Object > parameters ) { String encryptionType = EncryptionConfigParser . getEncryptionType ( parameters ) ; if ( encryptionType == null ) { throw new IllegalArgumentException ( "Encryption type not present in parameters!" ) ; } return buildStreamCryptoProvider ( encryptionType , parameters ) ; }
Return a StreamEncryptor for the given parameters . The algorithm type to use will be extracted from the parameters object .
36,379
/**
 * Builds a credential store from the keystore type/path/password in the parameters.
 * Supports JCEKS keystores and JSON credential stores; returns null for an unknown
 * type or (with an error logged) when construction throws an IOException.
 */
public CredentialStore buildCredentialStore ( Map < String , Object > parameters ) { String ks_type = EncryptionConfigParser . getKeystoreType ( parameters ) ; String ks_path = EncryptionConfigParser . getKeystorePath ( parameters ) ; String ks_password = EncryptionConfigParser . getKeystorePassword ( parameters ) ; try { switch ( ks_type ) { case JCEKSKeystoreCredentialStore . TAG : return new JCEKSKeystoreCredentialStore ( ks_path , ks_password ) ; case JsonCredentialStore . TAG : return new JsonCredentialStore ( ks_path , buildKeyToStringCodec ( parameters ) ) ; default : return null ; } } catch ( IOException e ) { log . error ( "Error building credential store, returning null" , e ) ; return null ; } }
Build a credential store with the given parameters .
36,380
/**
 * Resolves the charset for a content type: an explicit registration wins; otherwise
 * text/*, *+json and *+xml content types default to "UTF-8"; everything else is
 * treated as "BINARY".
 */
public String getCharset ( String contentType ) {
  String registered = knownCharsets.get(contentType);
  if (registered != null) {
    return registered;
  }
  boolean textual = contentType.startsWith("text/")
      || contentType.endsWith("+json")
      || contentType.endsWith("+xml");
  return textual ? "UTF-8" : "BINARY";
}
Check which character set a given content - type corresponds to .
36,381
/**
 * Infers printability from metadata: resolves a charset from the last transfer
 * encoding if present, otherwise from the content type; the content is considered
 * printable iff the inferred charset is UTF-8.
 */
public boolean inferPrintableFromMetadata ( Metadata md ) { String inferredCharset = "BINARY" ; List < String > transferEncoding = md . getGlobalMetadata ( ) . getTransferEncoding ( ) ; if ( transferEncoding != null ) { inferredCharset = getCharset ( transferEncoding . get ( transferEncoding . size ( ) - 1 ) ) ; } else if ( md . getGlobalMetadata ( ) . getContentType ( ) != null ) { inferredCharset = getCharset ( md . getGlobalMetadata ( ) . getContentType ( ) ) ; } return inferredCharset . equals ( "UTF-8" ) ; }
Heuristic to infer if content is printable from metadata .
36,382
/**
 * Registers (or replaces) the charset associated with a content type, logging a
 * warning when an existing registration is overwritten.
 */
public void registerCharsetMapping ( String contentType , String charSet ) {
  // Fix: the original used knownCharsets.contains(contentType), which (Hashtable/
  // ConcurrentHashMap legacy semantics) tests VALUES, not keys; containsKey is the
  // intended check for an existing content-type registration.
  if (knownCharsets.containsKey(contentType)) {
    // Fix: the original log call had a "{}" placeholder but no argument.
    log.warn("{} is already registered; re-registering", contentType);
  }
  knownCharsets.put(contentType, charSet);
}
Register a new contentType to charSet mapping .
36,383
/**
 * Resolves the audit-count client factory named in the state (via class-alias
 * resolution); falls back to an EmptyAuditCountClientFactory when no factory key is
 * configured. Any resolution/instantiation failure is rethrown as RuntimeException.
 */
private static AuditCountClientFactory getClientFactory ( State state ) { if ( ! state . contains ( AuditCountClientFactory . AUDIT_COUNT_CLIENT_FACTORY ) ) { return new EmptyAuditCountClientFactory ( ) ; } try { String factoryName = state . getProp ( AuditCountClientFactory . AUDIT_COUNT_CLIENT_FACTORY ) ; ClassAliasResolver < AuditCountClientFactory > conditionClassAliasResolver = new ClassAliasResolver < > ( AuditCountClientFactory . class ) ; AuditCountClientFactory factory = conditionClassAliasResolver . resolveClass ( factoryName ) . newInstance ( ) ; return factory ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } }
Obtain a client factory
36,384
/**
 * Builds a Google credential from a P12 private key stored on a (possibly remote)
 * file system. Because the Google API only accepts a java.io.File, the key is copied
 * to a local temp file, used to build the credential, and deleted in the finally
 * block (deleteOnExit is a backstop).
 *
 * @throws IllegalArgumentException if no user id is supplied
 * @throws RuntimeException if the local key copy cannot be deleted
 */
private static Credential buildCredentialFromP12 ( String privateKeyPath , Optional < String > fsUri , Optional < String > id , HttpTransport transport , Collection < String > serviceAccountScopes ) throws IOException , GeneralSecurityException { Preconditions . checkArgument ( id . isPresent ( ) , "user id is required." ) ; FileSystem fs = getFileSystem ( fsUri ) ; Path keyPath = getPrivateKey ( fs , privateKeyPath ) ; final File localCopied = copyToLocal ( fs , keyPath ) ; localCopied . deleteOnExit ( ) ; try { return new GoogleCredential . Builder ( ) . setTransport ( transport ) . setJsonFactory ( JSON_FACTORY ) . setServiceAccountId ( id . get ( ) ) . setServiceAccountPrivateKeyFromP12File ( localCopied ) . setServiceAccountScopes ( serviceAccountScopes ) . build ( ) ; } finally { boolean isDeleted = localCopied . delete ( ) ; if ( ! isDeleted ) { throw new RuntimeException ( localCopied . getAbsolutePath ( ) + " has not been deleted." ) ; } } }
As the Google API only accepts a java.io.File for the private key, this method copies the private key into the local file system. Once the Google credential is instantiated, it deletes the copied private key file.
36,385
/**
 * Returns the private-key path after verifying the file's permission is owner-read-only,
 * as a safeguard to keep the private key private.
 *
 * @throws IllegalArgumentException if the file permission is anything other than owner read-only
 */
private static Path getPrivateKey ( FileSystem fs , String privateKeyPath ) throws IOException { Path keyPath = new Path ( privateKeyPath ) ; FileStatus fileStatus = fs . getFileStatus ( keyPath ) ; Preconditions . checkArgument ( USER_READ_PERMISSION_ONLY . equals ( fileStatus . getPermission ( ) ) , "Private key file should only have read only permission only on user. " + keyPath ) ; return keyPath ; }
Before retrieving the private key, it makes sure that the private key file's permission is read-only for the owner. This is a way to help keep the private key private.
36,386
/**
 * Provides an HttpTransport. When both proxyUrl and portStr are non-empty, builds a
 * trusted NetHttpTransport routed through the given HTTP proxy; otherwise returns
 * Google's default trusted transport.
 *
 * @throws NumberFormatException if portStr is not a valid integer
 */
public static HttpTransport newTransport ( String proxyUrl , String portStr ) throws NumberFormatException , GeneralSecurityException , IOException { if ( ! StringUtils . isEmpty ( proxyUrl ) && ! StringUtils . isEmpty ( portStr ) ) { return new NetHttpTransport . Builder ( ) . trustCertificates ( GoogleUtils . getCertificateTrustStore ( ) ) . setProxy ( new Proxy ( Proxy . Type . HTTP , new InetSocketAddress ( proxyUrl , Integer . parseInt ( portStr ) ) ) ) . build ( ) ; } return GoogleNetHttpTransport . newTrustedTransport ( ) ; }
Provides an HttpTransport. If both proxyUrl and portStr are defined, it provides a transport configured with a proxy.
36,387
/**
 * Configures the job to run in MapReduce mode: sets the MR job launcher, the default
 * file-system URI, a timestamped MR job root dir, and defers computing the
 * JOB_JAR_FILES_KEY list until the distribute-jars hook runs.
 */
public EmbeddedGobblin mrMode ( ) throws IOException { this . sysConfigOverrides . put ( ConfigurationKeys . JOB_LAUNCHER_TYPE_KEY , JobLauncherFactory . JobLauncherType . MAPREDUCE . name ( ) ) ; this . builtConfigMap . put ( ConfigurationKeys . FS_URI_KEY , FileSystem . get ( new Configuration ( ) ) . getUri ( ) . toString ( ) ) ; this . builtConfigMap . put ( ConfigurationKeys . MR_JOB_ROOT_DIR_KEY , "/tmp/EmbeddedGobblin_" + System . currentTimeMillis ( ) ) ; this . distributeJarsFunction = new Runnable ( ) { public void run ( ) { EmbeddedGobblin . this . sysConfigOverrides . put ( ConfigurationKeys . JOB_JAR_FILES_KEY , Joiner . on ( "," ) . join ( getPrioritizedDistributedJars ( ) ) ) ; } } ; return this ; }
Specify job should run in MR mode .
36,388
/** Overrides a Gobblin system configuration key-value pair. Returns this for chaining. */
public EmbeddedGobblin sysConfig ( String key , String value ) { this . sysConfigOverrides . put ( key , value ) ; return this ; }
Override a Gobblin system configuration .
36,389
/** Sets a key-value pair in the user job configuration. Returns this for chaining. */
public EmbeddedGobblin setConfiguration ( String key , String value ) { this . userConfigMap . put ( key , value ) ; return this ; }
Manually set a key - value pair in the job configuration .
36,390
/**
 * Sets the job execution timeout from an ISO-8601 period string (e.g. "PT1H30M").
 */
public EmbeddedGobblin setJobTimeout ( String timeout ) {
  // Fix: Joda Period.getSeconds() returns only the seconds *field* of the period
  // (so "PT2M" would yield 0, not 120). Normalize the whole period to seconds first.
  return setJobTimeout(Period.parse(timeout).toStandardSeconds().getSeconds(), TimeUnit.SECONDS);
}
Set the timeout for the Gobblin job execution from ISO - style period .
36,391
/**
 * Sets the timeout for launching the Gobblin job from an ISO-8601 period string.
 */
public EmbeddedGobblin setLaunchTimeout ( String timeout ) {
  // Fix: Joda Period.getSeconds() returns only the seconds *field* of the period
  // (so "PT2M" would yield 0, not 120). Normalize the whole period to seconds first.
  return setLaunchTimeout(Period.parse(timeout).toStandardSeconds().getSeconds(), TimeUnit.SECONDS);
}
Set the timeout for launching the Gobblin job from ISO - style period .
36,392
/**
 * Sets the timeout for shutting down the Gobblin instance driver after the job is
 * done, from an ISO-8601 period string.
 */
public EmbeddedGobblin setShutdownTimeout ( String timeout ) {
  // Fix: Joda Period.getSeconds() returns only the seconds *field* of the period
  // (so "PT2M" would yield 0, not 120). Normalize the whole period to seconds first.
  return setShutdownTimeout(Period.parse(timeout).toStandardSeconds().getSeconds(), TimeUnit.SECONDS);
}
Set the timeout for shutting down the Gobblin instance driver after the job is done from ISO - style period .
36,393
/** Enables the state store and sets its root directory. Returns this for chaining. */
public EmbeddedGobblin useStateStore ( String rootDir ) { this . setConfiguration ( ConfigurationKeys . STATE_STORE_ENABLED , "true" ) ; this . setConfiguration ( ConfigurationKeys . STATE_STORE_ROOT_DIR_KEY , rootDir ) ; return this ; }
Enable state store .
36,394
/**
 * Enables Gobblin metrics via the GobblinMetricsPlugin and the METRICS_ENABLED_KEY
 * sys config. Does not start any reporters. Returns this for chaining.
 */
public EmbeddedGobblin enableMetrics ( ) { this . usePlugin ( new GobblinMetricsPlugin . Factory ( ) ) ; this . sysConfig ( ConfigurationKeys . METRICS_ENABLED_KEY , Boolean . toString ( true ) ) ; return this ; }
Enable metrics . Does not start any reporters .
36,395
/**
 * Runs the Gobblin job synchronously: launches it via runAsync and blocks up to the
 * configured job timeout for the result.
 *
 * @throws TimeoutException if the job does not finish within the configured timeout
 */
public JobExecutionResult run ( ) throws InterruptedException , TimeoutException , ExecutionException { JobExecutionDriver jobDriver = runAsync ( ) ; return jobDriver . get ( this . jobTimeout . getTimeout ( ) , this . jobTimeout . getTimeUnit ( ) ) ; }
Run the Gobblin job . This call will block until the job is done .
36,396
/**
 * Registers the jars containing the core classes a basic Gobblin ingestion job needs
 * (Gobblin runtime/metrics/state-store, Joda, Guava escaper at lower priority,
 * metrics-core, rest.li data templates, config, reflections, javassist, etc.) so they
 * can be distributed to workers in a distributed environment.
 */
private void loadCoreGobblinJarsToDistributedJars ( ) { distributeJarByClassWithPriority ( State . class , 0 ) ; distributeJarByClassWithPriority ( ConstructState . class , 0 ) ; distributeJarByClassWithPriority ( InstrumentedExtractorBase . class , 0 ) ; distributeJarByClassWithPriority ( MetricContext . class , 0 ) ; distributeJarByClassWithPriority ( GobblinMetrics . class , 0 ) ; distributeJarByClassWithPriority ( FsStateStore . class , 0 ) ; distributeJarByClassWithPriority ( Task . class , 0 ) ; distributeJarByClassWithPriority ( PathUtils . class , 0 ) ; distributeJarByClassWithPriority ( ReadableInstant . class , 0 ) ; distributeJarByClassWithPriority ( Escaper . class , - 10 ) ; distributeJarByClassWithPriority ( MetricFilter . class , 0 ) ; distributeJarByClassWithPriority ( DataTemplate . class , 0 ) ; distributeJarByClassWithPriority ( ClassUtils . class , 0 ) ; distributeJarByClassWithPriority ( SchemaBuilder . class , 0 ) ; distributeJarByClassWithPriority ( RetryListener . class , 0 ) ; distributeJarByClassWithPriority ( ConfigFactory . class , 0 ) ; distributeJarByClassWithPriority ( Reflections . class , 0 ) ; distributeJarByClassWithPriority ( ClassFile . class , 0 ) ; }
This returns the set of jars required by a basic Gobblin ingestion job . In general these need to be distributed to workers in a distributed environment .
36,397
/**
 * Adds or updates a JobSpec by materializing it to a file under the job conf
 * directory. Fires onUpdateJob when a file already existed at the target path,
 * otherwise onAddJob, and records the elapsed time in the metrics.
 *
 * @throws IllegalStateException if this store is not RUNNING
 * @throws NullPointerException if {@code jobSpec} is null
 * @throws RuntimeException wrapping any IO or spec-not-found failure
 */
public synchronized void put ( JobSpec jobSpec ) {
  Preconditions.checkState(state() == State.RUNNING, String.format("%s is not running.", this.getClass().getName()));
  Preconditions.checkNotNull(jobSpec);
  try {
    long startTime = System.currentTimeMillis();
    Path jobSpecPath = getPathForURI(this.jobConfDirPath, jobSpec.getUri());
    // A pre-existing file at the target path means this is an update, not an addition.
    boolean isUpdate = fs.exists(jobSpecPath);
    materializedJobSpec(jobSpecPath, jobSpec, this.fs);
    this.mutableMetrics.updatePutJobTime(startTime);
    if (isUpdate) {
      this.listeners.onUpdateJob(jobSpec);
    } else {
      this.listeners.onAddJob(jobSpec);
    }
  } catch (IOException e) {
    // Fix: preserve the original exception as the cause instead of flattening it to
    // its message, which discards the stack trace.
    throw new RuntimeException("When persisting a new JobSpec, unexpected issues happen:" + e.getMessage(), e);
  } catch (JobSpecNotFoundException e) {
    throw new RuntimeException("When replacing a existed JobSpec, unexpected issue happen:" + e.getMessage(), e);
  }
}
Allow user to programmatically add a new JobSpec. The method will materialize the jobSpec into a real file.
36,398
/**
 * Deletes the materialized JobSpec file for the given URI, firing onDeleteJob and
 * recording the elapsed time on success. Logs a warning (instead of failing) when no
 * file exists for the URI or the spec cannot be found, making the call reentrant.
 *
 * @throws IllegalStateException if this store is not RUNNING
 * @throws RuntimeException wrapping any IO failure
 */
public synchronized void remove ( URI jobURI ) {
  Preconditions.checkState(state() == State.RUNNING, String.format("%s is not running.", this.getClass().getName()));
  try {
    long startTime = System.currentTimeMillis();
    JobSpec jobSpec = getJobSpec(jobURI);
    Path jobSpecPath = getPathForURI(this.jobConfDirPath, jobURI);
    if (fs.exists(jobSpecPath)) {
      fs.delete(jobSpecPath, false);
      this.mutableMetrics.updateRemoveJobTime(startTime);
      this.listeners.onDeleteJob(jobURI, jobSpec.getVersion());
    } else {
      LOGGER.warn("No file with URI:" + jobSpecPath + " is found. Deletion failed.");
    }
  } catch (IOException e) {
    // Fix: preserve the original exception as the cause instead of flattening it to
    // its message, which discards the stack trace.
    throw new RuntimeException("When removing a JobConf. file, issues unexpected happen:" + e.getMessage(), e);
  } catch (SpecNotFoundException e) {
    LOGGER.warn("No file with URI:" + jobURI + " is found. Deletion failed.");
  }
}
Allow user to programmatically delete an existing JobSpec. This method is designed to be reentrant.
36,399
/**
 * Converts a single name/value pair into a Metric whose name is the dot-joined
 * combination of {@code name} and {@code path}, with the value widened to double.
 */
protected Metric serializeValue ( String name , Number value , String ... path ) { return new Metric ( MetricRegistry . name ( name , path ) , value . doubleValue ( ) ) ; }
Converts a single key - value pair into a metric .