idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
21,900
/**
 * Initializes Gobblin presets and caches them into the preset map. Presets do not change while
 * the server is up, so this initialization happens only once per JVM via double-checked
 * locking on the class object.
 *
 * NOTE(review): double-checked locking is only safe if {@code gobblinPresets} is declared
 * volatile — confirm the field declaration.
 */
private void initializePresets() {
  if (gobblinPresets == null) {
    synchronized (GobblinHadoopJob.class) {
      if (gobblinPresets == null) {
        gobblinPresets = Maps.newHashMap();
        final String gobblinPresetDirName =
            getSysProps().getString(GobblinConstants.GOBBLIN_PRESET_DIR_KEY);
        final File gobblinPresetDir = new File(gobblinPresetDirName);
        final File[] presetFiles = gobblinPresetDir.listFiles();
        // listFiles() returns null if the dir doesn't exist or can't be read.
        if (presetFiles == null) {
          return;
        }
        final File commonPropertiesFile =
            new File(gobblinPresetDir, GOBBLIN_PRESET_COMMON_PROPERTIES_FILE_NAME);
        if (!commonPropertiesFile.exists()) {
          // Fixed typo in error message: "Gobbline" -> "Gobblin".
          throw new IllegalStateException("Gobblin preset common properties file is missing "
              + commonPropertiesFile.getAbsolutePath());
        }
        for (final File f : presetFiles) {
          if (GOBBLIN_PRESET_COMMON_PROPERTIES_FILE_NAME.equals(f.getName())) {
            continue;
          }
          if (f.isFile()) {
            final Properties prop = new Properties();
            // Load common properties first so preset-specific values override them.
            try (InputStream commonIs =
                    new BufferedInputStream(new FileInputStream(commonPropertiesFile));
                InputStream presetIs = new BufferedInputStream(new FileInputStream(f))) {
              prop.load(commonIs);
              prop.load(presetIs);
              final String presetName = f.getName().substring(0, f.getName().lastIndexOf('.'));
              gobblinPresets.put(GobblinPresets.fromName(presetName), prop);
            } catch (final IOException e) {
              throw new RuntimeException(e);
            }
          }
        }
      }
    }
  }
}
Initializes presets and caches them into the preset map. As presets do not change while the server is up, this initialization happens only once per JVM.
21,901
/**
 * Normalizes the Gobblin query property: trims whitespace and strips a single trailing
 * semicolon. Rejects queries containing a semicolon anywhere but the last position, since
 * only one SELECT statement is allowed.
 */
private void transformProperties() {
  String sql = getJobProps().getString(GOBBLIN_QUERY_KEY, null);
  if (sql == null) {
    return;
  }
  sql = sql.trim();
  final int semicolonPos = sql.indexOf(';');
  if (semicolonPos >= 0) {
    if (semicolonPos < sql.length() - 1) {
      throw new IllegalArgumentException(
          GOBBLIN_QUERY_KEY + " should consist of one SELECT statement. " + sql);
    }
    sql = sql.substring(0, semicolonPos);
    getJobProps().put(GOBBLIN_QUERY_KEY, sql);
  }
}
Transform property to make it work for Gobblin .
21,902
/**
 * Deletes every project directory in the given set in parallel on a small fixed thread pool,
 * then waits (up to one day) for all deletions to complete.
 */
@SuppressWarnings("FutureReturnValueIgnored")
private void deleteProjectDirsInParallel(final ImmutableSet<File> projectDirsToDelete) {
  final int numCleanerThreads = 8;
  final ExecutorService cleanerPool = Executors.newFixedThreadPool(numCleanerThreads);
  projectDirsToDelete.forEach(dir -> cleanerPool.submit(() -> {
    log.info("Deleting project dir {} from project cache to free up space", dir);
    FileIOUtils.deleteDirectorySilently(dir);
  }));
  try {
    new ExecutorServiceUtils().gracefulShutdown(cleanerPool, Duration.ofDays(1));
  } catch (final InterruptedException e) {
    log.warn("Error when deleting files", e);
  }
}
Deletes all the project directories in parallel.
21,903
/**
 * Frees at least {@code sizeToFreeInBytes} bytes by deleting project directories in
 * least-recently-accessed order. Deletion runs in parallel and the total time is logged.
 */
private void deleteLeastRecentlyUsedProjects(long sizeToFreeInBytes,
    final List<ProjectDirectoryMetadata> projectDirMetadataList) {
  // Oldest access time first, so the least recently used projects are deleted first.
  projectDirMetadataList.sort(Comparator.comparing(ProjectDirectoryMetadata::getLastAccessTime));
  final Set<File> dirsToRemove = new HashSet<>();
  for (final ProjectDirectoryMetadata metadata : projectDirMetadataList) {
    if (sizeToFreeInBytes <= 0) {
      break;
    }
    if (metadata.getInstalledDir() != null) {
      dirsToRemove.add(metadata.getInstalledDir());
      sizeToFreeInBytes -= metadata.getDirSizeInByte();
    }
  }
  final long startMs = System.currentTimeMillis();
  deleteProjectDirsInParallel(ImmutableSet.copyOf(dirsToRemove));
  final long elapsedSec = (System.currentTimeMillis() - startMs) / 1000;
  log.info("Deleting {} project dir(s) took {} sec(s)", dirsToRemove.size(), elapsedSec);
}
Delete least recently used projects to free up space
21,904
/**
 * Deletes least-recently-accessed project dirs when the project cache has no room left to
 * accommodate a new project of the given size. The cache limit is a configured percentage of
 * the disk hosting the cache directory.
 */
void deleteProjectDirsIfNecessary(final long newProjectSizeInBytes) {
  final long cacheLimitBytes =
      (long) (this.projectCacheDir.getTotalSpace() * this.percentageOfDisk);
  final long loadStartMs = System.currentTimeMillis();
  final List<ProjectDirectoryMetadata> allProjects = loadAllProjects();
  log.info("Loading {} project dirs metadata completed in {} sec(s)", allProjects.size(),
      (System.currentTimeMillis() - loadStartMs) / 1000);
  final long usedBytes = getProjectDirsTotalSizeInBytes(allProjects);
  if (usedBytes + newProjectSizeInBytes >= cacheLimitBytes) {
    log.info("Project cache usage[{} MB] >= cache limit[{} MB], start cleaning up project dirs",
        (usedBytes + newProjectSizeInBytes) / (1024 * 1024),
        cacheLimitBytes / (1024 * 1024));
    final long freeCacheSpaceInBytes = cacheLimitBytes - usedBytes;
    deleteLeastRecentlyUsedProjects(newProjectSizeInBytes - freeCacheSpaceInBytes, allProjects);
  }
}
Deletes the least recently accessed project dirs when there's no room to accommodate a new project.
21,905
/**
 * Creates a per-flow logger that writes to "_flow.&lt;execId&gt;.&lt;flowId&gt;.log" inside the
 * execution directory. If the log file cannot be opened, the failure is logged and the
 * appender is left null.
 */
private void createLogger(final String flowId) {
  final String loggerName = this.execId + "." + flowId;
  this.logger = Logger.getLogger(loggerName);
  this.logFile = new File(this.execDir, "_flow." + loggerName + ".log");
  final String logPath = this.logFile.getAbsolutePath();
  this.flowAppender = null;
  try {
    this.flowAppender = new FileAppender(this.loggerLayout, logPath, false);
    this.logger.addAppender(this.flowAppender);
  } catch (final IOException e) {
    this.logger.error("Could not open log file in " + this.execDir, e);
  }
}
setup logger and execution dir for the flowId
21,906
/**
 * Main loop that executes the flow's jobs: starts the root job, then repeatedly either waits
 * while the flow is paused, retries failed jobs when retry is requested, or advances the job
 * graph — sleeping up to CHECK_WAIT_MS on the sync object between idle iterations. When the
 * flow finishes, shuts down the executor service and persists the final flow state.
 *
 * NOTE(review): InterruptedException is swallowed inside both wait calls without
 * re-interrupting the thread — presumably intentional for this polling loop; confirm.
 */
private void runFlow ( ) throws Exception { this . logger . info ( "Starting flows" ) ; runReadyJob ( this . flow ) ; updateFlow ( ) ; while ( ! this . flowFinished ) { synchronized ( this . mainSyncObj ) { if ( this . flowPaused ) { try { this . mainSyncObj . wait ( CHECK_WAIT_MS ) ; } catch ( final InterruptedException e ) { } continue ; } else { if ( this . retryFailedJobs ) { retryAllFailures ( ) ; } else if ( ! progressGraph ( ) ) { try { this . mainSyncObj . wait ( CHECK_WAIT_MS ) ; } catch ( final InterruptedException e ) { } } } } } this . logger . info ( "Finishing up flow. Awaiting Termination" ) ; this . executorService . shutdown ( ) ; updateFlow ( ) ; this . logger . info ( "Finished Flow" ) ; }
Main method that executes the jobs .
21,907
/**
 * Recursively propagates {@code status} up the chain of parent flows. At the root flow, in
 * poll-based dispatching mode, alerts the user the first time the status transitions to
 * FAILED_FINISHING. Flows that are already finished or being killed are left untouched.
 */
private void propagateStatusAndAlert(final ExecutableFlowBase base, final Status status) {
  final boolean alreadyTerminal =
      Status.isStatusFinished(base.getStatus()) || base.getStatus() == Status.KILLING;
  if (alreadyTerminal) {
    return;
  }
  this.logger.info("Setting " + base.getNestedId() + " to " + status);
  boolean statusChanged = false;
  if (base.getStatus() != status) {
    base.setStatus(status);
    statusChanged = true;
  }
  if (base.getParentFlow() != null) {
    propagateStatusAndAlert(base.getParentFlow(), status);
  } else if (this.azkabanProps.getBoolean(ConfigurationKeys.AZKABAN_POLL_MODEL, false)
      && statusChanged && base.getStatus() == Status.FAILED_FINISHING) {
    ExecutionControllerUtils.alertUserOnFirstError((ExecutableFlow) base, this.alerterHolder);
  }
}
Recursively propagate status to parent flow . Alert on first error of the flow in new AZ dispatching design .
21,908
/**
 * Determines what the state of the given node should be next. Returns null when the node
 * should not be (re)run: it is already running/succeeded, or its job-status condition is
 * still PENDING. Otherwise resolves, in precedence order: condition-on-job-status failure ->
 * CANCELLED; runtime-variable condition unmet -> CANCELLED; node DISABLED/SKIPPED -> SKIPPED;
 * flow failed with FINISH_CURRENTLY_RUNNING, or flow killed -> CANCELLED; else READY.
 * The ordering of these checks is significant — e.g. DISABLED overrides a failed condition.
 */
public Status getImpliedStatus ( final ExecutableNode node ) { if ( Status . isStatusRunning ( node . getStatus ( ) ) || node . getStatus ( ) == Status . SUCCEEDED ) { return null ; } Status status = Status . READY ; switch ( checkConditionOnJobStatus ( node ) ) { case FAILED : this . logger . info ( "Condition on job status: " + node . getConditionOnJobStatus ( ) + " is " + "evaluated to false for " + node . getId ( ) ) ; status = Status . CANCELLED ; break ; case PENDING : return null ; default : break ; } if ( status != Status . CANCELLED && ! isConditionOnRuntimeVariableMet ( node ) ) { status = Status . CANCELLED ; } if ( node . getStatus ( ) == Status . DISABLED || node . getStatus ( ) == Status . SKIPPED ) { return Status . SKIPPED ; } if ( this . flowFailed && this . failureAction == ExecutionOptions . FailureAction . FINISH_CURRENTLY_RUNNING ) { return Status . CANCELLED ; } else if ( isKilled ( ) ) { return Status . CANCELLED ; } return status ; }
Determines what the state of the next node should be . Returns null if the node should not be run .
21,909
/**
 * Registers Azkaban metric listeners on a freshly created JobRunner: the running/failed job
 * counters (when the metric report manager is available) and the JMX job MBean manager.
 */
private void configureJobLevelMetrics(final JobRunner jobRunner) {
  this.logger.info("Configuring Azkaban metrics tracking for jobrunner object");
  if (MetricReportManager.isAvailable()) {
    final MetricReportManager manager = MetricReportManager.getInstance();
    final NumRunningJobMetric runningMetric = (NumRunningJobMetric) manager
        .getMetricFromName(NumRunningJobMetric.NUM_RUNNING_JOB_METRIC_NAME);
    final NumFailedJobMetric failedMetric = (NumFailedJobMetric) manager
        .getMetricFromName(NumFailedJobMetric.NUM_FAILED_JOB_METRIC_NAME);
    jobRunner.addListener(runningMetric);
    jobRunner.addListener(failedMetric);
  }
  jobRunner.addListener(JmxJobMBeanManager.getInstance());
}
Configure Azkaban metrics tracking for a new jobRunner instance
21,910
/**
 * Resumes execution of all incomplete trigger instances by recovering their state from the
 * DB. Instances with an intact flow trigger are recovered normally; instances whose flow
 * trigger is null cannot be recovered and are cancelled: if dependencies are done but the
 * flow never launched, the instance is marked with FAILED_EXEC_ID, otherwise each unfinished
 * dependency instance is cancelled with cause FAILURE and termination processing is run.
 */
public void recoverIncompleteTriggerInstances ( ) { final Collection < TriggerInstance > unfinishedTriggerInstances = this . flowTriggerInstanceLoader . getIncompleteTriggerInstances ( ) ; for ( final TriggerInstance triggerInstance : unfinishedTriggerInstances ) { if ( triggerInstance . getFlowTrigger ( ) != null ) { recoverTriggerInstance ( triggerInstance ) ; } else { logger . error ( "cannot recover the trigger instance {}, flow trigger is null," + " cancelling it " , triggerInstance . getId ( ) ) ; if ( isDoneButFlowNotExecuted ( triggerInstance ) ) { triggerInstance . setFlowExecId ( Constants . FAILED_EXEC_ID ) ; this . flowTriggerInstanceLoader . updateAssociatedFlowExecId ( triggerInstance ) ; } else { for ( final DependencyInstance depInst : triggerInstance . getDepInstances ( ) ) { if ( ! Status . isDone ( depInst . getStatus ( ) ) ) { processStatusAndCancelCauseUpdate ( depInst , Status . CANCELLED , CancellationCause . FAILURE ) ; this . triggerProcessor . processTermination ( depInst . getTriggerInstance ( ) ) ; } } } } } }
Resume executions of all incomplete trigger instances by recovering the state from db .
21,911
/**
 * Creates a trigger instance and schedules its start on the flow-trigger executor service.
 * Invoked by the Azkaban scheduler.
 */
public void startTrigger(final FlowTrigger flowTrigger, final String flowId,
    final int flowVersion, final String submitUser, final Project project) {
  final TriggerInstance instance =
      createTriggerInstance(flowTrigger, flowId, flowVersion, submitUser, project);
  this.flowTriggerExecutorService.submit(() -> {
    logger.info("Starting the flow trigger [trigger instance id: {}] by {}", instance.getId(),
        submitUser);
    start(instance);
  });
}
Starts the trigger. The method is scheduled for invocation by the Azkaban scheduler.
21,912
/**
 * Asynchronously cancels a trigger instance; does nothing unless the instance is RUNNING.
 */
public void cancelTriggerInstance(final TriggerInstance triggerInst,
    final CancellationCause cause) {
  if (triggerInst.getStatus() != Status.RUNNING) {
    return;
  }
  this.flowTriggerExecutorService.submit(() -> cancel(triggerInst, cause));
}
Cancel a trigger instance
21,913
/**
 * Shuts down the service immediately: stops accepting new tasks on the three executor
 * services, interrupts their in-flight tasks, then shuts down the trigger processor, plugin
 * manager and cleaner.
 *
 * NOTE(review): each executor gets shutdown() immediately followed by shutdownNow() with no
 * await in between; the initial shutdown() calls are effectively redundant — presumably
 * harmless, but confirm the intent before simplifying.
 */
public void shutdown ( ) { this . flowTriggerExecutorService . shutdown ( ) ; this . cancelExecutorService . shutdown ( ) ; this . timeoutService . shutdown ( ) ; this . flowTriggerExecutorService . shutdownNow ( ) ; this . cancelExecutorService . shutdownNow ( ) ; this . timeoutService . shutdownNow ( ) ; this . triggerProcessor . shutdown ( ) ; this . triggerPluginManager . shutdown ( ) ; this . cleaner . shutdown ( ) ; }
Shuts down the service immediately .
21,914
/**
 * Checks whether a directory is writable by attempting to create (and then deleting) a probe
 * file named "_tmp" inside it.
 *
 * @return true if the probe file could be created, false if an I/O error occurred
 */
public static boolean isDirWritable(final File dir) {
  final File probe = new File(dir, "_tmp");
  try {
    probe.createNewFile();
    return true;
  } catch (final IOException e) {
    return false;
  } finally {
    probe.delete();
  }
}
Check if a directory is writable
21,915
/**
 * Deletes a directory, logging (rather than propagating) any I/O failure. A null dir is
 * silently ignored.
 */
public static void deleteDirectorySilently(final File dir) {
  if (dir == null) {
    return;
  }
  try {
    FileUtils.deleteDirectory(dir);
  } catch (final IOException e) {
    log.error("error when deleting dir {}", dir, e);
  }
}
Deletes a directory; logs the error if deletion fails.
21,916
/**
 * Writes {@code num} as its decimal string representation to a new UTF-8 file at
 * {@code filePath}. I/O failures are logged and rethrown.
 *
 * @throws IOException if the file cannot be written
 */
public static void dumpNumberToFile(final Path filePath, final long num) throws IOException {
  try (final BufferedWriter writer = Files.newBufferedWriter(filePath, StandardCharsets.UTF_8)) {
    writer.write(Long.toString(num));
  } catch (final IOException e) {
    log.error("Failed to write the number {} to the file {}", num, filePath, e);
    throw e;
  }
}
Dumps a number into a new file .
21,917
/**
 * Reads a long from the first line of the given file.
 *
 * @throws NumberFormatException if the file is empty or the first line is not a valid long
 * @throws IOException if the file cannot be read
 */
public static long readNumberFromFile(final Path filePath)
    throws IOException, NumberFormatException {
  final List<String> lines = Files.readAllLines(filePath);
  if (lines.isEmpty()) {
    throw new NumberFormatException("unable to parse empty file " + filePath.toString());
  }
  return Long.parseLong(lines.get(0));
}
Reads a number from a file .
21,918
/**
 * Recursively mirrors {@code sourceDir}'s directory structure under {@code destDir} and
 * hard-links every regular file.
 *
 * @return the number of hard links created
 * @throws IOException if either directory doesn't exist, or either path is a regular file
 */
public static int createDeepHardlink(final File sourceDir, final File destDir)
    throws IOException {
  if (!sourceDir.exists()) {
    throw new IOException("Source directory " + sourceDir.getPath() + " doesn't exist");
  } else if (!destDir.exists()) {
    throw new IOException("Destination directory " + destDir.getPath() + " doesn't exist");
  } else if (sourceDir.isFile() || destDir.isFile()) {
    // Bug fix: was "&&", which only rejected the case where BOTH paths were regular files,
    // contradicting the "Source or Destination" error message.
    throw new IOException("Source or Destination is not a directory.");
  }
  final Set<String> paths = new HashSet<>();
  createDirsFindFiles(sourceDir, sourceDir, destDir, paths);
  int linkCount = 0;
  for (String path : paths) {
    final File sourceLink = new File(sourceDir, path);
    // NOTE(review): relies on each relative path starting with a separator so that plain
    // string concatenation yields a valid destination path — confirm against
    // createDirsFindFiles.
    path = destDir + path;
    final File[] targetFiles = sourceLink.listFiles();
    if (targetFiles == null) {
      // Defensive: listFiles() returns null on I/O error or if the entry disappeared.
      continue;
    }
    for (final File targetFile : targetFiles) {
      if (targetFile.isFile()) {
        final File linkFile = new File(path, targetFile.getName());
        Files.createLink(linkFile.toPath(), Paths.get(targetFile.getAbsolutePath()));
        linkCount++;
      }
    }
  }
  return linkCount;
}
Hard link files and recurse into directories .
21,919
/**
 * Computes the byte range within {@code buffer} that aligns the requested window to whole
 * UTF-8 character boundaries.
 *
 * @return a pair of (start offset, length)
 */
public static Pair<Integer, Integer> getUtf8Range(final byte[] buffer, final int offset,
    final int length) {
  final int firstByte = getUtf8ByteStart(buffer, offset);
  final int lastByte = getUtf8ByteEnd(buffer, offset + length - 1);
  final int rangeLength = lastByte - firstByte + 1;
  return new Pair<>(firstByte, rangeLength);
}
Returns the start offset and length of the UTF-8-aligned byte range.
21,920
/**
 * Validates the readiness of a project (resolvable by id or name, already deleted/inactive)
 * and the user's ADMIN permission, then uses projectManager to purge the project if all
 * checks pass. Any failure (or thrown exception) is reported to the client via the
 * ERROR_PARAM entry; the JSON response always carries a boolean "success" flag.
 */
private void handlePurgeProject ( final HttpServletRequest req , final HttpServletResponse resp , final Session session ) throws ServletException , IOException { final User user = session . getUser ( ) ; final HashMap < String , Object > ret = new HashMap < > ( ) ; boolean isOperationSuccessful = true ; try { Project project = null ; final String projectParam = getParam ( req , "project" ) ; if ( StringUtils . isNumeric ( projectParam ) ) { project = this . projectManager . getProject ( Integer . parseInt ( projectParam ) ) ; } else { project = this . projectManager . getProject ( projectParam ) ; } if ( project == null ) { ret . put ( ERROR_PARAM , "invalid project" ) ; isOperationSuccessful = false ; } if ( isOperationSuccessful && this . projectManager . isActiveProject ( project . getId ( ) ) ) { ret . put ( ERROR_PARAM , "Project " + project . getName ( ) + " should be deleted before purging" ) ; isOperationSuccessful = false ; } if ( isOperationSuccessful && ! hasPermission ( project , user , Type . ADMIN ) ) { ret . put ( ERROR_PARAM , "Cannot purge. User '" + user . getUserId ( ) + "' is not an ADMIN." ) ; isOperationSuccessful = false ; } if ( isOperationSuccessful ) { this . projectManager . purgeProject ( project , user ) ; } } catch ( final Exception e ) { ret . put ( ERROR_PARAM , e . getMessage ( ) ) ; isOperationSuccessful = false ; } ret . put ( "success" , isOperationSuccessful ) ; this . writeJSON ( resp , ret ) ; }
Validates the readiness of a project and the user's permission, and uses projectManager to purge the project if everything looks good.
21,921
/**
 * Serializes the project's per-user permissions into {@code ret} under the "permissions"
 * key. Note: group permissions and proxy users are NOT included.
 */
private void ajaxGetPermissions(final Project project, final HashMap<String, Object> ret) {
  final ArrayList<HashMap<String, Object>> userPerms = new ArrayList<>();
  for (final Pair<String, Permission> entry : project.getUserPermissions()) {
    final HashMap<String, Object> record = new HashMap<>();
    record.put("username", entry.getFirst());
    record.put("permission", entry.getSecond().toStringArray());
    userPerms.add(record);
  }
  ret.put("permissions", userPerms);
}
This only returns user permissions; it does not include group permissions or proxy users.
21,922
/**
 * Sets or clears the lock on a flow and persists the change. When the flow has an associated
 * flow trigger, locking pauses the trigger and unlocking resumes it; a missing trigger is
 * logged at WARN. The response carries the resulting lock state and flow id.
 */
private void ajaxSetFlowLock(final Project project, final HashMap<String, Object> ret,
    final HttpServletRequest req) throws ServletException {
  final String flowName = getParam(req, FLOW_NAME_PARAM);
  final Flow flow = project.getFlow(flowName);
  if (flow == null) {
    ret.put(ERROR_PARAM, "Flow " + flowName + " not found in project " + project.getName());
    return;
  }
  final boolean isLocked = Boolean.parseBoolean(getParam(req, FLOW_IS_LOCKED_PARAM));
  if (isLocked != flow.isLocked()) {
    try {
      if (projectManager.hasFlowTrigger(project, flow)) {
        final boolean triggerFound = isLocked
            ? this.scheduler.pauseFlowTriggerIfPresent(project.getId(), flow.getId())
            : this.scheduler.resumeFlowTriggerIfPresent(project.getId(), flow.getId());
        final String action = isLocked ? "paused" : "resumed";
        if (triggerFound) {
          logger.info("Flow trigger for flow " + project.getName() + "." + flow.getId()
              + " is " + action);
        } else {
          logger.warn("Flow trigger for flow " + project.getName() + "." + flow.getId()
              + " doesn't exist");
        }
      }
    } catch (final Exception e) {
      // Bug fix: previously the Exception object itself was stored, which does not
      // serialize usefully into the JSON response; store its message instead.
      ret.put(ERROR_PARAM, e.getMessage());
    }
  }
  // NOTE(review): the lock state is applied even when pausing/resuming the trigger failed
  // above — existing behavior preserved; confirm this is intended.
  flow.setLocked(isLocked);
  ret.put(FLOW_IS_LOCKED_PARAM, flow.isLocked());
  ret.put(FLOW_ID_PARAM, flow.getId());
  this.projectManager.updateFlow(project, flow);
}
Set if a flow is locked .
21,923
/**
 * Writes the flow's id and lock state into {@code ret}; reports an error when the named flow
 * does not exist in the project.
 */
private void ajaxIsFlowLocked(final Project project, final HashMap<String, Object> ret,
    final HttpServletRequest req) throws ServletException {
  final String flowName = getParam(req, FLOW_NAME_PARAM);
  final Flow flow = project.getFlow(flowName);
  if (flow == null) {
    ret.put(ERROR_PARAM, "Flow " + flowName + " not found in project " + project.getName());
    return;
  }
  ret.put(FLOW_ID_PARAM, flow.getId());
  ret.put(FLOW_IS_LOCKED_PARAM, flow.isLocked());
}
Returns true if the flow is locked false if it is unlocked .
21,924
/**
 * Marks each of the named flows in the project as locked; flow ids that don't resolve to a
 * flow are skipped.
 */
private void lockFlowsForProject(Project project, List<String> lockedFlows) {
  for (final String flowId : lockedFlows) {
    final Flow match = project.getFlow(flowId);
    if (match == null) {
      continue;
    }
    match.setLocked(true);
  }
}
Lock the specified flows for the project .
21,925
/**
 * Builds an AzkabanDataSource from configuration properties. Supports the "mysql" and "h2"
 * database types; any other type yields null.
 */
public static AzkabanDataSource getDataSource(final Props props) {
  final String databaseType = props.getString("database.type");
  AzkabanDataSource dataSource = null;
  switch (databaseType) {
    case "mysql":
      final int port = props.getInt("mysql.port");
      final String host = props.getString("mysql.host");
      final String database = props.getString("mysql.database");
      final String user = props.getString("mysql.user");
      final String password = props.getString("mysql.password");
      final int numConnections = props.getInt("mysql.numconnections");
      dataSource = getMySQLDataSource(host, port, database, user, password, numConnections);
      break;
    case "h2":
      final Path h2DbPath = Paths.get(props.getString("h2.path")).toAbsolutePath();
      logger.info("h2 DB path: " + h2DbPath);
      dataSource = getH2DataSource(h2DbPath);
      break;
    default:
      break;
  }
  return dataSource;
}
Create Datasource from parameters in the properties
21,926
/**
 * Creates a MySQL-backed AzkabanDataSource with the given connection settings.
 */
public static AzkabanDataSource getMySQLDataSource(final String host, final Integer port,
    final String dbName, final String user, final String password,
    final Integer numConnections) {
  final AzkabanDataSource source =
      new MySQLBasicDataSource(host, port, dbName, user, password, numConnections);
  return source;
}
Create a MySQL DataSource
21,927
/**
 * If pipelining is configured, blocks this job until the upstream (watched) execution's
 * corresponding jobs have finished.
 *
 * @return true if this job was killed (before or while waiting), false otherwise
 */
private boolean blockOnPipeLine() {
  if (this.isKilled()) {
    return true;
  }
  if (!this.pipelineJobs.isEmpty()) {
    // Use a StringBuilder rather than repeated String concatenation inside the loop.
    final StringBuilder blockedList = new StringBuilder();
    final ArrayList<BlockingStatus> blockingStatus = new ArrayList<>();
    for (final String waitingJobId : this.pipelineJobs) {
      final Status status = this.watcher.peekStatus(waitingJobId);
      if (status != null && !Status.isStatusFinished(status)) {
        blockingStatus.add(this.watcher.getBlockingStatus(waitingJobId));
        blockedList.append(waitingJobId).append(',');
      }
    }
    if (!blockingStatus.isEmpty()) {
      this.logger.info("Pipeline job " + this.jobId + " waiting on " + blockedList
          + " in execution " + this.watcher.getExecId());
      for (final BlockingStatus bStatus : blockingStatus) {
        this.logger.info("Waiting on pipelined job " + bStatus.getJobId());
        this.currentBlockStatus = bStatus;
        bStatus.blockOnFinishedStatus();
        if (this.isKilled()) {
          this.logger.info("Job was killed while waiting on pipeline. Quiting.");
          return true;
        }
        this.logger.info("Pipelined job " + bStatus.getJobId() + " finished.");
      }
    }
  }
  this.currentBlockStatus = null;
  return false;
}
If pipelining is set, will block on another flow's jobs.
21,928
/**
 * Builds the project permissions page URL from the configured web server base URL.
 *
 * @return the URL, or null when the base URL property is not set
 */
private String getProjectPermissionsURL() {
  final String baseURL = this.azkabanProps.get(AZKABAN_WEBSERVER_URL);
  if (baseURL == null) {
    return null;
  }
  final String projectName = this.node.getParentFlow().getProjectName();
  return String.format("%s/manager?project=%s&permissions", baseURL, projectName);
}
Get project permissions page URL
21,929
/**
 * Prepends '-Dazkaban.flowid', '-Dazkaban.execid' and '-Dazkaban.jobid' to the job's JVM
 * args so a running Java process can be mapped back to its flow execution and job.
 */
private void insertJVMAargs() {
  final String flowName = this.node.getParentFlow().getFlowId();
  final String jobId = this.node.getId();
  final StringBuilder jvmArgs = new StringBuilder(String.format(
      "'-Dazkaban.flowid=%s' '-Dazkaban.execid=%s' '-Dazkaban.jobid=%s'",
      flowName, this.executionId, jobId));
  final String previousJVMArgs = this.props.get(JavaProcessJob.JVM_PARAMS);
  if (previousJVMArgs != null) {
    jvmArgs.append(' ').append(previousJVMArgs);
  }
  this.logger.info("job JVM args: " + jvmArgs);
  this.props.put(JavaProcessJob.JVM_PARAMS, jvmArgs.toString());
}
Add useful JVM arguments so it is easier to map a running Java process to a flow execution id and job
21,930
/**
 * Adds relevant links (execution, job execution, attempt, workflow and job manager pages) to
 * the job properties so downstream consumers can discover what execution initiated them.
 * When the web server base URL property is unset, only logs a notice and skips the links.
 * Always records the node's in/out edges under OUT_NODES and IN_NODES.
 */
private void insertJobMetadata ( ) { final String baseURL = this . azkabanProps . get ( AZKABAN_WEBSERVER_URL ) ; if ( baseURL != null ) { final String flowName = this . node . getParentFlow ( ) . getFlowId ( ) ; final String projectName = this . node . getParentFlow ( ) . getProjectName ( ) ; this . props . put ( CommonJobProperties . AZKABAN_URL , baseURL ) ; this . props . put ( CommonJobProperties . EXECUTION_LINK , String . format ( "%s/executor?execid=%d" , baseURL , this . executionId ) ) ; this . props . put ( CommonJobProperties . JOBEXEC_LINK , String . format ( "%s/executor?execid=%d&job=%s" , baseURL , this . executionId , this . node . getNestedId ( ) ) ) ; this . props . put ( CommonJobProperties . ATTEMPT_LINK , String . format ( "%s/executor?execid=%d&job=%s&attempt=%d" , baseURL , this . executionId , this . node . getNestedId ( ) , this . node . getAttempt ( ) ) ) ; this . props . put ( CommonJobProperties . WORKFLOW_LINK , String . format ( "%s/manager?project=%s&flow=%s" , baseURL , projectName , flowName ) ) ; this . props . put ( CommonJobProperties . JOB_LINK , String . format ( "%s/manager?project=%s&flow=%s&job=%s" , baseURL , projectName , flowName , this . jobId ) ) ; } else { if ( this . logger != null ) { this . logger . info ( AZKABAN_WEBSERVER_URL + " property was not set" ) ; } } this . props . put ( CommonJobProperties . OUT_NODES , StringUtils . join2 ( this . node . getOutNodes ( ) , "," ) ) ; this . props . put ( CommonJobProperties . IN_NODES , StringUtils . join2 ( this . node . getInNodes ( ) , "," ) ) ; }
Add relevant links to the job properties so that downstream consumers may know what executions initiated their execution .
21,931
/**
 * Rebuilds the project's user permissions IN MEMORY from the configured access lists; does
 * NOT persist the project to the database. Assignments are applied in increasing privilege
 * order (viewer, executor, owner), so a user appearing on several lists keeps the highest
 * one, and the reportal user is always granted admin.
 */
public void updatePermissions() {
  final String[] accessViewerList = this.accessViewer.trim().split(ACCESS_LIST_SPLIT_REGEX);
  final String[] accessExecutorList = this.accessExecutor.trim().split(ACCESS_LIST_SPLIT_REGEX);
  final String[] accessOwnerList = this.accessOwner.trim().split(ACCESS_LIST_SPLIT_REGEX);

  final Permission admin = new Permission();
  admin.addPermission(Type.READ);
  admin.addPermission(Type.EXECUTE);
  admin.addPermission(Type.ADMIN);

  final Permission executor = new Permission();
  executor.addPermission(Type.READ);
  executor.addPermission(Type.EXECUTE);

  final Permission viewer = new Permission();
  viewer.addPermission(Type.READ);

  this.project.clearUserPermission();
  setPermissionForUsers(accessViewerList, viewer);
  setPermissionForUsers(accessExecutorList, executor);
  setPermissionForUsers(accessOwnerList, admin);
  this.project.setUserPermission(this.reportalUser, admin);
}

/** Grants {@code permission} to every non-blank, trimmed user name in {@code users}. */
private void setPermissionForUsers(final String[] users, final Permission permission) {
  for (final String rawUser : users) {
    final String user = rawUser.trim();
    if (!user.isEmpty()) {
      this.project.setUserPermission(user, permission);
    }
  }
}
Updates the project permissions in MEMORY but does NOT update the project in the database .
21,932
/**
 * Streams a window of the flow's execution log (given by "offset" and "length" request
 * params) into the JSON response, after checking the user's READ permission on the owning
 * project. Records the fetch latency into web metrics. Uses plain text streaming to reduce
 * memory overhead.
 */
private void ajaxFetchExecFlowLogs ( final HttpServletRequest req , final HttpServletResponse resp , final HashMap < String , Object > ret , final User user , final ExecutableFlow exFlow ) throws ServletException { final long startMs = System . currentTimeMillis ( ) ; final Project project = getProjectAjaxByPermission ( ret , exFlow . getProjectId ( ) , user , Type . READ ) ; if ( project == null ) { return ; } final int offset = this . getIntParam ( req , "offset" ) ; final int length = this . getIntParam ( req , "length" ) ; resp . setCharacterEncoding ( "utf-8" ) ; try { final LogData data = this . executorManagerAdapter . getExecutableFlowLog ( exFlow , offset , length ) ; ret . putAll ( appendLogData ( data , offset ) ) ; } catch ( final ExecutorManagerException e ) { throw new ServletException ( e ) ; } this . webMetrics . setFetchLogLatency ( System . currentTimeMillis ( ) - startMs ) ; }
Gets the logs through plain text stream to reduce memory overhead .
21,933
/**
 * Streams a window of a single job's execution log (given by "jobId", "offset", "length" and
 * optional "attempt" request params) into the JSON response, after checking the user's READ
 * permission on the owning project. Reports an error when the job id does not resolve to a
 * node of the execution. Uses plain text streaming to reduce memory overhead.
 */
private void ajaxFetchJobLogs ( final HttpServletRequest req , final HttpServletResponse resp , final HashMap < String , Object > ret , final User user , final ExecutableFlow exFlow ) throws ServletException { final Project project = getProjectAjaxByPermission ( ret , exFlow . getProjectId ( ) , user , Type . READ ) ; if ( project == null ) { return ; } final int offset = this . getIntParam ( req , "offset" ) ; final int length = this . getIntParam ( req , "length" ) ; final String jobId = this . getParam ( req , "jobId" ) ; resp . setCharacterEncoding ( "utf-8" ) ; try { final ExecutableNode node = exFlow . getExecutableNodePath ( jobId ) ; if ( node == null ) { ret . put ( "error" , "Job " + jobId + " doesn't exist in " + exFlow . getExecutionId ( ) ) ; return ; } final int attempt = this . getIntParam ( req , "attempt" , node . getAttempt ( ) ) ; final LogData data = this . executorManagerAdapter . getExecutionJobLog ( exFlow , jobId , offset , length , attempt ) ; ret . putAll ( appendLogData ( data , offset ) ) ; } catch ( final ExecutorManagerException e ) { throw new ServletException ( e ) ; } }
Gets the logs through ajax plain text stream to reduce memory overhead .
21,934
/**
 * Maps an information message to its severity by prefix: "ERROR" -> ERROR, "WARN" -> WARN,
 * anything else -> PASS.
 */
public static ValidationStatus getInfoMsgLevel(final String msg) {
  if (msg.startsWith("ERROR")) {
    return ValidationStatus.ERROR;
  }
  return msg.startsWith("WARN") ? ValidationStatus.WARN : ValidationStatus.PASS;
}
Return the severity level this information message is associated with .
21,935
/**
 * Strips the severity prefix ("ERROR" or "WARN") from an information message, returning the
 * raw message; messages without a recognized prefix are returned unchanged.
 */
public static String getInfoMsg(final String msg) {
  for (final String prefix : new String[] {"ERROR", "WARN"}) {
    if (msg.startsWith(prefix)) {
      return msg.replaceFirst(prefix, "");
    }
  }
  return msg;
}
Get the raw information message .
21,936
/**
 * Attempts to delete a file or directory, returning whether the deletion succeeded instead
 * of propagating failures (which are logged at WARN).
 */
public static boolean tryDeleteFileOrDirectory(File file) {
  boolean deleted;
  try {
    deleteFileOrDirectory(file);
    deleted = true;
  } catch (Exception e) {
    logger.warn("Failed to delete file. file = " + file.getAbsolutePath(), e);
    deleted = false;
  }
  return deleted;
}
Try to delete File or Directory
21,937
/**
 * Retrieves a parameter from the HTTP servlet request, falling back to {@code defaultVal}
 * when absent. Thin delegation to HttpRequestUtils.
 */
public String getParam(final HttpServletRequest request, final String name,
    final String defaultVal) {
  final String value = HttpRequestUtils.getParam(request, name, defaultVal);
  return value;
}
Retrieves the param from the http servlet request .
21,938
/**
 * Stores a value under {@code key} in the request's HTTP session, creating the session if it
 * does not exist yet.
 */
protected void setSessionValue(final HttpServletRequest request, final String key,
    final Object value) {
  request.getSession(true).setAttribute(key, value);
}
Sets a session value on the request.
21,939
/**
 * Appends a value to the list stored under {@code key} in the request's session, creating
 * the list (and the session) if either does not yet exist.
 */
protected void addSessionValue(final HttpServletRequest request, final String key,
    final Object value) {
  // Parameterize the previously raw List to eliminate unchecked-assignment warnings;
  // runtime behavior is unchanged (erasure).
  @SuppressWarnings("unchecked")
  List<Object> values = (List<Object>) request.getSession(true).getAttribute(key);
  if (values == null) {
    values = new ArrayList<>();
  }
  values.add(value);
  request.getSession(true).setAttribute(key, values);
}
Adds a session value to the request
21,940
/**
 * Stores an error message in the azkaban.failure.message cookie (path "/") so the web
 * client's javascript can display it.
 */
protected void setErrorMessageInCookie(final HttpServletResponse response,
    final String errorMsg) {
  final Cookie errorCookie = new Cookie(AZKABAN_FAILURE_MESSAGE, errorMsg);
  errorCookie.setPath("/");
  response.addCookie(errorCookie);
}
Sets an error message in azkaban . failure . message in the cookie . This will be used by the web client javascript to somehow display the message
21,941
/**
 * Stores a warning message in the azkaban.warn.message cookie (path "/") so the web client's
 * javascript can display it.
 */
protected void setWarnMessageInCookie(final HttpServletResponse response,
    final String errorMsg) {
  final Cookie warnCookie = new Cookie(AZKABAN_WARN_MESSAGE, errorMsg);
  warnCookie.setPath("/");
  response.addCookie(warnCookie);
}
Sets a warning message in azkaban . warn . message in the cookie . This will be used by the web client javascript to somehow display the message
21,942
/**
 * Stores a success message in the azkaban.success.message cookie (path "/") so the web
 * client's javascript can display it.
 */
protected void setSuccessMessageInCookie(final HttpServletResponse response,
    final String message) {
  final Cookie successCookie = new Cookie(AZKABAN_SUCCESS_MESSAGE, message);
  successCookie.setPath("/");
  response.addCookie(successCookie);
}
Sets a message in azkaban . success . message in the cookie . This will be used by the web client javascript to somehow display the message
21,943
/**
 * Reads the success message from the azkaban.success.message cookie.
 *
 * @return the cookie value, or null when the cookie is absent
 */
protected String getSuccessMessageFromCookie(final HttpServletRequest request) {
  final Cookie cookie = getCookieByName(request, AZKABAN_SUCCESS_MESSAGE);
  return cookie == null ? null : cookie.getValue();
}
Retrieves a success message from a cookie . azkaban . success . message
21,944
/**
 * Reads the warning message from the azkaban.warn.message cookie.
 *
 * @return the cookie value, or null when the cookie is absent
 */
protected String getWarnMessageFromCookie(final HttpServletRequest request) {
  final Cookie cookie = getCookieByName(request, AZKABAN_WARN_MESSAGE);
  return cookie == null ? null : cookie.getValue();
}
Retrieves a warn message from a cookie . azkaban . warn . message
21,945
/**
 * Reads the {@code azkaban.failure.message} cookie value, or returns null
 * when the cookie is absent.
 */
protected String getErrorMessageFromCookie(final HttpServletRequest request) {
  final Cookie messageCookie = getCookieByName(request, AZKABAN_FAILURE_MESSAGE);
  return messageCookie == null ? null : messageCookie.getValue();
}
Retrieves a failure message from a cookie . azkaban . failure . message
21,946
/**
 * Creates a new Velocity page for the given template, populated with the
 * common template variables and bound to the user session.
 */
protected Page newPage(final HttpServletRequest req, final HttpServletResponse resp,
    final Session session, final String template) {
  final Page page = new Page(req, resp, getApplication().getVelocityEngine(), template);
  // Branding / common variables shared by every page.
  page.add("version", jarVersion);
  page.add("azkaban_name", this.name);
  page.add("azkaban_label", this.label);
  page.add("azkaban_color", this.color);
  // Site-wide announcement banner values.
  page.add("note_type", NoteServlet.type);
  page.add("note_message", NoteServlet.message);
  page.add("note_url", NoteServlet.url);
  page.add("timezone", TimeZone.getDefault().getID());
  page.add("currentTime", (new DateTime()).getMillis());
  page.add("size", getDisplayExecutionPageSize());
  // Expose utility classes so templates can call their static methods.
  page.add("System", System.class);
  page.add("TimeUtils", TimeUtils.class);
  page.add("WebUtils", WebUtils.class);
  if (session != null && session.getUser() != null) {
    page.add("user_id", session.getUser().getUserId());
  }
  // One-shot flash messages: read each message cookie, then clear it so it is
  // shown only once. Templates expect the literal string "null" when empty.
  final String errorMsg = getErrorMessageFromCookie(req);
  page.add("error_message", errorMsg == null || errorMsg.isEmpty() ? "null" : errorMsg);
  setErrorMessageInCookie(resp, null);
  final String warnMsg = getWarnMessageFromCookie(req);
  page.add("warn_message", warnMsg == null || warnMsg.isEmpty() ? "null" : warnMsg);
  setWarnMessageInCookie(resp, null);
  final String successMsg = getSuccessMessageFromCookie(req);
  page.add("success_message", successMsg == null || successMsg.isEmpty() ? "null" : successMsg);
  setSuccessMessageInCookie(resp, null);
  // Optional plugin lists are only exposed when non-empty.
  if (this.viewerPlugins != null && !this.viewerPlugins.isEmpty()) {
    page.add("viewers", this.viewerPlugins);
  }
  if (this.triggerPlugins != null && !this.triggerPlugins.isEmpty()) {
    page.add("triggerPlugins", this.triggerPlugins);
  }
  return page;
}
Creates a new velocity page to use . With session .
21,947
/**
 * Creates a new Velocity page for the given template without a session.
 * Unlike the session-aware overload, this exposes the first viewer plugin's
 * name/path and uses the "triggers" key (not "triggerPlugins").
 */
protected Page newPage(final HttpServletRequest req, final HttpServletResponse resp,
    final String template) {
  final Page page = new Page(req, resp, getApplication().getVelocityEngine(), template);
  // Branding / common variables shared by every page.
  page.add("version", jarVersion);
  page.add("azkaban_name", this.name);
  page.add("azkaban_label", this.label);
  page.add("azkaban_color", this.color);
  // Site-wide announcement banner values.
  page.add("note_type", NoteServlet.type);
  page.add("note_message", NoteServlet.message);
  page.add("note_url", NoteServlet.url);
  page.add("timezone", TimeZone.getDefault().getID());
  page.add("currentTime", (new DateTime()).getMillis());
  page.add("size", getDisplayExecutionPageSize());
  if (this.viewerPlugins != null && !this.viewerPlugins.isEmpty()) {
    page.add("viewers", this.viewerPlugins);
    // Only the first viewer plugin's name/path is surfaced here.
    final ViewerPlugin plugin = this.viewerPlugins.get(0);
    page.add("viewerName", plugin.getPluginName());
    page.add("viewerPath", plugin.getPluginPath());
  }
  if (this.triggerPlugins != null && !this.triggerPlugins.isEmpty()) {
    page.add("triggers", this.triggerPlugins);
  }
  return page;
}
Creates a new velocity page to use .
21,948
/**
 * Serializes {@code obj} as JSON to the response by delegating to the
 * three-argument overload, passing false for the boolean flag (presumably
 * disables pretty-printing -- confirm against the overload's definition).
 *
 * @throws IOException if writing to the response fails
 */
protected void writeJSON(final HttpServletResponse resp, final Object obj) throws IOException {
  writeJSON(resp, obj, false);
}
Writes json out to the stream .
21,949
/**
 * Scans command-line arguments for {@code -hiveconf name=value} pairs and
 * applies each one to the given HiveConf.
 *
 * @param hiveConf configuration to populate; unchanged when args is null
 * @param args     raw command-line arguments, possibly null
 */
private static void populateHiveConf(HiveConf hiveConf, String[] args) {
  if (args == null) {
    return;
  }
  for (int index = 0; index < args.length; index++) {
    if (!"-hiveconf".equals(args[index])) {
      continue;
    }
    // Fix: guard against "-hiveconf" being the final argument, which
    // previously threw ArrayIndexOutOfBoundsException on args[++index].
    if (index + 1 >= args.length) {
      logger.warn("-hiveconf flag is missing its name=value argument");
      break;
    }
    String hiveConfParam = stripSingleDoubleQuote(args[++index]);
    // Fix: split on the first '=' only, so values containing '=' survive.
    String[] tokens = hiveConfParam.split("=", 2);
    if (tokens.length == 2) {
      String name = tokens[0];
      String value = tokens[1];
      logger.info("Setting: " + name + "=" + value + " to hiveConf");
      hiveConf.set(name, value);
    } else {
      logger.warn("Invalid hiveconf: " + hiveConfParam);
    }
  }
}
Extract hiveconf from command line arguments and populate them into HiveConf
21,950
/**
 * Removes one leading and one trailing quote character (single or double)
 * from the input, if present. Null or empty input is returned unchanged.
 *
 * @param input possibly quoted string, possibly null
 * @return the input with at most one quote stripped from each end
 */
private static String stripSingleDoubleQuote(String input) {
  // Plain null/empty check; removes the commons-lang StringUtils dependency
  // (StringUtils.isEmpty is equivalent to this test).
  if (input == null || input.isEmpty()) {
    return input;
  }
  if (input.startsWith(SINGLE_QUOTE_STRING) || input.startsWith(DOUBLE_QUOTE_STRING)) {
    input = input.substring(1);
  }
  if (input.endsWith(SINGLE_QUOTE_STRING) || input.endsWith(DOUBLE_QUOTE_STRING)) {
    input = input.substring(0, input.length() - 1);
  }
  return input;
}
Strip single quote or double quote at either end of the string
21,951
/**
 * (Re)loads the set of active executors from the DB and swaps it in. Can be
 * called again to pick up DB changes.
 *
 * @throws ExecutorManagerException when no active executor is found
 */
public void setupExecutors() throws ExecutorManagerException {
  final ImmutableSet<Executor> refreshed = loadExecutors();
  if (refreshed.isEmpty()) {
    final String error = "No active executors found";
    logger.error(error);
    throw new ExecutorManagerException(error);
  }
  this.activeExecutors = refreshed;
}
Loads executors . Can be also used to reload executors if there have been changes in the DB .
21,952
/**
 * Quick check for whether the props contain the first job-callback URL
 * property for the given status.
 *
 * @throws NullPointerException if either argument is null
 */
public static boolean isThereJobCallbackProperty(final Props props,
    final JobCallbackStatusEnum status) {
  if (props == null || status == null) {
    throw new NullPointerException("One of the argument is null");
  }
  // Inline the lookup of the first callback-URL key for this status.
  return props.containsKey(firstJobcallbackPropertyMap.get(status));
}
Use to quickly determine if there is a job callback related property in the Props .
21,953
/**
 * Parses the job-callback URL properties for the given status into a list of
 * HttpRequestBase instances ready for execution. Callback properties are
 * numbered 1..maxNumCallback; parsing stops at the first missing sequence.
 * Supported token placeholders in URLs/bodies are replaced from contextInfo.
 *
 * @param props         job properties holding the callback configuration
 * @param status        callback status whose properties are read
 * @param contextInfo   token-to-value map (see buildJobContextInfoMap)
 * @param maxNumCallback maximum number of callback sequences to read
 * @param privateLogger job-private logger for diagnostics
 * @return requests to execute; empty list when no callback is configured
 */
public static List<HttpRequestBase> parseJobCallbackProperties(final Props props,
    final JobCallbackStatusEnum status, final Map<String, String> contextInfo,
    final int maxNumCallback, final Logger privateLogger) {
  String callbackUrl = null;
  if (!isThereJobCallbackProperty(props, status)) {
    return Collections.emptyList();
  }
  final List<HttpRequestBase> result = new ArrayList<>();
  // Resolve the per-status property key templates once.
  final String jobCallBackUrlKey = replaceStatusToken(JOB_CALLBACK_URL_TEMPLATE, status);
  final String requestMethod = replaceStatusToken(JOB_CALLBACK_REQUEST_METHOD_TEMPLATE, status);
  final String httpBodyKey = replaceStatusToken(JOB_CALLBACK_BODY_TEMPLATE, status);
  final String headersKey = replaceStatusToken(JOB_CALLBACK_REQUEST_HEADERS_TEMPLATE, status);
  for (int sequence = 1; sequence <= maxNumCallback; sequence++) {
    HttpRequestBase httpRequest = null;
    final String sequenceStr = Integer.toString(sequence);
    final String callbackUrlKey = jobCallBackUrlKey.replace(SEQUENCE_TOKEN, sequenceStr);
    callbackUrl = props.get(callbackUrlKey);
    if (callbackUrl == null || callbackUrl.length() == 0) {
      // Sequences are expected to be contiguous; stop at the first gap.
      break;
    }
    // URL tokens are query-encoded; body tokens (below) are not.
    final String callbackUrlWithTokenReplaced = replaceTokens(callbackUrl, contextInfo, true);
    final String requestMethodKey = requestMethod.replace(SEQUENCE_TOKEN, sequenceStr);
    final String method = props.getString(requestMethodKey, HTTP_GET);
    if (HTTP_POST.equals(method)) {
      final String postBodyKey = httpBodyKey.replace(SEQUENCE_TOKEN, sequenceStr);
      final String httpBodyValue = props.get(postBodyKey);
      if (httpBodyValue == null) {
        privateLogger.warn("Missing value for key: " + postBodyKey
            + " skipping job callback '" + callbackUrl + " for job "
            + contextInfo.get(CONTEXT_JOB_TOKEN));
      } else {
        final HttpPost httpPost = new HttpPost(callbackUrlWithTokenReplaced);
        final String postActualBody = replaceTokens(httpBodyValue, contextInfo, false);
        privateLogger.info("postActualBody: " + postActualBody);
        httpPost.setEntity(createStringEntity(postActualBody));
        httpRequest = httpPost;
      }
    } else if (HTTP_GET.equals(method)) {
      httpRequest = new HttpGet(callbackUrlWithTokenReplaced);
    } else {
      privateLogger.warn("Unsupported request method: " + method
          + ". Only POST and GET are supported");
    }
    // Fix: httpRequest can still be null here (unsupported method, or POST
    // with a missing body). Previously this NPE'd on setHeaders when headers
    // were configured, and added null entries to the result list otherwise.
    if (httpRequest == null) {
      continue;
    }
    final String headersKeyPerSequence = headersKey.replace(SEQUENCE_TOKEN, sequenceStr);
    final String headersValue = props.get(headersKeyPerSequence);
    privateLogger.info("headers: " + headersValue);
    final Header[] headers = parseHttpHeaders(headersValue);
    if (headers != null) {
      httpRequest.setHeaders(headers);
      privateLogger.info("# of headers found: " + headers.length);
    }
    result.add(httpRequest);
  }
  return result;
}
This method is responsible for parsing job call URL properties and convert them into a list of HttpRequestBase which callers can use to execute .
21,954
/**
 * Builds the token-to-value context map for a job event, used to expand
 * callback URL/body placeholders.
 *
 * @throws IllegalArgumentException when the event's runner is not a JobRunner
 */
public static Map<String, String> buildJobContextInfoMap(final Event event, final String server) {
  // Guard clause: only job events carry a JobRunner.
  if (!(event.getRunner() instanceof JobRunner)) {
    throw new IllegalArgumentException("Provided event is not a job event");
  }
  final JobRunner jobRunner = (JobRunner) event.getRunner();
  final ExecutableNode node = jobRunner.getNode();
  final EventData eventData = event.getData();
  final Map<String, String> contextMap = new HashMap<>();
  contextMap.put(CONTEXT_SERVER_TOKEN, server);
  contextMap.put(CONTEXT_PROJECT_TOKEN, node.getParentFlow().getProjectName());
  contextMap.put(CONTEXT_FLOW_TOKEN, node.getParentFlow().getFlowId());
  contextMap.put(CONTEXT_EXECUTION_ID_TOKEN,
      String.valueOf(node.getParentFlow().getExecutionId()));
  contextMap.put(CONTEXT_JOB_TOKEN, node.getId());
  contextMap.put(CONTEXT_JOB_STATUS_TOKEN, eventData.getStatus().name().toLowerCase());
  return contextMap;
}
This method takes the job context info . and put the values into a map with keys as the tokens .
21,955
/**
 * Replaces the first occurrence of each supported context token in the value
 * with its (optionally query-encoded) value from contextInfo.
 *
 * @param value        string possibly containing token placeholders
 * @param contextInfo  token-to-value map
 * @param withEncoding whether values are URL-query encoded before insertion
 */
public static String replaceTokens(final String value, final Map<String, String> contextInfo,
    final boolean withEncoding) {
  // Same fixed replacement order as the original chain of replaceFirst calls.
  final String[] supportedTokens = {CONTEXT_SERVER_TOKEN, CONTEXT_PROJECT_TOKEN,
      CONTEXT_FLOW_TOKEN, CONTEXT_JOB_TOKEN, CONTEXT_EXECUTION_ID_TOKEN,
      CONTEXT_JOB_STATUS_TOKEN};
  String result = value;
  for (final String token : supportedTokens) {
    final String tokenValue = encodeQueryParam(contextInfo.get(token), withEncoding);
    result = result.replaceFirst(Pattern.quote(token), tokenValue);
  }
  return result;
}
Replace the supported tokens in the URL with values in the contextInfo . This will also make sure the values are HTTP encoded .
21,956
/**
 * Uploads a project archive into Azkaban storage and, for non-database
 * storage backends, records the version metadata (MD5 hash plus the storage
 * resource id) in the project DB.
 */
public void uploadProject(final Project project, final int version, final File localFile,
    final User uploader) {
  byte[] md5 = null;
  // DatabaseStorage persists the archive itself, so no separate hash or
  // metadata bookkeeping is done for it.
  if (!(this.storage instanceof DatabaseStorage)) {
    md5 = computeHash(localFile);
  }
  final StorageMetadata metadata = new StorageMetadata(project.getId(), version,
      uploader.getUserId(), md5);
  log.info(String.format("Adding archive to storage. Meta:%s File: %s[%d bytes]",
      metadata, localFile.getName(), localFile.length()));
  final String resourceId = this.storage.put(metadata, localFile);
  if (!(this.storage instanceof DatabaseStorage)) {
    // resourceId and md5 must both be present for non-DB storage.
    this.projectLoader.addProjectVersion(project.getId(), version, localFile,
        uploader.getUserId(), requireNonNull(md5), requireNonNull(resourceId));
    log.info(String.format("Added project metadata to DB. Meta:%s File: %s[%d bytes] URI: %s",
        metadata, localFile.getName(), localFile.length(), resourceId));
  }
}
API to upload a project file into Azkaban Storage
21,957
/**
 * Fetches a project archive from storage. For DatabaseStorage the handler is
 * returned directly; otherwise the blob is downloaded to a temp file, its
 * checksum validated, and the file attached to the returned handler.
 *
 * @throws StorageException on any I/O failure while downloading
 */
public ProjectFileHandler getProjectFile(final int projectId, final int version) {
  log.info(String.format("Fetching project file. project ID: %d version: %d",
      projectId, version));
  if (this.storage instanceof DatabaseStorage) {
    return ((DatabaseStorage) this.storage).get(projectId, version);
  }
  final ProjectFileHandler pfh = this.projectLoader.fetchProjectMetaData(projectId, version);
  // The resource id recorded at upload time locates the blob in storage.
  final String resourceId = requireNonNull(pfh.getResourceId(),
      String.format("URI is null. project ID: %d version: %d",
          pfh.getProjectId(), pfh.getVersion()));
  try (InputStream is = this.storage.get(resourceId)) {
    final File file = createTempOutputFile(pfh);
    try (FileOutputStream fos = new FileOutputStream(file)) {
      IOUtils.copy(is, fos);
    }
    // Guard against corruption in transit/storage before handing the file out.
    validateChecksum(file, pfh);
    pfh.setLocalFile(file);
    return pfh;
  } catch (final IOException e) {
    throw new StorageException(e);
  }
}
Fetch project file from storage .
21,958
/**
 * Reads config from the private property file. Returns an empty map when the
 * file is missing or unreadable -- best-effort by design.
 */
private Map<String, String> readPrivateConfig(final File privateConfigFile) {
  try {
    return readConfig(privateConfigFile);
  } catch (final Exception ignored) {
    // Missing/unparsable private config is not fatal; fall back to empty.
    return new HashMap<>();
  }
}
read config from private property file if the file is not present then return empty .
21,959
/**
 * Prints the message to stderr and terminates the JVM with the given exit
 * code. Never returns.
 */
public static void croak(final String message, final int exitCode) {
  System.err.println(message);
  System.exit(exitCode);
}
Print the message and then exit with the given exit code
21,960
/**
 * Constructs an instance of cls by delegating to the overload that takes an
 * explicit parameter-type array, using the runtime classes of the arguments.
 * Note: arguments must be non-null (getTypes calls getClass on each).
 */
public static Object callConstructor(final Class<?> cls, final Object... args) {
  return callConstructor(cls, getTypes(args), args);
}
Construct a class object with the given arguments
21,961
/**
 * Maps each argument to its runtime class. Assumes all arguments are
 * non-null.
 */
private static Class<?>[] getTypes(final Object... args) {
  final Class<?>[] types = new Class<?>[args.length];
  int position = 0;
  for (final Object arg : args) {
    types[position++] = arg.getClass();
  }
  return types;
}
Get the Class of all the objects
21,962
/**
 * Runs the given command and returns the lines it wrote to stdout.
 *
 * @param commands the command and its arguments
 * @return stdout lines, in order
 * @throws InterruptedException if interrupted while waiting for the process
 * @throws IOException          if the process cannot be started or read
 */
public static ArrayList<String> runProcess(String... commands)
    throws InterruptedException, IOException {
  final java.lang.ProcessBuilder processBuilder = new java.lang.ProcessBuilder(commands);
  final ArrayList<String> output = new ArrayList<>();
  final Process process = processBuilder.start();
  // Fix: drain stdout BEFORE waiting for exit. Waiting first can deadlock
  // when the child fills its output pipe buffer. try-with-resources also
  // closes the reader (and hence the stream) on all paths.
  try (java.io.BufferedReader reader = new java.io.BufferedReader(
      new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
    String line;
    while ((line = reader.readLine()) != null) {
      output.add(line);
    }
  }
  process.waitFor();
  return output;
}
Run a sequence of commands
21,963
/**
 * Resolves each source path against rootPath (when relative) and appends the
 * resulting absolute path to destinationPaths, skipping duplicates. A null
 * sourcePaths is a no-op.
 */
public static void mergeTypeClassPaths(List<String> destinationPaths,
    final List<String> sourcePaths, final String rootPath) {
  if (sourcePaths == null) {
    return;
  }
  for (final String jar : sourcePaths) {
    File candidate = new File(jar);
    if (!candidate.isAbsolute()) {
      // Relative entries are resolved under the plugin root directory.
      candidate = new File(rootPath + File.separatorChar + jar);
    }
    final String absolutePath = candidate.getAbsolutePath();
    if (!destinationPaths.contains(absolutePath)) {
      destinationPaths.add(absolutePath);
    }
  }
}
Merge the absolute paths of source paths into the list of destination paths
21,964
/**
 * Appends each element of sourceList to destinationList unless already
 * present; a null sourceList is a no-op.
 */
public static void mergeStringList(final List<String> destinationList,
    final List<String> sourceList) {
  if (sourceList == null) {
    return;
  }
  for (final String element : sourceList) {
    if (!destinationList.contains(element)) {
      destinationList.add(element);
    }
  }
}
Merge elements in Source List into the Destination List
21,965
/**
 * Expands an aux-jars directory path into the URI list of files it contains
 * (the work the hive shell script normally does). Heuristic: a value already
 * containing ".jar" is assumed to be expanded and is returned unchanged.
 */
static String expandHiveAuxJarsPath(String original) throws IOException {
  if (original == null || original.contains(".jar")) {
    return original;
  }
  final File[] auxJarFiles = new File(original).listFiles();
  if (auxJarFiles == null || auxJarFiles.length == 0) {
    LOG.info("No files in to expand in aux jar path. Returning original parameter");
    return original;
  }
  return filesToURIString(auxJarFiles);
}
Normally hive . aux . jars . path is expanded from just being a path to the full list of files in the directory by the hive shell script . Since we normally won t be running from the script it s up to us to do that work here . We use a heuristic that if there is no occurrence of . jar in the original it needs expansion . Otherwise it s already been done for us .
21,966
/**
 * Recomputes the final DAG status once every node has reached a terminal
 * state. Returns early (no status change) while any node is still running.
 */
void updateDagStatus() {
  boolean anyFailure = false;
  for (final Node node : this.nodes) {
    final Status nodeStatus = node.getStatus();
    if (!nodeStatus.isTerminal()) {
      // At least one node is still in flight; nothing to conclude yet.
      return;
    }
    anyFailure = anyFailure || nodeStatus == Status.FAILURE;
  }
  updateDagStatusInternal(anyFailure);
}
Update the final dag status when all nodes are done .
21,967
/**
 * Transitions the DAG to its final status. A DAG that was being killed always
 * ends as KILLED, regardless of node failures.
 */
private void updateDagStatusInternal(final boolean failed) {
  final Status finalStatus;
  if (this.status == Status.KILLING) {
    finalStatus = Status.KILLED;
  } else if (failed) {
    finalStatus = Status.FAILURE;
  } else {
    finalStatus = Status.SUCCESS;
  }
  changeStatus(finalStatus);
}
Update the final dag status .
21,968
/**
 * Pulls schedule updates from the loader and applies them locally: expired
 * schedules are retired, all others are (re)registered.
 */
private synchronized void updateLocal() throws ScheduleManagerException {
  for (final Schedule updated : this.loader.loadUpdatedSchedules()) {
    if (updated.getStatus().equals(TriggerStatus.EXPIRED.toString())) {
      onScheduleExpire(updated);
    } else {
      internalSchedule(updated);
    }
  }
}
only do this when using external runner
21,969
/**
 * Looks up the schedule registered for the given project/flow pair,
 * refreshing local state from the loader first.
 *
 * @return the schedule, or null when none is registered
 */
public Schedule getSchedule(final int projectId, final String flowId)
    throws ScheduleManagerException {
  updateLocal();
  final Pair<Integer, String> identity = new Pair<>(projectId, flowId);
  return this.scheduleIdentityPairMap.get(identity);
}
Returns the scheduled flow for the flow name
21,970
/**
 * Removes the schedule from both in-memory maps and the backing store, if
 * present. Loader failures are logged, not propagated.
 */
public synchronized void removeSchedule(final Schedule sched) {
  final Pair<Integer, String> identityPair = sched.getScheduleIdentityPair();
  if (this.scheduleIdentityPairMap.get(identityPair) != null) {
    this.scheduleIdentityPairMap.remove(identityPair);
  }
  this.scheduleIDMap.remove(sched.getScheduleId());
  try {
    this.loader.removeSchedule(sched);
  } catch (final ScheduleManagerException e) {
    logger.error(e);
  }
}
Removes the flow from the schedule if it exists .
21,971
/**
 * Registers the schedule in both in-memory lookup maps (by schedule id and by
 * project/flow identity) without persisting it to the loader.
 */
private synchronized void internalSchedule(final Schedule s) {
  this.scheduleIDMap.put(s.getScheduleId(), s);
  this.scheduleIdentityPairMap.put(s.getScheduleIdentityPair(), s);
}
Schedules the flow but doesn t save the schedule afterwards .
21,972
/**
 * Adds a flow schedule and persists it: new schedules are inserted, existing
 * ones (same project/flow identity) are updated in place reusing the existing
 * schedule id. Schedules whose time has already passed and do not recur are
 * rejected with an error log (per the message below).
 */
public synchronized void insertSchedule(final Schedule s) {
  final Schedule exist = this.scheduleIdentityPairMap.get(s.getScheduleIdentityPair());
  // updateTime() returning false indicates a non-recurring schedule whose
  // time already passed (inferred from the error message below -- confirm
  // against Schedule.updateTime).
  if (s.updateTime()) {
    try {
      if (exist == null) {
        this.loader.insertSchedule(s);
        internalSchedule(s);
      } else {
        // Reuse the persisted id so the existing row is updated, not duplicated.
        s.setScheduleId(exist.getScheduleId());
        this.loader.updateSchedule(s);
        internalSchedule(s);
      }
    } catch (final ScheduleManagerException e) {
      logger.error(e);
    }
  } else {
    logger.error("The provided schedule is non-recurring and the scheduled time already passed. "
        + s.getScheduleName());
  }
}
Adds a flow to the schedule .
21,973
/**
 * Checks the health of every executor that currently has running flows.
 * Flows whose executor no longer exists in the DB are finalized immediately;
 * an executor failing its ping is handed to handleExecutorNotAliveCase, which
 * may alert admins after repeated failures.
 */
public void checkExecutorHealth() {
  // Group flows by executor so each executor is pinged only once.
  final Map<Optional<Executor>, List<ExecutableFlow>> exFlowMap = getFlowToExecutorMap();
  for (final Map.Entry<Optional<Executor>, List<ExecutableFlow>> entry : exFlowMap.entrySet()) {
    final Optional<Executor> executorOption = entry.getKey();
    if (!executorOption.isPresent()) {
      // Executor removed from DB: these flows can never report back.
      final String finalizeReason = "Executor id of this execution doesn't exist.";
      for (final ExecutableFlow flow : entry.getValue()) {
        logger.warn(String.format("Finalizing execution %s, %s",
            flow.getExecutionId(), finalizeReason));
        ExecutionControllerUtils.finalizeFlow(this.executorLoader, this.alerterHolder, flow,
            finalizeReason, null);
      }
      continue;
    }
    final Executor executor = executorOption.get();
    try {
      // Ping the executor; anything but an "alive" status counts as failure.
      final Map<String, Object> results = this.apiGateway.callWithExecutionId(
          executor.getHost(), executor.getPort(), ConnectorParams.PING_ACTION, null, null);
      if (results == null || results.containsKey(ConnectorParams.RESPONSE_ERROR)
          || !results.containsKey(ConnectorParams.STATUS_PARAM)
          || !results.get(ConnectorParams.STATUS_PARAM).equals(ConnectorParams.RESPONSE_ALIVE)) {
        throw new ExecutorManagerException("Status of executor " + executor.getId() + " is "
            + "not alive.");
      } else {
        // Healthy again: reset the consecutive-failure counter.
        if (this.executorFailureCount.containsKey(executor.getId())) {
          this.executorFailureCount.put(executor.getId(), 0);
        }
      }
    } catch (final ExecutorManagerException e) {
      handleExecutorNotAliveCase(entry, executor, e);
    }
  }
}
Checks executor health . Finalizes the flow if its executor is already removed from DB or sends alert emails if the executor isn t alive any more .
21,974
/**
 * Groups running executable flows by the executor they are dispatched to,
 * reducing the number of REST calls needed. Flows without an executor map to
 * an empty Optional key. Returns whatever was accumulated (possibly empty)
 * when the DB fetch fails.
 */
private Map<Optional<Executor>, List<ExecutableFlow>> getFlowToExecutorMap() {
  final HashMap<Optional<Executor>, List<ExecutableFlow>> exFlowMap = new HashMap<>();
  try {
    for (final Pair<ExecutionReference, ExecutableFlow> runningFlow
        : this.executorLoader.fetchActiveFlows().values()) {
      final Optional<Executor> executor = runningFlow.getFirst().getExecutor();
      // computeIfAbsent replaces the manual get/put-if-missing dance.
      exFlowMap.computeIfAbsent(executor, k -> new ArrayList<>())
          .add(runningFlow.getSecond());
    }
  } catch (final ExecutorManagerException e) {
    // Fix: include the exception so the stack trace is not swallowed.
    logger.error("Failed to get flow to executor map", e);
  }
  return exFlowMap;
}
Groups Executable flow by Executors to reduce number of REST calls .
21,975
/**
 * Records one more ping failure for the executor and, once the count reaches
 * a multiple of executorMaxFailureCount, alerts the configured admin emails
 * about all flows running on it.
 */
private void handleExecutorNotAliveCase(
    final Entry<Optional<Executor>, List<ExecutableFlow>> entry, final Executor executor,
    final ExecutorManagerException e) {
  logger.error("Failed to get update from executor " + executor.getId(), e);
  this.executorFailureCount.put(executor.getId(),
      this.executorFailureCount.getOrDefault(executor.getId(), 0) + 1);
  // Alert only every executorMaxFailureCount-th consecutive failure to avoid
  // spamming; skip entirely when no alert emails are configured.
  if (this.executorFailureCount.get(executor.getId()) % this.executorMaxFailureCount == 0
      && !this.alertEmails.isEmpty()) {
    // Route the failure emails for these flows to the admin alert list.
    entry.getValue().stream()
        .forEach(flow -> flow.getExecutionOptions().setFailureEmails(this.alertEmails));
    logger.info(String.format("Executor failure count is %d. Sending alert emails to %s.",
        this.executorFailureCount.get(executor.getId()), this.alertEmails));
    this.alerterHolder.get("email").alertOnFailedUpdate(executor, entry.getValue(), e);
  }
}
Increments executor failure count . If it reaches max failure count sends alert emails to AZ admin .
21,976
/**
 * Returns the executable nodes of a linear flow in execution order, following
 * the single out-edge from the start node. Returns an empty list for a null
 * flow. Assumes the flow is linear (one start node, at most one out-edge).
 */
public static List<ExecutableNode> sortExecutableNodes(final ExecutableFlow flow) {
  final List<ExecutableNode> ordered = new ArrayList<>();
  if (flow == null) {
    return ordered;
  }
  final List<String> startNodeIds = flow.getStartNodes();
  String currentId = startNodeIds.isEmpty() ? null : startNodeIds.get(0);
  while (currentId != null) {
    final ExecutableNode node = flow.getExecutableNode(currentId);
    ordered.add(node);
    final Set<String> outNodes = node.getOutNodes();
    currentId = outNodes.isEmpty() ? null : outNodes.iterator().next();
  }
  return ordered;
}
Returns a list of the executable nodes in the specified flow in execution order . Assumes that the flow is linear .
21,977
/**
 * Returns the runtime variables (those NOT matching the reportal config
 * prefix) for unscheduled execution. Empty list when none are found.
 */
public static List<Variable> getRunTimeVariables(final Collection<Variable> variables) {
  return ReportalUtil.getVariablesByRegex(variables,
      Reportal.REPORTAL_CONFIG_PREFIX_NEGATION_REGEX);
}
Get runtime variables to be set in unscheduled mode of execution . Returns empty list if no runtime variable is found
21,978
/**
 * Returns the variables whose titles match the given regex. Empty list when
 * either argument is null or nothing matches.
 */
public static List<Variable> getVariablesByRegex(final Collection<Variable> variables,
    final String regex) {
  final List<Variable> matches = new ArrayList<>();
  if (variables == null || regex == null) {
    return matches;
  }
  for (final Variable variable : variables) {
    if (variable.getTitle().matches(regex)) {
      matches.add(variable);
    }
  }
  return matches;
}
Shortlist variables which match a given regex . Returns an empty list if no eligible variable is found
21,979
/**
 * Builds a map from variable titles (with {@code prefix} stripped from the
 * front) to variable names. Returns an empty map when either argument is
 * null.
 *
 * NOTE(review): the filter below uses the fixed REPORTAL_CONFIG_PREFIX_REGEX
 * constant rather than a regex derived from the {@code prefix} parameter --
 * presumably intentional for the single known config prefix, but verify
 * against callers before generalizing. Also note replaceFirst treats
 * {@code prefix} as a regex, not a literal.
 */
public static Map<String, String> getVariableMapByPrefix(final Collection<Variable> variables,
    final String prefix) {
  final Map<String, String> shortlistMap = new HashMap<>();
  if (variables != null && prefix != null) {
    for (final Variable var : getVariablesByRegex(variables,
        Reportal.REPORTAL_CONFIG_PREFIX_REGEX)) {
      // Key: title without the prefix; value: the variable's name.
      shortlistMap.put(var.getTitle().replaceFirst(prefix, ""), var.getName());
    }
  }
  return shortlistMap;
}
Shortlist variables which match a given prefix . Returns empty map if no eligible variable is found .
21,980
/**
 * Returns the cached session for the given id, or null when it is absent.
 */
public Session getSession(final String sessionId) {
  return this.cache.getIfPresent(sessionId);
}
Returns the cached session using the session id .
21,981
/**
 * Collects all cached sessions whose IP equals the given one.
 */
public Set<Session> findSessionsByIP(final String ip) {
  final Set<Session> matches = new HashSet<>();
  // Iterate a snapshot of the cache's values; only the sessions are needed.
  for (final Session session : this.cache.asMap().values()) {
    if (session.getIp().equals(ip)) {
      matches.add(session);
    }
  }
  return matches;
}
Returns sessions whose IP equals to the given IP .
21,982
/**
 * Registers the Job implementation class to use for the given jobtype name.
 * Replaces any previously registered class for the same name.
 */
public void addPluginClass(final String jobTypeName,
    final Class<? extends Job> jobTypeClass) {
  this.jobToClass.put(jobTypeName, jobTypeClass);
}
Adds plugin jobtype class
21,983
/**
 * Fetches flows that are not yet in a finished status, including both
 * dispatched and not-yet-dispatched executions.
 *
 * @return map of execution id to (reference, flow) pair
 * @throws ExecutorManagerException if the underlying DB query fails
 */
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchUnfinishedFlows()
    throws ExecutorManagerException {
  try {
    return this.dbOperator.query(FetchActiveExecutableFlows.FETCH_UNFINISHED_EXECUTABLE_FLOWS,
        new FetchActiveExecutableFlows());
  } catch (final SQLException e) {
    throw new ExecutorManagerException("Error fetching unfinished flows", e);
  }
}
Fetch flows that are not in finished status including both dispatched and non - dispatched flows .
21,984
/**
 * Fetches flows that are dispatched to an executor and not yet finished.
 *
 * @return map of execution id to (reference, flow) pair
 * @throws ExecutorManagerException if the underlying DB query fails
 */
Map<Integer, Pair<ExecutionReference, ExecutableFlow>> fetchActiveFlows()
    throws ExecutorManagerException {
  try {
    return this.dbOperator.query(FetchActiveExecutableFlows.FETCH_ACTIVE_EXECUTABLE_FLOWS,
        new FetchActiveExecutableFlows());
  } catch (final SQLException e) {
    throw new ExecutorManagerException("Error fetching active flows", e);
  }
}
Fetch flows that are dispatched and not yet finished .
21,985
/**
 * Fetches the dispatched-but-unfinished flow with the given execution id.
 *
 * @throws ExecutorManagerException if the underlying DB query fails
 */
Pair<ExecutionReference, ExecutableFlow> fetchActiveFlowByExecId(final int execId)
    throws ExecutorManagerException {
  try {
    return this.dbOperator.query(
        FetchActiveExecutableFlow.FETCH_ACTIVE_EXECUTABLE_FLOW_BY_EXEC_ID,
        new FetchActiveExecutableFlow(), execId);
  } catch (final SQLException e) {
    // Fix: original message lacked a space before the id ("...exec id123").
    throw new ExecutorManagerException("Error fetching active flow by exec id " + execId, e);
  }
}
Fetch the flow that is dispatched and not yet finished by execution id .
21,986
/**
 * Cancels the Hadoop tokens stored in tokenFile (best-effort) and deletes the
 * file afterwards. A null tokenFile is a no-op.
 */
public static void cancelHadoopTokens(HadoopSecurityManager hadoopSecurityManager,
    String userToProxy, File tokenFile, Logger log) {
  if (tokenFile == null) {
    return;
  }
  try {
    hadoopSecurityManager.cancelTokens(tokenFile, userToProxy, log);
  } catch (Exception e) {
    // Fix: the two original catch blocks were identical; log the full
    // throwable instead of concatenating getCause() with getMessage(),
    // which lost the stack trace.
    log.error("Failed to cancel tokens for user " + userToProxy, e);
  }
  // Fix: surface a delete failure instead of silently ignoring it.
  if (tokenFile.exists() && !tokenFile.delete()) {
    log.warn("Failed to delete token file " + tokenFile.getAbsolutePath());
  }
}
Invalidates a Hadoop authentication token file
21,987
/**
 * Instantiates the HadoopSecurityManager implementation named by the
 * HADOOP_SECURITY_MANAGER_CLASS_PARAM property, via its static
 * getInstance(Props) factory method.
 *
 * @throws RuntimeException when the class cannot be loaded or instantiated
 */
public static HadoopSecurityManager loadHadoopSecurityManager(Props props, Logger log)
    throws RuntimeException {
  Class<?> hadoopSecurityManagerClass = props.getClass(HADOOP_SECURITY_MANAGER_CLASS_PARAM,
      true, HadoopJobUtils.class.getClassLoader());
  log.info("Loading hadoop security manager " + hadoopSecurityManagerClass.getName());
  HadoopSecurityManager hadoopSecurityManager = null;
  try {
    // The implementation is expected to expose a static getInstance(Props)
    // factory; it is invoked reflectively here.
    Method getInstanceMethod = hadoopSecurityManagerClass.getMethod("getInstance", Props.class);
    hadoopSecurityManager = (HadoopSecurityManager) getInstanceMethod.invoke(
        hadoopSecurityManagerClass, props);
  } catch (InvocationTargetException e) {
    // The factory itself threw; include its cause in the message.
    String errMsg = "Could not instantiate Hadoop Security Manager "
        + hadoopSecurityManagerClass.getName() + e.getCause();
    log.error(errMsg);
    throw new RuntimeException(errMsg, e);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
  return hadoopSecurityManager;
}
Based on the HADOOP_SECURITY_MANAGER_CLASS_PARAM setting in the incoming props finds the correct HadoopSecurityManager Java class
21,988
/**
 * Creates a temp token file and asks the security manager to prefetch Hadoop
 * tokens into it on behalf of the Azkaban user.
 *
 * @return the temp file now holding the prefetched tokens
 * @throws HadoopSecurityManagerException when the file cannot be created or
 *         token prefetch fails
 */
public static File getHadoopTokens(HadoopSecurityManager hadoopSecurityManager, Props props,
    Logger log) throws HadoopSecurityManagerException {
  final File tokenFile;
  try {
    tokenFile = File.createTempFile("mr-azkaban", ".token");
  } catch (Exception e) {
    throw new HadoopSecurityManagerException("Failed to create the token file.", e);
  }
  hadoopSecurityManager.prefetchToken(tokenFile, props, log);
  return tokenFile;
}
Fetching token with the Azkaban user
21,989
/**
 * Decorator around killAllSpawnedHadoopJobs: when proxying is configured in
 * the job properties, the kill runs via doAs as the proxy user using the
 * supplied token file; otherwise it runs directly. Any failure is logged and
 * swallowed (best-effort cleanup).
 */
public static void proxyUserKillAllSpawnedHadoopJobs(final String logFilePath, Props jobProps,
    File tokenFile, final Logger log) {
  // Flatten job props into java.util.Properties for the wrapper utilities.
  Properties properties = new Properties();
  properties.putAll(jobProps.getFlattened());
  try {
    if (HadoopSecureWrapperUtils.shouldProxy(properties)) {
      UserGroupInformation proxyUser = HadoopSecureWrapperUtils.setupProxyUser(properties,
          tokenFile.getAbsolutePath(), log);
      proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
          HadoopJobUtils.killAllSpawnedHadoopJobs(logFilePath, log);
          return null;
        }
      });
    } else {
      HadoopJobUtils.killAllSpawnedHadoopJobs(logFilePath, log);
    }
  } catch (Throwable t) {
    log.warn("something happened while trying to kill all spawned jobs", t);
  }
}
This method is a decorator around the KillAllSpawnedHadoopJobs method . This method takes additional parameters to determine whether KillAllSpawnedHadoopJobs needs to be executed using doAs as a different user
21,990
/**
 * Scans the given log file for spawned Hadoop application ids and kills each
 * one on the cluster, best-effort per id.
 *
 * @return the set of application ids found in the log
 */
public static Set<String> killAllSpawnedHadoopJobs(String logFilePath, Logger log) {
  final Set<String> spawnedJobIds = findApplicationIdFromLog(logFilePath, log);
  log.info("applicationIds to kill: " + spawnedJobIds);
  for (final String applicationId : spawnedJobIds) {
    try {
      killJobOnCluster(applicationId, log);
    } catch (Throwable t) {
      // One failed kill must not stop the others.
      log.warn("something happened while trying to kill this job: " + applicationId, t);
    }
  }
  return spawnedJobIds;
}
Given a log file path this method finds all the Hadoop jobs the execution launched and kills them
21,991
/**
 * Filters a collection of command strings, keeping only those that fully match the
 * whitelist regex and do not fully match the blacklist regex. Each rejected command is
 * logged as a warning.
 *
 * @param commands candidate command strings
 * @param whitelistRegex regex a command must match to be kept
 * @param blacklistRegex regex a command must not match to be kept
 * @param log logger that records each removed command
 * @return the commands that passed both filters, in iteration order
 */
public static List<String> filterCommands(Collection<String> commands, String whitelistRegex,
    String blacklistRegex, Logger log) {
  final Pattern allowed = Pattern.compile(whitelistRegex);
  final Pattern denied = Pattern.compile(blacklistRegex);
  final List<String> accepted = new LinkedList<String>();
  for (final String candidate : commands) {
    final boolean keep =
        allowed.matcher(candidate).matches() && !denied.matcher(candidate).matches();
    if (keep) {
      accepted.add(candidate);
    } else {
      log.warn(String.format("Removing restricted command: %s", candidate));
    }
  }
  return accepted;
}
Filter a collection of String commands to match a whitelist regex and not match a blacklist regex .
21,992
/**
 * Constructs a CSV of "key:value" tags for the Hadoop application, one entry for each key
 * in {@code keys} that is present in {@code props}. Keys absent from props are skipped.
 *
 * @param props properties to read the tag values from
 * @param keys property keys to turn into tags
 * @return comma-separated "key:value" pairs for the keys present in props
 */
public static String constructHadoopTags(Props props, String[] keys) {
  final String[] tags = new String[keys.length];
  int slot = 0;
  for (final String key : keys) {
    if (props.containsKey(key)) {
      tags[slot] = key + ":" + props.get(key);
    }
    slot++;
  }
  // skipNulls drops the slots for keys that were absent from props.
  return Joiner.on(',').skipNulls().join(tags);
}
Construct a CSV of tags for the Hadoop application .
21,993
/**
 * Returns the OS total free memory size (including OS cache and free swap) parsed from the
 * memory info file, or 0 when the file is missing or cannot be read.
 *
 * @return the free memory size reported by the mem info file, or 0 on failure
 */
long getOsTotalFreeMemorySize() {
  final Path memInfoPath = Paths.get(MEM_INFO_FILE);
  if (!Files.isRegularFile(memInfoPath)) {
    // No mem info file on this host (e.g. non-Linux): report zero rather than failing.
    return 0;
  }
  final List<String> memInfoLines;
  try {
    memInfoLines = Files.readAllLines(memInfoPath, StandardCharsets.UTF_8);
  } catch (final IOException e) {
    final String errMsg = "Failed to open mem info file: " + MEM_INFO_FILE;
    logger.error(errMsg, e);
    return 0;
  }
  return getOsTotalFreeMemorySizeFromStrings(memInfoLines);
}
Includes OS cache and free swap .
21,994
/**
 * Executes a command on behalf of another user and waits for it to finish, inheriting the
 * current process's standard streams.
 *
 * @param user the user to execute the command as
 * @param command the command and its arguments
 * @return the process exit code, or 1 if the wait was interrupted
 * @throws IOException if the process cannot be started
 */
public int execute(final String user, final List<String> command) throws IOException {
  log.info("Command: " + command);
  final Process process = new ProcessBuilder()
      .command(constructExecuteAsCommand(user, command))
      .inheritIO()
      .start();
  int exitCode;
  try {
    exitCode = process.waitFor();
  } catch (final InterruptedException e) {
    log.error(e.getMessage(), e);
    // Restore the interrupt status so callers up the stack can observe the interruption;
    // the original code swallowed it, hiding the interrupt from the caller.
    Thread.currentThread().interrupt();
    exitCode = 1;
  }
  return exitCode;
}
API to execute a command on behalf of another user .
21,995
/**
 * Validates that the named parameter, if present, parses as a numeric value.
 *
 * @param params parameter map, may be null
 * @param paramName name of the parameter to validate
 * @return true when the parameter is absent (or the map is null) or numeric
 * @throws ExecutorManagerException if the parameter is present but not numeric
 */
public static boolean validateIntegerParam(final Map<String, String> params,
    final String paramName) throws ExecutorManagerException {
  if (params == null || !params.containsKey(paramName)) {
    return true;
  }
  if (!StringUtils.isNumeric(params.get(paramName))) {
    throw new ExecutorManagerException(paramName + " should be an integer");
  }
  return true;
}
Parses a string as a number and throws an exception if the parsed value is not a valid integer.
21,996
/**
 * Returns the size in bytes of {@code dir}, caching the result in a file inside the
 * directory. If the cache file already exists its stored value is returned; otherwise the
 * directory is measured and the result is written to the cache file.
 *
 * @param dir directory whose size should be calculated and cached
 * @return the directory size in bytes
 * @throws IOException if the cache file cannot be read or written
 */
static long calculateDirSizeAndSave(final File dir) throws IOException {
  final Path sizeFile = Paths.get(dir.getPath(), FlowPreparer.PROJECT_DIR_SIZE_FILE_NAME);
  if (Files.exists(sizeFile)) {
    // Fast path: reuse the previously computed size.
    return FileIOUtils.readNumberFromFile(sizeFile);
  }
  final long sizeInByte = FileUtils.sizeOfDirectory(dir);
  FileIOUtils.dumpNumberToFile(sizeFile, sizeInByte);
  return sizeInByte;
}
Calculate the directory size and save it to a file .
21,997
/**
 * Prepares the flow directory for execution: downloads the project archive if it is not
 * installed yet, installs it under the project cache (evicting old project dirs when a
 * cleaner is configured), and creates the per-execution directory.
 *
 * @param flow the flow whose execution directory should be set up
 * @throws ExecutorManagerException if any step of the preparation fails
 */
void setup(final ExecutableFlow flow) throws ExecutorManagerException {
  // NOTE: the original code declared a ProjectFileHandler that was always null and only
  // dereferenced in an unreachable finally branch; that dead code has been removed.
  File tempDir = null;
  try {
    final ProjectDirectoryMetadata project =
        new ProjectDirectoryMetadata(flow.getProjectId(), flow.getVersion());
    final long flowPrepStartTime = System.currentTimeMillis();
    // Download outside the critical section so concurrent preparations can overlap.
    tempDir = downloadProjectIfNotExists(project, flow.getExecutionId());
    long criticalSectionStartTime = -1;
    File execDir = null;
    // Only one thread at a time may promote a download and create execution dirs.
    synchronized (this) {
      criticalSectionStartTime = System.currentTimeMillis();
      if (!project.getInstalledDir().exists() && tempDir != null) {
        // Make room in the project cache before installing the fresh download.
        if (this.projectCacheCleaner.isPresent()) {
          this.projectCacheCleaner.get()
              .deleteProjectDirsIfNecessary(project.getDirSizeInByte());
        }
        // Promote the temp download to the installed location.
        Files.move(tempDir.toPath(), project.getInstalledDir().toPath());
      }
      final long start = System.currentTimeMillis();
      execDir = setupExecutionDir(project.getInstalledDir(), flow);
      final long end = System.currentTimeMillis();
      log.info("Setting up execution dir {} took {} sec(s)", execDir, (end - start) / 1000);
    }
    final long flowPrepCompletionTime = System.currentTimeMillis();
    // Fixed typo in the original message: "out ot which" -> "out of which".
    log.info("Flow preparation completed in {} sec(s), out of which {} sec(s) was spent inside "
            + "critical section. [execid: {}, path: {}]",
        (flowPrepCompletionTime - flowPrepStartTime) / 1000,
        (flowPrepCompletionTime - criticalSectionStartTime) / 1000,
        flow.getExecutionId(), execDir.getPath());
  } catch (final Exception ex) {
    // Clean up any partially downloaded project; deletion failures are swallowed.
    FileIOUtils.deleteDirectorySilently(tempDir);
    log.error("Error in preparing flow execution {}", flow.getExecutionId(), ex);
    throw new ExecutorManagerException(ex);
  }
}
Prepare the flow directory for execution .
21,998
/**
 * Re-subscribes the Kafka consumer to exactly the topics currently queued in
 * {@code subscribedTopics}, then clears the queue. No-op when the queue is empty.
 */
synchronized void consumerSubscriptionRebalance() {
  log.debug("Subscribed Topics " + this.consumer.subscription());
  if (this.subscribedTopics.isEmpty()) {
    return;
  }
  // Snapshot the pending topic set before clearing it, then hand the copy to the consumer.
  final List<String> topics = new ArrayList<>(this.subscribedTopics);
  this.subscribedTopics.clear();
  this.consumer.subscribe(topics);
}
Dynamically tunes the subscription to cover only the topics that dependencies need.
21,999
/**
 * Fires the success callback for every dependency instance registered for this record's
 * topic and any of the matched event names, then removes the satisfied instances from the
 * dependency collection.
 *
 * @param matchedList event names from the record that matched at least one dependency
 * @param record the Kafka record whose topic scopes the dependency lookup
 */
private void triggerDependencies(final Set<String> matchedList,
    final ConsumerRecord<String, String> record) {
  final List<KafkaDependencyInstanceContext> deleteList = new LinkedList<>();
  for (final String it : matchedList) {
    final List<KafkaDependencyInstanceContext> possibleAvailableDeps =
        this.depInstances.getDepsByTopicAndEvent(record.topic(), it);
    for (final KafkaDependencyInstanceContext dep : possibleAvailableDeps) {
      dep.getCallback().onSuccess(dep);
      deleteList.add(dep);
    }
    // NOTE(review): deleteList accumulates across matched events, so later removeList calls
    // receive entries from earlier events too — presumably removeList ignores entries not
    // registered under (topic, event); confirm against its implementation.
    if (!this.depInstances.removeList(record.topic(), it, deleteList)) {
      // removeList returning false signals the topic set changed; re-queue the full topic
      // list so the consumer subscription is rebalanced.
      this.subscribedTopics.addAll(this.depInstances.getTopicList());
    }
  }
}
If the matcher returns true, removes the satisfied dependency from the collection.