idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
33,700 | @ SuppressWarnings ( "deprecation" ) void init ( ) throws InterruptedException , IOException { long now = System . currentTimeMillis ( ) ; JobConf jobConf = new JobConf ( getConf ( ) ) ; jobConf . setClass ( "topology.node.switch.mapping.impl" , StaticMapping . class , DNSToSwitchMapping . class ) ; jobConf . set ( "fs... | Initiate components in the simulation . |
33,701 | public static void main ( String [ ] args ) throws IOException { if ( args . length != 1 ) { System . err . println ( "Usage: JenkinsHash filename" ) ; System . exit ( - 1 ) ; } FileInputStream in = new FileInputStream ( args [ 0 ] ) ; byte [ ] bytes = new byte [ 512 ] ; int value = 0 ; JenkinsHash hash = new JenkinsHa... | Compute the hash of the specified file |
33,702 | void setStartTime ( long startTime ) { if ( startTime > 0 ) { this . startTime = startTime ; } else { LOG . error ( "Trying to set illegal startTime for task : " + taskid + ".Stack trace is : " + StringUtils . stringifyException ( new Exception ( ) ) ) ; } } | Set startTime of the task if start time is greater than zero . |
33,703 | void setPhase ( Phase phase ) { TaskStatus . Phase oldPhase = getPhase ( ) ; if ( oldPhase != phase ) { if ( phase == TaskStatus . Phase . SORT ) { setShuffleFinishTime ( JobTracker . getClock ( ) . getTime ( ) ) ; } else if ( phase == TaskStatus . Phase . REDUCE ) { setSortFinishTime ( JobTracker . getClock ( ) . getT... | Set current phase of this task . |
33,704 | synchronized void statusUpdate ( State runState , float progress , String state , Phase phase , long finishTime ) { setRunState ( runState ) ; setProgress ( progress ) ; setStateString ( state ) ; setPhase ( phase ) ; if ( finishTime > 0 ) { setFinishTime ( finishTime ) ; } } | Update specific fields of task status |
33,705 | public static void verifySavedMD5 ( File dataFile , MD5Hash expectedMD5 ) throws IOException { MD5Hash storedHash = readStoredMd5ForFile ( dataFile ) ; if ( ! expectedMD5 . equals ( storedHash ) ) { throw new IOException ( "File " + dataFile + " did not match stored MD5 checksum " + " (stored: " + storedHash + ", compu... | Verify that the previously saved md5 for the given file matches expectedMd5 . |
33,706 | public static MD5Hash readStoredMd5ForFile ( File dataFile ) throws IOException { File md5File = getDigestFileForFile ( dataFile ) ; String md5Line ; if ( ! md5File . exists ( ) ) { return null ; } BufferedReader reader = new BufferedReader ( new FileReader ( md5File ) ) ; try { md5Line = reader . readLine ( ) ; if ( m... | Read the md5 checksum stored alongside the given file or null if no md5 is stored . |
33,707 | public static MD5Hash computeMd5ForFile ( File dataFile ) throws IOException { InputStream in = new FileInputStream ( dataFile ) ; try { MessageDigest digester = MD5Hash . getDigester ( ) ; DigestInputStream dis = new DigestInputStream ( in , digester ) ; IOUtils . copyBytes ( dis , new IOUtils . NullOutputStream ( ) ,... | Read dataFile and compute its MD5 checksum . |
33,708 | public static void saveMD5File ( File dataFile , MD5Hash digest ) throws IOException { File md5File = getDigestFileForFile ( dataFile ) ; String digestString = StringUtils . byteToHexString ( digest . getDigest ( ) ) ; String md5Line = digestString + " *" + dataFile . getName ( ) + "\n" ; AtomicFileOutputStream afos = ... | Save the . md5 file that lists the md5sum of another file . |
33,709 | public int compareTo ( Object o ) { int thisValue = this . value ; int thatValue = ( ( VIntWritable ) o ) . value ; return ( thisValue < thatValue ? - 1 : ( thisValue == thatValue ? 0 : 1 ) ) ; } | Compares two VIntWritables . |
33,710 | private void incrementOpCodeCount ( Byte opCode ) { if ( ! opCodeCount . containsKey ( opCode ) ) { opCodeCount . put ( opCode , 0L ) ; } Long newValue = opCodeCount . get ( opCode ) + 1 ; opCodeCount . put ( opCode , newValue ) ; } | Increment the op code counter |
33,711 | public FSEditLogOp readOp ( ) throws IOException { FSEditLogOp ret ; if ( cachedOp != null ) { ret = cachedOp ; cachedOp = null ; return ret ; } return nextOp ( ) ; } | Read an operation from the stream |
33,712 | public boolean skipUntil ( long txid ) throws IOException { while ( true ) { FSEditLogOp op = readOp ( ) ; if ( op == null ) { return false ; } if ( op . getTransactionId ( ) >= txid ) { cachedOp = op ; return true ; } } } | Skip edit log operations up to a given transaction ID or until the end of the edit log is reached . |
33,713 | public static Path getSkipOutputPath ( Configuration conf ) { String name = conf . get ( OUT_PATH ) ; if ( name != null ) { if ( "none" . equals ( name ) ) { return null ; } return new Path ( name ) ; } Path outPath = FileOutputFormat . getOutputPath ( new JobConf ( conf ) ) ; return outPath == null ? null : new Path (... | Get the directory to which skipped records are written . By default it is the sub directory of the output _logs directory . User can stop writing skipped records by setting the value null . |
33,714 | public static void setSkipOutputPath ( JobConf conf , Path path ) { String pathStr = null ; if ( path == null ) { pathStr = "none" ; } else { pathStr = path . toString ( ) ; } conf . set ( OUT_PATH , pathStr ) ; } | Set the directory to which skipped records are written . By default it is the sub directory of the output _logs directory . User can stop writing skipped records by setting the value null . |
33,715 | public RawKeyValueIterator sort ( ) { MergeSort m = new MergeSort ( this ) ; int count = super . count ; if ( count == 0 ) return null ; int [ ] pointers = super . pointers ; int [ ] pointersCopy = new int [ count ] ; System . arraycopy ( pointers , 0 , pointersCopy , 0 , count ) ; m . mergeSort ( pointers , pointersCo... | The sort method derived from BasicTypeSorterBase and overridden here |
33,716 | public void write ( JsonGenerator jsonGenerator ) throws IOException { jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeObjectField ( "request" , request ) ; jsonGenerator . writeEndObject ( ) ; } | This method writes the ResourceRequestInfo instance to disk |
33,717 | public synchronized void write ( int b ) throws IOException { eventStartWrite ( ) ; try { sum . update ( b ) ; buf [ count ++ ] = ( byte ) b ; if ( bytesSentInChunk + count == buf . length ) { flushBuffer ( true , shouldKeepPartialChunkData ( ) ) ; } } finally { eventEndWrite ( ) ; } } | Write one byte |
33,718 | private int write1 ( byte b [ ] , int off , int len ) throws IOException { eventStartWrite ( ) ; try { if ( count == 0 && bytesSentInChunk + len >= buf . length ) { final int length = buf . length - bytesSentInChunk ; sum . update ( b , off , length ) ; writeChecksumChunk ( b , off , length , false ) ; bytesSentInChunk... | Write a portion of an array flushing to the underlying stream at most once if necessary . |
33,719 | private void writeChecksumChunk ( byte b [ ] , int off , int len , boolean keep ) throws IOException { int tempChecksum = ( int ) sum . getValue ( ) ; if ( ! keep ) { sum . reset ( ) ; } int2byte ( tempChecksum , checksum ) ; writeChunk ( b , off , len , checksum ) ; } | Generate checksum for the data chunk and output data chunk & checksum to the underlying output stream . If keep is true then keep the current checksum intact do not reset it . |
33,720 | public String getUserName ( ) { int n = ( int ) PermissionStatusFormat . USER . retrieve ( permission ) ; return SerialNumberManager . INSTANCE . getUser ( n ) ; } | Get user name |
33,721 | public String getGroupName ( ) { int n = ( int ) PermissionStatusFormat . GROUP . retrieve ( permission ) ; return SerialNumberManager . INSTANCE . getGroup ( n ) ; } | Get group name |
33,722 | public static void enforceRegularStorageINode ( INodeFile inode , String msg ) throws IOException { if ( inode . getStorageType ( ) != StorageType . REGULAR_STORAGE ) { LOG . error ( msg ) ; throw new IOException ( msg ) ; } } | Verify if file is regular storage otherwise throw an exception |
33,723 | static byte [ ] [ ] getPathComponents ( String [ ] strings ) { if ( strings . length == 0 ) { return new byte [ ] [ ] { null } ; } byte [ ] [ ] bytes = new byte [ strings . length ] [ ] ; for ( int i = 0 ; i < strings . length ; i ++ ) bytes [ i ] = DFSUtil . string2Bytes ( strings [ i ] ) ; return bytes ; } | Convert strings to byte arrays for path components . |
33,724 | static String [ ] getPathNames ( String path ) { if ( path == null || ! path . startsWith ( Path . SEPARATOR ) ) { return null ; } return StringUtils . split ( path , Path . SEPARATOR_CHAR ) ; } | Breaks file path into names . |
33,725 | public final int compareTo ( byte [ ] name2 ) { if ( name == name2 ) return 0 ; int len1 = ( name == null ? 0 : name . length ) ; int len2 = ( name2 == null ? 0 : name2 . length ) ; int n = Math . min ( len1 , len2 ) ; byte b1 , b2 ; for ( int i = 0 ; i < n ; i ++ ) { b1 = name [ i ] ; b2 = name2 [ i ] ; if ( b1 != b2 ... | Compare names of the inodes |
33,726 | static INode newINode ( long id , PermissionStatus permissions , BlockInfo [ ] blocks , short replication , long modificationTime , long atime , long nsQuota , long dsQuota , long preferredBlockSize , byte inodeType , long hardLinkID , RaidCodec codec , FSImageLoadingContext context ) { if ( inodeType == INode . INodeT... | Create an INode ; the inode s name is not set yet |
33,727 | private void waitForUpgradeDone ( int namespaceId ) { UpgradeManagerDatanode um = datanode . getUpgradeManager ( namespaceId ) ; while ( ! um . isUpgradeCompleted ( ) ) { try { datanode . updateAndReportThreadLiveness ( BackgroundThread . BLOCK_SCANNER ) ; Thread . sleep ( 5000 ) ; LOG . info ( "sleeping ............" ... | Wait for upgrading done for the given namespace |
33,728 | private DataBlockScanner getNextNamespaceSliceScanner ( int currentNamespaceId ) { Integer nextNsId = null ; while ( ( nextNsId == null ) && datanode . shouldRun && ! blockScannerThread . isInterrupted ( ) ) { waitForOneNameSpaceUp ( ) ; synchronized ( this ) { if ( getNamespaceSetSize ( ) > 0 ) { long lastScanTime = -... | Find next namespaceId to scan . There should be only one current verification log file . Find which namespace contains the current verification log file and that is used as the starting namespaceId . If no current files are found start with first namespace . However if more than one current files are found the one with... |
33,729 | public int [ ] hash ( Key k ) { byte [ ] b = k . getBytes ( ) ; if ( b == null ) { throw new NullPointerException ( "buffer reference is null" ) ; } if ( b . length == 0 ) { throw new IllegalArgumentException ( "key length must be > 0" ) ; } int [ ] result = new int [ nbHash ] ; for ( int i = 0 , initval = 0 ; i < nbHa... | Hashes a specified key into several integers . |
33,730 | protected Connector createBaseListener ( Configuration conf ) throws IOException { Connector ret ; if ( conf . getBoolean ( "hadoop.http.bio" , false ) ) { SocketConnector conn = new SocketConnector ( ) ; conn . setAcceptQueueSize ( 4096 ) ; conn . setResolveNames ( false ) ; ret = conn ; } else { SelectChannelConnecto... | Create a required listener for the Jetty instance listening on the port provided . This wrapper and all subclasses must create at least one listener . |
33,731 | private static FilterInitializer [ ] getFilterInitializers ( Configuration conf ) { if ( conf == null ) { return null ; } Class < ? > [ ] classes = conf . getClasses ( FILTER_INITIALIZER_PROPERTY ) ; if ( classes == null ) { return null ; } FilterInitializer [ ] initializers = new FilterInitializer [ classes . length ]... | Get an array of FilterConfiguration specified in the conf |
33,732 | protected void addDefaultApps ( ContextHandlerCollection parent , final String appDir ) throws IOException { String logDir = System . getProperty ( "hadoop.log.dir" ) ; if ( logDir != null ) { Context logContext = new Context ( parent , "/logs" ) ; logContext . setResourceBase ( logDir ) ; logContext . addServlet ( Sta... | Add default apps . |
33,733 | protected void addDefaultServlets ( ) { addServlet ( "stacks" , "/stacks" , StackServlet . class ) ; addServlet ( "logLevel" , "/logLevel" , LogLevel . Servlet . class ) ; addServlet ( "jmx" , "/jmx" , JMXJsonServlet . class ) ; addServlet ( "metrics" , "/metrics" , MetricsServlet . class ) ; addServlet ( "conf" , "/co... | Add default servlets . |
33,734 | protected void addContext ( String pathSpec , String dir , boolean isFiltered ) throws IOException { if ( 0 == webServer . getHandlers ( ) . length ) { throw new RuntimeException ( "Couldn't find handler" ) ; } WebAppContext webAppCtx = new WebAppContext ( ) ; webAppCtx . setContextPath ( pathSpec ) ; webAppCtx . setWa... | Add a context |
33,735 | public void addServlet ( String name , String pathSpec , Class < ? extends HttpServlet > clazz ) { addInternalServlet ( name , pathSpec , clazz ) ; addFilterPathMapping ( pathSpec , webAppContext ) ; } | Add a servlet in the server . |
33,736 | public void addInternalServlet ( String name , String pathSpec , Class < ? extends HttpServlet > clazz ) { ServletHolder holder = new ServletHolder ( clazz ) ; if ( name != null ) { holder . setName ( name ) ; } webAppContext . addServlet ( holder , pathSpec ) ; } | Add an internal servlet in the server . |
33,737 | public void removeServlet ( String name , String pathSpec , Class < ? extends HttpServlet > clazz ) { if ( clazz == null ) { return ; } ServletHandler servletHandler = webAppContext . getServletHandler ( ) ; List < FilterMapping > newFilterMappings = new ArrayList < FilterMapping > ( ) ; for ( FilterMapping mapping : s... | Remove a servlet in the server . |
33,738 | public void removeInternalServlet ( String name , String pathSpec , Class < ? extends HttpServlet > clazz ) { if ( null == clazz ) { return ; } ServletHandler servletHandler = webAppContext . getServletHandler ( ) ; List < ServletHolder > newServletHolders = new ArrayList < ServletHolder > ( ) ; List < ServletMapping >... | Remove an internal servlet in the server . |
33,739 | protected void defineFilter ( Context ctx , String name , String classname , Map < String , String > parameters , String [ ] urls ) { FilterHolder holder = new FilterHolder ( ) ; holder . setName ( name ) ; holder . setClassName ( classname ) ; holder . setInitParameters ( parameters ) ; FilterMapping fmap = new Filter... | Define a filter for a context and set up default url mappings . |
33,740 | protected void addFilterPathMapping ( String pathSpec , Context webAppCtx ) { ServletHandler handler = webAppCtx . getServletHandler ( ) ; for ( String name : filterNames ) { FilterMapping fmap = new FilterMapping ( ) ; fmap . setPathSpec ( pathSpec ) ; fmap . setFilterName ( name ) ; fmap . setDispatches ( Handler . A... | Add the path spec to the filter path mapping . |
33,741 | protected String getWebAppsPath ( ) throws IOException { URL url = getClass ( ) . getClassLoader ( ) . getResource ( "webapps" ) ; if ( url == null ) throw new IOException ( "webapps not found in CLASSPATH" ) ; return url . toString ( ) ; } | Get the pathname to the webapps files . |
33,742 | public void stop ( ) throws Exception { listener . close ( ) ; webAppContext . clearAttributes ( ) ; webServer . removeHandler ( webAppContext ) ; webServer . stop ( ) ; } | stop the server |
33,743 | protected String getAttribute ( String attributeName ) { String factoryAttribute = contextName + "." + attributeName ; return ( String ) factory . getAttribute ( factoryAttribute ) ; } | Convenience method for subclasses to access factory attributes . |
33,744 | public synchronized void registerUpdater ( final Updater updater ) { if ( ! updaters . containsKey ( updater ) ) { updaters . put ( updater , Boolean . TRUE ) ; } } | Registers a callback to be called at time intervals determined by the configuration . |
33,745 | private synchronized void startTimer ( ) { if ( timer == null ) { timer = new Timer ( "Timer thread for monitoring " + getContextName ( ) , true ) ; TimerTask task = new TimerTask ( ) { public void run ( ) { try { timerEvent ( ) ; } catch ( IOException ioe ) { ioe . printStackTrace ( ) ; } } } ; long millis = period * ... | Starts timer if it is not already started |
33,746 | private void timerEvent ( ) throws IOException { if ( isMonitoring ) { Collection < Updater > myUpdaters ; synchronized ( this ) { myUpdaters = new ArrayList < Updater > ( updaters . keySet ( ) ) ; } for ( Updater updater : myUpdaters ) { try { updater . doUpdates ( this ) ; } catch ( Throwable throwable ) { throwable ... | Timer callback . |
33,747 | private synchronized void emitRecords ( ) throws IOException { for ( String recordName : bufferedData . keySet ( ) ) { RecordMap recordMap = bufferedData . get ( recordName ) ; synchronized ( recordMap ) { Set < Entry < TagMap , MetricMap > > entrySet = recordMap . entrySet ( ) ; for ( Entry < TagMap , MetricMap > entr... | Emits the records . |
33,748 | private Number sum ( Number a , Number b ) { if ( a instanceof Integer ) { return Integer . valueOf ( a . intValue ( ) + b . intValue ( ) ) ; } else if ( a instanceof Float ) { return new Float ( a . floatValue ( ) + b . floatValue ( ) ) ; } else if ( a instanceof Short ) { return Short . valueOf ( ( short ) ( a . shor... | Adds two numbers coercing the second to the type of the first . |
33,749 | protected void parseAndSetPeriod ( String attributeName ) { String periodStr = getAttribute ( attributeName ) ; if ( periodStr != null ) { int period = 0 ; try { period = Integer . parseInt ( periodStr ) ; } catch ( NumberFormatException nfe ) { } if ( period <= 0 ) { throw new MetricsException ( "Invalid period: " + p... | If a period is set in the attribute passed in override the default with it . |
33,750 | public synchronized Map < String , Collection < OutputRecord > > getAllRecords ( ) { Map < String , Collection < OutputRecord > > out = new TreeMap < String , Collection < OutputRecord > > ( ) ; for ( String recordName : bufferedData . keySet ( ) ) { RecordMap recordMap = bufferedData . get ( recordName ) ; synchronize... | Retrieves all the records managed by this MetricsContext . Useful for monitoring systems that are polling - based . |
33,751 | public List < SimulatorEvent > accept ( SimulatorEvent event ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Accepted event " + event ) ; } if ( event instanceof HeartbeatEvent ) { return processHeartbeatEvent ( ( HeartbeatEvent ) event ) ; } else if ( event instanceof TaskAttemptCompletionEvent ) { return process... | Processes a simulation event . |
33,752 | public List < SimulatorEvent > init ( long when ) { LOG . debug ( "TaskTracker starting up, current simulation time=" + when ) ; return Collections . < SimulatorEvent > singletonList ( new HeartbeatEvent ( this , when ) ) ; } | Called once at the start of the simulation . |
33,753 | private void finishRunningTask ( TaskStatus finalStatus , long now ) { TaskAttemptID taskId = finalStatus . getTaskID ( ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Finishing running task id=" + taskId + ", now=" + now ) ; } SimulatorTaskInProgress tip = tasks . get ( taskId ) ; if ( tip == null ) { throw new I... | Stops running a task attempt on the task tracker . It also updates the number of available slots accordingly . |
33,754 | private List < SimulatorEvent > processTaskAttemptCompletionEvent ( TaskAttemptCompletionEvent event ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Processing task attempt completion event" + event ) ; } long now = event . getTimeStamp ( ) ; TaskStatus finalStatus = event . getStatus ( ) ; TaskAttemptID taskID = ... | Records that a task attempt has completed . Ignores the event for tasks that got killed after the creation of the completion event . |
33,755 | private TaskAttemptCompletionEvent createTaskAttemptCompletionEvent ( SimulatorTaskInProgress tip , long now ) { TaskStatus status = ( TaskStatus ) tip . getTaskStatus ( ) . clone ( ) ; long delta = tip . getUserSpaceRunTime ( ) ; assert delta >= 0 : "TaskAttempt " + tip . getTaskStatus ( ) . getTaskID ( ) + " has nega... | Creates a signal for itself marking the completion of a task attempt . It assumes that the task attempt hasn t made any progress in the user space code so far i . e . it is called right at launch for map tasks and immediately after all maps completed for reduce tasks . |
33,756 | private List < SimulatorEvent > handleSimulatorLaunchTaskAction ( SimulatorLaunchTaskAction action , long now ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Handling launch task action " + action ) ; } Task task = action . getTask ( ) ; TaskAttemptID taskId = task . getTaskID ( ) ; if ( tasks . containsKey ( task... | Launches a task on the simulated task tracker . |
33,757 | private List < SimulatorEvent > handleKillTaskAction ( KillTaskAction action , long now ) { TaskAttemptID taskId = action . getTaskID ( ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Handling kill task action, taskId=" + taskId + ", now=" + now ) ; } SimulatorTaskInProgress tip = tasks . get ( taskId ) ; if ( tip... | Kills a task attempt . |
33,758 | private void progressTaskStatus ( SimulatorTaskInProgress tip , long now ) { TaskStatus status = tip . getTaskStatus ( ) ; if ( status . getRunState ( ) != State . RUNNING ) { return ; } boolean isMap = tip . isMapTask ( ) ; long startTime = - 1 ; long runTime = tip . getUserSpaceRunTime ( ) ; float progress = 0.0f ; i... | Updates the progress indicator of a task if it is running . |
33,759 | private void garbageCollectCompletedTasks ( ) { for ( Iterator < TaskAttemptID > iter = tasks . keySet ( ) . iterator ( ) ; iter . hasNext ( ) ; ) { TaskAttemptID taskId = iter . next ( ) ; SimulatorTaskInProgress tip = tasks . get ( taskId ) ; if ( tip . getTaskStatus ( ) . getRunState ( ) != State . RUNNING ) { iter ... | Frees up bookkeeping memory used by completed tasks . Has no effect on the events or logs produced by the SimulatorTaskTracker . We need this in order not to report completed task multiple times and to ensure that we do not run out of Java heap memory in larger simulations . |
33,760 | private List < SimulatorEvent > processHeartbeatEvent ( HeartbeatEvent event ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Processing heartbeat event " + event ) ; } long now = event . getTimeStamp ( ) ; progressTaskStatuses ( now ) ; List < TaskStatus > taskStatuses = collectAndCloneTaskStatuses ( ) ; boolean a... | Transmits a heartbeat event to the jobtracker and processes the response . |
33,761 | public StripeInfo retrieveStripe ( Block lostBlock , Path p , long lostBlockOffset , FileSystem fs , Context context , boolean online ) throws IOException { StripeInfo si = null ; if ( stripeStore != null ) { IOException caughtException = null ; try { si = stripeStore . getStripe ( codec , lostBlock ) ; } catch ( IOExc... | Retrieve stripes from stripe store |
33,762 | public Long retrieveChecksum ( Block lostBlock , Path p , long lostBlockOffset , FileSystem fs , Context context ) throws IOException { Long oldCRC = null ; if ( checksumStore != null ) { IOException caughtException = null ; try { oldCRC = checksumStore . getChecksum ( lostBlock ) ; } catch ( IOException ioe ) { LOG . ... | Retrieve checksums from checksum store and record checksum lost if possible |
33,763 | public CRC32 recoverBlockToFileFromStripeInfo ( FileSystem srcFs , Path srcPath , Block lostBlock , File localBlockFile , long blockSize , long lostBlockOffset , long limit , StripeInfo si , Context context ) throws IOException { OutputStream out = null ; try { out = new FileOutputStream ( localBlockFile ) ; CRC32 crc ... | Recover a corrupt block to local file . Using the stripe information stored in the Stripe Store . |
33,764 | private String getOldCodeId ( FileStatus srcStat ) throws IOException { if ( codec . id . equals ( "xor" ) || codec . id . equals ( "rs" ) ) { return codec . id ; } else { if ( ParityFilePair . getParityFile ( Codec . getCodec ( "xor" ) , srcStat , this . conf ) != null ) return "xor" ; if ( ParityFilePair . getParityF... | Return the old code id to construct a old decoder |
33,765 | private static void setupSsl ( Configuration conf ) { Configuration sslConf = new Configuration ( false ) ; sslConf . addResource ( conf . get ( "dfs.https.client.keystore.resource" , "ssl-client.xml" ) ) ; System . setProperty ( "javax.net.ssl.trustStore" , sslConf . get ( "ssl.client.truststore.location" , "" ) ) ; S... | Set up SSL resources |
33,766 | void setup ( ) { if ( supportsIsTaskAlive ( ) ) { waitForConfirmedKill = getConf ( ) . getBoolean ( WAIT_FOR_CONFIRMED_KILL_KEY , WAIT_FOR_CONFIRMED_DEFAULT ) ; confirmedKillRetries = getConf ( ) . getInt ( CONFIRMED_KILL_RETRIES_KEY , CONFIRMED_KILL_RETRIES_DEFAULT ) ; } LOG . info ( "setup: waitForConfirmedKill=" + w... | Setup task controller component . Will be called prior to use . |
33,767 | final void destroyTaskJVM ( TaskControllerContext context ) { Thread taskJVMDestroyer = new Thread ( new DestroyJVMTaskRunnable ( context ) ) ; taskJVMDestroyer . start ( ) ; if ( waitForConfirmedKill ) { try { taskJVMDestroyer . join ( ) ; } catch ( InterruptedException e ) { throw new IllegalStateException ( "destroy... | Use DestroyJVMTaskRunnable to kill task JVM asynchronously . Wait for the confirmed kill if configured so . |
33,768 | public Block getPenultimateBlock ( ) { if ( blocks == null || blocks . length <= 1 ) { return null ; } return blocks [ blocks . length - 2 ] ; } | Return the penultimate allocated block for this file . |
33,769 | public void addBlock ( BlockInfo newblock ) { if ( this . blocks == null ) { this . blocks = new BlockInfo [ 1 ] ; this . blocks [ 0 ] = newblock ; } else { int size = this . blocks . length ; BlockInfo [ ] newlist = new BlockInfo [ size + 1 ] ; System . arraycopy ( this . blocks , 0 , newlist , 0 , size ) ; newlist [ ... | add a block to the block list |
33,770 | public INodeRaidStorage convertToRaidStorage ( BlockInfo [ ] parityBlocks , RaidCodec codec , int [ ] checksums , BlocksMap blocksMap , short replication , INodeFile inode ) throws IOException { if ( codec == null ) { throw new IOException ( "Codec is null" ) ; } else { return new INodeRaidStorage ( codec . convertToRa... | Only used by merge it puts parity file s blocks and source file s blocks together into a block array to create an INodeRaidStorage |
33,771 | public void write ( byte [ ] b , int off , int len ) throws IOException { if ( compressor . finished ( ) ) { throw new IOException ( "write beyond end of stream" ) ; } if ( b == null ) { throw new NullPointerException ( ) ; } else if ( ( off < 0 ) || ( off > b . length ) || ( len < 0 ) || ( ( off + len ) > b . length )... | Write the data provided to the compression codec compressing no more than the buffer size less the compression overhead as specified during construction for each block . |
33,772 | boolean detachBlock ( int namespaceId , Block block , int numLinks ) throws IOException { if ( isDetached ( ) ) { return false ; } if ( blockDataFile . getFile ( ) == null || blockDataFile . volume == null ) { throw new IOException ( "detachBlock:Block not found. " + block ) ; } File meta = null ; if ( ! inlineChecksum... | Returns true if this block was copied otherwise returns false . |
33,773 | public synchronized boolean addDependingJob ( Job dependingJob ) { if ( this . state == Job . WAITING ) { if ( this . dependingJobs == null ) { this . dependingJobs = new ArrayList < Job > ( ) ; } return this . dependingJobs . add ( dependingJob ) ; } else { return false ; } } | Add a job to this jobs dependency list . Dependent jobs can only be added while a Job is waiting to run not during or afterwards . |
33,774 | private void checkRunningState ( ) { RunningJob running = null ; try { running = jc . getJob ( this . mapredJobID ) ; if ( running . isComplete ( ) ) { if ( running . isSuccessful ( ) ) { this . state = Job . SUCCESS ; } else { this . state = Job . FAILED ; this . message = "Job failed!" ; try { running . killJob ( ) ;... | Check the state of this running job . The state may remain the same become SUCCESS or FAILED . |
33,775 | synchronized int checkState ( ) { if ( this . state == Job . RUNNING ) { checkRunningState ( ) ; } if ( this . state != Job . WAITING ) { return this . state ; } if ( this . dependingJobs == null || this . dependingJobs . size ( ) == 0 ) { this . state = Job . READY ; return this . state ; } Job pred = null ; int n = t... | Check and update the state of this job . The state changes depending on its current state and the states of the depending jobs . |
33,776 | protected synchronized void submit ( ) { try { if ( theJobConf . getBoolean ( "create.empty.dir.if.nonexist" , false ) ) { FileSystem fs = FileSystem . get ( theJobConf ) ; Path inputPaths [ ] = FileInputFormat . getInputPaths ( theJobConf ) ; for ( int i = 0 ; i < inputPaths . length ; i ++ ) { if ( ! fs . exists ( in... | Submit this job to mapred . The state becomes RUNNING if submission is successful FAILED otherwise . |
33,777 | public void write ( byte [ ] b , int start , int length ) throws IOException { currentDataSegmentBuffer . write ( b , start , length ) ; flushIfNeeded ( ) ; } | This function makes sure the whole buffer is written into the same data segment . |
33,778 | public void flush ( ) throws IOException { if ( currentDataSegmentBuffer . size ( ) == 0 ) { return ; } DataSegmentWriter currentDataSegment = new DataSegmentWriter ( currentDataSegmentBuffer , codec , codecCompressor ) ; updateMetadata ( currentDataSegmentBuffer . size ( ) , currentDataSegment . size ( ) ) ; currentDa... | Take the current data segment optionally compress it calculate the crc32 and then write it out . |
33,779 | protected void checkPath ( Path path ) { URI thisUri = this . getUri ( ) ; URI thatUri = path . toUri ( ) ; String thatAuthority = thatUri . getAuthority ( ) ; if ( thatUri . getScheme ( ) != null && thatUri . getScheme ( ) . equalsIgnoreCase ( thisUri . getScheme ( ) ) && thatUri . getPort ( ) == NameNode . DEFAULT_PO... | Permit paths which explicitly specify the default port . |
33,780 | public FSDataOutputStream append ( Path f , int bufferSize , Progressable progress ) throws IOException { DFSOutputStream op = ( DFSOutputStream ) dfs . append ( getPathName ( f ) , bufferSize , progress ) ; return new FSDataOutputStream ( op , statistics , op . getInitialLen ( ) ) ; } | This optional operation is not yet supported . |
33,781 | public void concat ( Path trg , Path [ ] psrcs , boolean restricted ) throws IOException { String [ ] srcs = new String [ psrcs . length ] ; for ( int i = 0 ; i < psrcs . length ; i ++ ) { srcs [ i ] = getPathName ( psrcs [ i ] ) ; } dfs . concat ( getPathName ( trg ) , srcs , restricted ) ; } | THIS IS DFS only operations it is not part of FileSystem move blocks from srcs to trg and delete srcs afterwards |
33,782 | public void concat ( Path trg , Path [ ] psrcs ) throws IOException { concat ( trg , psrcs , true ) ; } | THIS IS DFS only operations it is not part of FileSystem move blocks from srcs to trg and delete srcs afterwards All blocks should be of the same size |
33,783 | public void setQuota ( Path src , long namespaceQuota , long diskspaceQuota ) throws IOException { dfs . setQuota ( getPathName ( src ) , namespaceQuota , diskspaceQuota ) ; } | Set a directory s quotas |
33,784 | public FileStatus getFileStatus ( Path f ) throws IOException { FileStatus fi = dfs . getFileInfo ( getPathName ( f ) ) ; if ( fi != null ) { fi . makeQualified ( this ) ; return fi ; } else { throw new FileNotFoundException ( "File does not exist: " + f ) ; } } | Returns the stat information about the file . |
33,785 | private void joinAndCollect ( Object [ ] tags , ResetableIterator [ ] values , Object key , OutputCollector output , Reporter reporter ) throws IOException { if ( values . length < 1 ) { return ; } Object [ ] partialList = new Object [ values . length ] ; joinAndCollect ( tags , values , 0 , partialList , key , output ... | join the list of the value lists and collect the results . |
33,786 | private void joinAndCollect ( Object [ ] tags , ResetableIterator [ ] values , int pos , Object [ ] partialList , Object key , OutputCollector output , Reporter reporter ) throws IOException { if ( values . length == pos ) { TaggedMapOutput combined = combine ( tags , partialList ) ; collect ( key , combined , output ,... | Perform the actual join recursively . |
33,787 | public void purgeJob ( final HadoopJob job ) { runningJobs . remove ( job . getJobID ( ) ) ; Display . getDefault ( ) . asyncExec ( new Runnable ( ) { public void run ( ) { fireJobRemoved ( job ) ; } } ) ; } | Remove the given job from the currently running jobs map |
33,788 | public boolean loadFromXML ( File file ) throws ParserConfigurationException , SAXException , IOException { Configuration newConf = new Configuration ( this . conf ) ; DocumentBuilder builder = DocumentBuilderFactory . newInstance ( ) . newDocumentBuilder ( ) ; Document document = builder . parse ( file ) ; Element roo... | Overwrite this location with settings available in the given XML file . The existing configuration is preserved if the XML file is invalid . |
33,789 | public void storeSettingsToFile ( File file ) throws IOException { FileOutputStream fos = new FileOutputStream ( file ) ; this . conf . writeXml ( fos ) ; fos . close ( ) ; } | Write this location settings to the given output stream |
33,790 | private void addPluginConfigDefaultProperties ( ) { for ( ConfProp prop : ConfProp . values ( ) ) { if ( conf . get ( prop . name ) == null ) conf . set ( prop . name , prop . defVal ) ; } } | Fill the configuration with valid default values |
33,791 | public static JobID downgrade ( org . apache . hadoop . mapreduce . JobID old ) { if ( old instanceof JobID ) { return ( JobID ) old ; } else { return new JobID ( old . getJtIdentifier ( ) , old . getId ( ) ) ; } } | Downgrade a new JobID to an old one |
33,792 | public static void logCall ( long entryTime , long returnTime , int callIndex , Object returnValue , Object argValues [ ] , long streamId ) { if ( ! API_TRACE_LOG . isInfoEnabled ( ) ) { return ; } long elapsed = returnTime ; elapsed -= entryTime ; entryTime -= baseTime ; StringBuilder line = new StringBuilder ( ) ; li... | Record a method call and its return value in the log . |
33,793 | private static Stats calcStats ( List < Double > nums ) { double sum = 0.0 , mean = 0.0 , variance = 0.0 , stdDev = 0.0 ; for ( Double d : nums ) { sum += d . doubleValue ( ) ; } if ( nums . size ( ) > 0 ) { mean = sum / nums . size ( ) ; } sum = 0.0 ; for ( Double d : nums ) { sum += ( d . doubleValue ( ) - mean ) * (... | Calculates mean variance standard deviation for a set of numbers |
33,794 | public void write ( JsonGenerator jsonGenerator ) throws IOException { jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeStringField ( "poolGroupName" , poolGroupName ) ; jsonGenerator . writeStringField ( "poolName" , poolName ) ; jsonGenerator . writeEndObject ( ) ; } | Used to write the state of the PoolInfo instance to disk when we are persisting the state of the ClusterManager |
33,795 | public static PoolInfoStrings createPoolInfoStrings ( PoolInfo poolInfo ) { if ( poolInfo == null ) { return null ; } return new PoolInfoStrings ( poolInfo . getPoolGroupName ( ) , poolInfo . getPoolName ( ) ) ; } | Convert this object to PoolInfoStrings for Thrift |
33,796 | public static PoolInfo createPoolInfo ( PoolInfoStrings poolInfoStrings ) { if ( poolInfoStrings == null ) { return null ; } return new PoolInfo ( poolInfoStrings . getPoolGroupName ( ) , poolInfoStrings . getPoolName ( ) ) ; } | Convert this object from PoolInfoStrings for Thrift |
33,797 | public static boolean isLegalPoolInfo ( PoolInfo poolInfo ) { if ( poolInfo == null || poolInfo . getPoolGroupName ( ) == null || poolInfo . getPoolName ( ) == null ) { return false ; } if ( INVALID_REGEX_PATTERN . matcher ( poolInfo . getPoolGroupName ( ) ) . matches ( ) || poolInfo . getPoolGroupName ( ) . isEmpty ( ... | Returns whether or not the given pool name is legal . |
33,798 | public FsPermission applyUMask ( FsPermission umask ) { return new FsPermission ( useraction . and ( umask . useraction . not ( ) ) , groupaction . and ( umask . groupaction . not ( ) ) , otheraction . and ( umask . otheraction . not ( ) ) ) ; } | Apply a umask to this permission and return a new one |
33,799 | public static FsPermission valueOf ( String unixSymbolicPermission ) { if ( unixSymbolicPermission == null ) { return null ; } else if ( unixSymbolicPermission . length ( ) != 10 ) { throw new IllegalArgumentException ( "length != 10(unixSymbolicPermission=" + unixSymbolicPermission + ")" ) ; } int n = 0 ; for ( int i ... | Create a FsPermission from a Unix symbolic permission string |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.