idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
12,400 | public long getUsedBytes ( ) throws IOException { try ( CloseableResource < BlockMasterClient > blockMasterClientResource = mContext . acquireBlockMasterClientResource ( ) ) { return blockMasterClientResource . get ( ) . getUsedBytes ( ) ; } } | Gets the used bytes of Alluxio s BlockStore . |
12,401 | public static void addSwiftCredentials ( Configuration configuration ) { PropertyKey [ ] propertyNames = { PropertyKey . SWIFT_API_KEY , PropertyKey . SWIFT_TENANT_KEY , PropertyKey . SWIFT_USER_KEY , PropertyKey . SWIFT_AUTH_URL_KEY , PropertyKey . SWIFT_AUTH_METHOD_KEY , PropertyKey . SWIFT_PASSWORD_KEY , PropertyKey . SWIFT_SIMULATION , PropertyKey . SWIFT_REGION_KEY } ; setConfigurationFromSystemProperties ( configuration , propertyNames ) ; } | Adds Swift keys to the given Hadoop Configuration object if the user has specified them using System properties and they re not already set . |
12,402 | private static void setConfigurationFromSystemProperties ( Configuration configuration , PropertyKey [ ] propertyNames ) { for ( PropertyKey propertyName : propertyNames ) { setConfigurationFromSystemProperty ( configuration , propertyName . toString ( ) ) ; } } | Set the System properties into Hadoop configuration . |
12,403 | private static void setConfigurationFromSystemProperty ( Configuration configuration , String propertyName ) { String propertyValue = System . getProperty ( propertyName ) ; if ( propertyValue != null && configuration . get ( propertyName ) == null ) { configuration . set ( propertyName , propertyValue ) ; } } | Set the System property into Hadoop configuration . |
12,404 | public static void writeTarGz ( Path dirPath , OutputStream output ) throws IOException , InterruptedException { GzipCompressorOutputStream zipStream = new GzipCompressorOutputStream ( output ) ; TarArchiveOutputStream archiveStream = new TarArchiveOutputStream ( zipStream ) ; for ( Path subPath : Files . walk ( dirPath ) . collect ( toList ( ) ) ) { if ( Thread . interrupted ( ) ) { throw new InterruptedException ( ) ; } File file = subPath . toFile ( ) ; TarArchiveEntry entry = new TarArchiveEntry ( file , dirPath . relativize ( subPath ) . toString ( ) ) ; archiveStream . putArchiveEntry ( entry ) ; if ( file . isFile ( ) ) { try ( InputStream fileIn = Files . newInputStream ( subPath ) ) { IOUtils . copy ( fileIn , archiveStream ) ; } } archiveStream . closeArchiveEntry ( ) ; } archiveStream . finish ( ) ; zipStream . finish ( ) ; } | Creates a gzipped tar archive from the given path streaming the data to the give output stream . |
12,405 | public static void readTarGz ( Path dirPath , InputStream input ) throws IOException { InputStream zipStream = new GzipCompressorInputStream ( input ) ; TarArchiveInputStream archiveStream = new TarArchiveInputStream ( zipStream ) ; TarArchiveEntry entry ; while ( ( entry = ( TarArchiveEntry ) archiveStream . getNextEntry ( ) ) != null ) { File outputFile = new File ( dirPath . toFile ( ) , entry . getName ( ) ) ; if ( entry . isDirectory ( ) ) { outputFile . mkdirs ( ) ; } else { outputFile . getParentFile ( ) . mkdirs ( ) ; try ( FileOutputStream fileOut = new FileOutputStream ( outputFile ) ) { IOUtils . copy ( archiveStream , fileOut ) ; } } } } | Reads a gzipped tar archive from a stream and writes it to the given path . |
12,406 | protected void startMasters ( boolean isLeader ) { try { if ( isLeader ) { if ( ServerConfiguration . isSet ( PropertyKey . MASTER_JOURNAL_INIT_FROM_BACKUP ) ) { AlluxioURI backup = new AlluxioURI ( ServerConfiguration . get ( PropertyKey . MASTER_JOURNAL_INIT_FROM_BACKUP ) ) ; if ( mJournalSystem . isEmpty ( ) ) { initFromBackup ( backup ) ; } else { LOG . info ( "The journal system is not freshly formatted, skipping restoring backup from " + backup ) ; } } mSafeModeManager . notifyPrimaryMasterStarted ( ) ; } else { startRejectingServers ( ) ; } mRegistry . start ( isLeader ) ; LOG . info ( "All masters started" ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Starts all masters including block master FileSystem master and additional masters . |
12,407 | protected void startServingWebServer ( ) { stopRejectingWebServer ( ) ; mWebServer = new MasterWebServer ( ServiceType . MASTER_WEB . getServiceName ( ) , mWebBindAddress , this ) ; mWebServer . addHandler ( mMetricsServlet . getHandler ( ) ) ; mWebServer . addHandler ( mPMetricsServlet . getHandler ( ) ) ; mWebServer . start ( ) ; } | Starts serving web ui server resetting master web port adding the metrics servlet to the web server and starting web ui . |
12,408 | protected void startJvmMonitorProcess ( ) { if ( ServerConfiguration . getBoolean ( PropertyKey . MASTER_JVM_MONITOR_ENABLED ) ) { mJvmPauseMonitor = new JvmPauseMonitor ( ServerConfiguration . getMs ( PropertyKey . JVM_MONITOR_SLEEP_INTERVAL_MS ) , ServerConfiguration . getMs ( PropertyKey . JVM_MONITOR_INFO_THRESHOLD_MS ) , ServerConfiguration . getMs ( PropertyKey . JVM_MONITOR_WARN_THRESHOLD_MS ) ) ; mJvmPauseMonitor . start ( ) ; } } | Starts jvm monitor process to monitor jvm . |
12,409 | private ObjectListing getObjectListingChunk ( ListObjectsRequest request ) { ObjectListing result ; try { result = mClient . listObjects ( request ) ; } catch ( CosClientException e ) { LOG . error ( "Failed to list path {}" , request . getPrefix ( ) , e ) ; result = null ; } return result ; } | Get next chunk of listing result |
12,410 | private void scanExtensions ( List < T > factories , String extensionsDir ) { LOG . info ( "Loading extension jars from {}" , extensionsDir ) ; scan ( Arrays . asList ( ExtensionUtils . listExtensions ( extensionsDir ) ) , factories ) ; } | Finds all factory from the extensions directory . |
12,411 | private void scanLibs ( List < T > factories , String libDir ) { LOG . info ( "Loading core jars from {}" , libDir ) ; List < File > files = new ArrayList < > ( ) ; try ( DirectoryStream < Path > stream = Files . newDirectoryStream ( Paths . get ( libDir ) , mExtensionPattern ) ) { for ( Path entry : stream ) { if ( entry . toFile ( ) . isFile ( ) ) { files . add ( entry . toFile ( ) ) ; } } } catch ( IOException e ) { LOG . warn ( "Failed to load libs: {}" , e . toString ( ) ) ; } scan ( files , factories ) ; } | Finds all factory from the lib directory . |
12,412 | private void scan ( List < File > files , List < T > factories ) { for ( File jar : files ) { try { URL extensionURL = jar . toURI ( ) . toURL ( ) ; String jarPath = extensionURL . toString ( ) ; ClassLoader extensionsClassLoader = new ExtensionsClassLoader ( new URL [ ] { extensionURL } , ClassLoader . getSystemClassLoader ( ) ) ; ServiceLoader < T > extensionServiceLoader = ServiceLoader . load ( mFactoryClass , extensionsClassLoader ) ; for ( T factory : extensionServiceLoader ) { LOG . debug ( "Discovered a factory implementation {} - {} in jar {}" , factory . getClass ( ) , factory , jarPath ) ; register ( factory , factories ) ; } } catch ( Throwable t ) { LOG . warn ( "Failed to load jar {}: {}" , jar , t . toString ( ) ) ; } } } | Class - loads jar files that have not been loaded . |
12,413 | public static void writeCSVFile ( Collection < ? extends PropertyKey > defaultKeys , String filePath ) throws IOException { if ( defaultKeys . size ( ) == 0 ) { return ; } FileWriter fileWriter ; Closer closer = Closer . create ( ) ; String [ ] fileNames = { "user-configuration.csv" , "master-configuration.csv" , "worker-configuration.csv" , "security-configuration.csv" , "common-configuration.csv" , "cluster-management-configuration.csv" } ; try { Map < String , FileWriter > fileWriterMap = new HashMap < > ( ) ; for ( String fileName : fileNames ) { fileWriter = new FileWriter ( PathUtils . concatPath ( filePath , fileName ) ) ; fileWriter . append ( CSV_FILE_HEADER + "\n" ) ; String key = fileName . substring ( 0 , fileName . indexOf ( "configuration" ) - 1 ) ; fileWriterMap . put ( key , fileWriter ) ; closer . register ( fileWriter ) ; } List < PropertyKey > dfkeys = new ArrayList < > ( defaultKeys ) ; Collections . sort ( dfkeys ) ; for ( PropertyKey propertyKey : dfkeys ) { String pKey = propertyKey . toString ( ) ; String defaultDescription ; if ( propertyKey . getDefaultSupplier ( ) . get ( ) == null ) { defaultDescription = "" ; } else { defaultDescription = propertyKey . getDefaultSupplier ( ) . getDescription ( ) ; } defaultDescription = String . format ( "\"%s\"" , defaultDescription ) ; String keyValueStr = pKey + "," + defaultDescription + "\n" ; if ( pKey . startsWith ( "alluxio.user." ) ) { fileWriter = fileWriterMap . get ( "user" ) ; } else if ( pKey . startsWith ( "alluxio.master." ) ) { fileWriter = fileWriterMap . get ( "master" ) ; } else if ( pKey . startsWith ( "alluxio.worker." ) ) { fileWriter = fileWriterMap . get ( "worker" ) ; } else if ( pKey . startsWith ( "alluxio.security." ) ) { fileWriter = fileWriterMap . get ( "security" ) ; } else if ( pKey . startsWith ( "alluxio.keyvalue." ) ) { fileWriter = fileWriterMap . get ( "key-value" ) ; } else if ( pKey . startsWith ( "alluxio.integration" ) ) { fileWriter = fileWriterMap . 
get ( "cluster-management" ) ; } else { fileWriter = fileWriterMap . get ( "common" ) ; } fileWriter . append ( keyValueStr ) ; } LOG . info ( "Property Key CSV files were created successfully." ) ; } catch ( Exception e ) { throw closer . rethrow ( e ) ; } finally { try { closer . close ( ) ; } catch ( IOException e ) { LOG . error ( "Error while flushing/closing Property Key CSV FileWriter" , e ) ; } } } | Writes property key to csv files . |
12,414 | public static void writeYMLFile ( Collection < ? extends PropertyKey > defaultKeys , String filePath ) throws IOException { if ( defaultKeys . size ( ) == 0 ) { return ; } FileWriter fileWriter ; Closer closer = Closer . create ( ) ; String [ ] fileNames = { "user-configuration.yml" , "master-configuration.yml" , "worker-configuration.yml" , "security-configuration.yml" , "common-configuration.yml" , "cluster-management-configuration.yml" } ; try { Map < String , FileWriter > fileWriterMap = new HashMap < > ( ) ; for ( String fileName : fileNames ) { fileWriter = new FileWriter ( PathUtils . concatPath ( filePath , fileName ) ) ; String key = fileName . substring ( 0 , fileName . indexOf ( "configuration" ) - 1 ) ; fileWriterMap . put ( key , fileWriter ) ; closer . register ( fileWriter ) ; } List < PropertyKey > dfkeys = new ArrayList < > ( defaultKeys ) ; Collections . sort ( dfkeys ) ; for ( PropertyKey iteratorPK : dfkeys ) { String pKey = iteratorPK . toString ( ) ; String description = iteratorPK . getDescription ( ) . replace ( "'" , "''" ) ; if ( iteratorPK . isIgnoredSiteProperty ( ) ) { description += " Note: This property must be specified as a JVM property; " + "it is not accepted in alluxio-site.properties." ; } String keyValueStr = pKey + ":\n '" + description + "'\n" ; if ( pKey . startsWith ( "alluxio.user." ) ) { fileWriter = fileWriterMap . get ( "user" ) ; } else if ( pKey . startsWith ( "alluxio.master." ) ) { fileWriter = fileWriterMap . get ( "master" ) ; } else if ( pKey . startsWith ( "alluxio.worker." ) ) { fileWriter = fileWriterMap . get ( "worker" ) ; } else if ( pKey . startsWith ( "alluxio.security." ) ) { fileWriter = fileWriterMap . get ( "security" ) ; } else if ( pKey . startsWith ( "alluxio.keyvalue." ) ) { fileWriter = fileWriterMap . get ( "key-value" ) ; } else if ( pKey . startsWith ( "alluxio.integration." ) ) { fileWriter = fileWriterMap . get ( "cluster-management" ) ; } else { fileWriter = fileWriterMap . 
get ( "common" ) ; } fileWriter . append ( keyValueStr ) ; } LOG . info ( "YML files for description of Property Keys were created successfully." ) ; } catch ( Exception e ) { throw closer . rethrow ( e ) ; } finally { try { closer . close ( ) ; } catch ( IOException e ) { LOG . error ( "Error while flushing/closing YML files for description of Property Keys " + "FileWriter" , e ) ; } } } | Writes description of property key to yml files . |
12,415 | public static void main ( String [ ] args ) throws IOException { Collection < ? extends PropertyKey > defaultKeys = PropertyKey . defaultKeys ( ) ; defaultKeys . removeIf ( key -> key . isHidden ( ) ) ; String homeDir = new InstancedConfiguration ( ConfigurationUtils . defaults ( ) ) . get ( PropertyKey . HOME ) ; String filePath = PathUtils . concatPath ( homeDir , CSV_FILE_DIR ) ; writeCSVFile ( defaultKeys , filePath ) ; filePath = PathUtils . concatPath ( homeDir , YML_FILE_DIR ) ; writeYMLFile ( defaultKeys , filePath ) ; } | Main entry for this util class . |
12,416 | public static void enableAutoRead ( Channel channel ) { if ( ! channel . config ( ) . isAutoRead ( ) ) { channel . config ( ) . setAutoRead ( true ) ; channel . read ( ) ; } } | Enables auto read for a netty channel . |
12,417 | public static int getConfKey ( String ... args ) { switch ( args . length ) { case 0 : printHelp ( "Missing argument." ) ; return 1 ; case 1 : String varName = args [ 0 ] . trim ( ) ; String propertyName = ENV_VIOLATORS . getOrDefault ( varName , varName . toLowerCase ( ) . replace ( "_" , "." ) ) ; if ( ! PropertyKey . isValid ( propertyName ) ) { printHelp ( String . format ( "%s is not a valid configuration key" , propertyName ) ) ; return 1 ; } PropertyKey key = PropertyKey . fromString ( propertyName ) ; System . out . println ( key . getName ( ) ) ; break ; default : printHelp ( "More arguments than expected" ) ; return 1 ; } return 0 ; } | Implements get configuration key . |
12,418 | public static void main ( String [ ] args ) { InstancedConfiguration conf = new InstancedConfiguration ( ConfigurationUtils . defaults ( ) ) ; if ( ! ConfigurationUtils . masterHostConfigured ( conf ) && args . length > 0 ) { System . out . println ( ConfigurationUtils . getMasterHostNotConfiguredMessage ( "Alluxio fsadmin shell" ) ) ; System . exit ( 1 ) ; } conf . set ( PropertyKey . USER_RPC_RETRY_MAX_DURATION , "5s" , Source . DEFAULT ) ; FileSystemAdminShell fsAdminShell = new FileSystemAdminShell ( conf ) ; System . exit ( fsAdminShell . run ( args ) ) ; } | Manage Alluxio file system . |
12,419 | public static void main ( String [ ] args ) { System . exit ( getConf ( ClientContext . create ( new InstancedConfiguration ( ConfigurationUtils . defaults ( ) ) ) , args ) ) ; } | Prints Alluxio configuration . |
12,420 | public static void prepareFilePath ( AlluxioURI alluxioPath , String ufsPath , FileSystem fs , UnderFileSystem ufs ) throws AlluxioException , IOException { AlluxioURI dstPath = new AlluxioURI ( ufsPath ) ; String parentPath = dstPath . getParent ( ) . getPath ( ) ; if ( ! ufs . isDirectory ( parentPath ) ) { Stack < Pair < String , MkdirsOptions > > ufsDirsToMakeWithOptions = new Stack < > ( ) ; AlluxioURI curAlluxioPath = alluxioPath . getParent ( ) ; AlluxioURI curUfsPath = dstPath . getParent ( ) ; while ( curUfsPath != null && ! ufs . isDirectory ( curUfsPath . toString ( ) ) && curAlluxioPath != null ) { URIStatus curDirStatus = fs . getStatus ( curAlluxioPath ) ; if ( curDirStatus . isMountPoint ( ) ) { throw new IOException ( ExceptionMessage . UFS_PATH_DOES_NOT_EXIST . getMessage ( curUfsPath ) ) ; } ufsDirsToMakeWithOptions . push ( new Pair < > ( curUfsPath . toString ( ) , MkdirsOptions . defaults ( ServerConfiguration . global ( ) ) . setCreateParent ( false ) . setOwner ( curDirStatus . getOwner ( ) ) . setGroup ( curDirStatus . getGroup ( ) ) . setMode ( new Mode ( ( short ) curDirStatus . getMode ( ) ) ) ) ) ; curAlluxioPath = curAlluxioPath . getParent ( ) ; curUfsPath = curUfsPath . getParent ( ) ; } while ( ! ufsDirsToMakeWithOptions . empty ( ) ) { Pair < String , MkdirsOptions > ufsDirAndPerm = ufsDirsToMakeWithOptions . pop ( ) ; if ( ! ufs . mkdirs ( ufsDirAndPerm . getFirst ( ) , ufsDirAndPerm . getSecond ( ) ) && ! ufs . isDirectory ( ufsDirAndPerm . getFirst ( ) ) ) { throw new IOException ( "Failed to create dir: " + ufsDirAndPerm . getFirst ( ) ) ; } } } } | Creates parent directories for path with correct permissions if required . |
12,421 | private void evictIfOverLimit ( ) { int numToEvict = mCache . size ( ) - mSoftLimit ; if ( numToEvict <= 0 ) { return ; } if ( mEvictLock . tryLock ( ) ) { try { numToEvict = mCache . size ( ) - mSoftLimit ; while ( numToEvict > 0 ) { if ( ! mIterator . hasNext ( ) ) { mIterator = mCache . entrySet ( ) . iterator ( ) ; } Map . Entry < K , ValNode > candidateMapEntry = mIterator . next ( ) ; ValNode candidate = candidateMapEntry . getValue ( ) ; if ( candidate . mIsAccessed ) { candidate . mIsAccessed = false ; } else { if ( candidate . mRefCount . compareAndSet ( 0 , Integer . MIN_VALUE ) ) { mIterator . remove ( ) ; numToEvict -- ; } } } } finally { mEvictLock . unlock ( ) ; } } } | If the size of the cache exceeds the soft limit and no other thread is evicting entries start evicting entries . |
12,422 | public LockResource get ( K key , LockMode mode ) { ValNode valNode = getValNode ( key ) ; ReentrantReadWriteLock lock = valNode . mValue ; switch ( mode ) { case READ : return new RefCountLockResource ( lock . readLock ( ) , true , valNode . mRefCount ) ; case WRITE : return new RefCountLockResource ( lock . writeLock ( ) , true , valNode . mRefCount ) ; default : throw new IllegalStateException ( "Unknown lock mode: " + mode ) ; } } | Locks the specified key in the specified mode . |
12,423 | public Optional < LockResource > tryGet ( K key , LockMode mode ) { ValNode valNode = getValNode ( key ) ; ReentrantReadWriteLock lock = valNode . mValue ; Lock innerLock ; switch ( mode ) { case READ : innerLock = lock . readLock ( ) ; break ; case WRITE : innerLock = lock . writeLock ( ) ; break ; default : throw new IllegalStateException ( "Unknown lock mode: " + mode ) ; } if ( ! innerLock . tryLock ( ) ) { return Optional . empty ( ) ; } return Optional . of ( new RefCountLockResource ( innerLock , false , valNode . mRefCount ) ) ; } | Attempts to take a lock on the given key . |
12,424 | public ReentrantReadWriteLock getRawReadWriteLock ( K key ) { return mCache . getOrDefault ( key , new ValNode ( new ReentrantReadWriteLock ( ) ) ) . mValue ; } | Get the raw readwrite lock from the cache . |
12,425 | public boolean containsKey ( K key ) { Preconditions . checkNotNull ( key , "key can not be null" ) ; return mCache . containsKey ( key ) ; } | Returns whether the cache contains a particular key . |
12,426 | private String calculateChecksum ( AlluxioURI filePath ) throws AlluxioException , IOException { OpenFilePOptions options = OpenFilePOptions . newBuilder ( ) . setReadType ( ReadPType . NO_CACHE ) . build ( ) ; try ( FileInStream fis = mFileSystem . openFile ( filePath , options ) ) { return DigestUtils . md5Hex ( fis ) ; } } | Calculates the md5 checksum for a file . |
12,427 | protected void checkVersion ( long clientVersion ) throws IOException { if ( mServiceVersion == Constants . UNKNOWN_SERVICE_VERSION ) { mServiceVersion = getRemoteServiceVersion ( ) ; if ( mServiceVersion != clientVersion ) { throw new IOException ( ExceptionMessage . INCOMPATIBLE_VERSION . getMessage ( getServiceName ( ) , clientVersion , mServiceVersion ) ) ; } } } | Checks that the service version is compatible with the client . |
12,428 | public synchronized void connect ( ) throws AlluxioStatusException { if ( mConnected ) { return ; } disconnect ( ) ; Preconditions . checkState ( ! mClosed , "Client is closed, will not try to connect." ) ; IOException lastConnectFailure = null ; RetryPolicy retryPolicy = mRetryPolicySupplier . get ( ) ; while ( retryPolicy . attempt ( ) ) { if ( mClosed ) { throw new FailedPreconditionException ( "Failed to connect: client has been closed" ) ; } try { mAddress = getAddress ( ) ; } catch ( UnavailableException e ) { LOG . warn ( "Failed to determine {} rpc address ({}): {}" , getServiceName ( ) , retryPolicy . getAttemptCount ( ) , e . toString ( ) ) ; continue ; } if ( mAddress . isUnresolved ( ) ) { LOG . info ( "Retry resolving address {}" , mAddress ) ; mAddress = new InetSocketAddress ( mAddress . getHostName ( ) , mAddress . getPort ( ) ) ; if ( mAddress . isUnresolved ( ) ) { LOG . warn ( "Failed to resolve address on retry {}" , mAddress ) ; } } try { beforeConnect ( ) ; LOG . info ( "Alluxio client (version {}) is trying to connect with {} @ {}" , RuntimeConstants . VERSION , getServiceName ( ) , mAddress ) ; mChannel = GrpcChannelBuilder . newBuilder ( new GrpcServerAddress ( mAddress ) , mContext . getConf ( ) ) . setSubject ( mContext . getSubject ( ) ) . build ( ) ; mVersionService = ServiceVersionClientServiceGrpc . newBlockingStub ( mChannel ) ; mConnected = true ; afterConnect ( ) ; checkVersion ( getServiceVersion ( ) ) ; LOG . info ( "Alluxio client (version {}) is connected with {} @ {}" , RuntimeConstants . VERSION , getServiceName ( ) , mAddress ) ; return ; } catch ( IOException e ) { LOG . warn ( "Failed to connect ({}) with {} @ {}: {}" , retryPolicy . getAttemptCount ( ) , getServiceName ( ) , mAddress , e . getMessage ( ) ) ; lastConnectFailure = e ; } } if ( mChannel != null ) { mChannel . shutdown ( ) ; } if ( mAddress == null ) { throw new UnavailableException ( String . 
format ( "Failed to determine address for %s after %s attempts" , getServiceName ( ) , retryPolicy . getAttemptCount ( ) ) ) ; } if ( lastConnectFailure instanceof UnauthenticatedException ) { throw ( AlluxioStatusException ) lastConnectFailure ; } throw new UnavailableException ( String . format ( "Failed to connect to %s @ %s after %s attempts" , getServiceName ( ) , mAddress , retryPolicy . getAttemptCount ( ) ) , lastConnectFailure ) ; } | Connects with the remote . |
12,429 | public synchronized void disconnect ( ) { if ( mConnected ) { Preconditions . checkNotNull ( mChannel , PreconditionMessage . CHANNEL_NULL_WHEN_CONNECTED ) ; LOG . debug ( "Disconnecting from the {} @ {}" , getServiceName ( ) , mAddress ) ; beforeDisconnect ( ) ; mChannel . shutdown ( ) ; mConnected = false ; afterDisconnect ( ) ; } } | Closes the connection with the Alluxio remote and does the necessary cleanup . It should be used if the client has not connected with the remote for a while for example . |
12,430 | public static void main ( String [ ] args ) { if ( args . length != 0 ) { LOG . warn ( "java -cp {} {}" , RuntimeConstants . ALLUXIO_JAR , AlluxioMasterMonitor . class . getCanonicalName ( ) ) ; LOG . warn ( "ignoring arguments" ) ; } AlluxioConfiguration alluxioConf = new InstancedConfiguration ( ConfigurationUtils . defaults ( ) ) ; MasterHealthCheckClient . Builder builder = new MasterHealthCheckClient . Builder ( alluxioConf ) ; if ( ConfigurationUtils . isHaMode ( alluxioConf ) ) { builder . withProcessCheck ( true ) ; } else { builder . withProcessCheck ( false ) ; } HealthCheckClient client = builder . build ( ) ; if ( ! client . isServing ( ) ) { System . exit ( 1 ) ; } System . exit ( 0 ) ; } | Starts the Alluxio master monitor . |
12,431 | public static String formatLsString ( boolean hSize , boolean acl , boolean isFolder , String permission , String userName , String groupName , long size , long lastModifiedTime , int inAlluxioPercentage , String persistenceState , String path , String dateFormatPattern ) { String inAlluxioState ; String sizeStr ; if ( isFolder ) { inAlluxioState = IN_ALLUXIO_STATE_DIR ; sizeStr = String . valueOf ( size ) ; } else { inAlluxioState = String . format ( IN_ALLUXIO_STATE_FILE_FORMAT , inAlluxioPercentage ) ; sizeStr = hSize ? FormatUtils . getSizeFromBytes ( size ) : String . valueOf ( size ) ; } if ( acl ) { return String . format ( LS_FORMAT , permission , userName , groupName , sizeStr , persistenceState , CommonUtils . convertMsToDate ( lastModifiedTime , dateFormatPattern ) , inAlluxioState , path ) ; } else { return String . format ( LS_FORMAT_NO_ACL , sizeStr , persistenceState , CommonUtils . convertMsToDate ( lastModifiedTime , dateFormatPattern ) , inAlluxioState , path ) ; } } | Formats the ls result string . |
12,432 | private void ls ( AlluxioURI path , boolean recursive , boolean forceLoadMetadata , boolean dirAsFile , boolean hSize , boolean pinnedOnly , String sortField , boolean reverse ) throws AlluxioException , IOException { URIStatus pathStatus = mFileSystem . getStatus ( path ) ; if ( dirAsFile ) { if ( pinnedOnly && ! pathStatus . isPinned ( ) ) { return ; } printLsString ( pathStatus , hSize ) ; return ; } ListStatusPOptions . Builder optionsBuilder = ListStatusPOptions . newBuilder ( ) ; if ( forceLoadMetadata ) { optionsBuilder . setLoadMetadataType ( LoadMetadataPType . ALWAYS ) ; } optionsBuilder . setRecursive ( recursive ) ; Timer timer = new Timer ( ) ; if ( pathStatus . isFolder ( ) ) { timer . schedule ( new TimerTask ( ) { public void run ( ) { System . out . printf ( "Getting directory status of %s files or sub-directories " + "may take a while." , pathStatus . getLength ( ) ) ; } } , 10000 ) ; } List < URIStatus > statuses = mFileSystem . listStatus ( path , optionsBuilder . build ( ) ) ; timer . cancel ( ) ; List < URIStatus > sorted = sortByFieldAndOrder ( statuses , sortField , reverse ) ; for ( URIStatus status : sorted ) { if ( ! pinnedOnly || status . isPinned ( ) ) { printLsString ( status , hSize ) ; } } } | Displays information for all directories and files directly under the path specified in args . |
12,433 | public boolean isActivelySynced ( AlluxioURI path ) { for ( AlluxioURI syncedPath : mSyncPathList ) { try { if ( PathUtils . hasPrefix ( path . getPath ( ) , syncedPath . getPath ( ) ) ) { return true ; } } catch ( InvalidPathException e ) { return false ; } } return false ; } | Check if a URI is actively synced . |
12,434 | public void start ( ) throws IOException { for ( AlluxioURI syncPoint : mSyncPathList ) { MountTable . Resolution resolution = null ; long mountId = 0 ; try { resolution = mMountTable . resolve ( syncPoint ) ; mountId = resolution . getMountId ( ) ; } catch ( InvalidPathException e ) { LOG . info ( "Invalid Path encountered during start up of ActiveSyncManager, " + "path {}, exception {}" , syncPoint , e ) ; continue ; } try ( CloseableResource < UnderFileSystem > ufsResource = resolution . acquireUfsResource ( ) ) { if ( ! ufsResource . get ( ) . supportsActiveSync ( ) ) { throw new UnsupportedOperationException ( "Active Sync is not supported on this UFS type: " + ufsResource . get ( ) . getUnderFSType ( ) ) ; } ufsResource . get ( ) . startSync ( resolution . getUri ( ) ) ; } } for ( long mountId : mFilterMap . keySet ( ) ) { long txId = mStartingTxIdMap . getOrDefault ( mountId , SyncInfo . INVALID_TXID ) ; launchPollingThread ( mountId , txId ) ; try { if ( ( txId == SyncInfo . INVALID_TXID ) && ServerConfiguration . getBoolean ( PropertyKey . MASTER_ACTIVE_UFS_SYNC_INITIAL_SYNC ) ) { mExecutorService . submit ( ( ) -> mFilterMap . get ( mountId ) . parallelStream ( ) . forEach ( syncPoint -> { try { RetryUtils . retry ( "active sync during start" , ( ) -> mFileSystemMaster . activeSyncMetadata ( syncPoint , null , getExecutor ( ) ) , RetryUtils . defaultActiveSyncClientRetry ( ServerConfiguration . getMs ( PropertyKey . MASTER_ACTIVE_UFS_POLL_TIMEOUT ) ) ) ; } catch ( IOException e ) { LOG . warn ( "IOException encountered during active sync while starting {}" , e ) ; } } ) ) ; } } catch ( Exception e ) { LOG . warn ( "exception encountered during initial sync {}" , e ) ; } } } | start the polling threads . |
12,435 | public void launchPollingThread ( long mountId , long txId ) { LOG . debug ( "launch polling thread for mount id {}, txId {}" , mountId , txId ) ; if ( ! mPollerMap . containsKey ( mountId ) ) { try ( CloseableResource < UnderFileSystem > ufsClient = mMountTable . getUfsClient ( mountId ) . acquireUfsResource ( ) ) { ufsClient . get ( ) . startActiveSyncPolling ( txId ) ; } catch ( IOException e ) { LOG . warn ( "IO Exception trying to launch Polling thread {}" , e ) ; } ActiveSyncer syncer = new ActiveSyncer ( mFileSystemMaster , this , mMountTable , mountId ) ; Future < ? > future = getExecutor ( ) . submit ( new HeartbeatThread ( HeartbeatContext . MASTER_ACTIVE_UFS_SYNC , syncer , ( int ) ServerConfiguration . getMs ( PropertyKey . MASTER_ACTIVE_UFS_SYNC_INTERVAL ) , ServerConfiguration . global ( ) ) ) ; mPollerMap . put ( mountId , future ) ; } } | Launches polling thread on a particular mount point with starting txId . |
12,436 | public void applyAndJournal ( Supplier < JournalContext > context , AddSyncPointEntry entry ) { try { apply ( entry ) ; context . get ( ) . append ( Journal . JournalEntry . newBuilder ( ) . setAddSyncPoint ( entry ) . build ( ) ) ; } catch ( Throwable t ) { ProcessUtils . fatalError ( LOG , t , "Failed to apply %s" , entry ) ; throw t ; } } | Apply AddSyncPoint entry and journal the entry . |
12,437 | public void stopSyncForMount ( long mountId ) throws InvalidPathException , IOException { LOG . debug ( "Stop sync for mount id {}" , mountId ) ; if ( mFilterMap . containsKey ( mountId ) ) { List < Pair < AlluxioURI , MountTable . Resolution > > toBeDeleted = new ArrayList < > ( ) ; for ( AlluxioURI uri : mFilterMap . get ( mountId ) ) { MountTable . Resolution resolution = resolveSyncPoint ( uri ) ; if ( resolution != null ) { toBeDeleted . add ( new Pair < > ( uri , resolution ) ) ; } } for ( Pair < AlluxioURI , MountTable . Resolution > deleteInfo : toBeDeleted ) { stopSyncInternal ( deleteInfo . getFirst ( ) , deleteInfo . getSecond ( ) ) ; } } } | stop active sync on a mount id . |
12,438 | public MountTable . Resolution resolveSyncPoint ( AlluxioURI syncPoint ) throws InvalidPathException { if ( ! mSyncPathList . contains ( syncPoint ) ) { LOG . debug ( "syncPoint not found {}" , syncPoint . getPath ( ) ) ; return null ; } MountTable . Resolution resolution = mMountTable . resolve ( syncPoint ) ; return resolution ; } | Perform various checks of stopping a sync point . |
12,439 | public void stopSyncInternal ( AlluxioURI syncPoint , MountTable . Resolution resolution ) { try ( LockResource r = new LockResource ( mSyncManagerLock ) ) { LOG . debug ( "stop syncPoint {}" , syncPoint . getPath ( ) ) ; RemoveSyncPointEntry removeSyncPoint = File . RemoveSyncPointEntry . newBuilder ( ) . setSyncpointPath ( syncPoint . toString ( ) ) . setMountId ( resolution . getMountId ( ) ) . build ( ) ; apply ( removeSyncPoint ) ; try { stopSyncPostJournal ( syncPoint ) ; } catch ( Throwable e ) { AddSyncPointEntry addSyncPoint = File . AddSyncPointEntry . newBuilder ( ) . setSyncpointPath ( syncPoint . toString ( ) ) . build ( ) ; apply ( addSyncPoint ) ; recoverFromStopSync ( syncPoint , resolution . getMountId ( ) ) ; } } } | stop active sync on a URI . |
12,440 | public List < SyncPointInfo > getSyncPathList ( ) { List < SyncPointInfo > returnList = new ArrayList < > ( ) ; for ( AlluxioURI uri : mSyncPathList ) { SyncPointInfo . SyncStatus status ; Future < ? > syncStatus = mSyncPathStatus . get ( uri ) ; if ( syncStatus == null ) { status = SyncPointInfo . SyncStatus . NOT_INITIALLY_SYNCED ; } else if ( syncStatus . isDone ( ) ) { status = SyncPointInfo . SyncStatus . INITIALLY_SYNCED ; } else { status = SyncPointInfo . SyncStatus . SYNCING ; } returnList . add ( new SyncPointInfo ( uri , status ) ) ; } return returnList ; } | Get the sync point list . |
12,441 | public void stopSyncPostJournal ( AlluxioURI syncPoint ) throws InvalidPathException { MountTable . Resolution resolution = mMountTable . resolve ( syncPoint ) ; long mountId = resolution . getMountId ( ) ; Future < ? > syncFuture = mSyncPathStatus . remove ( syncPoint ) ; if ( syncFuture != null ) { syncFuture . cancel ( true ) ; } if ( mFilterMap . get ( mountId ) . isEmpty ( ) ) { mFilterMap . remove ( mountId ) ; Future < ? > future = mPollerMap . remove ( mountId ) ; if ( future != null ) { future . cancel ( true ) ; } } try ( CloseableResource < UnderFileSystem > ufs = resolution . acquireUfsResource ( ) ) { ufs . get ( ) . stopSync ( resolution . getUri ( ) ) ; } catch ( IOException e ) { LOG . info ( "Ufs IOException for uri {}, exception is {}" , syncPoint , e ) ; } if ( mFilterMap . containsKey ( mountId ) && mFilterMap . get ( mountId ) . isEmpty ( ) ) { try ( CloseableResource < UnderFileSystem > ufs = resolution . acquireUfsResource ( ) ) { ufs . get ( ) . stopActiveSyncPolling ( ) ; } catch ( IOException e ) { LOG . warn ( "Encountered IOException when trying to stop polling thread {}" , e ) ; } } } | Clean up tasks to stop sync point after we have journaled . |
12,442 | public void startSyncPostJournal ( AlluxioURI uri ) throws InvalidPathException { MountTable . Resolution resolution = mMountTable . resolve ( uri ) ; startInitSync ( uri , resolution ) ; launchPollingThread ( resolution . getMountId ( ) , SyncInfo . INVALID_TXID ) ; } | Continue to start sync after we have journaled the operation . |
12,443 | public void recoverFromStopSync ( AlluxioURI uri , long mountId ) { if ( mSyncPathStatus . containsKey ( uri ) ) { return ; } try { MountTable . Resolution resolution = mMountTable . resolve ( uri ) ; startInitSync ( uri , resolution ) ; launchPollingThread ( resolution . getMountId ( ) , SyncInfo . INVALID_TXID ) ; } catch ( Throwable t ) { LOG . warn ( "Recovering from stop syncing failed {}" , t ) ; } } | Recover from a stop sync operation . |
12,444 | public void recoverFromStartSync ( AlluxioURI uri , long mountId ) { if ( mSyncPathStatus . containsKey ( uri ) ) { Future < ? > syncFuture = mSyncPathStatus . remove ( uri ) ; if ( syncFuture != null ) { syncFuture . cancel ( true ) ; } } mFilterMap . remove ( mountId ) ; Future < ? > future = mPollerMap . remove ( mountId ) ; if ( future != null ) { future . cancel ( true ) ; } } | Recover from start sync operation . |
12,445 | public static String convertByteArrayToStringWithoutEscape ( byte [ ] data , int offset , int length ) { StringBuilder sb = new StringBuilder ( length ) ; for ( int i = offset ; i < length && i < data . length ; i ++ ) { sb . append ( ( char ) data [ i ] ) ; } return sb . toString ( ) ; } | Converts a byte array to string . |
12,446 | public static String convertMsToShortClockTime ( long millis ) { Preconditions . checkArgument ( millis >= 0 , "Negative values are not supported" ) ; long days = millis / Constants . DAY_MS ; long hours = ( millis % Constants . DAY_MS ) / Constants . HOUR_MS ; long mins = ( millis % Constants . HOUR_MS ) / Constants . MINUTE_MS ; long secs = ( millis % Constants . MINUTE_MS ) / Constants . SECOND_MS ; return String . format ( "%d d, %d h, %d m, and %d s" , days , hours , mins , secs ) ; } | Converts milliseconds to short clock time . |
12,447 | List < AlluxioURI > checkConsistency ( AlluxioURI path , CheckConsistencyPOptions options ) throws IOException { FileSystemMasterClient client = mFsContext . acquireMasterClient ( ) ; try { return client . checkConsistency ( path , options ) ; } finally { mFsContext . releaseMasterClient ( client ) ; } } | Checks the consistency of Alluxio metadata against the under storage for all files and directories in a given subtree . |
12,448 | private void runConsistencyCheck ( AlluxioURI path , boolean repairConsistency ) throws AlluxioException , IOException { List < AlluxioURI > inconsistentUris = checkConsistency ( path , FileSystemOptions . checkConsistencyDefaults ( mFsContext . getPathConf ( path ) ) ) ; if ( inconsistentUris . isEmpty ( ) ) { System . out . println ( path + " is consistent with the under storage system." ) ; return ; } if ( ! repairConsistency ) { Collections . sort ( inconsistentUris ) ; System . out . println ( "The following files are inconsistent:" ) ; for ( AlluxioURI uri : inconsistentUris ) { System . out . println ( uri ) ; } } else { Collections . sort ( inconsistentUris ) ; System . out . println ( path + " has: " + inconsistentUris . size ( ) + " inconsistent files." ) ; List < AlluxioURI > inconsistentDirs = new ArrayList < AlluxioURI > ( ) ; for ( AlluxioURI inconsistentUri : inconsistentUris ) { URIStatus status = mFileSystem . getStatus ( inconsistentUri ) ; if ( status . isFolder ( ) ) { inconsistentDirs . add ( inconsistentUri ) ; continue ; } System . out . println ( "repairing path: " + inconsistentUri ) ; DeletePOptions deleteOptions = DeletePOptions . newBuilder ( ) . setAlluxioOnly ( true ) . build ( ) ; mFileSystem . delete ( inconsistentUri , deleteOptions ) ; mFileSystem . exists ( inconsistentUri ) ; System . out . println ( inconsistentUri + " repaired" ) ; System . out . println ( ) ; } for ( AlluxioURI uri : inconsistentDirs ) { DeletePOptions deleteOptions = DeletePOptions . newBuilder ( ) . setAlluxioOnly ( true ) . setRecursive ( true ) . build ( ) ; System . out . println ( "repairing path: " + uri ) ; mFileSystem . delete ( uri , deleteOptions ) ; mFileSystem . exists ( uri ) ; System . out . println ( uri + "repaired" ) ; System . out . 
println ( ) ; } } } | Checks the inconsistent files and directories which exist in Alluxio but don t exist in the under storage repairs the inconsistent paths by deleting them if repairConsistency is true . |
12,449 | public boolean needPersistence ( long fileId ) { if ( isFilePersisting ( fileId ) || isFilePersisted ( fileId ) ) { return false ; } try { String ufsFingerprint = ufsFingerprint ( fileId ) ; if ( ufsFingerprint != null ) { addPersistedFile ( fileId , ufsFingerprint ) ; return false ; } } catch ( Exception e ) { LOG . warn ( "Failed to check if file {} exists in under storage system: {}" , fileId , e . getMessage ( ) ) ; LOG . debug ( "Exception: " , e ) ; } return true ; } | Checks if the given file needs persistence . |
12,450 | private synchronized String ufsFingerprint ( long fileId ) throws IOException { FileInfo fileInfo = mBlockWorker . getFileInfo ( fileId ) ; String dstPath = fileInfo . getUfsPath ( ) ; try ( CloseableResource < UnderFileSystem > ufsResource = mUfsManager . get ( fileInfo . getMountId ( ) ) . acquireUfsResource ( ) ) { UnderFileSystem ufs = ufsResource . get ( ) ; return ufs . isFile ( dstPath ) ? ufs . getFingerprint ( dstPath ) : null ; } } | Returns the ufs fingerprint of the given file or null if the file doesn t exist . |
// Locks every block of a file under the checkpoint session before persisting it. Refuses if a
// persist for this file is already in progress. If any lock fails, all already-acquired locks
// are released and an aggregated IOException is thrown; on success the blockId->lockId map is
// recorded in mPersistingInProgressFiles under mLock.
12,451 | public void lockBlocks ( long fileId , List < Long > blockIds ) throws IOException { Map < Long , Long > blockIdToLockId = new HashMap < > ( ) ; List < Throwable > errors = new ArrayList < > ( ) ; synchronized ( mLock ) { if ( mPersistingInProgressFiles . containsKey ( fileId ) ) { throw new IOException ( "the file " + fileId + " is already being persisted" ) ; } } try { for ( long blockId : blockIds ) { long lockId = mBlockWorker . lockBlock ( Sessions . CHECKPOINT_SESSION_ID , blockId ) ; blockIdToLockId . put ( blockId , lockId ) ; } } catch ( BlockDoesNotExistException e ) { errors . add ( e ) ; for ( long lockId : blockIdToLockId . values ( ) ) { try { mBlockWorker . unlockBlock ( lockId ) ; } catch ( BlockDoesNotExistException bdnee ) { errors . add ( bdnee ) ; } } if ( ! errors . isEmpty ( ) ) { StringBuilder errorStr = new StringBuilder ( ) ; errorStr . append ( "failed to lock all blocks of file " ) . append ( fileId ) . append ( "\n" ) ; for ( Throwable error : errors ) { errorStr . append ( error ) . append ( '\n' ) ; } throw new IOException ( errorStr . toString ( ) ) ; } } synchronized ( mLock ) { mPersistingInProgressFiles . put ( fileId , blockIdToLockId ) ; } } | Locks all the blocks of a given file Id .
// Persists a file's blocks to the UFS. Requires lockBlocks() to have been called first (the
// recorded lock set must match blockIds exactly). Streams each block through byte channels into
// a newly created UFS file, optionally rate-limited per block; every lock is released in the
// finally block and any read/unlock failures are aggregated into one IOException. On success the
// file moves from mPersistingInProgressFiles to mPersistedUfsFingerprints under mLock.
// NOTE(review): the output stream is only closed on the success path — confirm a failure here
// cannot leak the UFS stream.
12,452 | public void persistFile ( long fileId , List < Long > blockIds ) throws AlluxioException , IOException { Map < Long , Long > blockIdToLockId ; synchronized ( mLock ) { blockIdToLockId = mPersistingInProgressFiles . get ( fileId ) ; if ( blockIdToLockId == null || ! blockIdToLockId . keySet ( ) . equals ( new HashSet < > ( blockIds ) ) ) { throw new IOException ( "Not all the blocks of file " + fileId + " are locked" ) ; } } FileInfo fileInfo = mBlockWorker . getFileInfo ( fileId ) ; try ( CloseableResource < UnderFileSystem > ufsResource = mUfsManager . get ( fileInfo . getMountId ( ) ) . acquireUfsResource ( ) ) { UnderFileSystem ufs = ufsResource . get ( ) ; String dstPath = prepareUfsFilePath ( fileInfo , ufs ) ; OutputStream outputStream = ufs . createNonexistingFile ( dstPath , CreateOptions . defaults ( ServerConfiguration . global ( ) ) . setOwner ( fileInfo . getOwner ( ) ) . setGroup ( fileInfo . getGroup ( ) ) . setMode ( new Mode ( ( short ) fileInfo . getMode ( ) ) ) ) ; final WritableByteChannel outputChannel = Channels . newChannel ( outputStream ) ; List < Throwable > errors = new ArrayList < > ( ) ; try { for ( long blockId : blockIds ) { long lockId = blockIdToLockId . get ( blockId ) ; if ( ServerConfiguration . getBoolean ( PropertyKey . WORKER_FILE_PERSIST_RATE_LIMIT_ENABLED ) ) { BlockMeta blockMeta = mBlockWorker . getBlockMeta ( Sessions . CHECKPOINT_SESSION_ID , blockId , lockId ) ; mPersistenceRateLimiter . acquire ( ( int ) blockMeta . getBlockSize ( ) ) ; } BlockReader reader = mBlockWorker . readBlockRemote ( Sessions . CHECKPOINT_SESSION_ID , blockId , lockId ) ; ReadableByteChannel inputChannel = reader . getChannel ( ) ; mChannelCopier . copy ( inputChannel , outputChannel ) ; reader . close ( ) ; } } catch ( BlockDoesNotExistException | InvalidWorkerStateException e ) { errors . add ( e ) ; } finally { for ( long lockId : blockIdToLockId . values ( ) ) { try { mBlockWorker . 
unlockBlock ( lockId ) ; } catch ( BlockDoesNotExistException e ) { errors . add ( e ) ; } } if ( ! errors . isEmpty ( ) ) { StringBuilder errorStr = new StringBuilder ( ) ; errorStr . append ( "the blocks of file" ) . append ( fileId ) . append ( " are failed to persist\n" ) ; for ( Throwable e : errors ) { errorStr . append ( e ) . append ( '\n' ) ; } throw new IOException ( errorStr . toString ( ) ) ; } } outputStream . flush ( ) ; outputChannel . close ( ) ; outputStream . close ( ) ; String ufsFingerprint = ufs . getFingerprint ( dstPath ) ; synchronized ( mLock ) { mPersistingInProgressFiles . remove ( fileId ) ; mPersistedUfsFingerprints . put ( fileId , ufsFingerprint ) ; } } } | Persists the blocks of a file into the under file system .
12,453 | private String prepareUfsFilePath ( FileInfo fileInfo , UnderFileSystem ufs ) throws AlluxioException , IOException { AlluxioURI alluxioPath = new AlluxioURI ( fileInfo . getPath ( ) ) ; FileSystem fs = mFileSystemFactory . get ( ) ; URIStatus status = fs . getStatus ( alluxioPath ) ; String ufsPath = status . getUfsPath ( ) ; UnderFileSystemUtils . prepareFilePath ( alluxioPath , ufsPath , fs , ufs ) ; return ufsPath ; } | Prepares the destination file path of the given file id . Also creates the parent folder if it does not exist . |
12,454 | public static BlockWorkerInfo getWorkerWithMostBlocks ( List < BlockWorkerInfo > workers , List < FileBlockInfo > fileBlockInfos ) { IndexedSet < BlockWorkerInfo > addressIndexedWorkers = new IndexedSet < > ( WORKER_ADDRESS_INDEX ) ; addressIndexedWorkers . addAll ( workers ) ; ConcurrentMap < BlockWorkerInfo , Integer > blocksPerWorker = Maps . newConcurrentMap ( ) ; int maxBlocks = 0 ; BlockWorkerInfo mostBlocksWorker = null ; for ( FileBlockInfo fileBlockInfo : fileBlockInfos ) { for ( BlockLocation location : fileBlockInfo . getBlockInfo ( ) . getLocations ( ) ) { BlockWorkerInfo worker = addressIndexedWorkers . getFirstByField ( WORKER_ADDRESS_INDEX , location . getWorkerAddress ( ) ) ; if ( worker == null ) { continue ; } blocksPerWorker . putIfAbsent ( worker , 0 ) ; int newBlockCount = blocksPerWorker . get ( worker ) + 1 ; blocksPerWorker . put ( worker , newBlockCount ) ; if ( newBlockCount > maxBlocks ) { maxBlocks = newBlockCount ; mostBlocksWorker = worker ; } } } return mostBlocksWorker ; } | Returns whichever specified worker stores the most blocks from the block info list . |
// Loads one block into the local worker: finds the worker whose host matches this node (error if
// none), opens the block with NO_CACHE read type (falling back to the UFS when the block is not
// in Alluxio), and copies it into a local block out-stream placed by LocalFirstPolicy. On any
// copy failure the out-stream is cancelled, with the cancel failure suppressed into the cause.
12,455 | public static void loadBlock ( FileSystem fs , FileSystemContext context , String path , long blockId ) throws AlluxioException , IOException { AlluxioBlockStore blockStore = AlluxioBlockStore . create ( context ) ; String localHostName = NetworkAddressUtils . getConnectHost ( ServiceType . WORKER_RPC , ServerConfiguration . global ( ) ) ; List < BlockWorkerInfo > workerInfoList = blockStore . getAllWorkers ( ) ; WorkerNetAddress localNetAddress = null ; for ( BlockWorkerInfo workerInfo : workerInfoList ) { if ( workerInfo . getNetAddress ( ) . getHost ( ) . equals ( localHostName ) ) { localNetAddress = workerInfo . getNetAddress ( ) ; break ; } } if ( localNetAddress == null ) { throw new NotFoundException ( ExceptionMessage . NO_LOCAL_BLOCK_WORKER_REPLICATE_TASK . getMessage ( blockId ) ) ; } URIStatus status = fs . getStatus ( new AlluxioURI ( path ) ) ; OpenFilePOptions openOptions = OpenFilePOptions . newBuilder ( ) . setReadType ( ReadPType . NO_CACHE ) . build ( ) ; AlluxioConfiguration conf = ServerConfiguration . global ( ) ; InStreamOptions inOptions = new InStreamOptions ( status , openOptions , conf ) ; inOptions . setUfsReadLocationPolicy ( BlockLocationPolicy . Factory . create ( LocalFirstPolicy . class . getCanonicalName ( ) , conf ) ) ; OutStreamOptions outOptions = OutStreamOptions . defaults ( conf ) ; outOptions . setLocationPolicy ( BlockLocationPolicy . Factory . create ( LocalFirstPolicy . class . getCanonicalName ( ) , conf ) ) ; try ( OutputStream outputStream = blockStore . getOutStream ( blockId , - 1 , localNetAddress , outOptions ) ) { try ( InputStream inputStream = blockStore . getInStream ( blockId , inOptions ) ) { ByteStreams . copy ( inputStream , outputStream ) ; } catch ( Throwable t ) { try { ( ( Cancelable ) outputStream ) . cancel ( ) ; } catch ( Throwable t2 ) { t . addSuppressed ( t2 ) ; } throw t ; } } } | Loads a block into the local worker . If the block doesn t exist in Alluxio it will be read from the UFS .
12,456 | public void stopAndJoin ( ) { interrupt ( ) ; if ( mServerSocket != null ) { try { mServerSocket . close ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } try { join ( 5 * Constants . SECOND_MS ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; } if ( isAlive ( ) ) { LOG . warn ( "Failed to stop rejecting server thread" ) ; } } | Stops the server and joins the server thread . |
// Creates the root inode of the tree if it does not exist yet: a directory with the given
// owner/group/mode, marked PERSISTED, with its id drawn from the directory id generator, and
// journaled through mState.applyAndJournal. Idempotent when the root already exists.
12,457 | public void initializeRoot ( String owner , String group , Mode mode , JournalContext context ) throws UnavailableException { if ( mState . getRoot ( ) == null ) { MutableInodeDirectory root = MutableInodeDirectory . create ( mDirectoryIdGenerator . getNewDirectoryId ( context ) , NO_PARENT , ROOT_INODE_NAME , CreateDirectoryContext . mergeFrom ( CreateDirectoryPOptions . newBuilder ( ) . setMode ( mode . toProto ( ) ) ) . setOwner ( owner ) . setGroup ( group ) ) ; root . setPersistenceState ( PersistenceState . PERSISTED ) ; mState . applyAndJournal ( context , root ) ; } } | Initializes the root of the inode tree .
// Marks a directory inode as having all of its direct children loaded from the UFS, by
// journaling an UpdateInodeDirectoryEntry with directChildrenLoaded=true.
12,458 | public void setDirectChildrenLoaded ( Supplier < JournalContext > context , InodeDirectory dir ) { mState . applyAndJournal ( context , UpdateInodeDirectoryEntry . newBuilder ( ) . setId ( dir . getId ( ) ) . setDirectChildrenLoaded ( true ) . build ( ) ) ; } | Marks an inode directory as having its direct children loaded .
12,459 | public InodePathPair lockInodePathPair ( AlluxioURI path1 , LockPattern lockPattern1 , AlluxioURI path2 , LockPattern lockPattern2 ) throws InvalidPathException { LockedInodePath lockedPath1 = null ; LockedInodePath lockedPath2 = null ; boolean valid = false ; try { if ( path1 . getPath ( ) . compareTo ( path2 . getPath ( ) ) > 0 ) { lockedPath2 = lockInodePath ( path2 , lockPattern2 ) ; lockedPath1 = lockInodePath ( path1 , lockPattern1 ) ; } else { lockedPath1 = lockInodePath ( path1 , lockPattern1 ) ; lockedPath2 = lockInodePath ( path2 , lockPattern2 ) ; } valid = true ; return new InodePathPair ( lockedPath1 , lockedPath2 ) ; } finally { if ( ! valid ) { if ( lockedPath1 != null ) { lockedPath1 . close ( ) ; } if ( lockedPath2 != null ) { lockedPath2 . close ( ) ; } } } } | Locks existing inodes on the two specified paths . The two paths will be locked in the correct order . The target inodes are not required to exist . |
// Recursively appends the path components for an inode into the builder. The inode's id,
// parent id and name are read under a short-lived read lock; then either the root separator,
// a root child ("/name"), or the parent's path followed by "/name" is appended. Throws
// FileDoesNotExistException if a parent inode has vanished from the store mid-walk.
12,460 | private void computePathForInode ( InodeView inode , StringBuilder builder ) throws FileDoesNotExistException { long id ; long parentId ; String name ; try ( LockResource lr = mInodeLockManager . lockInode ( inode , LockMode . READ ) ) { id = inode . getId ( ) ; parentId = inode . getParentId ( ) ; name = inode . getName ( ) ; } if ( isRootId ( id ) ) { builder . append ( AlluxioURI . SEPARATOR ) ; } else if ( isRootId ( parentId ) ) { builder . append ( AlluxioURI . SEPARATOR ) ; builder . append ( name ) ; } else { Optional < Inode > parentInode = mInodeStore . get ( parentId ) ; if ( ! parentInode . isPresent ( ) ) { throw new FileDoesNotExistException ( ExceptionMessage . INODE_DOES_NOT_EXIST . getMessage ( parentId ) ) ; } computePathForInode ( parentInode . get ( ) , builder ) ; builder . append ( AlluxioURI . SEPARATOR ) ; builder . append ( name ) ; } } | Appends components of the path from a given inode .
12,461 | public AlluxioURI getPath ( InodeView inode ) throws FileDoesNotExistException { StringBuilder builder = new StringBuilder ( ) ; computePathForInode ( inode , builder ) ; return new AlluxioURI ( builder . toString ( ) ) ; } | Returns the path for a particular inode . The inode and the path to the inode must already be locked . |
12,462 | private static void inheritOwnerAndGroupIfEmpty ( MutableInode < ? > newInode , InodeDirectoryView ancestorInode ) { if ( ServerConfiguration . getBoolean ( PropertyKey . MASTER_METASTORE_INODE_INHERIT_OWNER_AND_GROUP ) && newInode . getOwner ( ) . isEmpty ( ) && newInode . getGroup ( ) . isEmpty ( ) ) { newInode . setOwner ( ancestorInode . getOwner ( ) ) ; newInode . setGroup ( ancestorInode . getGroup ( ) ) ; } } | Inherit owner and group from ancestor if both are empty |
12,463 | public void deleteInode ( RpcContext rpcContext , LockedInodePath inodePath , long opTimeMs ) throws FileDoesNotExistException { Preconditions . checkState ( inodePath . getLockPattern ( ) == LockPattern . WRITE_EDGE ) ; Inode inode = inodePath . getInode ( ) ; mState . applyAndJournal ( rpcContext , DeleteFileEntry . newBuilder ( ) . setId ( inode . getId ( ) ) . setRecursive ( false ) . setOpTimeMs ( opTimeMs ) . build ( ) ) ; if ( inode . isFile ( ) ) { rpcContext . getBlockDeletionContext ( ) . registerBlocksForDeletion ( inode . asFile ( ) . getBlockIds ( ) ) ; } } | Deletes a single inode from the inode tree by removing it from the parent inode . |
// Sets the pinned state of an inode (journaled with an updated modification time). For a
// directory the pin state is propagated recursively, taking a WRITE_INODE lock on each child
// path before recursing. Requires the caller to hold a write lock on inodePath.
12,464 | public void setPinned ( RpcContext rpcContext , LockedInodePath inodePath , boolean pinned , long opTimeMs ) throws FileDoesNotExistException , InvalidPathException { Preconditions . checkState ( inodePath . getLockPattern ( ) . isWrite ( ) ) ; Inode inode = inodePath . getInode ( ) ; mState . applyAndJournal ( rpcContext , UpdateInodeEntry . newBuilder ( ) . setId ( inode . getId ( ) ) . setPinned ( pinned ) . setLastModificationTimeMs ( opTimeMs ) . build ( ) ) ; if ( inode . isDirectory ( ) ) { assert inode instanceof InodeDirectory ; for ( Inode child : mInodeStore . getChildren ( inode . asDirectory ( ) ) ) { try ( LockedInodePath childPath = inodePath . lockChild ( child , LockPattern . WRITE_INODE ) ) { setPinned ( rpcContext , childPath , pinned , opTimeMs ) ; } } } } | Sets the pinned state of an inode . If the inode is a directory the pinned state will be set recursively .
// Synchronously persists an existing directory inode to the UFS. Uses an exponential-backoff
// retry loop around a per-inode persisting lock so only one thread persists while others retry
// until they observe PERSISTED. The journaled protocol is: mark TO_BE_PERSISTED, mkdir in the
// UFS via syncPersistDirectory, adopt the UFS owner/group/mode (and modification time) for
// non-root inodes when the directory already existed, then mark PERSISTED. Exhausting the
// retries raises FAILED_UFS_CREATE as an IOException.
12,465 | public void syncPersistExistingDirectory ( Supplier < JournalContext > context , InodeDirectoryView dir ) throws IOException , InvalidPathException , FileDoesNotExistException { RetryPolicy retry = new ExponentialBackoffRetry ( PERSIST_WAIT_BASE_SLEEP_MS , PERSIST_WAIT_MAX_SLEEP_MS , PERSIST_WAIT_MAX_RETRIES ) ; while ( retry . attempt ( ) ) { if ( dir . getPersistenceState ( ) == PersistenceState . PERSISTED ) { return ; } Optional < Scoped > persisting = mInodeLockManager . tryAcquirePersistingLock ( dir . getId ( ) ) ; if ( ! persisting . isPresent ( ) ) { continue ; } try ( Scoped s = persisting . get ( ) ) { if ( dir . getPersistenceState ( ) == PersistenceState . PERSISTED ) { return ; } mState . applyAndJournal ( context , UpdateInodeEntry . newBuilder ( ) . setId ( dir . getId ( ) ) . setPersistenceState ( PersistenceState . TO_BE_PERSISTED . name ( ) ) . build ( ) ) ; UpdateInodeEntry . Builder entry = UpdateInodeEntry . newBuilder ( ) . setId ( dir . getId ( ) ) ; syncPersistDirectory ( dir ) . ifPresent ( status -> { if ( isRootId ( dir . getId ( ) ) ) { return ; } entry . setOwner ( status . getOwner ( ) ) . setGroup ( status . getGroup ( ) ) . setMode ( status . getMode ( ) ) ; Long lastModificationTime = status . getLastModifiedTime ( ) ; if ( lastModificationTime != null ) { entry . setLastModificationTimeMs ( lastModificationTime ) . setOverwriteModificationTime ( true ) ; } } ) ; entry . setPersistenceState ( PersistenceState . PERSISTED . name ( ) ) ; mState . applyAndJournal ( context , entry . build ( ) ) ; return ; } } throw new IOException ( ExceptionMessage . FAILED_UFS_CREATE . getMessage ( dir . getName ( ) ) ) ; } | Synchronously persists an inode directory to the UFS . If concurrent calls are made only one thread will persist to UFS and the others will wait until it is persisted .
12,466 | public void syncPersistNewDirectory ( MutableInodeDirectory dir ) throws InvalidPathException , FileDoesNotExistException , IOException { dir . setPersistenceState ( PersistenceState . TO_BE_PERSISTED ) ; syncPersistDirectory ( dir ) . ifPresent ( status -> { dir . setOwner ( status . getOwner ( ) ) . setGroup ( status . getGroup ( ) ) . setMode ( status . getMode ( ) ) ; Long lastModificationTime = status . getLastModifiedTime ( ) ; if ( lastModificationTime != null ) { dir . setLastModificationTimeMs ( lastModificationTime , true ) ; } } ) ; dir . setPersistenceState ( PersistenceState . PERSISTED ) ; } | Synchronously persists an inode directory to the UFS . |
// Creates the directory in the UFS (no parents, with the inode's owner/group/mode). If mkdirs
// reports failure, the UFS path is probed: a pre-existing directory's status is returned so the
// caller can adopt its metadata, a pre-existing FILE is an InvalidPathException, and an
// un-stat-able path becomes an IOException wrapping the cause. Empty Optional means the mkdir
// succeeded (nothing to adopt).
12,467 | private Optional < UfsStatus > syncPersistDirectory ( InodeDirectoryView dir ) throws FileDoesNotExistException , IOException , InvalidPathException { AlluxioURI uri = getPath ( dir ) ; MountTable . Resolution resolution = mMountTable . resolve ( uri ) ; String ufsUri = resolution . getUri ( ) . toString ( ) ; try ( CloseableResource < UnderFileSystem > ufsResource = resolution . acquireUfsResource ( ) ) { UnderFileSystem ufs = ufsResource . get ( ) ; MkdirsOptions mkdirsOptions = MkdirsOptions . defaults ( ServerConfiguration . global ( ) ) . setCreateParent ( false ) . setOwner ( dir . getOwner ( ) ) . setGroup ( dir . getGroup ( ) ) . setMode ( new Mode ( dir . getMode ( ) ) ) ; if ( ! ufs . mkdirs ( ufsUri , mkdirsOptions ) ) { UfsStatus status ; try { status = ufs . getStatus ( ufsUri ) ; } catch ( Exception e ) { throw new IOException ( String . format ( "Cannot create or load UFS directory %s: %s." , ufsUri , e . toString ( ) ) , e ) ; } if ( status . isFile ( ) ) { throw new InvalidPathException ( String . format ( "Error persisting directory. A file exists at the UFS location %s." , ufsUri ) ) ; } return Optional . of ( status ) ; } } return Optional . empty ( ) ; } | Persists the directory to the UFS returning the UFS status if the directory is found to already exist in the UFS .
// Entry point for the Alluxio job worker: rejects any CLI arguments, verifies both the master
// and job master hosts are configured (exiting otherwise), marks the process type, loads
// cluster-default configuration from the primary master with retry (fatal on failure), then
// creates and runs the JobWorkerProcess.
12,468 | public static void main ( String [ ] args ) { if ( args . length != 0 ) { LOG . info ( "java -cp {} {}" , RuntimeConstants . ALLUXIO_JAR , AlluxioJobWorker . class . getCanonicalName ( ) ) ; System . exit ( - 1 ) ; } if ( ! ConfigurationUtils . masterHostConfigured ( ServerConfiguration . global ( ) ) ) { System . out . println ( ConfigurationUtils . getMasterHostNotConfiguredMessage ( "Alluxio job worker" ) ) ; System . exit ( 1 ) ; } if ( ! ConfigurationUtils . jobMasterHostConfigured ( ServerConfiguration . global ( ) ) ) { System . out . println ( ConfigurationUtils . getJobMasterHostNotConfiguredMessage ( "Alluxio job worker" ) ) ; System . exit ( 1 ) ; } CommonUtils . PROCESS_TYPE . set ( CommonUtils . ProcessType . JOB_WORKER ) ; MasterInquireClient masterInquireClient = MasterInquireClient . Factory . create ( ServerConfiguration . global ( ) ) ; try { RetryUtils . retry ( "load cluster default configuration with master" , ( ) -> { InetSocketAddress masterAddress = masterInquireClient . getPrimaryRpcAddress ( ) ; ServerConfiguration . loadClusterDefaults ( masterAddress ) ; } , RetryUtils . defaultWorkerMasterClientRetry ( ServerConfiguration . getDuration ( PropertyKey . WORKER_MASTER_CONNECT_RETRY_TIMEOUT ) ) ) ; } catch ( IOException e ) { ProcessUtils . fatalError ( LOG , "Failed to load cluster default configuration for job worker: %s" , e . getMessage ( ) ) ; } JobWorkerProcess process = JobWorkerProcess . Factory . create ( ) ; ProcessUtils . run ( process ) ; } | Starts the Alluxio job worker .
12,469 | private static void runApplicationMaster ( final CommandLine cliParser , AlluxioConfiguration alluxioConf ) throws Exception { int numWorkers = Integer . parseInt ( cliParser . getOptionValue ( "num_workers" , "1" ) ) ; String masterAddress = cliParser . getOptionValue ( "master_address" ) ; String resourcePath = cliParser . getOptionValue ( "resource_path" ) ; ApplicationMaster applicationMaster = new ApplicationMaster ( numWorkers , masterAddress , resourcePath , alluxioConf ) ; applicationMaster . start ( ) ; applicationMaster . requestAndLaunchContainers ( ) ; applicationMaster . waitForShutdown ( ) ; applicationMaster . stop ( ) ; } | Run the application master . |
// Starts the application master. With Hadoop security enabled, serializes the current user's
// credentials into mAllTokens after stripping AMRM tokens (containers must not receive the
// AM-to-RM token). Then initializes and starts the NM, RM and YARN clients, and registers this
// AM with the resource manager under the local hostname.
12,470 | public void start ( ) throws IOException , YarnException { if ( UserGroupInformation . isSecurityEnabled ( ) ) { Credentials credentials = UserGroupInformation . getCurrentUser ( ) . getCredentials ( ) ; DataOutputBuffer credentialsBuffer = new DataOutputBuffer ( ) ; credentials . writeTokenStorageToStream ( credentialsBuffer ) ; Iterator < Token < ? > > iter = credentials . getAllTokens ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { Token < ? > token = iter . next ( ) ; if ( token . getKind ( ) . equals ( AMRMTokenIdentifier . KIND_NAME ) ) { iter . remove ( ) ; } } mAllTokens = ByteBuffer . wrap ( credentialsBuffer . getData ( ) , 0 , credentialsBuffer . getLength ( ) ) ; } mNMClient . init ( mYarnConf ) ; mNMClient . start ( ) ; mRMClient . init ( mYarnConf ) ; mRMClient . start ( ) ; mYarnClient . init ( mYarnConf ) ; mYarnClient . start ( ) ; String hostname = NetworkAddressUtils . getLocalHostName ( ( int ) mAlluxioConf . getMs ( PropertyKey . NETWORK_HOST_RESOLUTION_TIMEOUT_MS ) ) ; mRMClient . registerApplicationMaster ( hostname , 0 , "" ) ; LOG . info ( "ApplicationMaster registered" ) ; } | Starts the application master .
// Requests and launches containers until the master and all workers are running. If a master is
// already reachable at mMasterAddress its host address is reused; otherwise a single master
// container is allocated and launched. Worker containers are then allocated (memory = worker
// heap + ramdisk) and launched one by one. NOTE(review): mContainerAllocator is reassigned for
// the worker phase, discarding the master-phase allocator — confirm that is intentional.
12,471 | public void requestAndLaunchContainers ( ) throws Exception { if ( masterExists ( ) ) { InetAddress address = InetAddress . getByName ( mMasterAddress ) ; mMasterContainerNetAddress = address . getHostAddress ( ) ; LOG . info ( "Found master already running on " + mMasterAddress ) ; } else { LOG . info ( "Configuring master container request." ) ; Resource masterResource = Records . newRecord ( Resource . class ) ; masterResource . setMemory ( mMasterMemInMB ) ; masterResource . setVirtualCores ( mMasterCpu ) ; mContainerAllocator = new ContainerAllocator ( "master" , 1 , 1 , masterResource , mYarnClient , mRMClient , mMasterAddress ) ; List < Container > masterContainers = mContainerAllocator . allocateContainers ( ) ; launchMasterContainer ( Iterables . getOnlyElement ( masterContainers ) ) ; } Resource workerResource = Records . newRecord ( Resource . class ) ; workerResource . setMemory ( mWorkerMemInMB + mRamdiskMemInMB ) ; workerResource . setVirtualCores ( mWorkerCpu ) ; mContainerAllocator = new ContainerAllocator ( "worker" , mNumWorkers , mMaxWorkersPerHost , workerResource , mYarnClient , mRMClient ) ; List < Container > workerContainers = mContainerAllocator . allocateContainers ( ) ; for ( Container container : workerContainers ) { launchWorkerContainer ( container ) ; } LOG . info ( "Master and workers are launched" ) ; } | Submits requests for containers until the master and all workers are launched .
12,472 | public void stop ( ) { try { mRMClient . unregisterApplicationMaster ( FinalApplicationStatus . SUCCEEDED , "" , "" ) ; } catch ( YarnException e ) { LOG . error ( "Failed to unregister application" , e ) ; } catch ( IOException e ) { LOG . error ( "Failed to unregister application" , e ) ; } mRMClient . stop ( ) ; mYarnClient . stop ( ) ; } | Shuts down the application master unregistering it from Yarn and stopping its clients . |
12,473 | private boolean masterExists ( ) { String webPort = mAlluxioConf . get ( PropertyKey . MASTER_WEB_PORT ) ; try { URL myURL = new URL ( "http://" + mMasterAddress + ":" + webPort + Constants . REST_API_PREFIX + "/master/version" ) ; LOG . debug ( "Checking for master at: " + myURL . toString ( ) ) ; HttpURLConnection connection = ( HttpURLConnection ) myURL . openConnection ( ) ; connection . setRequestMethod ( HttpMethod . GET ) ; int resCode = connection . getResponseCode ( ) ; LOG . debug ( "Response code from master was: " + Integer . toString ( resCode ) ) ; connection . disconnect ( ) ; return resCode == HttpURLConnection . HTTP_OK ; } catch ( MalformedURLException e ) { LOG . error ( "Malformed URL in attempt to check if master is running already" , e ) ; } catch ( IOException e ) { LOG . debug ( "No existing master found" , e ) ; } return false ; } | Checks if an Alluxio master node is already running or not on the master address given . |
// Promotes this master to primary. While watching for leadership-state changes (any change, or
// not currently being PRIMARY, marks the transition unstable), it stops the secondary masters
// and takes journal primacy; if instability was observed it rolls back via losePrimacy() and
// returns false. Otherwise it starts the masters as leader, spawns the serving thread (treating
// an InterruptedException root cause as a normal shutdown), and waits up to 10 minutes for the
// server to become ready, dumping all thread stacks and failing if it does not.
12,474 | private boolean gainPrimacy ( ) throws Exception { AtomicBoolean unstable = new AtomicBoolean ( false ) ; try ( Scoped scoped = mLeaderSelector . onStateChange ( state -> unstable . set ( true ) ) ) { if ( mLeaderSelector . getState ( ) != State . PRIMARY ) { unstable . set ( true ) ; } stopMasters ( ) ; LOG . info ( "Secondary stopped" ) ; mJournalSystem . gainPrimacy ( ) ; if ( unstable . get ( ) ) { losePrimacy ( ) ; return false ; } } startMasters ( true ) ; mServingThread = new Thread ( ( ) -> { try { startServing ( " (gained leadership)" , " (lost leadership)" ) ; } catch ( Throwable t ) { Throwable root = ExceptionUtils . getRootCause ( t ) ; if ( ( root != null && ( root instanceof InterruptedException ) ) || Thread . interrupted ( ) ) { return ; } ProcessUtils . fatalError ( LOG , t , "Exception thrown in main serving thread" ) ; } } , "MasterServingThread" ) ; mServingThread . start ( ) ; if ( ! waitForReady ( 10 * Constants . MINUTE_MS ) ) { ThreadUtils . logAllThreads ( ) ; throw new RuntimeException ( "Alluxio master failed to come up" ) ; } LOG . info ( "Primary started" ) ; return true ; } | Upgrades the master to primary mode .
12,475 | public static long getNumSector ( String requestSize , String sectorSize ) { Double memSize = Double . parseDouble ( requestSize ) ; Double sectorBytes = Double . parseDouble ( sectorSize ) ; Double nSectors = memSize / sectorBytes ; Double memSizeKB = memSize / 1024 ; Double memSizeGB = memSize / ( 1024 * 1024 * 1024 ) ; Double memSize100GB = memSizeGB / 100 ; Double allocBitmapSize = nSectors / 8 ; Double extOverflowFileSize = memSize100GB * 1024 * 1024 * 4 ; Double journalFileSize = memSize100GB * 1024 * 1024 * 8 ; Double catalogFileSize = memSizeKB * 10 ; Double hotFileSize = memSizeKB * 5 ; Double quotaUsersFileSize = ( memSizeGB * 256 + 1 ) * 64 ; Double quotaGroupsFileSize = ( memSizeGB * 32 + 1 ) * 64 ; Double metadataSize = allocBitmapSize + extOverflowFileSize + journalFileSize + catalogFileSize + hotFileSize + quotaUsersFileSize + quotaGroupsFileSize ; Double allocSize = memSize + metadataSize ; Double numSectors = allocSize / sectorBytes ; System . out . println ( numSectors . longValue ( ) + 1 ) ; return numSectors . longValue ( ) + 1 ; } | Converts the memory size to number of sectors . |
12,476 | public static void main ( String [ ] args ) { if ( args . length != 2 ) { System . exit ( - 1 ) ; } String mem = args [ 0 ] ; String sector = args [ 1 ] ; try { getNumSector ( mem , sector ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | The main class to invoke the getNumSector . |
// Checks the UFS existence of one Alluxio path using a per-path lock registered in mCurrentPaths:
// the first arrival takes the write lock, resolves the path (bailing if the mount changed),
// probes the UFS, and either invalidates the cache entry (path exists) or caches the mount id
// (absent) — re-invalidating if the lock was flagged invalid in the meantime. A concurrent
// arrival instead takes the existing lock's read lock and returns once the cache is populated.
// Returns true only when this call recorded a new absent entry; errors log and return false.
12,477 | private boolean processSinglePath ( AlluxioURI alluxioUri , MountInfo mountInfo ) { PathLock pathLock = new PathLock ( ) ; Lock writeLock = pathLock . writeLock ( ) ; Lock readLock = null ; try { writeLock . lock ( ) ; PathLock existingLock = mCurrentPaths . putIfAbsent ( alluxioUri . getPath ( ) , pathLock ) ; if ( existingLock != null ) { writeLock . unlock ( ) ; writeLock = null ; readLock = existingLock . readLock ( ) ; readLock . lock ( ) ; if ( mCache . getIfPresent ( alluxioUri . getPath ( ) ) != null ) { return false ; } } else { MountTable . Resolution resolution = mMountTable . resolve ( alluxioUri ) ; if ( resolution . getMountId ( ) != mountInfo . getMountId ( ) ) { return false ; } boolean existsInUfs ; try ( CloseableResource < UnderFileSystem > ufsResource = resolution . acquireUfsResource ( ) ) { UnderFileSystem ufs = ufsResource . get ( ) ; existsInUfs = ufs . exists ( resolution . getUri ( ) . toString ( ) ) ; } if ( existsInUfs ) { mCache . invalidate ( alluxioUri . getPath ( ) ) ; } else { mCache . put ( alluxioUri . getPath ( ) , mountInfo . getMountId ( ) ) ; if ( pathLock . isInvalidate ( ) ) { mCache . invalidate ( alluxioUri . getPath ( ) ) ; } else { return false ; } } } } catch ( InvalidPathException | IOException e ) { LOG . warn ( "Processing path failed: " + alluxioUri , e ) ; return false ; } finally { if ( readLock != null ) { readLock . unlock ( ) ; } if ( writeLock != null ) { mCurrentPaths . remove ( alluxioUri . getPath ( ) , pathLock ) ; writeLock . unlock ( ) ; } } return true ; } | Processes and checks the existence of the corresponding ufs path for the given Alluxio path .
12,478 | private List < AlluxioURI > getNestedPaths ( AlluxioURI alluxioUri , int startComponentIndex ) { try { String [ ] fullComponents = PathUtils . getPathComponents ( alluxioUri . getPath ( ) ) ; String [ ] baseComponents = Arrays . copyOfRange ( fullComponents , 0 , startComponentIndex ) ; AlluxioURI uri = new AlluxioURI ( PathUtils . concatPath ( AlluxioURI . SEPARATOR , baseComponents ) ) ; List < AlluxioURI > components = new ArrayList < > ( fullComponents . length - startComponentIndex ) ; for ( int i = startComponentIndex ; i < fullComponents . length ; i ++ ) { uri = uri . joinUnsafe ( fullComponents [ i ] ) ; components . add ( uri ) ; } return components ; } catch ( InvalidPathException e ) { return Collections . emptyList ( ) ; } } | Returns a sequence of Alluxio paths for a specified path starting from the path component at a specific index to the specified path . |
12,479 | private void copyWildcard ( List < AlluxioURI > srcPaths , AlluxioURI dstPath , boolean recursive ) throws AlluxioException , IOException { URIStatus dstStatus = null ; try { dstStatus = mFileSystem . getStatus ( dstPath ) ; } catch ( FileDoesNotExistException e ) { } if ( dstStatus != null && ! dstStatus . isFolder ( ) ) { throw new InvalidPathException ( ExceptionMessage . DESTINATION_CANNOT_BE_FILE . getMessage ( ) ) ; } if ( dstStatus == null ) { mFileSystem . createDirectory ( dstPath ) ; System . out . println ( "Created directory: " + dstPath ) ; } List < String > errorMessages = new ArrayList < > ( ) ; for ( AlluxioURI srcPath : srcPaths ) { try { copy ( srcPath , new AlluxioURI ( dstPath . getScheme ( ) , dstPath . getAuthority ( ) , PathUtils . concatPath ( dstPath . getPath ( ) , srcPath . getName ( ) ) ) , recursive ) ; } catch ( AlluxioException | IOException e ) { errorMessages . add ( e . getMessage ( ) ) ; } } if ( errorMessages . size ( ) != 0 ) { throw new IOException ( Joiner . on ( '\n' ) . join ( errorMessages ) ) ; } } | Copies a list of files or directories specified by srcPaths to the destination specified by dstPath . This method is used when the original source path contains wildcards . |
12,480 | private void copy ( AlluxioURI srcPath , AlluxioURI dstPath , boolean recursive ) throws AlluxioException , IOException { URIStatus srcStatus = mFileSystem . getStatus ( srcPath ) ; URIStatus dstStatus = null ; try { dstStatus = mFileSystem . getStatus ( dstPath ) ; } catch ( FileDoesNotExistException e ) { } if ( ! srcStatus . isFolder ( ) ) { if ( dstStatus != null && dstStatus . isFolder ( ) ) { dstPath = new AlluxioURI ( PathUtils . concatPath ( dstPath . getPath ( ) , srcPath . getName ( ) ) ) ; } copyFile ( srcPath , dstPath ) ; } else { if ( ! recursive ) { throw new IOException ( srcPath . getPath ( ) + " is a directory, to copy it please use \"cp -R <src> <dst>\"" ) ; } List < URIStatus > statuses ; statuses = mFileSystem . listStatus ( srcPath ) ; if ( dstStatus != null ) { if ( ! dstStatus . isFolder ( ) ) { throw new InvalidPathException ( ExceptionMessage . DESTINATION_CANNOT_BE_FILE . getMessage ( ) ) ; } if ( srcStatus . isFolder ( ) ) { dstPath = new AlluxioURI ( PathUtils . concatPath ( dstPath . getPath ( ) , srcPath . getName ( ) ) ) ; mFileSystem . createDirectory ( dstPath ) ; System . out . println ( "Created directory: " + dstPath ) ; } } if ( dstStatus == null ) { mFileSystem . createDirectory ( dstPath ) ; System . out . println ( "Created directory: " + dstPath ) ; } preserveAttributes ( srcPath , dstPath ) ; List < String > errorMessages = new ArrayList < > ( ) ; for ( URIStatus status : statuses ) { try { copy ( new AlluxioURI ( srcPath . getScheme ( ) , srcPath . getAuthority ( ) , status . getPath ( ) ) , new AlluxioURI ( dstPath . getScheme ( ) , dstPath . getAuthority ( ) , PathUtils . concatPath ( dstPath . getPath ( ) , status . getName ( ) ) ) , recursive ) ; } catch ( IOException e ) { errorMessages . add ( e . getMessage ( ) ) ; } } if ( errorMessages . size ( ) != 0 ) { throw new IOException ( Joiner . on ( '\n' ) . join ( errorMessages ) ) ; } } } | Copies a file or a directory in the Alluxio filesystem . |
12,481 | private void copyFile ( AlluxioURI srcPath , AlluxioURI dstPath ) throws AlluxioException , IOException { try ( Closer closer = Closer . create ( ) ) { FileInStream is = closer . register ( mFileSystem . openFile ( srcPath ) ) ; FileOutStream os = closer . register ( mFileSystem . createFile ( dstPath ) ) ; try { IOUtils . copy ( is , os ) ; } catch ( Exception e ) { os . cancel ( ) ; throw e ; } System . out . println ( String . format ( COPY_SUCCEED_MESSAGE , srcPath , dstPath ) ) ; } preserveAttributes ( srcPath , dstPath ) ; } | Copies a file in the Alluxio filesystem . |
12,482 | private void preserveAttributes ( AlluxioURI srcPath , AlluxioURI dstPath ) throws IOException , AlluxioException { if ( mPreservePermissions ) { URIStatus srcStatus = mFileSystem . getStatus ( srcPath ) ; mFileSystem . setAttribute ( dstPath , SetAttributePOptions . newBuilder ( ) . setOwner ( srcStatus . getOwner ( ) ) . setGroup ( srcStatus . getGroup ( ) ) . setMode ( new Mode ( ( short ) srcStatus . getMode ( ) ) . toProto ( ) ) . build ( ) ) ; mFileSystem . setAcl ( dstPath , SetAclAction . REPLACE , srcStatus . getAcl ( ) . getEntries ( ) ) ; } } | Preserves attributes from the source file to the target file . |
12,483 | private void createDstDir ( AlluxioURI dstPath ) throws AlluxioException , IOException { try { mFileSystem . createDirectory ( dstPath ) ; } catch ( FileAlreadyExistsException e ) { } URIStatus dstStatus = mFileSystem . getStatus ( dstPath ) ; if ( ! dstStatus . isFolder ( ) ) { throw new InvalidPathException ( ExceptionMessage . DESTINATION_CANNOT_BE_FILE . getMessage ( ) ) ; } } | Creates a directory in the Alluxio filesystem space . It will not throw any exception if the destination directory already exists . |
12,484 | private void asyncCopyLocalPath ( CopyThreadPoolExecutor pool , AlluxioURI srcPath , AlluxioURI dstPath ) throws InterruptedException { File src = new File ( srcPath . getPath ( ) ) ; if ( ! src . isDirectory ( ) ) { pool . submit ( ( ) -> { try { copyFromLocalFile ( srcPath , dstPath ) ; pool . succeed ( srcPath , dstPath ) ; } catch ( Exception e ) { pool . fail ( srcPath , dstPath , e ) ; } return null ; } ) ; } else { try { mFileSystem . createDirectory ( dstPath ) ; } catch ( Exception e ) { pool . fail ( srcPath , dstPath , e ) ; return ; } File [ ] fileList = src . listFiles ( ) ; if ( fileList == null ) { pool . fail ( srcPath , dstPath , new IOException ( String . format ( "Failed to list directory %s." , src ) ) ) ; return ; } for ( File srcFile : fileList ) { AlluxioURI newURI = new AlluxioURI ( dstPath , new AlluxioURI ( srcFile . getName ( ) ) ) ; asyncCopyLocalPath ( pool , new AlluxioURI ( srcPath . getScheme ( ) , srcPath . getAuthority ( ) , srcFile . getPath ( ) ) , newURI ) ; } } } | Asynchronously copies a file or directory specified by srcPath from the local filesystem to dstPath in the Alluxio filesystem space assuming dstPath does not exist . |
12,485 | private void copyWildcardToLocal ( List < AlluxioURI > srcPaths , AlluxioURI dstPath ) throws AlluxioException , IOException { File dstFile = new File ( dstPath . getPath ( ) ) ; if ( dstFile . exists ( ) && ! dstFile . isDirectory ( ) ) { throw new InvalidPathException ( ExceptionMessage . DESTINATION_CANNOT_BE_FILE . getMessage ( ) ) ; } if ( ! dstFile . exists ( ) ) { if ( ! dstFile . mkdirs ( ) ) { throw new IOException ( "Fail to create directory: " + dstPath ) ; } else { System . out . println ( "Create directory: " + dstPath ) ; } } List < String > errorMessages = new ArrayList < > ( ) ; for ( AlluxioURI srcPath : srcPaths ) { try { File dstSubFile = new File ( dstFile . getAbsoluteFile ( ) , srcPath . getName ( ) ) ; copyToLocal ( srcPath , new AlluxioURI ( dstPath . getScheme ( ) , dstPath . getAuthority ( ) , dstSubFile . getPath ( ) ) ) ; } catch ( IOException e ) { errorMessages . add ( e . getMessage ( ) ) ; } } if ( errorMessages . size ( ) != 0 ) { throw new IOException ( Joiner . on ( '\n' ) . join ( errorMessages ) ) ; } } | Copies a list of files or directories specified by srcPaths from the Alluxio filesystem to dstPath in the local filesystem . This method is used when the input path contains wildcards . |
12,486 | private void copyToLocal ( AlluxioURI srcPath , AlluxioURI dstPath ) throws AlluxioException , IOException { URIStatus srcStatus = mFileSystem . getStatus ( srcPath ) ; File dstFile = new File ( dstPath . getPath ( ) ) ; if ( srcStatus . isFolder ( ) ) { if ( ! dstFile . exists ( ) ) { if ( ! dstFile . mkdirs ( ) ) { throw new IOException ( "mkdir failure for directory: " + dstPath ) ; } else { System . out . println ( "Create directory: " + dstPath ) ; } } List < URIStatus > statuses ; try { statuses = mFileSystem . listStatus ( srcPath ) ; } catch ( AlluxioException e ) { throw new IOException ( e . getMessage ( ) ) ; } List < String > errorMessages = new ArrayList < > ( ) ; for ( URIStatus status : statuses ) { try { File subDstFile = new File ( dstFile . getAbsolutePath ( ) , status . getName ( ) ) ; copyToLocal ( new AlluxioURI ( srcPath . getScheme ( ) , srcPath . getAuthority ( ) , status . getPath ( ) ) , new AlluxioURI ( dstPath . getScheme ( ) , dstPath . getAuthority ( ) , subDstFile . getPath ( ) ) ) ; } catch ( IOException e ) { errorMessages . add ( e . getMessage ( ) ) ; } } if ( errorMessages . size ( ) != 0 ) { throw new IOException ( Joiner . on ( '\n' ) . join ( errorMessages ) ) ; } } else { copyFileToLocal ( srcPath , dstPath ) ; } } | Copies a file or a directory from the Alluxio filesystem to the local filesystem . |
12,487 | private void copyFileToLocal ( AlluxioURI srcPath , AlluxioURI dstPath ) throws AlluxioException , IOException { File dstFile = new File ( dstPath . getPath ( ) ) ; String randomSuffix = String . format ( ".%s_copyToLocal_" , RandomStringUtils . randomAlphanumeric ( 8 ) ) ; File outputFile ; if ( dstFile . isDirectory ( ) ) { outputFile = new File ( PathUtils . concatPath ( dstFile . getAbsolutePath ( ) , srcPath . getName ( ) ) ) ; } else { outputFile = dstFile ; } File tmpDst = new File ( outputFile . getPath ( ) + randomSuffix ) ; try ( Closer closer = Closer . create ( ) ) { FileInStream is = closer . register ( mFileSystem . openFile ( srcPath ) ) ; FileOutputStream out = closer . register ( new FileOutputStream ( tmpDst ) ) ; byte [ ] buf = new byte [ mCopyToLocalBufferSize ] ; int t = is . read ( buf ) ; while ( t != - 1 ) { out . write ( buf , 0 , t ) ; t = is . read ( buf ) ; } if ( ! tmpDst . renameTo ( outputFile ) ) { throw new IOException ( "Failed to rename " + tmpDst . getPath ( ) + " to destination " + outputFile . getPath ( ) ) ; } System . out . println ( "Copied " + srcPath + " to " + "file://" + outputFile . getPath ( ) ) ; } finally { tmpDst . delete ( ) ; } } | Copies a file specified by argv from the filesystem to the local filesystem . This is the utility function . |
12,488 | public long lockBlock ( long sessionId , long blockId , BlockLockType blockLockType ) { ClientRWLock blockLock = getBlockLock ( blockId ) ; Lock lock ; if ( blockLockType == BlockLockType . READ ) { lock = blockLock . readLock ( ) ; } else { if ( sessionHoldsLock ( sessionId , blockId ) ) { throw new IllegalStateException ( String . format ( "Session %s attempted to take a write lock on block %s, but the session already" + " holds a lock on the block" , sessionId , blockId ) ) ; } lock = blockLock . writeLock ( ) ; } lock . lock ( ) ; try { long lockId = LOCK_ID_GEN . getAndIncrement ( ) ; synchronized ( mSharedMapsLock ) { mLockIdToRecordMap . put ( lockId , new LockRecord ( sessionId , blockId , lock ) ) ; Set < Long > sessionLockIds = mSessionIdToLockIdsMap . get ( sessionId ) ; if ( sessionLockIds == null ) { mSessionIdToLockIdsMap . put ( sessionId , Sets . newHashSet ( lockId ) ) ; } else { sessionLockIds . add ( lockId ) ; } } return lockId ; } catch ( RuntimeException e ) { unlock ( lock , blockId ) ; throw e ; } } | Locks a block . Note that even if this block does not exist a lock id is still returned . |
12,489 | private ClientRWLock getBlockLock ( long blockId ) { while ( true ) { ClientRWLock blockLock ; synchronized ( mSharedMapsLock ) { blockLock = mLocks . get ( blockId ) ; if ( blockLock != null ) { blockLock . addReference ( ) ; return blockLock ; } } blockLock = mLockPool . acquire ( 1 , TimeUnit . SECONDS ) ; if ( blockLock != null ) { synchronized ( mSharedMapsLock ) { if ( mLocks . containsKey ( blockId ) ) { mLockPool . release ( blockLock ) ; blockLock = mLocks . get ( blockId ) ; } else { mLocks . put ( blockId , blockLock ) ; } blockLock . addReference ( ) ; return blockLock ; } } } } | Returns the block lock for the given block id acquiring such a lock if it doesn t exist yet . |
12,490 | public void validateLock ( long sessionId , long blockId , long lockId ) throws BlockDoesNotExistException , InvalidWorkerStateException { synchronized ( mSharedMapsLock ) { LockRecord record = mLockIdToRecordMap . get ( lockId ) ; if ( record == null ) { throw new BlockDoesNotExistException ( ExceptionMessage . LOCK_RECORD_NOT_FOUND_FOR_LOCK_ID , lockId ) ; } if ( sessionId != record . getSessionId ( ) ) { throw new InvalidWorkerStateException ( ExceptionMessage . LOCK_ID_FOR_DIFFERENT_SESSION , lockId , record . getSessionId ( ) , sessionId ) ; } if ( blockId != record . getBlockId ( ) ) { throw new InvalidWorkerStateException ( ExceptionMessage . LOCK_ID_FOR_DIFFERENT_BLOCK , lockId , record . getBlockId ( ) , blockId ) ; } } } | Validates the lock is hold by the given session for the given block . |
12,491 | public void cleanupSession ( long sessionId ) { synchronized ( mSharedMapsLock ) { Set < Long > sessionLockIds = mSessionIdToLockIdsMap . get ( sessionId ) ; if ( sessionLockIds == null ) { return ; } for ( long lockId : sessionLockIds ) { LockRecord record = mLockIdToRecordMap . get ( lockId ) ; if ( record == null ) { LOG . error ( ExceptionMessage . LOCK_RECORD_NOT_FOUND_FOR_LOCK_ID . getMessage ( lockId ) ) ; continue ; } Lock lock = record . getLock ( ) ; unlock ( lock , record . getBlockId ( ) ) ; mLockIdToRecordMap . remove ( lockId ) ; } mSessionIdToLockIdsMap . remove ( sessionId ) ; } } | Cleans up the locks currently hold by a specific session . |
12,492 | public Set < Long > getLockedBlocks ( ) { synchronized ( mSharedMapsLock ) { Set < Long > set = new HashSet < > ( ) ; for ( LockRecord lockRecord : mLockIdToRecordMap . values ( ) ) { set . add ( lockRecord . getBlockId ( ) ) ; } return set ; } } | Gets a set of currently locked blocks . |
12,493 | private void releaseBlockLockIfUnused ( long blockId ) { synchronized ( mSharedMapsLock ) { ClientRWLock lock = mLocks . get ( blockId ) ; if ( lock == null ) { return ; } if ( lock . dropReference ( ) == 0 ) { mLocks . remove ( blockId ) ; mLockPool . release ( lock ) ; } } } | Checks whether anyone is using the block lock for the given block id returning the lock to the lock pool if it is unused . |
12,494 | public void validate ( ) { synchronized ( mSharedMapsLock ) { ConcurrentMap < Long , AtomicInteger > blockLockReferenceCounts = new ConcurrentHashMap < > ( ) ; for ( LockRecord record : mLockIdToRecordMap . values ( ) ) { blockLockReferenceCounts . putIfAbsent ( record . getBlockId ( ) , new AtomicInteger ( 0 ) ) ; blockLockReferenceCounts . get ( record . getBlockId ( ) ) . incrementAndGet ( ) ; } for ( Entry < Long , ClientRWLock > entry : mLocks . entrySet ( ) ) { long blockId = entry . getKey ( ) ; ClientRWLock lock = entry . getValue ( ) ; Integer recordCount = blockLockReferenceCounts . get ( blockId ) . get ( ) ; Integer referenceCount = lock . getReferenceCount ( ) ; if ( ! Objects . equal ( recordCount , referenceCount ) ) { throw new IllegalStateException ( "There are " + recordCount + " lock records for block" + " id " + blockId + ", but the reference count is " + referenceCount ) ; } } for ( Entry < Long , Set < Long > > entry : mSessionIdToLockIdsMap . entrySet ( ) ) { for ( Long lockId : entry . getValue ( ) ) { LockRecord record = mLockIdToRecordMap . get ( lockId ) ; if ( record . getSessionId ( ) != entry . getKey ( ) ) { throw new IllegalStateException ( "The session id map contains lock id " + lockId + "under session id " + entry . getKey ( ) + ", but the record for that lock id (" + record + ")" + " doesn't contain that session id" ) ; } } } } } | Checks the internal state of the manager to make sure invariants hold . |
12,495 | public int compareTo ( TtlBucket ttlBucket ) { long startTime1 = getTtlIntervalStartTimeMs ( ) ; long startTime2 = ttlBucket . getTtlIntervalStartTimeMs ( ) ; return Long . compare ( startTime1 , startTime2 ) ; } | Compares this bucket s TTL interval start time to that of another bucket . |
12,496 | private void createHdfsFilesystem ( Configuration conf ) throws Exception { mFileSystem = FileSystem . get ( URI . create ( conf . get ( "fs.defaultFS" ) ) , conf ) ; mOutputFilePath = new Path ( "./MapReduceOutputFile" ) ; if ( mFileSystem . exists ( mOutputFilePath ) ) { mFileSystem . delete ( mOutputFilePath , true ) ; } } | Creates the HDFS filesystem to store output files . |
12,497 | private int run ( String [ ] args ) throws Exception { Configuration conf = new Configuration ( ) ; String numMaps = new GenericOptionsParser ( conf , args ) . getRemainingArgs ( ) [ 0 ] ; conf . set ( MRJobConfig . NUM_MAPS , numMaps ) ; createHdfsFilesystem ( conf ) ; Job job = Job . getInstance ( conf , "MapReduceIntegrationChecker" ) ; job . setJarByClass ( MapReduceIntegrationChecker . class ) ; job . setMapperClass ( CheckerMapper . class ) ; job . setCombinerClass ( CheckerReducer . class ) ; job . setReducerClass ( CheckerReducer . class ) ; job . setOutputKeyClass ( Text . class ) ; job . setOutputValueClass ( Text . class ) ; job . setInputFormatClass ( EmptyInputFormat . class ) ; FileOutputFormat . setOutputPath ( job , mOutputFilePath ) ; try { if ( ! job . waitForCompletion ( true ) ) { return 1 ; } Status resultStatus = generateReport ( ) ; return resultStatus . equals ( Status . SUCCESS ) ? 0 : ( resultStatus . equals ( Status . FAIL_TO_FIND_CLASS ) ? 2 : 1 ) ; } finally { if ( mFileSystem . exists ( mOutputFilePath ) ) { mFileSystem . delete ( mOutputFilePath , true ) ; } mFileSystem . close ( ) ; } } | Implements MapReduce with Alluxio integration checker . |
12,498 | public static void main ( String [ ] args ) throws Exception { MapReduceIntegrationChecker checker = new MapReduceIntegrationChecker ( ) ; System . exit ( checker . run ( args ) ) ; } | Main function will be triggered via hadoop jar . |
/**
 * Stops the JVM monitor thread and waits for it to terminate.
 *
 * @throws IllegalStateException if the monitor thread was never started
 */
public void stop ( ) { Preconditions . checkState ( mJvmMonitorThread != null , "JVM monitor thread does not start" ) ; mJvmMonitorThread . interrupt ( ) ; try { mJvmMonitorThread . join ( ) ; } catch ( InterruptedException e ) { /* re-assert the interrupt status for callers */ Thread . currentThread ( ) . interrupt ( ) ; } reset ( ) ; }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.