idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
12,000 | private void execUpload ( UploadPartRequest request ) { File file = request . getFile ( ) ; ListenableFuture < PartETag > futureTag = mExecutor . submit ( ( Callable ) ( ) -> { PartETag partETag ; AmazonClientException lastException ; try { do { try { partETag = mClient . uploadPart ( request ) . getPartETag ( ) ; return partETag ; } catch ( AmazonClientException e ) { lastException = e ; } } while ( mRetryPolicy . attempt ( ) ) ; } finally { if ( ! file . delete ( ) ) { LOG . error ( "Failed to delete temporary file @ {}" , file . getPath ( ) ) ; } } throw new IOException ( "Fail to upload part " + request . getPartNumber ( ) + " to " + request . getKey ( ) , lastException ) ; } ) ; mTagFutures . add ( futureTag ) ; LOG . debug ( "Submit upload part request. key={}, partNum={}, file={}, fileSize={}, lastPart={}." , mKey , request . getPartNumber ( ) , file . getPath ( ) , file . length ( ) , request . isLastPart ( ) ) ; } | Executes the upload part request . |
12,001 | private void waitForAllPartsUpload ( ) throws IOException { int beforeSize = mTags . size ( ) ; try { for ( ListenableFuture < PartETag > future : mTagFutures ) { mTags . add ( future . get ( ) ) ; } } catch ( ExecutionException e ) { Futures . allAsList ( mTagFutures ) . cancel ( true ) ; abortMultiPartUpload ( ) ; throw new IOException ( "Part upload failed in multipart upload with " + "id '" + mUploadId + "' to " + mKey , e ) ; } catch ( InterruptedException e ) { LOG . warn ( "Interrupted object upload." , e ) ; Futures . allAsList ( mTagFutures ) . cancel ( true ) ; abortMultiPartUpload ( ) ; Thread . currentThread ( ) . interrupt ( ) ; } mTagFutures = new ArrayList < > ( ) ; if ( mTags . size ( ) != beforeSize ) { LOG . debug ( "Uploaded {} partitions of id '{}' to {}." , mTags . size ( ) , mUploadId , mKey ) ; } } | Waits for the submitted upload tasks to finish . |
12,002 | private void completeMultiPartUpload ( ) throws IOException { AmazonClientException lastException ; CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest ( mBucketName , mKey , mUploadId , mTags ) ; do { try { mClient . completeMultipartUpload ( completeRequest ) ; LOG . debug ( "Completed multipart upload for key {} and id '{}' with {} partitions." , mKey , mUploadId , mTags . size ( ) ) ; return ; } catch ( AmazonClientException e ) { lastException = e ; } } while ( mRetryPolicy . attempt ( ) ) ; throw new IOException ( "Unable to complete multipart upload with id '" + mUploadId + "' to " + mKey , lastException ) ; } | Completes multipart upload . |
12,003 | private void abortMultiPartUpload ( ) { AmazonClientException lastException ; do { try { mClient . abortMultipartUpload ( new AbortMultipartUploadRequest ( mBucketName , mKey , mUploadId ) ) ; LOG . warn ( "Aborted multipart upload for key {} and id '{}' to bucket {}" , mKey , mUploadId , mBucketName ) ; return ; } catch ( AmazonClientException e ) { lastException = e ; } } while ( mRetryPolicy . attempt ( ) ) ; LOG . warn ( "Unable to abort multipart upload for key '{}' and id '{}' to bucket {}. " + "You may need to enable the periodical cleanup by setting property {}" + "to be true." , mKey , mUploadId , mBucketName , PropertyKey . UNDERFS_CLEANUP_ENABLED . getName ( ) , lastException ) ; } | Aborts multipart upload . |
12,004 | public static long getGidFromGroupName ( String groupName ) throws IOException { String result = "" ; if ( OSUtils . isLinux ( ) ) { String script = "getent group " + groupName + " | cut -d: -f3" ; result = ShellUtils . execCommand ( "bash" , "-c" , script ) . trim ( ) ; } else if ( OSUtils . isMacOS ( ) ) { String script = "dscl . -read /Groups/" + groupName + " | awk '($1 == \"PrimaryGroupID:\") { print $2 }'" ; result = ShellUtils . execCommand ( "bash" , "-c" , script ) . trim ( ) ; } try { return Long . parseLong ( result ) ; } catch ( NumberFormatException e ) { LOG . error ( "Failed to get gid from group name {}." , groupName ) ; return - 1 ; } } | Retrieves the gid of the given group . |
12,005 | public static String getUserName ( long uid ) throws IOException { return ShellUtils . execCommand ( "id" , "-nu" , Long . toString ( uid ) ) . trim ( ) ; } | Gets the user name from the user id . |
12,006 | public static String getGroupName ( long gid ) throws IOException { if ( OSUtils . isLinux ( ) ) { String script = "getent group " + gid + " | cut -d: -f1" ; return ShellUtils . execCommand ( "bash" , "-c" , script ) . trim ( ) ; } else if ( OSUtils . isMacOS ( ) ) { String script = "dscl . list /Groups PrimaryGroupID | awk '($2 == \"" + gid + "\") { print $1 }'" ; return ShellUtils . execCommand ( "bash" , "-c" , script ) . trim ( ) ; } return "" ; } | Gets the group name from the group id . |
12,007 | public static boolean isFuseInstalled ( ) { try { if ( OSUtils . isLinux ( ) ) { String result = ShellUtils . execCommand ( "fusermount" , "-V" ) ; return ! result . isEmpty ( ) ; } else if ( OSUtils . isMacOS ( ) ) { String result = ShellUtils . execCommand ( "bash" , "-c" , "mount | grep FUSE" ) ; return ! result . isEmpty ( ) ; } } catch ( Exception e ) { return false ; } return false ; } | Checks whether fuse is installed in local file system . Alluxio - Fuse only support mac and linux . |
12,008 | private static long getIdInfo ( String option , String username ) { String output ; try { output = ShellUtils . execCommand ( "id" , option , username ) . trim ( ) ; } catch ( IOException e ) { LOG . error ( "Failed to get id from {} with option {}" , username , option ) ; return - 1 ; } return Long . parseLong ( output ) ; } | Runs the id command with the given options on the passed username . |
12,009 | public static int getErrorCode ( Throwable t ) { if ( t instanceof AlluxioException ) { return getAlluxioErrorCode ( ( AlluxioException ) t ) ; } else if ( t instanceof IOException ) { return - ErrorCodes . EIO ( ) ; } else { return - ErrorCodes . EBADMSG ( ) ; } } | Gets the corresponding error code of a throwable . |
12,010 | private static int getAlluxioErrorCode ( AlluxioException e ) { try { throw e ; } catch ( FileDoesNotExistException ex ) { return - ErrorCodes . ENOENT ( ) ; } catch ( FileAlreadyExistsException ex ) { return - ErrorCodes . EEXIST ( ) ; } catch ( InvalidPathException ex ) { return - ErrorCodes . EFAULT ( ) ; } catch ( BlockDoesNotExistException ex ) { return - ErrorCodes . ENODATA ( ) ; } catch ( DirectoryNotEmptyException ex ) { return - ErrorCodes . ENOTEMPTY ( ) ; } catch ( AccessControlException ex ) { return - ErrorCodes . EACCES ( ) ; } catch ( ConnectionFailedException ex ) { return - ErrorCodes . ECONNREFUSED ( ) ; } catch ( FileAlreadyCompletedException ex ) { return - ErrorCodes . EOPNOTSUPP ( ) ; } catch ( AlluxioException ex ) { return - ErrorCodes . EBADMSG ( ) ; } } | Gets the corresponding error code of an Alluxio exception . |
12,011 | public void record ( long timeNano , int numEvents ) { long leftEndPoint = bucket ( timeNano ) ; mSeries . put ( leftEndPoint , mSeries . getOrDefault ( leftEndPoint , 0 ) + numEvents ) ; } | Record events at a timestamp into the time series . |
12,012 | public void add ( TimeSeries other ) { TreeMap < Long , Integer > otherSeries = other . getSeries ( ) ; for ( Map . Entry < Long , Integer > event : otherSeries . entrySet ( ) ) { record ( event . getKey ( ) + other . getWidthNano ( ) / 2 , event . getValue ( ) ) ; } } | Add one histogram to the current one . We preserve the width in the current TimeSeries . |
12,013 | public void sparsePrint ( PrintStream stream ) { if ( mSeries . isEmpty ( ) ) { return ; } long start = mSeries . firstKey ( ) ; stream . printf ( "Time series starts at %d with width %d.%n" , start , mWidthNano ) ; for ( Map . Entry < Long , Integer > entry : mSeries . entrySet ( ) ) { stream . printf ( "%d %d%n" , ( entry . getKey ( ) - start ) / mWidthNano , entry . getValue ( ) ) ; } } | Print the time series sparsely i . e . it ignores buckets with 0 events . |
12,014 | public void print ( PrintStream stream ) { if ( mSeries . isEmpty ( ) ) { return ; } long start = mSeries . firstKey ( ) ; stream . printf ( "Time series starts at %d with width %d.%n" , start , mWidthNano ) ; int bucketIndex = 0 ; Iterator < Map . Entry < Long , Integer > > it = mSeries . entrySet ( ) . iterator ( ) ; Map . Entry < Long , Integer > current = it . next ( ) ; while ( current != null ) { int numEvents = 0 ; if ( bucketIndex * mWidthNano + start == current . getKey ( ) ) { numEvents = current . getValue ( ) ; current = null ; if ( it . hasNext ( ) ) { current = it . next ( ) ; } } stream . printf ( "%d %d%n" , bucketIndex , numEvents ) ; bucketIndex ++ ; } } | Print the time series densely i . e . it doesn t ignore buckets with 0 events . |
12,015 | private static < T > Supplier < T > memoize ( Supplier < T > original ) { return new Supplier < T > ( ) { Supplier < T > mDelegate = this :: firstTime ; boolean mInitialized ; public T get ( ) { return mDelegate . get ( ) ; } private synchronized T firstTime ( ) { if ( ! mInitialized ) { T value = original . get ( ) ; mDelegate = ( ) -> value ; mInitialized = true ; } return mDelegate . get ( ) ; } } ; } | Memoize implementation for java . util . function . supplier . |
12,016 | public static SwiftOutputStream put ( Access access , String objectName ) throws IOException { LOG . debug ( "PUT method, object : {}" , objectName ) ; URL url = new URL ( access . getPublicURL ( ) + "/" + objectName ) ; URLConnection connection = url . openConnection ( ) ; if ( ! ( connection instanceof HttpURLConnection ) ) { throw new IOException ( "Connection is not an instance of HTTP URL Connection" ) ; } HttpURLConnection httpCon = ( HttpURLConnection ) connection ; httpCon . setRequestMethod ( "PUT" ) ; httpCon . addRequestProperty ( "X-Auth-Token" , access . getToken ( ) ) ; httpCon . addRequestProperty ( "Content-Type" , "binary/octet-stream" ) ; httpCon . setDoInput ( true ) ; httpCon . setRequestProperty ( "Connection" , "close" ) ; httpCon . setReadTimeout ( HTTP_READ_TIMEOUT ) ; httpCon . setRequestProperty ( "Transfer-Encoding" , "chunked" ) ; httpCon . setDoOutput ( true ) ; httpCon . setChunkedStreamingMode ( HTTP_CHUNK_STREAMING ) ; httpCon . connect ( ) ; return new SwiftOutputStream ( httpCon ) ; } | Swift HTTP PUT request . |
12,017 | private void cleanupStaleClients ( ) { LocalTime cleanupTime = LocalTime . now ( ) ; LOG . debug ( "Starting cleanup authentication registry at {}" , cleanupTime ) ; List < UUID > staleChannels = new ArrayList < > ( ) ; for ( Map . Entry < UUID , AuthenticatedChannelInfo > clientEntry : mChannels . entrySet ( ) ) { LocalTime lat = clientEntry . getValue ( ) . getLastAccessTime ( ) ; if ( lat . plusSeconds ( mCleanupIntervalMs / 1000 ) . isBefore ( cleanupTime ) ) { staleChannels . add ( clientEntry . getKey ( ) ) ; } } LOG . debug ( "Found {} stale channels for cleanup." , staleChannels . size ( ) ) ; for ( UUID clientId : staleChannels ) { unregisterChannel ( clientId ) ; } LOG . debug ( "Finished state channel cleanup at {}" , LocalTime . now ( ) ) ; } | Primitive that is invoked periodically for cleaning the registry from clients that has become stale . |
12,018 | protected void checkSupported ( AuthType authType ) { switch ( authType ) { case NOSASL : case SIMPLE : case CUSTOM : return ; default : throw new RuntimeException ( "Authentication type not supported:" + authType . name ( ) ) ; } } | Used to check if given authentication is supported by the server . |
12,019 | public void addBlocksToDelete ( List < Long > blocks ) { for ( long id : blocks ) { if ( mRemovingBlocks . contains ( id ) ) { LOG . debug ( "{} is being removed. Current queue size is {}." , id , mBlocksToRemove . size ( ) ) ; continue ; } try { mBlocksToRemove . put ( id ) ; mRemovingBlocks . add ( id ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; LOG . warn ( "AsyncBlockRemover got interrupted while it was putting block {}." , id ) ; } } } | Put blocks into async block remover . This method will take care of the duplicate blocks . |
12,020 | private boolean rename ( String src , String dst ) throws IOException { src = stripPath ( src ) ; dst = stripPath ( dst ) ; File file = new File ( src ) ; return file . renameTo ( new File ( dst ) ) ; } | Rename a file to a file or a directory to a directory . |
12,021 | private Response completeMultipartUpload ( final String bucket , final String object , final long uploadId ) { return S3RestUtils . call ( bucket , new S3RestUtils . RestCallable < CompleteMultipartUploadResult > ( ) { public CompleteMultipartUploadResult call ( ) throws S3Exception { String bucketPath = parseBucketPath ( AlluxioURI . SEPARATOR + bucket ) ; checkBucketIsAlluxioDirectory ( bucketPath ) ; String objectPath = bucketPath + AlluxioURI . SEPARATOR + object ; AlluxioURI multipartTemporaryDir = new AlluxioURI ( S3RestUtils . getMultipartTemporaryDirForObject ( bucketPath , object ) ) ; checkUploadId ( multipartTemporaryDir , uploadId ) ; try { List < URIStatus > parts = mFileSystem . listStatus ( multipartTemporaryDir ) ; Collections . sort ( parts , new URIStatusNameComparator ( ) ) ; CreateFilePOptions options = CreateFilePOptions . newBuilder ( ) . setRecursive ( true ) . setWriteType ( getS3WriteType ( ) ) . build ( ) ; FileOutStream os = mFileSystem . createFile ( new AlluxioURI ( objectPath ) , options ) ; MessageDigest md5 = MessageDigest . getInstance ( "MD5" ) ; DigestOutputStream digestOutputStream = new DigestOutputStream ( os , md5 ) ; try { for ( URIStatus part : parts ) { try ( FileInStream is = mFileSystem . openFile ( new AlluxioURI ( part . getPath ( ) ) ) ) { ByteStreams . copy ( is , digestOutputStream ) ; } } } finally { digestOutputStream . close ( ) ; } mFileSystem . delete ( multipartTemporaryDir , DeletePOptions . newBuilder ( ) . setRecursive ( true ) . build ( ) ) ; String entityTag = Hex . encodeHexString ( md5 . digest ( ) ) ; return new CompleteMultipartUploadResult ( objectPath , bucket , object , entityTag ) ; } catch ( Exception e ) { throw toObjectS3Exception ( e , objectPath ) ; } } } ) ; } | under the temporary multipart upload directory are combined into the final object . |
12,022 | public static List < InetSocketAddress > getMasterRpcAddresses ( AlluxioConfiguration conf ) { if ( conf . isSet ( PropertyKey . MASTER_RPC_ADDRESSES ) ) { return parseInetSocketAddresses ( conf . getList ( PropertyKey . MASTER_RPC_ADDRESSES , "," ) ) ; } int rpcPort = NetworkAddressUtils . getPort ( NetworkAddressUtils . ServiceType . MASTER_RPC , conf ) ; return overridePort ( getEmbeddedJournalAddresses ( conf , ServiceType . MASTER_RAFT ) , rpcPort ) ; } | Gets the RPC addresses of all masters based on the configuration . |
12,023 | public static List < InetSocketAddress > getJobMasterRpcAddresses ( AlluxioConfiguration conf ) { if ( conf . isSet ( PropertyKey . JOB_MASTER_RPC_ADDRESSES ) ) { return parseInetSocketAddresses ( conf . getList ( PropertyKey . JOB_MASTER_RPC_ADDRESSES , "," ) ) ; } int jobRpcPort = NetworkAddressUtils . getPort ( NetworkAddressUtils . ServiceType . JOB_MASTER_RPC , conf ) ; if ( conf . isSet ( PropertyKey . MASTER_RPC_ADDRESSES ) ) { List < InetSocketAddress > addrs = parseInetSocketAddresses ( conf . getList ( PropertyKey . MASTER_RPC_ADDRESSES , "," ) ) ; return overridePort ( addrs , jobRpcPort ) ; } return overridePort ( getEmbeddedJournalAddresses ( conf , ServiceType . JOB_MASTER_RAFT ) , jobRpcPort ) ; } | Gets the RPC addresses of all job masters based on the configuration . |
12,024 | public static Properties loadPropertiesFromResource ( URL resource ) { try ( InputStream stream = resource . openStream ( ) ) { return loadProperties ( stream ) ; } catch ( IOException e ) { LOG . warn ( "Failed to read properties from {}: {}" , resource , e . toString ( ) ) ; return null ; } } | Loads properties from a resource . |
12,025 | public static Properties loadPropertiesFromFile ( String filePath ) { try ( FileInputStream fileInputStream = new FileInputStream ( filePath ) ) { return loadProperties ( fileInputStream ) ; } catch ( FileNotFoundException e ) { return null ; } catch ( IOException e ) { LOG . warn ( "Failed to close property input stream from {}: {}" , filePath , e . toString ( ) ) ; return null ; } } | Loads properties from the given file . |
12,026 | public static String searchPropertiesFile ( String propertiesFile , String [ ] confPathList ) { if ( propertiesFile == null || confPathList == null ) { return null ; } for ( String path : confPathList ) { String file = PathUtils . concatPath ( path , propertiesFile ) ; Properties properties = loadPropertiesFromFile ( file ) ; if ( properties != null ) { return file ; } } return null ; } | Searches the given properties file from a list of paths . |
12,027 | public static String getMasterHostNotConfiguredMessage ( String serviceName ) { return getHostNotConfiguredMessage ( serviceName , "master" , PropertyKey . MASTER_HOSTNAME , PropertyKey . MASTER_EMBEDDED_JOURNAL_ADDRESSES ) ; } | Returns a unified message for cases when the master hostname cannot be determined . |
12,028 | public static String getJobMasterHostNotConfiguredMessage ( String serviceName ) { return getHostNotConfiguredMessage ( serviceName , "job master" , PropertyKey . JOB_MASTER_HOSTNAME , PropertyKey . JOB_MASTER_EMBEDDED_JOURNAL_ADDRESSES ) ; } | Returns a unified message for cases when the job master hostname cannot be determined . |
12,029 | public static float checkRatio ( AlluxioConfiguration conf , PropertyKey key ) { float value = conf . getFloat ( key ) ; Preconditions . checkState ( value <= 1.0 , "Property %s must not exceed 1, but it is set to %s" , key . getName ( ) , value ) ; Preconditions . checkState ( value >= 0.0 , "Property %s must be non-negative, but it is set to %s" , key . getName ( ) , value ) ; return value ; } | Checks that the given property key is a ratio from 0 . 0 and 1 . 0 throwing an exception if it is not . |
12,030 | public static List < ConfigProperty > getConfiguration ( AlluxioConfiguration conf , Scope scope ) { ConfigurationValueOptions useRawDisplayValue = ConfigurationValueOptions . defaults ( ) . useDisplayValue ( true ) . useRawValue ( true ) ; List < ConfigProperty > configs = new ArrayList < > ( ) ; List < PropertyKey > selectedKeys = conf . keySet ( ) . stream ( ) . filter ( key -> GrpcUtils . contains ( key . getScope ( ) , scope ) ) . filter ( key -> key . isValid ( key . getName ( ) ) ) . collect ( toList ( ) ) ; for ( PropertyKey key : selectedKeys ) { ConfigProperty . Builder configProp = ConfigProperty . newBuilder ( ) . setName ( key . getName ( ) ) . setSource ( conf . getSource ( key ) . toString ( ) ) ; if ( conf . isSet ( key ) ) { configProp . setValue ( conf . get ( key , useRawDisplayValue ) ) ; } configs . add ( configProp . build ( ) ) ; } return configs ; } | Gets all configuration properties filtered by the specified scope . |
12,031 | public static void reloadProperties ( ) { synchronized ( DEFAULT_PROPERTIES_LOCK ) { AlluxioProperties properties = new AlluxioProperties ( ) ; InstancedConfiguration conf = new InstancedConfiguration ( properties ) ; properties . merge ( System . getProperties ( ) , Source . SYSTEM_PROPERTY ) ; if ( conf . getBoolean ( PropertyKey . TEST_MODE ) ) { conf . validate ( ) ; sDefaultProperties = properties ; return ; } String confPaths = conf . get ( PropertyKey . SITE_CONF_DIR ) ; String [ ] confPathList = confPaths . split ( "," ) ; String sitePropertyFile = ConfigurationUtils . searchPropertiesFile ( Constants . SITE_PROPERTIES , confPathList ) ; Properties siteProps = null ; if ( sitePropertyFile != null ) { siteProps = loadPropertiesFromFile ( sitePropertyFile ) ; sSourcePropertyFile = sitePropertyFile ; } else { URL resource = ConfigurationUtils . class . getClassLoader ( ) . getResource ( Constants . SITE_PROPERTIES ) ; if ( resource != null ) { siteProps = loadPropertiesFromResource ( resource ) ; if ( siteProps != null ) { sSourcePropertyFile = resource . getPath ( ) ; } } } properties . merge ( siteProps , Source . siteProperty ( sSourcePropertyFile ) ) ; conf . validate ( ) ; sDefaultProperties = properties ; } } | Reloads site properties from disk . |
12,032 | private static GetConfigurationPResponse loadConfiguration ( InetSocketAddress address , AlluxioConfiguration conf ) throws AlluxioStatusException { GrpcChannel channel = null ; try { LOG . info ( "Alluxio client (version {}) is trying to load configuration from meta master {}" , RuntimeConstants . VERSION , address ) ; channel = GrpcChannelBuilder . newBuilder ( new GrpcServerAddress ( address ) , conf ) . disableAuthentication ( ) . build ( ) ; MetaMasterConfigurationServiceGrpc . MetaMasterConfigurationServiceBlockingStub client = MetaMasterConfigurationServiceGrpc . newBlockingStub ( channel ) ; GetConfigurationPResponse response = client . getConfiguration ( GetConfigurationPOptions . newBuilder ( ) . setRawValue ( true ) . build ( ) ) ; LOG . info ( "Alluxio client has loaded configuration from meta master {}" , address ) ; return response ; } catch ( io . grpc . StatusRuntimeException e ) { throw new UnavailableException ( String . format ( "Failed to handshake with master %s to load cluster default configuration values: %s" , address , e . getMessage ( ) ) , e ) ; } catch ( UnauthenticatedException e ) { throw new RuntimeException ( String . format ( "Received authentication exception during boot-strap connect with host:%s" , address ) , e ) ; } finally { if ( channel != null ) { channel . shutdown ( ) ; } } } | Loads configuration from meta master . |
12,033 | private static Properties loadClientProperties ( List < ConfigProperty > properties , BiFunction < PropertyKey , String , String > logMessage ) { Properties props = new Properties ( ) ; for ( ConfigProperty property : properties ) { String name = property . getName ( ) ; if ( PropertyKey . isValid ( name ) && property . hasValue ( ) ) { PropertyKey key = PropertyKey . fromString ( name ) ; if ( ! GrpcUtils . contains ( key . getScope ( ) , Scope . CLIENT ) ) { continue ; } String value = property . getValue ( ) ; props . put ( key , value ) ; LOG . debug ( logMessage . apply ( key , value ) ) ; } } return props ; } | Loads client scope properties from the property list returned by grpc . |
12,034 | private static AlluxioConfiguration loadClusterConfiguration ( GetConfigurationPResponse response , AlluxioConfiguration conf ) { String clientVersion = conf . get ( PropertyKey . VERSION ) ; LOG . info ( "Alluxio client (version {}) is trying to load cluster level configurations" , clientVersion ) ; List < alluxio . grpc . ConfigProperty > clusterConfig = response . getConfigsList ( ) ; Properties clusterProps = loadClientProperties ( clusterConfig , ( key , value ) -> String . format ( "Loading property: %s (%s) -> %s" , key , key . getScope ( ) , value ) ) ; String clusterVersion = clusterProps . get ( PropertyKey . VERSION ) . toString ( ) ; if ( ! clientVersion . equals ( clusterVersion ) ) { LOG . warn ( "Alluxio client version ({}) does not match Alluxio cluster version ({})" , clientVersion , clusterVersion ) ; clusterProps . remove ( PropertyKey . VERSION ) ; } AlluxioProperties props = conf . copyProperties ( ) ; props . merge ( clusterProps , Source . CLUSTER_DEFAULT ) ; InstancedConfiguration updatedConf = new InstancedConfiguration ( props , true ) ; updatedConf . validate ( ) ; LOG . info ( "Alluxio client has loaded cluster level configurations" ) ; return updatedConf ; } | Loads the cluster level configuration from the get configuration response and merges it with the existing configuration . |
12,035 | private static PathConfiguration loadPathConfiguration ( GetConfigurationPResponse response , AlluxioConfiguration clusterConf ) { String clientVersion = clusterConf . get ( PropertyKey . VERSION ) ; LOG . info ( "Alluxio client (version {}) is trying to load path level configurations" , clientVersion ) ; Map < String , AlluxioConfiguration > pathConfs = new HashMap < > ( ) ; response . getPathConfigsMap ( ) . forEach ( ( path , conf ) -> { Properties props = loadClientProperties ( conf . getPropertiesList ( ) , ( key , value ) -> String . format ( "Loading property: %s (%s) -> %s for path %s" , key , key . getScope ( ) , value , path ) ) ; AlluxioProperties properties = new AlluxioProperties ( ) ; properties . merge ( props , Source . PATH_DEFAULT ) ; pathConfs . put ( path , new InstancedConfiguration ( properties , true ) ) ; } ) ; LOG . info ( "Alluxio client has loaded path level configurations" ) ; return PathConfiguration . create ( pathConfs ) ; } | Loads the path level configuration from the get configuration response . |
12,036 | public static Pair < AlluxioConfiguration , PathConfiguration > loadClusterAndPathDefaults ( InetSocketAddress address , AlluxioConfiguration clusterConf , PathConfiguration pathConf ) throws AlluxioStatusException { if ( shouldLoadClusterConfiguration ( clusterConf ) ) { GetConfigurationPResponse response = loadConfiguration ( address , clusterConf ) ; clusterConf = loadClusterConfiguration ( response , clusterConf ) ; pathConf = loadPathConfiguration ( response , clusterConf ) ; } return new Pair < > ( clusterConf , pathConf ) ; } | Loads both cluster and path level configurations from meta master . |
12,037 | static UfsJournalFile createLogFile ( URI location , long start , long end ) { return new UfsJournalFile ( location , start , end , false ) ; } | Creates a journal log file . |
12,038 | static UfsJournalFile createTmpCheckpointFile ( URI location ) { return new UfsJournalFile ( location , UfsJournal . UNKNOWN_SEQUENCE_NUMBER , UfsJournal . UNKNOWN_SEQUENCE_NUMBER , false ) ; } | Creates a temporary checkpoint file . |
12,039 | static URI encodeCheckpointFileLocation ( UfsJournal journal , long end ) { String filename = String . format ( "0x%x-0x%x" , 0 , end ) ; URI location = URIUtils . appendPathOrDie ( journal . getCheckpointDir ( ) , filename ) ; return location ; } | Encodes a checkpoint location under the checkpoint directory . |
12,040 | static URI encodeLogFileLocation ( UfsJournal journal , long start , long end ) { String filename = String . format ( "0x%x-0x%x" , start , end ) ; URI location = URIUtils . appendPathOrDie ( journal . getLogDir ( ) , filename ) ; return location ; } | Encodes a log location under the log directory . |
12,041 | static URI encodeTemporaryCheckpointFileLocation ( UfsJournal journal ) { return URIUtils . appendPathOrDie ( journal . getTmpDir ( ) , UUID . randomUUID ( ) . toString ( ) ) ; } | Encodes a temporary location under the temporary directory . |
12,042 | private void createUfsBlock ( BlockWriteRequestContext context ) throws Exception { BlockWriteRequest request = context . getRequest ( ) ; Protocol . CreateUfsBlockOptions createUfsBlockOptions = request . getCreateUfsBlockOptions ( ) ; UfsManager . UfsClient ufsClient = mUfsManager . get ( createUfsBlockOptions . getMountId ( ) ) ; alluxio . resource . CloseableResource < UnderFileSystem > ufsResource = ufsClient . acquireUfsResource ( ) ; context . setUfsResource ( ufsResource ) ; String ufsString = MetricsSystem . escape ( ufsClient . getUfsMountPointUri ( ) ) ; String ufsPath = BlockUtils . getUfsBlockPath ( ufsClient , request . getId ( ) ) ; UnderFileSystem ufs = ufsResource . get ( ) ; OutputStream ufsOutputStream = ufs . create ( ufsPath , CreateOptions . defaults ( ServerConfiguration . global ( ) ) . setEnsureAtomic ( true ) . setCreateParent ( true ) ) ; context . setOutputStream ( ufsOutputStream ) ; context . setUfsPath ( ufsPath ) ; String counterName = Metric . getMetricNameWithTags ( WorkerMetrics . BYTES_WRITTEN_UFS , WorkerMetrics . TAG_UFS , ufsString ) ; String meterName = Metric . getMetricNameWithTags ( WorkerMetrics . BYTES_WRITTEN_UFS_THROUGHPUT , WorkerMetrics . TAG_UFS , ufsString ) ; context . setCounter ( MetricsSystem . counter ( counterName ) ) ; context . setMeter ( MetricsSystem . meter ( meterName ) ) ; } | Creates a UFS block and initialize it with bytes read from block store . |
12,043 | private void transferToUfsBlock ( BlockWriteRequestContext context , long pos ) throws Exception { OutputStream ufsOutputStream = context . getOutputStream ( ) ; long sessionId = context . getRequest ( ) . getSessionId ( ) ; long blockId = context . getRequest ( ) . getId ( ) ; TempBlockMeta block = mWorker . getBlockStore ( ) . getTempBlockMeta ( sessionId , blockId ) ; if ( block == null ) { throw new NotFoundException ( "block " + blockId + " not found" ) ; } Preconditions . checkState ( Files . copy ( Paths . get ( block . getPath ( ) ) , ufsOutputStream ) == pos ) ; } | Transfers data from block store to UFS . |
12,044 | public void flush ( final long targetCounter ) throws IOException { if ( targetCounter <= mFlushCounter . get ( ) ) { return ; } mFlushLock . lock ( ) ; try { long startTime = System . nanoTime ( ) ; long flushCounter = mFlushCounter . get ( ) ; if ( targetCounter <= flushCounter ) { return ; } long writeCounter = mWriteCounter . get ( ) ; while ( targetCounter > writeCounter ) { for ( ; ; ) { JournalEntry entry = mQueue . peek ( ) ; if ( entry == null ) { break ; } mJournalWriter . write ( entry ) ; mQueue . poll ( ) ; writeCounter = mWriteCounter . incrementAndGet ( ) ; if ( writeCounter >= targetCounter ) { if ( ( System . nanoTime ( ) - startTime ) >= mFlushBatchTimeNs ) { break ; } } } } mJournalWriter . flush ( ) ; mFlushCounter . set ( writeCounter ) ; } finally { mFlushLock . unlock ( ) ; } } | Flushes and waits until the specified counter is flushed to the journal . If the specified counter is already flushed this is essentially a no - op . |
12,045 | public void close ( ) throws IOException { if ( mIn != null ) { mIn . close ( ) ; } if ( mOut != null ) { mOut . close ( ) ; } mOffset = - 1 ; } | Closes the underlying open streams . |
12,046 | public static List < String > getList ( PropertyKey key , String delimiter ) { return sConf . getList ( key , delimiter ) ; } | Gets the value for the given key as a list . |
12,047 | public static < T extends Enum < T > > T getEnum ( PropertyKey key , Class < T > enumType ) { return sConf . getEnum ( key , enumType ) ; } | Gets the value for the given key as an enum value . |
12,048 | public static boolean supportAlluxioHA ( PrintWriter reportWriter , AlluxioConfiguration alluxioConf ) { if ( alluxioConf . getBoolean ( PropertyKey . ZOOKEEPER_ENABLED ) ) { reportWriter . println ( "Alluixo is running in high availability mode.\n" ) ; if ( ! alluxioConf . isSet ( PropertyKey . ZOOKEEPER_ADDRESS ) ) { reportWriter . println ( "Please set Zookeeper address to support " + "Alluxio high availability mode.\n" ) ; return false ; } else { reportWriter . printf ( "Zookeeper address is: %s.%n" , alluxioConf . get ( PropertyKey . ZOOKEEPER_ADDRESS ) ) ; } } return true ; } | Checks if the Zookeeper address has been set when running the Alluxio HA mode . |
12,049 | public static Status printNodesResults ( Map < Status , List < String > > map , PrintWriter reportWriter ) { boolean canFindClass = true ; boolean canFindFS = true ; for ( Map . Entry < Status , List < String > > entry : map . entrySet ( ) ) { String nodeAddresses = String . join ( " " , entry . getValue ( ) ) ; switch ( entry . getKey ( ) ) { case FAIL_TO_FIND_CLASS : canFindClass = false ; reportWriter . printf ( "Nodes of IP addresses: %s " + "cannot recognize Alluxio classes.%n%n" , nodeAddresses ) ; break ; case FAIL_TO_FIND_FS : canFindFS = false ; reportWriter . printf ( "Nodes of IP addresses: %s " + "cannot recognize Alluxio filesystem.%n%n" , nodeAddresses ) ; break ; default : reportWriter . printf ( "Nodes of IP addresses: %s " + "can recognize Alluxio filesystem.%n%n" , nodeAddresses ) ; } } return canFindClass ? ( canFindFS ? Status . SUCCESS : Status . FAIL_TO_FIND_FS ) : Status . FAIL_TO_FIND_CLASS ; } | Collects and saves the Status of checked nodes . |
12,050 | public MasterWebUIConfiguration setConfiguration ( TreeSet < Triple < String , String , String > > configuration ) { mConfiguration = configuration ; return this ; } | Sets configuration . |
/**
 * Generates client file info for a folder.
 *
 * @param path the path to report in the generated file info
 * @return a {@link FileInfo} describing this directory
 */
public FileInfo generateClientFileInfo(String path) {
  FileInfo ret = new FileInfo();
  ret.setFileId(getId());
  ret.setName(getName());
  ret.setPath(path);
  // Directories have no blocks, so the block size is zero and they are always complete.
  ret.setBlockSizeBytes(0);
  ret.setCreationTimeMs(getCreationTimeMs());
  ret.setCompleted(true);
  ret.setFolder(isDirectory());
  ret.setPinned(isPinned());
  // Directories are never cacheable.
  ret.setCacheable(false);
  ret.setPersisted(isPersisted());
  ret.setLastModificationTimeMs(getLastModificationTimeMs());
  ret.setTtl(mTtl);
  ret.setTtlAction(mTtlAction);
  ret.setOwner(getOwner());
  ret.setGroup(getGroup());
  ret.setMode(getMode());
  ret.setPersistenceState(getPersistenceState().toString());
  ret.setMountPoint(isMountPoint());
  // No UFS fingerprint is tracked for directories here.
  ret.setUfsFingerprint(Constants.INVALID_UFS_FINGERPRINT);
  ret.setAcl(mAcl);
  ret.setDefaultAcl(mDefaultAcl);
  return ret;
}
12,052 | public void updateFromEntry ( UpdateInodeDirectoryEntry entry ) { if ( entry . hasDefaultAcl ( ) ) { setDefaultACL ( ( DefaultAccessControlList ) ProtoUtils . fromProto ( entry . getDefaultAcl ( ) ) ) ; } if ( entry . hasDirectChildrenLoaded ( ) ) { setDirectChildrenLoaded ( entry . getDirectChildrenLoaded ( ) ) ; } if ( entry . hasMountPoint ( ) ) { setMountPoint ( entry . getMountPoint ( ) ) ; } } | Updates this inode directory s state from the given entry . |
12,053 | private long getAvailableBytes ( BlockWorkerInfo workerInfo ) { long mCapacityBytes = workerInfo . getCapacityBytes ( ) ; long mUsedBytes = workerInfo . getUsedBytes ( ) ; return mCapacityBytes - mUsedBytes - mBlockCapacityReserved ; } | The information of BlockWorkerInfo is update after a file complete write . To avoid evict user should configure alluxio . user . file . write . avoid . eviction . policy . reserved . size . bytes to reserve some space to store the block . |
/**
 * Handles a streaming RPC callable with metrics timing and debug logging.
 * On failure the error is counted, handed to {@code callable.exceptionCaught}, and
 * {@code null} is returned instead of rethrowing.
 *
 * @param logger the logger for enter/exit messages
 * @param callable the RPC body to invoke
 * @param methodName the RPC method name used for metric and log identification
 * @param description a format string describing the call
 * @param args arguments for the description format string
 * @param <T> the RPC result type
 * @return the callable's result, or null if the callable threw
 */
public static <T> T streamingRPCAndLog(Logger logger, StreamingRpcCallable<T> callable,
    String methodName, String description, Object... args) {
  // Only pay the String.format cost when debug logging is actually enabled.
  String debugDesc = logger.isDebugEnabled() ? String.format(description, args) : null;
  try (Timer.Context ctx = MetricsSystem.timer(getQualifiedMetricName(methodName)).time()) {
    logger.debug("Enter: {}: {}", methodName, debugDesc);
    T result = callable.call();
    logger.debug("Exit (OK): {}: {}", methodName, debugDesc);
    return result;
  } catch (Exception e) {
    logger.warn("Exit (Error): {}: {}, Error={}", methodName,
        String.format(description, args), e);
    MetricsSystem.counter(getQualifiedFailureMetricName(methodName)).inc();
    // Error handling is delegated to the callable; the exception is not rethrown.
    callable.exceptionCaught(e);
  }
  return null;
}
12,055 | public synchronized void start ( ) { Preconditions . checkState ( mProcess == null , "Master is already running" ) ; LOG . info ( "Starting master with port {}" , mProperties . get ( PropertyKey . MASTER_RPC_PORT ) ) ; mProcess = new ExternalProcess ( mProperties , LimitedLifeMasterProcess . class , new File ( mLogsDir , "master.out" ) ) ; try { mProcess . start ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Launches the master process . |
12,056 | private static void usage ( ) { new HelpFormatter ( ) . printHelp ( String . format ( "java -cp %s %s -type <[READ, WRITE]> -fileSize <fileSize> -iterations <iterations> " + "-concurrency <concurrency>" , RuntimeConstants . ALLUXIO_JAR , MiniBenchmark . class . getCanonicalName ( ) ) , "run a mini benchmark to write or read a file" , OPTIONS , "" , true ) ; } | Prints the usage . |
12,057 | private static void writeFile ( CyclicBarrier barrier , AtomicLong runtime , int count , AlluxioConfiguration alluxioConf ) throws Exception { FileSystem fileSystem = FileSystem . Factory . create ( alluxioConf ) ; byte [ ] buffer = new byte [ ( int ) Math . min ( sFileSize , 4 * Constants . MB ) ] ; Arrays . fill ( buffer , ( byte ) 'a' ) ; AlluxioURI path = filename ( count ) ; if ( fileSystem . exists ( path ) ) { fileSystem . delete ( path ) ; } barrier . await ( ) ; long startTime = System . nanoTime ( ) ; long bytesWritten = 0 ; try ( FileOutStream outStream = fileSystem . createFile ( path ) ) { while ( bytesWritten < sFileSize ) { outStream . write ( buffer , 0 , ( int ) Math . min ( buffer . length , sFileSize - bytesWritten ) ) ; bytesWritten += buffer . length ; } } runtime . addAndGet ( System . nanoTime ( ) - startTime ) ; } | Writes a file . |
12,058 | public synchronized void updateLength ( long length ) { if ( mLength == Constants . UNKNOWN_SIZE ) { mLength = length ; } else if ( mLength != length ) { LOG . warn ( "Attempting to update block length ({}) to a different length ({})." , mLength , length ) ; } } | Updates the length if and only if the length was previously unknown . |
12,059 | public List < MasterBlockLocation > getBlockLocations ( ) { List < MasterBlockLocation > ret = new ArrayList < > ( mWorkerIdToAlias . size ( ) ) ; for ( Map . Entry < Long , String > entry : mWorkerIdToAlias . entrySet ( ) ) { ret . add ( new MasterBlockLocation ( entry . getKey ( ) , entry . getValue ( ) ) ) ; } return ret ; } | Gets the net addresses for all workers which have the block s data in their tiered storage . |
12,060 | public void close ( ) { if ( mClosed . getAndSet ( true ) ) { return ; } try { mLocalOutputStream . close ( ) ; mKodoClient . uploadFile ( mKey , mFile ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } mFile . delete ( ) ; } | Closes this output stream . When an output stream is closed the local temporary file is uploaded to KODO Service . Once the file is uploaded the temporary file is deleted . |
12,061 | public static void main ( String [ ] args ) { if ( args . length != 0 ) { LOG . info ( "java -cp {} {}" , RuntimeConstants . ALLUXIO_JAR , AlluxioProxyMonitor . class . getCanonicalName ( ) ) ; LOG . warn ( "ignoring arguments" ) ; } HealthCheckClient client = new ProxyHealthCheckClient ( NetworkAddressUtils . getBindAddress ( NetworkAddressUtils . ServiceType . PROXY_WEB , new InstancedConfiguration ( ConfigurationUtils . defaults ( ) ) ) , ( ) -> new ExponentialBackoffRetry ( 50 , 100 , 2 ) ) ; if ( ! client . isServing ( ) ) { System . exit ( 1 ) ; } System . exit ( 0 ) ; } | Starts the Alluxio proxy monitor . |
12,062 | private void getSizeInfo ( AlluxioURI path , List < URIStatus > statuses , boolean readable , boolean summarize , boolean addMemory ) { if ( summarize ) { long totalSize = 0 ; long sizeInAlluxio = 0 ; long sizeInMem = 0 ; for ( URIStatus status : statuses ) { if ( ! status . isFolder ( ) ) { long size = status . getLength ( ) ; totalSize += size ; sizeInMem += size * status . getInMemoryPercentage ( ) ; sizeInAlluxio += size * status . getInMemoryPercentage ( ) ; } } String sizeMessage = readable ? FormatUtils . getSizeFromBytes ( totalSize ) : String . valueOf ( totalSize ) ; String inAlluxioMessage = getFormattedValues ( readable , sizeInAlluxio / 100 , totalSize ) ; String inMemMessage = addMemory ? getFormattedValues ( readable , sizeInMem / 100 , totalSize ) : "" ; printInfo ( sizeMessage , inAlluxioMessage , inMemMessage , path . toString ( ) ) ; } else { for ( URIStatus status : statuses ) { if ( ! status . isFolder ( ) ) { long totalSize = status . getLength ( ) ; String sizeMessage = readable ? FormatUtils . getSizeFromBytes ( totalSize ) : String . valueOf ( totalSize ) ; String inAlluxioMessage = getFormattedValues ( readable , status . getInAlluxioPercentage ( ) * totalSize / 100 , totalSize ) ; String inMemMessage = addMemory ? getFormattedValues ( readable , status . getInMemoryPercentage ( ) * totalSize / 100 , totalSize ) : "" ; printInfo ( sizeMessage , inAlluxioMessage , inMemMessage , status . getPath ( ) ) ; } } } } | Gets and prints the size information of the input path according to options . |
12,063 | private String getFormattedValues ( boolean readable , long size , long totalSize ) { int percent = totalSize == 0 ? 0 : ( int ) ( size * 100 / totalSize ) ; String subSizeMessage = readable ? FormatUtils . getSizeFromBytes ( size ) : String . valueOf ( size ) ; return String . format ( VALUE_AND_PERCENT_FORMAT , subSizeMessage , percent ) ; } | Gets the size and its percentage information if readable option is provided get the size in human readable format . |
12,064 | private void printInfo ( String sizeMessage , String inAlluxioMessage , String inMemMessage , String path ) { System . out . println ( inMemMessage . isEmpty ( ) ? String . format ( SHORT_INFO_FORMAT , sizeMessage , inAlluxioMessage , path ) : String . format ( LONG_INFO_FORMAT , sizeMessage , inAlluxioMessage , inMemMessage , path ) ) ; } | Prints the size messages . |
/**
 * Gets a logger's level by name; when the level argument is non-null the logger is first
 * set to that level. The outcome is reported through the returned {@link LogInfo}.
 *
 * Reflection is used for SLF4J's Log4jLoggerAdapter because it does not expose the
 * underlying log4j logger through its public API.
 *
 * @param logName name of the logger to inspect or update
 * @param level the level to set, or null to only read the current level
 * @return a LogInfo describing the resulting logger state or an error message
 * @throws IOException declared by the signature; not thrown by the visible body —
 *         presumably kept for interface compatibility (TODO confirm)
 */
public static LogInfo setLogLevel(String logName, String level) throws IOException {
  LogInfo result = new LogInfo();
  if (StringUtils.isNotBlank(logName)) {
    result.setLogName(logName);
    Log log = LogFactory.getLog(logName);
    Logger logger = LoggerFactory.getLogger(logName);
    if (log instanceof Log4JLogger) {
      process(((Log4JLogger) log).getLogger(), level, result);
    } else if (log instanceof Jdk14Logger) {
      process(((Jdk14Logger) log).getLogger(), level, result);
    } else if (logger instanceof Log4jLoggerAdapter) {
      try {
        // The adapter hides its log4j delegate in a private "logger" field.
        Field field = Log4jLoggerAdapter.class.getDeclaredField("logger");
        field.setAccessible(true);
        org.apache.log4j.Logger log4jLogger = (org.apache.log4j.Logger) field.get(logger);
        process(log4jLogger, level, result);
      } catch (NoSuchFieldException | IllegalAccessException e) {
        result.setMessage(e.getMessage());
      }
    } else {
      result.setMessage("Sorry, " + log.getClass() + " not supported.");
    }
  } else {
    result.setMessage("Please specify a correct logName.");
  }
  return result;
}
12,066 | public static void warnWithException ( Logger logger , String message , Object ... args ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( message , args ) ; } else { if ( args . length > 0 && args [ args . length - 1 ] instanceof Throwable ) { args [ args . length - 1 ] = ( ( Throwable ) args [ args . length - 1 ] ) . getMessage ( ) ; } logger . warn ( message + ": {}" , args ) ; } } | Log a warning message with full exception if debug logging is enabled or just the message otherwise . |
12,067 | public static User get ( AlluxioConfiguration conf ) throws UnauthenticatedException { if ( sLoginUser == null ) { synchronized ( LoginUser . class ) { if ( sLoginUser == null ) { sLoginUser = login ( conf ) ; } } } return sLoginUser ; } | Gets current singleton login user . This method is called to identify the singleton user who runs Alluxio client . When Alluxio client gets a user by this method and connects to Alluxio service this user represents the client and is maintained in service . |
/**
 * Logs in based on the configured LoginModules and returns the single Alluxio user
 * attached to the resulting subject.
 *
 * @param conf configuration providing the authentication type
 * @return the login {@link User} principal
 * @throws UnauthenticatedException if login fails, or if zero or more than one Alluxio
 *         user principal results from the login
 */
private static User login(AlluxioConfiguration conf) throws UnauthenticatedException {
  AuthType authType = conf.getEnum(PropertyKey.SECURITY_AUTHENTICATION_TYPE, AuthType.class);
  checkSecurityEnabled(authType);
  Subject subject = new Subject();
  try {
    LoginContext loginContext = createLoginContext(authType, subject,
        User.class.getClassLoader(), new LoginModuleConfiguration(), conf);
    loginContext.login();
  } catch (LoginException e) {
    throw new UnauthenticatedException("Failed to login: " + e.getMessage(), e);
  }
  LOG.debug("login subject: {}", subject);
  // Exactly one Alluxio User principal must have been attached by the login modules.
  Set<User> userSet = subject.getPrincipals(User.class);
  if (userSet.isEmpty()) {
    throw new UnauthenticatedException("Failed to login: No Alluxio User is found.");
  }
  if (userSet.size() > 1) {
    StringBuilder msg = new StringBuilder(
        "Failed to login: More than one Alluxio Users are found:");
    for (User user : userSet) {
      msg.append(" ").append(user.toString());
    }
    throw new UnauthenticatedException(msg.toString());
  }
  return userSet.iterator().next();
}
12,069 | public void start ( InetSocketAddress address ) throws IOException { mName = address . getHostName ( ) + ":" + address . getPort ( ) ; mLeaderSelector . setId ( mName ) ; mLeaderSelector . start ( ) ; } | Starts the leader selection . If the leader selector client loses connection to Zookeeper or gets closed the calling thread will be interrupted . |
/**
 * Returns a new, already-started Curator client for the zookeeper connection.
 *
 * NOTE(review): a first client is created, started, and immediately closed before the
 * real client is built with identical parameters. This looks like a deliberate
 * workaround (possibly forcing an early connection attempt) — confirm the intent
 * before simplifying it away.
 *
 * @return a started {@link CuratorFramework} client
 */
private CuratorFramework getNewCuratorClient() {
  CuratorFramework client = CuratorFrameworkFactory.newClient(mZookeeperAddress,
      (int) ServerConfiguration.getMs(PropertyKey.ZOOKEEPER_SESSION_TIMEOUT),
      (int) ServerConfiguration.getMs(PropertyKey.ZOOKEEPER_CONNECTION_TIMEOUT),
      new ExponentialBackoffRetry(Constants.SECOND_MS, 3));
  client.start();
  client.close();
  // Build the client that is actually returned to the caller.
  client = CuratorFrameworkFactory.newClient(mZookeeperAddress,
      (int) ServerConfiguration.getMs(PropertyKey.ZOOKEEPER_SESSION_TIMEOUT),
      (int) ServerConfiguration.getMs(PropertyKey.ZOOKEEPER_CONNECTION_TIMEOUT),
      new ExponentialBackoffRetry(Constants.SECOND_MS, 3));
  client.start();
  return client;
}
12,071 | public static Map < String , Properties > subProperties ( Properties prop , String regex ) { Map < String , Properties > subProperties = new HashMap < > ( ) ; Pattern pattern = Pattern . compile ( regex ) ; for ( Map . Entry < Object , Object > entry : prop . entrySet ( ) ) { Matcher m = pattern . matcher ( entry . getKey ( ) . toString ( ) ) ; if ( m . find ( ) ) { String prefix = m . group ( 1 ) ; String suffix = m . group ( 2 ) ; if ( ! subProperties . containsKey ( prefix ) ) { subProperties . put ( prefix , new Properties ( ) ) ; } subProperties . get ( prefix ) . put ( suffix , entry . getValue ( ) ) ; } } return subProperties ; } | Uses regex to parse every original property key to a prefix and a suffix . Creates sub properties that are grouped by the prefix . |
12,072 | private void loadConfigFile ( String configFile ) { try ( InputStream is = new FileInputStream ( configFile ) ) { mProperties . load ( is ) ; } catch ( Exception e ) { LOG . error ( "Error loading metrics configuration file." , e ) ; } } | Loads the metrics configuration file . |
12,073 | private void removeInstancePrefix ( ) { Properties newProperties = new Properties ( ) ; for ( Map . Entry < Object , Object > entry : mProperties . entrySet ( ) ) { String key = entry . getKey ( ) . toString ( ) ; if ( key . startsWith ( "*" ) || key . startsWith ( "worker" ) || key . startsWith ( "master" ) ) { String newKey = key . substring ( key . indexOf ( '.' ) + 1 ) ; newProperties . put ( newKey , entry . getValue ( ) ) ; } else { newProperties . put ( key , entry . getValue ( ) ) ; } } mProperties = newProperties ; } | Removes the instance prefix in the properties . This is to make the configuration parsing logic backward compatible with old configuration format . |
12,074 | public static void logPerIteration ( long startTimeMs , int times , String msg , int workerId ) { long takenTimeMs = System . currentTimeMillis ( ) - startTimeMs ; double result = 1000.0 * sFileBytes / takenTimeMs / 1024 / 1024 ; LOG . info ( times + msg + workerId + " : " + result + " Mb/sec. Took " + takenTimeMs + " ms. " ) ; } | Writes log information . |
12,075 | private String getPollDir ( ) { String pollDir = mProperties . getProperty ( CSV_KEY_DIR ) ; return pollDir != null ? pollDir : CSV_DEFAULT_DIR ; } | Gets the directory where the CSV files are created . |
12,076 | public static void retry ( String action , RunnableThrowsIOException f , RetryPolicy policy ) throws IOException { IOException e = null ; while ( policy . attempt ( ) ) { try { f . run ( ) ; return ; } catch ( IOException ioe ) { e = ioe ; LOG . warn ( "Failed to {} (attempt {}): {}" , action , policy . getAttemptCount ( ) , e . toString ( ) ) ; } } throw e ; } | Retries the given method until it doesn t throw an IO exception or the retry policy expires . If the retry policy expires the last exception generated will be rethrown . |
12,077 | public static RetryPolicy defaultClientRetry ( Duration maxRetryDuration , Duration baseSleepMs , Duration maxSleepMs ) { return ExponentialTimeBoundedRetry . builder ( ) . withMaxDuration ( maxRetryDuration ) . withInitialSleep ( baseSleepMs ) . withMaxSleep ( maxSleepMs ) . build ( ) ; } | Gives a ClientRetry based on the given parameters . |
12,078 | public boolean stopStandby ( ) { for ( int k = 0 ; k < mNumOfMasters ; k ++ ) { if ( ! mMasters . get ( k ) . isServing ( ) ) { try { LOG . info ( "master {} is a standby. stopping it..." , k ) ; mMasters . get ( k ) . stop ( ) ; LOG . info ( "master {} stopped." , k ) ; } catch ( Exception e ) { LOG . error ( e . getMessage ( ) , e ) ; return false ; } return true ; } } return false ; } | Iterates over the masters in the order of master creation stops the first standby master . |
12,079 | public void waitForNewMaster ( int timeoutMs ) throws TimeoutException , InterruptedException { CommonUtils . waitFor ( "the new leader master to start" , ( ) -> getLeaderIndex ( ) != - 1 , WaitForOptions . defaults ( ) . setTimeoutMs ( timeoutMs ) ) ; } | Waits for a new master to start until a timeout occurs . |
12,080 | public static GetStatusPOptions toGetStatusOptions ( ExistsPOptions existsOptions ) { GetStatusPOptions . Builder getStatusOptionsBuilder = GetStatusPOptions . newBuilder ( ) ; if ( existsOptions . hasCommonOptions ( ) ) { getStatusOptionsBuilder . setCommonOptions ( existsOptions . getCommonOptions ( ) ) ; } if ( existsOptions . hasLoadMetadataType ( ) ) { getStatusOptionsBuilder . setLoadMetadataType ( existsOptions . getLoadMetadataType ( ) ) ; } return getStatusOptionsBuilder . build ( ) ; } | Converts from proto type to options . |
/**
 * Converts a wire-type {@link FileInfo} to its proto representation. ACL fields are only
 * set on the builder when they differ from the empty defaults.
 *
 * @param fileInfo the wire file info to convert
 * @return the proto file info
 */
public static alluxio.grpc.FileInfo toProto(FileInfo fileInfo) {
  // Convert the nested block infos first.
  List<alluxio.grpc.FileBlockInfo> fileBlockInfos = new ArrayList<>();
  for (FileBlockInfo fileBlockInfo : fileInfo.getFileBlockInfos()) {
    fileBlockInfos.add(toProto(fileBlockInfo));
  }
  alluxio.grpc.FileInfo.Builder builder = alluxio.grpc.FileInfo.newBuilder()
      .setFileId(fileInfo.getFileId())
      .setName(fileInfo.getName())
      .setPath(fileInfo.getPath())
      .setUfsPath(fileInfo.getUfsPath())
      .setLength(fileInfo.getLength())
      .setBlockSizeBytes(fileInfo.getBlockSizeBytes())
      .setCreationTimeMs(fileInfo.getCreationTimeMs())
      .setCompleted(fileInfo.isCompleted())
      .setFolder(fileInfo.isFolder())
      .setPinned(fileInfo.isPinned())
      .setCacheable(fileInfo.isCacheable())
      .setPersisted(fileInfo.isPersisted())
      .addAllBlockIds(fileInfo.getBlockIds())
      .setLastModificationTimeMs(fileInfo.getLastModificationTimeMs())
      .setTtl(fileInfo.getTtl())
      .setOwner(fileInfo.getOwner())
      .setGroup(fileInfo.getGroup())
      .setMode(fileInfo.getMode())
      .setPersistenceState(fileInfo.getPersistenceState())
      .setMountPoint(fileInfo.isMountPoint())
      .addAllFileBlockInfos(fileBlockInfos)
      .setTtlAction(fileInfo.getTtlAction())
      .setMountId(fileInfo.getMountId())
      .setInAlluxioPercentage(fileInfo.getInAlluxioPercentage())
      .setInMemoryPercentage(fileInfo.getInMemoryPercentage())
      .setUfsFingerprint(fileInfo.getUfsFingerprint())
      .setReplicationMax(fileInfo.getReplicationMax())
      .setReplicationMin(fileInfo.getReplicationMin());
  // Only attach ACLs that carry real content; proto fields stay unset otherwise.
  if (!fileInfo.getAcl().equals(AccessControlList.EMPTY_ACL)) {
    builder.setAcl(toProto(fileInfo.getAcl()));
  }
  if (!fileInfo.getDefaultAcl().equals(DefaultAccessControlList.EMPTY_DEFAULT_ACL)) {
    builder.setDefaultAcl(toProto(fileInfo.getDefaultAcl()));
  }
  return builder.build();
}
12,082 | public static void setTtl ( FileSystem fs , AlluxioURI path , long ttlMs , TtlAction ttlAction ) throws AlluxioException , IOException { SetAttributePOptions options = SetAttributePOptions . newBuilder ( ) . setRecursive ( true ) . setCommonOptions ( FileSystemMasterCommonPOptions . newBuilder ( ) . setTtl ( ttlMs ) . setTtlAction ( ttlAction ) . build ( ) ) . build ( ) ; fs . setAttribute ( path , options ) ; } | Sets a new TTL value or unsets an existing TTL value for file at path . |
12,083 | public static void setPinned ( FileSystem fs , AlluxioURI path , boolean pinned ) throws AlluxioException , IOException { SetAttributePOptions options = SetAttributePOptions . newBuilder ( ) . setPinned ( pinned ) . build ( ) ; fs . setAttribute ( path , options ) ; } | Sets pin state for the input path . |
/**
 * Converts checked throwables to Alluxio status exceptions. Unchecked throwables must not
 * be passed here; use Throwables.propagateIfPossible first.
 *
 * The throw/catch construct below is a deliberate dispatch-by-type: rethrowing the
 * argument lets the catch clauses, in order, select the most specific mapping.
 *
 * @param throwable the checked throwable to convert
 * @return the corresponding Alluxio status exception
 * @throws IllegalStateException if an unchecked throwable is passed in
 */
public static AlluxioStatusException fromCheckedException(Throwable throwable) {
  try {
    throw throwable;
  } catch (IOException e) {
    return fromIOException(e);
  } catch (AlluxioException e) {
    return fromAlluxioException(e);
  } catch (InterruptedException e) {
    return new CancelledException(e);
  } catch (RuntimeException e) {
    // Unchecked: the caller violated the contract.
    throw new IllegalStateException("Expected a checked exception but got " + e);
  } catch (Exception e) {
    return new UnknownException(e);
  } catch (Throwable t) {
    throw new IllegalStateException("Expected a checked exception but got " + t);
  }
}
12,085 | public static AlluxioStatusException fromStatusRuntimeException ( StatusRuntimeException e ) { return AlluxioStatusException . from ( e . getStatus ( ) . withCause ( e ) ) ; } | Converts a gRPC StatusRuntimeException to an Alluxio status exception . |
/**
 * Converts checked Alluxio exceptions to Alluxio status exceptions.
 *
 * The throw/catch construct is a deliberate dispatch-by-type: rethrowing lets each
 * multi-catch clause select the most specific mapping; the final AlluxioException
 * clause is the fallback for unrecognized subtypes.
 *
 * @param ae the Alluxio exception to convert
 * @return the corresponding Alluxio status exception
 */
public static AlluxioStatusException fromAlluxioException(AlluxioException ae) {
  try {
    throw ae;
  } catch (AccessControlException e) {
    return new PermissionDeniedException(e);
  } catch (BlockAlreadyExistsException | FileAlreadyCompletedException
      | FileAlreadyExistsException e) {
    return new AlreadyExistsException(e);
  } catch (BlockDoesNotExistException | FileDoesNotExistException e) {
    return new NotFoundException(e);
  } catch (BlockInfoException | InvalidFileSizeException | InvalidPathException e) {
    return new InvalidArgumentException(e);
  } catch (ConnectionFailedException | FailedToCheckpointException
      | UfsBlockAccessTokenUnavailableException e) {
    return new UnavailableException(e);
  } catch (DependencyDoesNotExistException | DirectoryNotEmptyException
      | InvalidWorkerStateException e) {
    return new FailedPreconditionException(e);
  } catch (WorkerOutOfSpaceException e) {
    return new ResourceExhaustedException(e);
  } catch (AlluxioException e) {
    return new UnknownException(e);
  }
}
12,087 | public static TtlAction fromProtobuf ( PTtlAction pTtlAction ) { if ( pTtlAction == null ) { return TtlAction . DELETE ; } switch ( pTtlAction ) { case DELETE : return TtlAction . DELETE ; case FREE : return TtlAction . FREE ; default : throw new IllegalStateException ( "Unknown protobuf ttl action: " + pTtlAction ) ; } } | Converts Protobuf type to Wire type . |
12,088 | public static PTtlAction toProtobuf ( TtlAction ttlAction ) { if ( ttlAction == null ) { return PTtlAction . DELETE ; } switch ( ttlAction ) { case DELETE : return PTtlAction . DELETE ; case FREE : return PTtlAction . FREE ; default : throw new IllegalStateException ( "Unknown ttl action: " + ttlAction ) ; } } | Converts Wire type to Protobuf type . |
12,089 | public Map < String , String > parseXmlConfiguration ( final String path ) { File xmlFile ; xmlFile = new File ( path ) ; if ( ! xmlFile . exists ( ) ) { System . err . format ( "File %s does not exist." , path ) ; return null ; } DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory . newInstance ( ) ; DocumentBuilder docBuilder ; try { docBuilder = docBuilderFactory . newDocumentBuilder ( ) ; } catch ( ParserConfigurationException e ) { System . err . format ( "Failed to create instance of DocumentBuilder for file: %s. %s. %n" , path , e . getMessage ( ) ) ; return null ; } Document doc ; try { doc = docBuilder . parse ( xmlFile ) ; } catch ( IOException e ) { System . err . format ( "An I/O error occured reading file %s. %s.%n" , path , e . getMessage ( ) ) ; return null ; } catch ( SAXException e ) { System . err . format ( "A parsing error occured parsing file %s. %s.%n" , path , e . getMessage ( ) ) ; return null ; } doc . getDocumentElement ( ) . normalize ( ) ; Map < String , String > ret = new HashMap < > ( ) ; NodeList propNodeList = doc . getElementsByTagName ( "property" ) ; for ( int i = 0 ; i < propNodeList . getLength ( ) ; i ++ ) { Node propNode = propNodeList . item ( i ) ; if ( propNode . getNodeType ( ) == Node . ELEMENT_NODE ) { Element element = ( Element ) propNode ; ret . put ( element . getElementsByTagName ( "name" ) . item ( 0 ) . getTextContent ( ) , element . getElementsByTagName ( "value" ) . item ( 0 ) . getTextContent ( ) ) ; } } return ret ; } | Parse an xml configuration file into a map . |
/**
 * Starts the Alluxio worker process.
 *
 * The startup order matters: the process type is set before any RPC is made, and the
 * cluster defaults are loaded from the master before the worker process is created.
 *
 * @param args no arguments are expected; the process exits if any are given
 */
public static void main(String[] args) {
  if (args.length != 0) {
    LOG.info("java -cp {} {}", RuntimeConstants.ALLUXIO_JAR,
        AlluxioWorker.class.getCanonicalName());
    System.exit(-1);
  }
  // A worker cannot start without knowing where the master is.
  if (!ConfigurationUtils.masterHostConfigured(ServerConfiguration.global())) {
    ProcessUtils.fatalError(LOG,
        ConfigurationUtils.getMasterHostNotConfiguredMessage("Alluxio worker"));
  }
  CommonUtils.PROCESS_TYPE.set(CommonUtils.ProcessType.WORKER);
  MasterInquireClient masterInquireClient =
      MasterInquireClient.Factory.create(ServerConfiguration.global());
  try {
    // Retry pulling cluster defaults until the configured connect timeout elapses.
    RetryUtils.retry("load cluster default configuration with master", () -> {
      InetSocketAddress masterAddress = masterInquireClient.getPrimaryRpcAddress();
      ServerConfiguration.loadClusterDefaults(masterAddress);
    }, RetryUtils.defaultWorkerMasterClientRetry(ServerConfiguration
        .getDuration(PropertyKey.WORKER_MASTER_CONNECT_RETRY_TIMEOUT)));
  } catch (IOException e) {
    ProcessUtils.fatalError(LOG,
        "Failed to load cluster default configuration for worker: %s", e.getMessage());
  }
  WorkerProcess process = WorkerProcess.Factory.create();
  ProcessUtils.run(process);
}
12,091 | private void shutdownManagedChannel ( ChannelKey channelKey , long shutdownTimeoutMs ) { ManagedChannel managedChannel = mChannels . get ( channelKey ) . get ( ) ; managedChannel . shutdown ( ) ; try { managedChannel . awaitTermination ( shutdownTimeoutMs , TimeUnit . MILLISECONDS ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; } finally { managedChannel . shutdownNow ( ) ; } Verify . verify ( managedChannel . isShutdown ( ) ) ; LOG . debug ( "Shut down managed channel. ChannelKey: {}" , channelKey ) ; } | Shuts down the managed channel for given key . |
12,092 | public void close ( ) throws IOException { try { mLock . lock ( ) ; if ( mAvailableResources . size ( ) != mResources . size ( ) ) { LOG . warn ( "{} resources are not released when closing the resource pool." , mResources . size ( ) - mAvailableResources . size ( ) ) ; } for ( ResourceInternal < T > resourceInternal : mAvailableResources ) { closeResource ( resourceInternal . mResource ) ; } mAvailableResources . clear ( ) ; } finally { mLock . unlock ( ) ; } mGcFuture . cancel ( true ) ; } | Closes the pool and clears all the resources . The resource pool should not be used after this . |
12,093 | private boolean add ( ResourceInternal < T > resource ) { try { mLock . lock ( ) ; if ( mResources . size ( ) >= mMaxCapacity ) { return false ; } else { mResources . put ( resource . mResource , resource ) ; return true ; } } finally { mLock . unlock ( ) ; } } | Adds a newly created resource to the pool . The resource is not available when it is added . |
12,094 | private void remove ( T resource ) { try { mLock . lock ( ) ; mResources . remove ( resource ) ; } finally { mLock . unlock ( ) ; } } | Removes an existing resource from the pool . |
12,095 | private T checkHealthyAndRetry ( T resource , long endTimeMs ) throws TimeoutException , IOException { if ( isHealthy ( resource ) ) { return resource ; } else { LOG . info ( "Clearing unhealthy resource {}." , resource ) ; remove ( resource ) ; closeResource ( resource ) ; return acquire ( endTimeMs - mClock . millis ( ) , TimeUnit . MILLISECONDS ) ; } } | Checks whether the resource is healthy . If not retry . When this called the resource is not in mAvailableResources . |
12,096 | public static String [ ] getSetPermissionCommand ( String perm , String filePath ) { return new String [ ] { SET_PERMISSION_COMMAND , perm , filePath } ; } | Returns a Unix command to set permission . |
12,097 | public static List < UnixMountInfo > getUnixMountInfo ( ) throws IOException { Preconditions . checkState ( OSUtils . isLinux ( ) || OSUtils . isMacOS ( ) ) ; String output = execCommand ( MOUNT_COMMAND ) ; List < UnixMountInfo > mountInfo = new ArrayList < > ( ) ; for ( String line : output . split ( "\n" ) ) { mountInfo . add ( parseMountInfo ( line ) ) ; } return mountInfo ; } | Gets system mount information . This method should only be attempted on Unix systems . |
12,098 | public void close ( ) throws IOException { if ( mClosed . getAndSet ( true ) ) { return ; } mLocalOutputStream . close ( ) ; try ( BufferedInputStream in = new BufferedInputStream ( new FileInputStream ( mFile ) ) ) { ObjectMetadata objMeta = new ObjectMetadata ( ) ; objMeta . setContentLength ( mFile . length ( ) ) ; if ( mHash != null ) { byte [ ] hashBytes = mHash . digest ( ) ; objMeta . setContentMD5 ( new String ( Base64 . encodeBase64 ( hashBytes ) ) ) ; } mOssClient . putObject ( mBucketName , mKey , in , objMeta ) ; } catch ( ServiceException e ) { LOG . error ( "Failed to upload {}. Temporary file @ {}" , mKey , mFile . getPath ( ) ) ; throw new IOException ( e ) ; } mFile . delete ( ) ; } | Closes this output stream . When an output stream is closed the local temporary file is uploaded to OSS Service . Once the file is uploaded the temporary file is deleted . |
12,099 | public static long createFileId ( long containerId ) { long id = BlockId . createBlockId ( containerId , BlockId . getMaxSequenceNumber ( ) ) ; if ( id == INVALID_FILE_ID ) { LOG . warn ( "Created file id -1, which is invalid" ) ; } return id ; } | Creates an id for a file based on the given id of the container . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.