idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
27,100
/**
 * Copies each source item in {@code srcToDstItemNames} to its mapped destination URI
 * via a single batched {@code gcs.copy} call.
 *
 * <p>NOTE(review): the bucket names passed to {@code gcs.copy} are whichever were seen
 * on the last map entry — presumably all entries share one source bucket and one
 * destination bucket; confirm callers guarantee this.
 *
 * @param srcToDstItemNames map from source file info to destination URI; may be empty
 * @throws IOException if the underlying copy fails
 */
private void copyInternal(Map<FileInfo, URI> srcToDstItemNames) throws IOException {
  if (srcToDstItemNames.isEmpty()) {
    return;
  }
  String srcBucket = null;
  String dstBucket = null;
  List<String> srcObjects = new ArrayList<>(srcToDstItemNames.size());
  List<String> dstObjects = new ArrayList<>(srcToDstItemNames.size());
  for (Map.Entry<FileInfo, URI> entry : srcToDstItemNames.entrySet()) {
    StorageResourceId srcId = entry.getKey().getItemInfo().getResourceId();
    srcBucket = srcId.getBucketName();
    srcObjects.add(srcId.getObjectName());
    StorageResourceId dstId = pathCodec.validatePathAndGetId(entry.getValue(), true);
    dstBucket = dstId.getBucketName();
    dstObjects.add(dstId.getObjectName());
  }
  gcs.copy(srcBucket, srcObjects, dstBucket, dstObjects);
}
Copies items in the given map, which maps source items to destination items.
27,101
/**
 * Lists the names of the given item's children, or the item itself.
 *
 * <p>If {@code fileInfo} is an existing directory, returns the paths of its children:
 * all bucket paths for the global root, otherwise the object names under the directory
 * ({@code recursive} selects full recursion vs. single-level listing). A non-directory
 * item yields a singleton list with its own path; a non-existent directory yields an
 * empty list.
 *
 * @param fileInfo item whose children (or self) to list; must not be null
 * @param recursive whether to list all descendants instead of only direct children
 * @return the resulting paths
 * @throws IOException on storage access failure
 */
public List<URI> listFileNames(FileInfo fileInfo, boolean recursive) throws IOException {
  Preconditions.checkNotNull(fileInfo);
  URI path = fileInfo.getPath();
  logger.atFine().log("listFileNames(%s)", path);
  List<URI> paths = new ArrayList<>();
  if (!fileInfo.isDirectory()) {
    paths.add(path);
    logger.atFine().log(
        "listFileNames: added single original path since !isDirectory(): %s", path);
    return paths;
  }
  if (!fileInfo.exists()) {
    return paths;
  }
  if (fileInfo.isGlobalRoot()) {
    for (String bucketName : gcs.listBucketNames()) {
      URI childPath = pathCodec.getPath(bucketName, null, true);
      paths.add(childPath);
      logger.atFine().log("listFileNames: added: %s", childPath);
    }
  } else {
    // A null delimiter makes the listing recursive.
    String delimiter = recursive ? null : PATH_DELIMITER;
    GoogleCloudStorageItemInfo itemInfo = fileInfo.getItemInfo();
    List<String> childNames =
        gcs.listObjectNames(itemInfo.getBucketName(), itemInfo.getObjectName(), delimiter);
    for (String childName : childNames) {
      URI childPath = pathCodec.getPath(itemInfo.getBucketName(), childName, false);
      paths.add(childPath);
      logger.atFine().log("listFileNames: added: %s", childPath);
    }
  }
  return paths;
}
If the given item is a directory then the paths of its children are returned otherwise the path of the given item is returned .
27,102
/**
 * Lists file info for {@code path}: the item itself when it names an existing
 * non-directory object, otherwise the contents of the corresponding directory path.
 *
 * <p>The directory-info fetch is submitted to a fresh single-thread daemon executor so
 * it overlaps the non-directory lookup; the executor is shut down (and shutdownNow'd in
 * the finally block) in all cases. Directory results are sorted with
 * FILE_INFO_PATH_COMPARATOR. Throws FileNotFoundException when neither the exact item
 * nor any directory contents exist. On InterruptedException the thread's interrupt
 * flag is restored before wrapping in IOException.
 *
 * @param path path to list; must not be null
 * @return sorted file infos for the path or its directory contents
 * @throws FileNotFoundException if nothing exists at the path
 * @throws IOException on storage access failure or interruption
 */
public List < FileInfo > listFileInfo ( URI path ) throws IOException { Preconditions . checkNotNull ( path , "path can not be null" ) ; logger . atFine ( ) . log ( "listFileInfo(%s)" , path ) ; StorageResourceId pathId = pathCodec . validatePathAndGetId ( path , true ) ; StorageResourceId dirId = pathCodec . validatePathAndGetId ( FileInfo . convertToDirectoryPath ( pathCodec , path ) , true ) ; ExecutorService dirExecutor = Executors . newSingleThreadExecutor ( DAEMON_THREAD_FACTORY ) ; try { Future < GoogleCloudStorageItemInfo > dirFuture = dirExecutor . submit ( ( ) -> gcs . getItemInfo ( dirId ) ) ; dirExecutor . shutdown ( ) ; if ( ! pathId . isDirectory ( ) ) { GoogleCloudStorageItemInfo pathInfo = gcs . getItemInfo ( pathId ) ; if ( pathInfo . exists ( ) ) { List < FileInfo > listedInfo = new ArrayList < > ( ) ; listedInfo . add ( FileInfo . fromItemInfo ( pathCodec , pathInfo ) ) ; return listedInfo ; } } try { GoogleCloudStorageItemInfo dirInfo = dirFuture . get ( ) ; List < GoogleCloudStorageItemInfo > dirItemInfos = dirId . isRoot ( ) ? gcs . listBucketInfo ( ) : gcs . listObjectInfo ( dirId . getBucketName ( ) , dirId . getObjectName ( ) , PATH_DELIMITER ) ; if ( ! dirInfo . exists ( ) && dirItemInfos . isEmpty ( ) ) { throw new FileNotFoundException ( "Item not found: " + path ) ; } List < FileInfo > fileInfos = FileInfo . fromItemInfos ( pathCodec , dirItemInfos ) ; fileInfos . sort ( FILE_INFO_PATH_COMPARATOR ) ; return fileInfos ; } catch ( InterruptedException | ExecutionException e ) { if ( e instanceof InterruptedException ) { Thread . currentThread ( ) . interrupt ( ) ; } throw new IOException ( String . format ( "Failed to listFileInfo for '%s'" , path ) , e ) ; } } finally { dirExecutor . shutdownNow ( ) ; } }
If the given path points to a directory then the information about its children is returned otherwise information about the given file is returned .
27,103
/**
 * Retrieves metadata for the item at {@code path}.
 *
 * @param path the path to look up; must not be null
 * @return file info for the path (existence is reported via the returned info)
 * @throws IOException on storage access failure
 */
public FileInfo getFileInfo(URI path) throws IOException {
  logger.atFine().log("getFileInfo(%s)", path);
  checkArgument(path != null, "path must not be null");
  StorageResourceId resourceId = pathCodec.validatePathAndGetId(path, true);
  boolean inferImplicitDirs = gcs.getOptions().isInferImplicitDirectoriesEnabled();
  FileInfo fileInfo =
      FileInfo.fromItemInfo(pathCodec, getFileInfoInternal(resourceId, inferImplicitDirs));
  logger.atFine().log("getFileInfo: %s", fileInfo);
  return fileInfo;
}
Gets information about the given path item .
27,104
/**
 * Releases resources held by this instance: the underlying storage client and both
 * background executors. Subsequent calls are no-ops because {@code gcs} is nulled on
 * the first call.
 */
public void close() {
  if (gcs == null) {
    return;
  }
  logger.atFine().log("close()");
  try {
    gcs.close();
  } finally {
    // Shut the executors down even if gcs.close() threw; null each field regardless
    // of whether its shutdown succeeds.
    gcs = null;
    if (updateTimestampsExecutor != null) {
      try {
        shutdownExecutor(updateTimestampsExecutor, 10);
      } finally {
        updateTimestampsExecutor = null;
      }
    }
    if (cachedExecutor != null) {
      try {
        shutdownExecutor(cachedExecutor, 5);
      } finally {
        cachedExecutor = null;
      }
    }
  }
}
Releases resources used by this instance .
27,105
/**
 * Creates a directory at {@code path}: a bucket when the path names a bucket root,
 * otherwise an empty directory-marker object, followed by a best-effort timestamp
 * update of parent directories.
 *
 * @param path directory to create; must not be null and must not be the GCS root
 * @throws IOException on storage access failure
 */
public void mkdir(URI path) throws IOException {
  logger.atFine().log("mkdir(%s)", path);
  Preconditions.checkNotNull(path);
  checkArgument(!path.equals(GCS_ROOT), "Cannot create root directory.");
  StorageResourceId resourceId = pathCodec.validatePathAndGetId(path, true);
  if (resourceId.isBucket()) {
    gcs.create(resourceId.getBucketName());
    return;
  }
  // Convert to the "dir/" form so the empty object is treated as a directory.
  gcs.createEmptyObject(FileInfo.convertToDirectoryPath(resourceId));
  tryUpdateTimestampsForParentDirectories(ImmutableList.of(path), ImmutableList.<URI>of());
}
Creates a directory at the specified path .
27,106
/**
 * Validates {@code bucketName} for use as part of a file system path.
 *
 * <p>Normalizes the name to file-path form, then rejects empty names and names with
 * characters outside the allowed set.
 *
 * @param bucketName candidate bucket name
 * @return the normalized bucket name
 * @throws IllegalArgumentException if the name is empty or contains invalid characters
 */
static String validateBucketName(String bucketName) {
  String normalized = FileInfo.convertToFilePath(bucketName);
  if (Strings.isNullOrEmpty(normalized)) {
    throw new IllegalArgumentException("Google Cloud Storage bucket name cannot be empty.");
  }
  if (!BUCKET_NAME_CHAR_MATCHER.matchesAllOf(normalized)) {
    throw new IllegalArgumentException(
        String.format(
            "Invalid bucket name '%s': bucket name must contain only 'a-z0-9_.-' characters.",
            normalized));
  }
  return normalized;
}
Validate the given bucket name to make sure that it can be used as a part of a file system path .
27,107
/**
 * Validates {@code objectName} for use as part of a file system path.
 *
 * <p>An empty name or a bare path delimiter becomes {@code ""} when
 * {@code allowEmptyObjectName} is set, otherwise it is rejected. Consecutive '/'
 * characters are rejected, and a single leading delimiter is stripped.
 *
 * @param objectName candidate object name; may be null
 * @param allowEmptyObjectName whether an empty object name is acceptable
 * @return the normalized object name
 * @throws IllegalArgumentException if the name is empty (and not allowed) or malformed
 */
static String validateObjectName(String objectName, boolean allowEmptyObjectName) {
  logger.atFine().log("validateObjectName('%s', %s)", objectName, allowEmptyObjectName);
  if (isNullOrEmpty(objectName) || objectName.equals(PATH_DELIMITER)) {
    if (!allowEmptyObjectName) {
      throw new IllegalArgumentException(
          "Google Cloud Storage path must include non-empty object name.");
    }
    objectName = "";
  }
  for (int i = 0; i < objectName.length() - 1; i++) {
    if (objectName.charAt(i) == '/' && objectName.charAt(i + 1) == '/') {
      throw new IllegalArgumentException(
          String.format(
              "Google Cloud Storage path must not have consecutive '/' characters, got '%s'",
              objectName));
    }
  }
  if (objectName.startsWith(PATH_DELIMITER)) {
    objectName = objectName.substring(1);
  }
  logger.atFine().log("validateObjectName -> '%s'", objectName);
  return objectName;
}
Validate the given object name to make sure that it can be used as a part of a file system path .
27,108
/**
 * Returns the leaf component of {@code path}: null for the GCS root, the bucket name
 * for a bucket path, otherwise the final segment of the object name (a trailing
 * delimiter, if present, is retained as part of the leaf).
 *
 * @param path path to inspect; must not be null
 * @return the leaf item name, or null for the root
 */
String getItemName(URI path) {
  Preconditions.checkNotNull(path);
  if (path.equals(GCS_ROOT)) {
    return null;
  }
  StorageResourceId resourceId = pathCodec.validatePathAndGetId(path, true);
  if (resourceId.isBucket()) {
    return resourceId.getBucketName();
  }
  String objectName = resourceId.getObjectName();
  // For "dir/" style names, search before the trailing delimiter.
  int index =
      FileInfo.objectHasDirectoryPath(objectName)
          ? objectName.lastIndexOf(PATH_DELIMITER, objectName.length() - 2)
          : objectName.lastIndexOf(PATH_DELIMITER);
  return index < 0 ? objectName : objectName.substring(index + 1);
}
Gets the leaf item of the given path .
27,109
/**
 * Validates {@code uri} using the legacy path codec and converts it to a
 * {@link StorageResourceId}.
 *
 * @param uri the path to validate
 * @param allowEmptyObjectNames whether a bucket-only path (no object name) is acceptable
 * @return the resource id for the path
 */
public static StorageResourceId validatePathAndGetId(URI uri, boolean allowEmptyObjectNames) {
  return LEGACY_PATH_CODEC.validatePathAndGetId(uri, allowEmptyObjectNames);
}
Validate a URI using the legacy path codec and return a StorageResourceId .
27,110
/**
 * Returns the parent directory of {@code path}: null for the GCS root, the root for a
 * bucket path, the bucket path for a top-level object, otherwise the containing
 * directory path including its trailing delimiter.
 *
 * @param pathCodec codec used to parse and build paths
 * @param path path whose parent to compute; must not be null
 * @return the parent path, or null when {@code path} is the root
 */
public static URI getParentPath(PathCodec pathCodec, URI path) {
  Preconditions.checkNotNull(path);
  if (path.equals(GCS_ROOT)) {
    return null;
  }
  StorageResourceId resourceId = pathCodec.validatePathAndGetId(path, true);
  if (resourceId.isBucket()) {
    return GCS_ROOT;
  }
  String objectName = resourceId.getObjectName();
  // For "dir/" style names, search before the trailing delimiter.
  int index =
      FileInfo.objectHasDirectoryPath(objectName)
          ? objectName.lastIndexOf(PATH_DELIMITER, objectName.length() - 2)
          : objectName.lastIndexOf(PATH_DELIMITER);
  if (index < 0) {
    return pathCodec.getPath(resourceId.getBucketName(), null, true);
  }
  return pathCodec.getPath(resourceId.getBucketName(), objectName.substring(0, index + 1), false);
}
Gets the parent directory of the given path .
27,111
/**
 * Commits the job via the delegate committer, then runs a BigQuery federated import
 * over the files written to the output path.
 *
 * @param context the job context carrying the output configuration
 * @throws IOException if the delegate commit or the BigQuery import fails
 */
@Override
public void commitJob(JobContext context) throws IOException {
  super.commitJob(context);
  Configuration conf = context.getConfiguration();
  TableReference destTable = BigQueryOutputConfiguration.getTableReference(conf);
  String destProjectId = BigQueryOutputConfiguration.getProjectId(conf);
  Optional<BigQueryTableSchema> destSchema = BigQueryOutputConfiguration.getTableSchema(conf);
  BigQueryFileFormat outputFileFormat = BigQueryOutputConfiguration.getFileFormat(conf);
  List<String> sourceUris = getOutputFileURIs();
  getBigQueryHelper()
      .importFederatedFromGcs(
          destProjectId,
          destTable,
          destSchema.isPresent() ? destSchema.get().get() : null,
          outputFileFormat,
          sourceUris);
}
Runs a federated import job on BigQuery for the data in the output path, in addition to calling the delegate's commitJob.
27,112
/**
 * Sets {@code rootBucket} from the authority of {@code initUri} and validates it by
 * building a path for it (presumably getPath rejects invalid bucket names — the
 * return value is intentionally discarded).
 *
 * @param gcsFs the file system being configured
 * @throws IOException declared by the configuration hook
 */
protected void configureBuckets(GoogleCloudStorageFileSystem gcsFs) throws IOException {
  rootBucket = initUri.getAuthority();
  checkArgument(rootBucket != null, "No bucket specified in GCS URI: %s", initUri);
  // Validation side effect only; result not needed.
  pathCodec.getPath(rootBucket, null, true);
  logger.atFine().log(
      "GHFS.configureBuckets: GoogleHadoopFileSystem root in bucket: %s", rootBucket);
}
Sets and validates the root bucket .
27,113
/**
 * Converts a GCS path into the corresponding Hadoop path for this file system. The
 * path's bucket must match the root bucket supplied at initialization.
 *
 * @param gcsPath GCS path to convert
 * @return the equivalent Hadoop path
 * @throws IllegalArgumentException if the path has no authority or a different bucket
 */
public Path getHadoopPath(URI gcsPath) {
  logger.atFine().log("GHFS.getHadoopPath: %s", gcsPath);
  if (gcsPath.equals(getGcsPath(getFileSystemRoot()))) {
    return getFileSystemRoot();
  }
  StorageResourceId resourceId = pathCodec.validatePathAndGetId(gcsPath, true);
  checkArgument(!resourceId.isRoot(), "Missing authority in gcsPath '%s'", gcsPath);
  checkArgument(
      resourceId.getBucketName().equals(rootBucket),
      "Authority of URI '%s' doesn't match root bucket '%s'",
      resourceId.getBucketName(),
      rootBucket);
  Path hadoopPath = new Path(getScheme() + "://" + rootBucket + '/' + resourceId.getObjectName());
  logger.atFine().log("GHFS.getHadoopPath: %s -> %s", gcsPath, hadoopPath);
  return hadoopPath;
}
Validates GCS Path belongs to this file system . The bucket must match the root bucket provided at initialization time .
27,114
/**
 * Builds a JSON batch callback for a single object-deletion request.
 *
 * <p>On failure: a not-found error is treated as already deleted, a precondition
 * failure is retried up to {@code MAXIMUM_PRECONDITION_FAILURES_IN_DELETE} times by
 * re-queueing the delete with an incremented attempt count, and any other error is
 * recorded in {@code innerExceptions}.
 *
 * @param resourceId object being deleted
 * @param innerExceptions sink for unrecoverable per-item failures
 * @param batchHelper batch used to re-queue retries
 * @param attempt attempt number for this delete
 * @param generation object generation the delete is conditioned on
 * @return the callback to attach to the batched delete request
 */
private JsonBatchCallback<Void> getDeletionCallback(
    final StorageResourceId resourceId,
    final KeySetView<IOException, Boolean> innerExceptions,
    final BatchHelper batchHelper,
    final int attempt,
    final long generation) {
  return new JsonBatchCallback<Void>() {
    @Override
    public void onSuccess(Void obj, HttpHeaders responseHeaders) {
      logger.atFine().log("Successfully deleted %s at generation %s", resourceId, generation);
    }

    @Override
    public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException {
      if (errorExtractor.itemNotFound(e)) {
        // Object is already gone; treat as success.
        logger.atFine().log("deleteObjects(%s): delete not found:%n%s", resourceId, e);
      } else if (errorExtractor.preconditionNotMet(e)
          && attempt <= MAXIMUM_PRECONDITION_FAILURES_IN_DELETE) {
        logger.atInfo().log(
            "Precondition not met while deleting %s at generation %s. Attempt %s. Retrying:%n%s",
            resourceId, generation, attempt, e);
        queueSingleObjectDelete(resourceId, innerExceptions, batchHelper, attempt + 1);
      } else {
        innerExceptions.add(
            new IOException(
                String.format(
                    "Error deleting %s, stage 2 with generation %s:%n%s",
                    resourceId, generation, e)));
      }
    }
  };
}
Helper to create a callback for a particular deletion request .
27,115
/**
 * Queues a GCS Rewrite request to copy one object, honoring the configured
 * max-bytes-rewritten-per-call limit when positive.
 *
 * <p>The success callback resumes incomplete rewrites: when the response is not done,
 * it builds a follow-up rewrite request carrying the returned rewrite token and
 * queues it with this same callback, so large copies proceed across multiple batched
 * calls. Failures go to onCopyFailure; IOExceptions raised while constructing the
 * resume request are collected in {@code innerExceptions}.
 *
 * @param batchHelper batch to queue requests on
 * @param innerExceptions sink for per-item failures
 * @param srcBucketName source bucket
 * @param srcObjectName source object
 * @param dstBucketName destination bucket
 * @param dstObjectName destination object
 * @throws IOException if the initial request cannot be constructed or queued
 */
private void rewriteInternal ( final BatchHelper batchHelper , final KeySetView < IOException , Boolean > innerExceptions , final String srcBucketName , final String srcObjectName , final String dstBucketName , final String dstObjectName ) throws IOException { Storage . Objects . Rewrite rewriteObject = configureRequest ( gcs . objects ( ) . rewrite ( srcBucketName , srcObjectName , dstBucketName , dstObjectName , null ) , srcBucketName ) ; if ( storageOptions . getMaxBytesRewrittenPerCall ( ) > 0 ) { rewriteObject . setMaxBytesRewrittenPerCall ( storageOptions . getMaxBytesRewrittenPerCall ( ) ) ; } batchHelper . queue ( rewriteObject , new JsonBatchCallback < RewriteResponse > ( ) { public void onSuccess ( RewriteResponse rewriteResponse , HttpHeaders responseHeaders ) { String srcString = StorageResourceId . createReadableString ( srcBucketName , srcObjectName ) ; String dstString = StorageResourceId . createReadableString ( dstBucketName , dstObjectName ) ; if ( rewriteResponse . getDone ( ) ) { logger . atFine ( ) . log ( "Successfully copied %s to %s" , srcString , dstString ) ; } else { logger . atFine ( ) . log ( "Copy (%s to %s) did not complete. Resuming..." , srcString , dstString ) ; try { Storage . Objects . Rewrite rewriteObjectWithToken = configureRequest ( gcs . objects ( ) . rewrite ( srcBucketName , srcObjectName , dstBucketName , dstObjectName , null ) , srcBucketName ) ; if ( storageOptions . getMaxBytesRewrittenPerCall ( ) > 0 ) { rewriteObjectWithToken . setMaxBytesRewrittenPerCall ( storageOptions . getMaxBytesRewrittenPerCall ( ) ) ; } rewriteObjectWithToken . setRewriteToken ( rewriteResponse . getRewriteToken ( ) ) ; batchHelper . queue ( rewriteObjectWithToken , this ) ; } catch ( IOException e ) { innerExceptions . add ( e ) ; } } } public void onFailure ( GoogleJsonError e , HttpHeaders responseHeaders ) { onCopyFailure ( innerExceptions , e , srcBucketName , srcObjectName ) ; } } ) ; }
Performs copy operation using GCS Rewrite requests
27,116
/**
 * Queues a single GCS Copy request on the batch; success is logged, failure is routed
 * to {@code onCopyFailure}.
 *
 * @param batchHelper batch to queue the request on
 * @param innerExceptions sink for per-item failures
 * @param srcBucketName source bucket
 * @param srcObjectName source object
 * @param dstBucketName destination bucket
 * @param dstObjectName destination object
 * @throws IOException if the request cannot be constructed or queued
 */
private void copyInternal(
    BatchHelper batchHelper,
    final KeySetView<IOException, Boolean> innerExceptions,
    final String srcBucketName,
    final String srcObjectName,
    final String dstBucketName,
    final String dstObjectName)
    throws IOException {
  Storage.Objects.Copy copyObject =
      configureRequest(
          gcs.objects().copy(srcBucketName, srcObjectName, dstBucketName, dstObjectName, null),
          srcBucketName);
  batchHelper.queue(
      copyObject,
      new JsonBatchCallback<StorageObject>() {
        @Override
        public void onSuccess(StorageObject copyResponse, HttpHeaders responseHeaders) {
          logger.atFine().log(
              "Successfully copied %s to %s",
              StorageResourceId.createReadableString(srcBucketName, srcObjectName),
              StorageResourceId.createReadableString(dstBucketName, dstObjectName));
        }

        @Override
        public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) {
          onCopyFailure(innerExceptions, e, srcBucketName, srcObjectName);
        }
      });
}
Performs copy operation using GCS Copy requests
27,117
/**
 * Records a failed copy request in {@code innerExceptions}, converting a not-found
 * error into a {@link FileNotFoundException} for the source object.
 *
 * @param innerExceptions sink for failures
 * @param e the batch error returned by GCS
 * @param srcBucketName source bucket of the failed copy
 * @param srcObjectName source object of the failed copy
 */
private void onCopyFailure(
    KeySetView<IOException, Boolean> innerExceptions,
    GoogleJsonError e,
    String srcBucketName,
    String srcObjectName) {
  if (errorExtractor.itemNotFound(e)) {
    FileNotFoundException fnfe =
        GoogleCloudStorageExceptions.getFileNotFoundException(srcBucketName, srcObjectName);
    innerExceptions.add((FileNotFoundException) fnfe.initCause(new IOException(e.toString())));
  } else {
    innerExceptions.add(
        new IOException(
            String.format(
                "Error copying %s:%n%s",
                StorageResourceId.createReadableString(srcBucketName, srcObjectName), e)));
  }
}
Processes failed copy requests
27,118
/**
 * Converts listing prefixes without backing prefix objects into inferred directory
 * entries when implicit-directory inference is enabled; otherwise logs and drops them.
 *
 * @param bucketName bucket the prefixes belong to
 * @param prefixes prefixes returned by the listing
 * @param objectInfos output list to append inferred directory infos to
 */
private void handlePrefixes(
    String bucketName, List<String> prefixes, List<GoogleCloudStorageItemInfo> objectInfos) {
  if (!storageOptions.isInferImplicitDirectoriesEnabled()) {
    logger.atInfo().log(
        "Inferred directories are disabled, giving up on retrieving missing directories: %s",
        prefixes);
    return;
  }
  for (String prefix : prefixes) {
    objectInfos.add(
        GoogleCloudStorageItemInfo.createInferredDirectory(
            new StorageResourceId(bucketName, prefix)));
  }
}
Handle prefixes without prefix objects .
27,119
/**
 * Converts a {@link StorageResourceId} plus its {@code Bucket} metadata into a
 * {@link GoogleCloudStorageItemInfo}.
 *
 * @param resourceId bucket resource id; must be a bucket and match {@code bucket}'s name
 * @param bucket bucket metadata from the API
 * @return item info carrying the bucket's creation time, location and storage class
 * @throws IllegalArgumentException if the arguments are null, mismatched, or not a bucket
 */
public static GoogleCloudStorageItemInfo createItemInfoForBucket(
    StorageResourceId resourceId, Bucket bucket) {
  Preconditions.checkArgument(resourceId != null, "resourceId must not be null");
  Preconditions.checkArgument(bucket != null, "bucket must not be null");
  Preconditions.checkArgument(
      resourceId.isBucket(), "resourceId must be a Bucket. resourceId: %s", resourceId);
  Preconditions.checkArgument(
      resourceId.getBucketName().equals(bucket.getName()),
      "resourceId.getBucketName() must equal bucket.getName(): '%s' vs '%s'",
      resourceId.getBucketName(),
      bucket.getName());
  return new GoogleCloudStorageItemInfo(
      resourceId, bucket.getTimeCreated().getValue(), 0, bucket.getLocation(),
      bucket.getStorageClass());
}
Helper for converting a StorageResourceId + Bucket into a GoogleCloudStorageItemInfo .
27,120
/**
 * Converts a {@link StorageResourceId} plus its {@code StorageObject} metadata into a
 * {@link GoogleCloudStorageItemInfo}, decoding user metadata and the base64-encoded
 * MD5/CRC32C hashes when present.
 *
 * @param resourceId object resource id; must match the object's bucket and name
 * @param object object metadata from the API
 * @return item info with update time, size, metadata, generations and hashes
 * @throws IllegalArgumentException if the arguments are null, mismatched, or not an object
 */
public static GoogleCloudStorageItemInfo createItemInfoForStorageObject(
    StorageResourceId resourceId, StorageObject object) {
  Preconditions.checkArgument(resourceId != null, "resourceId must not be null");
  Preconditions.checkArgument(object != null, "object must not be null");
  Preconditions.checkArgument(
      resourceId.isStorageObject(),
      "resourceId must be a StorageObject. resourceId: %s",
      resourceId);
  Preconditions.checkArgument(
      resourceId.getBucketName().equals(object.getBucket()),
      "resourceId.getBucketName() must equal object.getBucket(): '%s' vs '%s'",
      resourceId.getBucketName(),
      object.getBucket());
  Preconditions.checkArgument(
      resourceId.getObjectName().equals(object.getName()),
      "resourceId.getObjectName() must equal object.getName(): '%s' vs '%s'",
      resourceId.getObjectName(),
      object.getName());
  Map<String, byte[]> decodedMetadata =
      object.getMetadata() == null ? null : decodeMetadata(object.getMetadata());
  byte[] md5Hash = null;
  byte[] crc32c = null;
  if (!Strings.isNullOrEmpty(object.getCrc32c())) {
    crc32c = BaseEncoding.base64().decode(object.getCrc32c());
  }
  if (!Strings.isNullOrEmpty(object.getMd5Hash())) {
    md5Hash = BaseEncoding.base64().decode(object.getMd5Hash());
  }
  return new GoogleCloudStorageItemInfo(
      resourceId,
      object.getUpdated().getValue(),
      object.getSize().longValue(),
      null,
      null,
      object.getContentType(),
      object.getContentEncoding(),
      decodedMetadata,
      object.getGeneration(),
      object.getMetageneration(),
      new VerificationAttributes(md5Hash, crc32c));
}
Helper for converting a StorageResourceId + StorageObject into a GoogleCloudStorageItemInfo .
27,121
/**
 * Fetches metadata for the named bucket.
 *
 * @param bucketName name of the bucket; must be non-empty
 * @return the bucket, or null if it does not exist
 * @throws IOException on any other access failure
 */
private Bucket getBucket(String bucketName) throws IOException {
  logger.atFine().log("getBucket(%s)", bucketName);
  checkArgument(!Strings.isNullOrEmpty(bucketName), "bucketName must not be null or empty");
  Storage.Buckets.Get getBucket = configureRequest(gcs.buckets().get(bucketName), bucketName);
  try {
    return getBucket.execute();
  } catch (IOException e) {
    if (errorExtractor.itemNotFound(e)) {
      // Missing bucket is an expected condition; report it as null rather than throwing.
      logger.atFine().withCause(e).log("getBucket(%s): not found", bucketName);
      return null;
    }
    throw new IOException("Error accessing Bucket " + bucketName, e);
  }
}
Gets the bucket with the given name .
27,122
/**
 * Determines the generation to condition a write on: 0 when the object does not exist
 * (create-if-absent), the current content generation when it exists and overwriting is
 * allowed, otherwise fails.
 *
 * @param resourceId object to be written
 * @param overwritable whether an existing object may be overwritten
 * @return the generation to use as the write precondition
 * @throws FileAlreadyExistsException if the object exists and overwrite is not allowed
 * @throws IOException on metadata access failure
 */
private long getWriteGeneration(StorageResourceId resourceId, boolean overwritable)
    throws IOException {
  logger.atFine().log("getWriteGeneration(%s, %s)", resourceId, overwritable);
  GoogleCloudStorageItemInfo info = getItemInfo(resourceId);
  if (!info.exists()) {
    return 0L;
  }
  // The object exists from here on (the original code redundantly re-checked exists()).
  if (overwritable) {
    long generation = info.getContentGeneration();
    Preconditions.checkState(generation != 0, "Generation should not be 0 for an existing item");
    return generation;
  }
  throw new FileAlreadyExistsException(String.format("Object %s already exists.", resourceId));
}
Gets the object generation for a Write operation
27,123
/**
 * Fetches metadata for the object identified by {@code resourceId}.
 *
 * @param resourceId full object id (bucket + object name)
 * @return the storage object, or null if it does not exist
 * @throws IOException on any other access failure
 */
private StorageObject getObject(StorageResourceId resourceId) throws IOException {
  logger.atFine().log("getObject(%s)", resourceId);
  Preconditions.checkArgument(
      resourceId.isStorageObject(), "Expected full StorageObject id, got %s", resourceId);
  String bucketName = resourceId.getBucketName();
  String objectName = resourceId.getObjectName();
  Storage.Objects.Get getObject =
      configureRequest(gcs.objects().get(bucketName, objectName), bucketName);
  try {
    return getObject.execute();
  } catch (IOException e) {
    if (errorExtractor.itemNotFound(e)) {
      // Missing object is an expected condition; report it as null rather than throwing.
      logger.atFine().withCause(e).log("getObject(%s): not found", resourceId);
      return null;
    }
    throw new IOException("Error accessing " + resourceId, e);
  }
}
Gets the object with the given resourceId .
27,124
/**
 * Returns the shared {@code HttpTransport}, lazily creating it on first use.
 * Synchronized so concurrent first calls create only one instance.
 *
 * @return the shared transport instance
 * @throws IOException if transport creation fails
 * @throws GeneralSecurityException if transport creation fails
 */
private static synchronized HttpTransport getStaticHttpTransport()
    throws IOException, GeneralSecurityException {
  if (staticHttpTransport == null) {
    staticHttpTransport = HttpTransportFactory.createHttpTransport(HttpTransportType.JAVA_NET);
  }
  return staticHttpTransport;
}
Returns the shared staticHttpTransport instance; initializes staticHttpTransport if it hasn't already been initialized.
27,125
/**
 * Loads a service-account credential from a JSON key file and applies the given scopes.
 *
 * @param serviceAccountJsonKeyFile path to the JSON key file
 * @param scopes OAuth scopes to apply to the credential
 * @param transport HTTP transport the credential will use
 * @return a retry-wrapped credential built from the key file
 * @throws IOException if the file cannot be read or parsed
 * @throws GeneralSecurityException declared by the credential API
 */
public Credential getCredentialFromJsonKeyFile(
    String serviceAccountJsonKeyFile, List<String> scopes, HttpTransport transport)
    throws IOException, GeneralSecurityException {
  logger.atFine().log("getCredentialFromJsonKeyFile(%s, %s)", serviceAccountJsonKeyFile, scopes);
  try (FileInputStream fis = new FileInputStream(serviceAccountJsonKeyFile)) {
    GoogleCredential scoped =
        GoogleCredential.fromStream(fis, transport, JSON_FACTORY).createScoped(scopes);
    return GoogleCredentialWithRetry.fromGoogleCredential(scoped);
  }
}
Get credentials listed in a JSON file .
27,126
/**
 * Advances to the next key/value record.
 *
 * <p>Order of operations: (1) drain the current delegate reader if one is open,
 * closing it when exhausted; (2) while no file is ready and more files are expected,
 * refresh the file list, sleeping {@code pollIntervalMs} and reporting progress
 * between refreshes (sleep interruptions are logged, not propagated); (3) open a
 * delegate reader over the next file — a file whose reader yields no records at all
 * is recorded as the end-of-stream marker and the method recurses to continue with
 * the next file. May block for a long time waiting for files to appear in this
 * reader's input directory.
 *
 * @return true if a record was read, false at end of stream
 * @throws IOException on listing or reading failure
 * @throws InterruptedException declared by the RecordReader contract
 */
public boolean nextKeyValue ( ) throws IOException , InterruptedException { currentValue = null ; if ( delegateReader != null ) { if ( delegateReader . nextKeyValue ( ) ) { populateCurrentKeyValue ( ) ; return true ; } else { delegateReader . close ( ) ; delegateReader = null ; } } boolean needRefresh = ! isNextFileReady ( ) && shouldExpectMoreFiles ( ) ; while ( needRefresh ) { logger . atFine ( ) . log ( "No files available, but more are expected; refreshing..." ) ; refreshFileList ( ) ; needRefresh = ! isNextFileReady ( ) && shouldExpectMoreFiles ( ) ; if ( needRefresh ) { logger . atFine ( ) . log ( "No new files found, sleeping before trying again..." ) ; try { sleeper . sleep ( pollIntervalMs ) ; context . progress ( ) ; } catch ( InterruptedException ie ) { logger . atWarning ( ) . withCause ( ie ) . log ( "Interrupted while sleeping." ) ; } } } if ( isNextFileReady ( ) ) { FileStatus newFile = moveToNextFile ( ) ; logger . atInfo ( ) . log ( "Moving to next file '%s' which has %s bytes. Records read so far: %s" , newFile . getPath ( ) , newFile . getLen ( ) , recordsRead ) ; InputSplit split = new FileSplit ( newFile . getPath ( ) , 0 , newFile . getLen ( ) , new String [ 0 ] ) ; delegateReader = delegateRecordReaderFactory . createDelegateRecordReader ( split , context . getConfiguration ( ) ) ; delegateReader . initialize ( split , context ) ; if ( ! delegateReader . nextKeyValue ( ) ) { setEndFileMarkerFile ( newFile . getPath ( ) . getName ( ) ) ; return nextKeyValue ( ) ; } else { populateCurrentKeyValue ( ) ; return true ; } } Preconditions . checkState ( ! shouldExpectMoreFiles ( ) , "Should not have exited the refresh loop shouldExpectMoreFiles = true " + "and no files ready to read." ) ; return false ; }
Reads the next key value pair. Gets the next line and parses the JSON object. May hang for a long time waiting for more files to appear in this reader's directory.
27,127
/**
 * Closes the record reader, warning when a delegate reader is still open — that
 * indicates close() was called before the input was exhausted.
 *
 * @throws IOException if closing the delegate fails
 */
public void close() throws IOException {
  if (delegateReader == null) {
    return;
  }
  logger.atWarning().log(
      "Got non-null delegateReader during close(); possible premature close() call.");
  delegateReader.close();
  delegateReader = null;
}
Closes the record reader .
27,128
/**
 * Records {@code fileName} as the zero-record end-of-stream marker.
 *
 * <p>On first sighting, captures its index as {@code endFileNumber} and verifies every
 * already-known file has an index no greater than it. On later sightings, verifies the
 * marker index has not changed.
 *
 * @param fileName name of the marker file
 * @throws IllegalStateException if file indices are inconsistent with the marker
 */
private void setEndFileMarkerFile(String fileName) {
  int fileIndex = parseFileIndex(fileName);
  if (endFileNumber != -1) {
    Preconditions.checkState(
        fileIndex == endFileNumber,
        "Found new end-marker file '%s' with index %s but already have endFileNumber %s!",
        fileName,
        fileIndex,
        endFileNumber);
    return;
  }
  endFileNumber = fileIndex;
  logger.atInfo().log("Found end-marker file '%s' with index %s", fileName, endFileNumber);
  for (String knownFile : knownFileSet) {
    int knownFileIndex = parseFileIndex(knownFile);
    // Message fixed: was "less than or equal to than endFileNumber".
    Preconditions.checkState(
        knownFileIndex <= endFileNumber,
        "Found known file '%s' with index %s, which isn't less than or equal to endFileNumber %s!",
        knownFile,
        knownFileIndex,
        endFileNumber);
  }
}
Record a specific file as being the 0-record end-of-stream marker.
27,129
/**
 * Lists files matching the input pattern and adds any newly discovered ones to the
 * known set and the read queue. When the end marker is already known, verifies no new
 * file appears at or beyond the marker index.
 *
 * @throws IOException on listing failure
 */
private void refreshFileList() throws IOException {
  FileStatus[] files = fileSystem.globStatus(inputDirectoryAndPattern);
  if (files == null) {
    // Hadoop's globStatus returns null when the path does not exist (yet); the
    // original code would NPE here. Treat it as "no files found".
    return;
  }
  for (FileStatus file : files) {
    String fileName = file.getPath().getName();
    if (knownFileSet.contains(fileName)) {
      continue;
    }
    if (endFileNumber != -1) {
      int newFileIndex = parseFileIndex(fileName);
      Preconditions.checkState(
          newFileIndex < endFileNumber,
          "Found new file '%s' with index %s, which isn't less than endFileNumber %s!",
          fileName,
          newFileIndex,
          endFileNumber);
    }
    logger.atInfo().log(
        "Adding new file '%s' of size %s to knownFileSet.", fileName, file.getLen());
    knownFileSet.add(fileName);
    fileQueue.add(file);
  }
}
Lists files and sifts through the results for any new files we haven't found before. If a file of size 0 is found we mark the endFileNumber from it.
27,130
/**
 * Binds to an existing delegation token from the FS owner's credentials when one is
 * present, otherwise deploys the unbonded binding; then verifies that an access token
 * provider was produced.
 *
 * @throws IOException if binding fails
 * @throws DelegationTokenIOException if no AccessTokenProvider was created
 */
public void bindToAnyDelegationToken() throws IOException {
  validateAccessTokenProvider();
  Token<DelegationTokenIdentifier> token = selectTokenFromFsOwner();
  if (token == null) {
    deployUnbonded();
  } else {
    bindToDelegationToken(token);
  }
  if (accessTokenProvider == null) {
    throw new DelegationTokenIOException(
        "No AccessTokenProvider created by Delegation Token Binding " + tokenBinding.getKind());
  }
}
Attempt to bind to any existing DT including unmarshalling its contents and creating the GCP credential provider used to authenticate the client .
27,131
/**
 * Looks up a delegation token for this filesystem's service name in the owner's
 * credentials.
 *
 * @return the matching token, or null if none is present
 * @throws IOException if a token of the wrong kind is found
 */
public Token<DelegationTokenIdentifier> selectTokenFromFsOwner() throws IOException {
  return lookupToken(user.getCredentials(), service, tokenBinding.getKind());
}
Find a token for the FS user and service name .
27,132
/**
 * Binds this filesystem to the given delegation token: extracts its identifier and
 * creates the access token provider from it.
 *
 * @param token the delegation token to bind to
 * @throws IOException if the identifier cannot be extracted or binding fails
 */
public void bindToDelegationToken(Token<DelegationTokenIdentifier> token) throws IOException {
  validateAccessTokenProvider();
  boundDT = token;
  DelegationTokenIdentifier dti = extractIdentifier(token);
  logger.atInfo().log("Using delegation token %s", dti);
  // The binding turns the unmarshalled identifier into a live credential provider.
  accessTokenProvider = tokenBinding.bindToTokenIdentifier(dti);
}
Bind to a delegation token retrieved for this filesystem . Extract the secrets from the token and set internal fields to the values .
27,133
/**
 * Returns the currently bound delegation token when one exists, otherwise asks the
 * token binding to create a new one for {@code renewer}.
 *
 * @param renewer principal allowed to renew the newly created token
 * @return a bound or freshly created delegation token
 * @throws IOException if token creation fails
 */
@SuppressWarnings("OptionalGetWithoutIsPresent")
public Token<DelegationTokenIdentifier> getBoundOrNewDT(String renewer) throws IOException {
  logger.atFine().log("Delegation token requested");
  if (isBoundToDT()) {
    logger.atFine().log("Returning current token");
    return getBoundDT();
  }
  return tokenBinding.createDelegationToken(renewer);
}
Get any bound DT or create a new one .
27,134
/**
 * Decodes the identifier from a delegation token, converting decoding failures into
 * {@link DelegationTokenIOException}s.
 *
 * @param token token to decode; must not be null
 * @return the decoded identifier, never null
 * @throws DelegationTokenIOException if decoding fails with a cause or yields null
 * @throws IOException declared by the decode API
 */
public static DelegationTokenIdentifier extractIdentifier(
    final Token<? extends DelegationTokenIdentifier> token) throws IOException {
  checkArgument(token != null, "null token");
  DelegationTokenIdentifier identifier;
  try {
    identifier = token.decodeIdentifier();
  } catch (RuntimeException e) {
    Throwable cause = e.getCause();
    // A causeless RuntimeException is rethrown untouched; otherwise wrap the cause.
    if (cause == null) {
      throw e;
    }
    throw new DelegationTokenIOException("Decoding GCS token " + cause, cause);
  }
  if (identifier == null) {
    throw new DelegationTokenIOException("Failed to unmarshall token " + token);
  }
  return identifier;
}
From a token get the session token identifier .
27,135
/**
 * Finds the token for {@code service} in {@code credentials} and verifies it is of the
 * expected kind.
 *
 * @param credentials credentials to search
 * @param service service name the token was issued for
 * @param kind expected token kind
 * @return the token, or null when no token exists for the service
 * @throws DelegationTokenIOException if a token exists but has a different kind
 */
@SuppressWarnings("unchecked")
private static Token<DelegationTokenIdentifier> lookupToken(
    Credentials credentials, Text service, Text kind) throws DelegationTokenIOException {
  logger.atFine().log("Looking for token for service %s in credentials", service);
  Token<?> token = credentials.getToken(service);
  if (token == null) {
    logger.atFine().log("No token found for %s", service);
    return null;
  }
  Text tokenKind = token.getKind();
  logger.atFine().log("Found token of kind %s", tokenKind);
  if (!kind.equals(tokenKind)) {
    throw DelegationTokenIOException.tokenMismatch(service, kind, tokenKind);
  }
  return (Token<DelegationTokenIdentifier>) token;
}
Look up a token from the credentials verify it is of the correct kind .
27,136
/**
 * Returns the value of {@code key} from {@code config}, failing when it is unset or
 * empty.
 *
 * @param config configuration to read
 * @param key configuration key that must have a value
 * @return the non-empty value
 * @throws IOException if the key has no value
 */
public static String getMandatoryConfig(Configuration config, String key) throws IOException {
  String value = config.get(key);
  if (Strings.isNullOrEmpty(value)) {
    throw new IOException("Must supply a value for configuration setting: " + key);
  }
  return value;
}
Gets value for the given key or throws if value is not found .
27,137
/**
 * Returns values for all {@code keys} from {@code config}, failing if any is unset or
 * empty; the error message lists every missing key.
 *
 * @param config configuration to read
 * @param keys keys that must all have values
 * @return map from key to its non-empty value
 * @throws IOException if one or more keys have no value
 */
public static Map<String, String> getMandatoryConfig(Configuration config, List<String> keys)
    throws IOException {
  List<String> missingKeys = new ArrayList<>();
  Map<String, String> values = new HashMap<>();
  for (String key : keys) {
    String value = config.get(key);
    if (Strings.isNullOrEmpty(value)) {
      missingKeys.add(key);
    } else {
      values.put(key, value);
    }
  }
  if (!missingKeys.isEmpty()) {
    // String.join replaces the Guava Joiner; produces the identical ", "-separated list.
    throw new IOException(
        "Must supply value for configuration settings: " + String.join(", ", missingKeys));
  }
  return values;
}
Gets value for the given keys or throws if one or more values are not found .
27,138
/**
 * Sets the size of the buffer used for resumable uploads.
 *
 * <p>The chunk size must be positive and a multiple of
 * {@code MediaHttpUploader.MINIMUM_CHUNK_SIZE}; for best performance it should also be a
 * multiple of {@code GCS_UPLOAD_GRANULARITY} (only a warning is logged otherwise).
 *
 * @param chunkSize upload chunk size in bytes
 */
public void setUploadChunkSize(int chunkSize) {
  // Fixed typo in the error message: "great than" -> "greater than".
  Preconditions.checkArgument(chunkSize > 0, "Upload chunk size must be greater than 0.");
  Preconditions.checkArgument(
      chunkSize % MediaHttpUploader.MINIMUM_CHUNK_SIZE == 0,
      "Upload chunk size must be a multiple of MediaHttpUploader.MINIMUM_CHUNK_SIZE");
  if ((chunkSize > GCS_UPLOAD_GRANULARITY) && (chunkSize % GCS_UPLOAD_GRANULARITY != 0)) {
    logger.atWarning().log(
        "Upload chunk size should be a multiple of %s for best performance, got %s",
        GCS_UPLOAD_GRANULARITY, chunkSize);
  }
  uploadChunkSize = chunkSize;
}
Sets size of upload buffer used .
27,139
/**
 * Writes the contents of the given buffer into the upload pipe.
 *
 * <p>If the background upload has already finished (normally it only finishes on failure while
 * writes are still arriving), its outcome is surfaced before attempting the write.
 *
 * @param buffer data to write
 * @return number of bytes written
 * @throws ClosedChannelException if the channel has been closed
 * @throws IOException if the upload has failed or the pipe write fails
 */
public synchronized int write ( ByteBuffer buffer ) throws IOException { checkState ( isInitialized , "initialize() must be invoked before use." ) ; if ( ! isOpen ( ) ) { throw new ClosedChannelException ( ) ; } if ( uploadOperation . isDone ( ) ) { waitForCompletionAndThrowIfUploadFailed ( ) ; } return pipeSinkChannel . write ( buffer ) ; }
Writes contents of the given buffer to this channel .
27,140
/**
 * Initializes this channel for writing.
 *
 * <p>Sets up a piped stream pair: writes go into {@code pipeSinkChannel} and are consumed by a
 * background upload task submitted to {@code threadPool}. The content length is set to -1
 * (unknown/streaming), gzip of the request content is disabled, and the upload chunk size is
 * applied to the request.
 *
 * @throws IOException if the pipe or request cannot be set up
 */
public void initialize ( ) throws IOException { PipedInputStream pipeSource = new PipedInputStream ( pipeBufferSize ) ; OutputStream pipeSink = new PipedOutputStream ( pipeSource ) ; pipeSinkChannel = Channels . newChannel ( pipeSink ) ; InputStreamContent objectContentStream = new InputStreamContent ( contentType , pipeSource ) ; objectContentStream . setLength ( - 1 ) ; objectContentStream . setCloseInputStream ( false ) ; T request = createRequest ( objectContentStream ) ; request . setDisableGZipContent ( true ) ; clientRequestHelper . setChunkSize ( request , uploadChunkSize ) ; uploadOperation = threadPool . submit ( new UploadOperation ( request , pipeSource ) ) ; isInitialized = true ; }
Initialize this channel object for writing .
27,141
/**
 * Blocks until the background upload completes and returns its result.
 *
 * <p>On interruption the upload is cancelled, the thread's interrupt status is restored, and a
 * {@link ClosedByInterruptException} (with the original {@link InterruptedException} attached as
 * a suppressed exception) is thrown. {@link Error}s from the upload task are propagated as-is;
 * any other upload failure is wrapped in an {@link IOException}.
 *
 * @return the upload result
 * @throws IOException if the upload was interrupted or failed
 */
private S waitForCompletionAndThrowIfUploadFailed() throws IOException {
  try {
    return uploadOperation.get();
  } catch (InterruptedException e) {
    uploadOperation.cancel(true);
    // Restore the interrupt status so callers further up the stack can observe it
    // (the original code swallowed it).
    Thread.currentThread().interrupt();
    IOException exception = new ClosedByInterruptException();
    exception.addSuppressed(e);
    throw exception;
  } catch (ExecutionException e) {
    if (e.getCause() instanceof Error) {
      throw (Error) e.getCause();
    }
    throw new IOException("Upload failed", e.getCause());
  }
}
Throws if upload operation failed . Propagates any errors .
27,142
/**
 * Creates a {@link FileNotFoundException} with a suitable message for a GCS bucket or object.
 *
 * @param bucketName bucket name; must not be null or empty
 * @param objectName object name; may be null or empty, in which case the message refers to the
 *     bucket itself
 * @return the exception (not thrown here)
 */
public static FileNotFoundException getFileNotFoundException ( String bucketName , String objectName ) { checkArgument ( ! isNullOrEmpty ( bucketName ) , "bucketName must not be null or empty" ) ; return new FileNotFoundException ( String . format ( "Item not found: '%s'." + " If you enabled STRICT generation consistency, it is possible that" + " the live version is still available but the intended generation is deleted." , StorageResourceId . createReadableString ( bucketName , nullToEmpty ( objectName ) ) ) ) ; }
Creates FileNotFoundException with suitable message for a GCS bucket or object .
27,143
/**
 * Invokes {@code callable}, retrying on transient failures as judged by {@code retryDet}, with
 * delays supplied by {@code backoff} and executed via {@code sleeper}.
 *
 * <p>Only exceptions assignable to {@code classType} are considered for retry; any other
 * {@code RuntimeException} is rethrown directly, and any other checked exception is wrapped in
 * a {@code RuntimeException} (retrying unchecked-wrapped checked exceptions is unsupported).
 * The loop ends when the callable succeeds, the determiner declines to retry, or the backoff
 * is exhausted — in the latter two cases the most recent exception is rethrown.
 *
 * @throws X the last exception encountered if retries are exhausted or retrying is declined
 * @throws InterruptedException if interrupted while sleeping between attempts
 */
@ SuppressWarnings ( "unchecked" ) public static < T , X extends Exception > T retry ( CheckedCallable < T , X > callable , BackOff backoff , RetryDeterminer < ? super X > retryDet , Class < X > classType , Sleeper sleeper ) throws X , InterruptedException { checkNotNull ( backoff , "Must provide a non-null BackOff." ) ; checkNotNull ( retryDet , "Must provide a non-null RetryDeterminer." ) ; checkNotNull ( sleeper , "Must provide a non-null Sleeper." ) ; checkNotNull ( callable , "Must provide a non-null Executable object." ) ; X currentException ; do { try { return callable . call ( ) ; } catch ( Exception e ) { if ( classType . isInstance ( e ) ) { currentException = ( X ) e ; if ( ! retryDet . shouldRetry ( currentException ) ) { throw currentException ; } } else { if ( e instanceof RuntimeException ) { throw ( RuntimeException ) e ; } throw new RuntimeException ( "Retrying with unchecked exceptions that are not RuntimeExceptions is not supported." , e ) ; } } } while ( nextSleep ( backoff , sleeper , currentException ) ) ; throw currentException ; }
Retries the given executable function in the case of transient errors defined by the RetryDeterminer .
27,144
/**
 * Convenience overload of {@code retry} that uses {@link Sleeper#DEFAULT} between attempts.
 *
 * @see #retry(CheckedCallable, BackOff, RetryDeterminer, Class, Sleeper)
 */
public static < T , X extends Exception > T retry ( CheckedCallable < T , X > callable , BackOff backoff , RetryDeterminer < ? super X > retryDet , Class < X > classType ) throws X , InterruptedException { return retry ( callable , backoff , retryDet , classType , Sleeper . DEFAULT ) ; }
Retries the given executable function in the case of transient errors defined by the RetryDeterminer and uses default sleeper .
27,145
/**
 * Sleeps for the next backoff interval, if any remains.
 *
 * @param backoff source of the next delay; {@link BackOff#STOP} means retries are exhausted
 * @param sleeper performs the actual sleep
 * @param currentException the exception that triggered the retry, logged for diagnostics
 * @return true if a sleep was performed and the caller should retry; false if the backoff is
 *     exhausted
 * @throws InterruptedException if interrupted while sleeping
 */
private static boolean nextSleep(BackOff backoff, Sleeper sleeper, Exception currentException)
    throws InterruptedException {
  long backOffTime;
  try {
    backOffTime = backoff.nextBackOffMillis();
  } catch (IOException e) {
    // Fixed duplicated word in the message: "Failed to to get" -> "Failed to get".
    throw new RuntimeException("Failed to get next back off time", e);
  }
  if (backOffTime == BackOff.STOP) {
    return false;
  }
  logger.atInfo().withCause(currentException).log(
      "Transient exception caught. Sleeping for %d, then retrying.", backOffTime);
  sleeper.sleep(backOffTime);
  return true;
}
Determines the amount to sleep for and sleeps if needed .
27,146
/**
 * Returns whether the given object name looks like a directory path, i.e. it is non-empty and
 * ends with the GCS path delimiter.
 *
 * @param objectName object name to test; may be null
 */
static boolean objectHasDirectoryPath(String objectName) {
  if (Strings.isNullOrEmpty(objectName)) {
    return false;
  }
  return objectName.endsWith(GoogleCloudStorage.PATH_DELIMITER);
}
Indicates whether the given object name looks like a directory path .
27,147
/**
 * Flushes pending requests if no other flush is in progress (or unconditionally when
 * {@code flushAll} is set).
 *
 * <p>When {@code flushAll} is false this is best-effort: it returns immediately if there is
 * nothing pending or the flush lock is already held by another thread. When {@code flushAll}
 * is true it blocks on the lock and loops until both the pending-request queue and the
 * outstanding-response queue are drained.
 *
 * @param flushAll whether to block until everything pending and in-flight has completed
 * @throws IOException if sending or awaiting requests fails
 */
private void flushIfPossible ( boolean flushAll ) throws IOException { if ( flushAll ) { flushLock . lock ( ) ; } else if ( pendingRequests . isEmpty ( ) || ! flushLock . tryLock ( ) ) { return ; } try { do { flushPendingRequests ( ) ; if ( flushAll ) { awaitRequestsCompletion ( ) ; } } while ( flushAll && ( ! pendingRequests . isEmpty ( ) || ! responseFutures . isEmpty ( ) ) ) ; } finally { flushLock . unlock ( ) ; } }
Flush our buffer if we are not already in a flush operation and we have data to flush .
27,148
/**
 * Sends all remaining batched requests and shuts down the request executor.
 *
 * <p>Should be called at the end of any series of batched requests. After a full flush both
 * internal queues are asserted empty. The executor is always shut down in the finally block;
 * if it fails to terminate within one second (or the wait is interrupted) it is forcibly shut
 * down.
 *
 * @throws IOException if sending or awaiting requests fails
 */
public void flush ( ) throws IOException { try { flushIfPossible ( true ) ; checkState ( pendingRequests . isEmpty ( ) , "pendingRequests should be empty after flush" ) ; checkState ( responseFutures . isEmpty ( ) , "responseFutures should be empty after flush" ) ; } finally { requestsExecutor . shutdown ( ) ; try { if ( ! requestsExecutor . awaitTermination ( 1 , TimeUnit . SECONDS ) ) { logger . atWarning ( ) . log ( "Forcibly shutting down batch helper thread pool." ) ; requestsExecutor . shutdownNow ( ) ; } } catch ( InterruptedException e ) { logger . atFine ( ) . withCause ( e ) . log ( "Failed to await termination: forcibly shutting down batch helper thread pool." ) ; requestsExecutor . shutdownNow ( ) ; } } }
Sends any currently remaining requests in the batch ; should be called at the end of any series of batched requests to ensure everything has been sent .
27,149
/**
 * Drains outstanding response futures while the pending-request queue has room below the
 * per-batch maximum.
 *
 * <p>Calls to this method must be serialized by the caller (it is invoked under the flush
 * lock). An {@link IOException} cause from a failed future is unwrapped and rethrown; any other
 * failure (including interruption) is wrapped in a {@code RuntimeException}.
 *
 * @throws IOException if a batched request failed with an I/O error
 */
private void awaitRequestsCompletion ( ) throws IOException { while ( ! responseFutures . isEmpty ( ) && pendingRequests . size ( ) < maxRequestsPerBatch ) { try { responseFutures . remove ( ) . get ( ) ; } catch ( InterruptedException | ExecutionException e ) { if ( e . getCause ( ) instanceof IOException ) { throw ( IOException ) e . getCause ( ) ; } throw new RuntimeException ( "Failed to execute batch" , e ) ; } } }
Awaits until all sent requests are completed . Calls to this method must be serialized .
27,150
/**
 * Inserts an item into the cache, overwriting any existing entry with the same resource id.
 *
 * <p>Returns the previously cached item, or null if there was none or the previous entry had
 * expired. An expired previous entry additionally triggers cleanup of the cached lists that
 * contained it.
 *
 * @param item item to cache
 * @return the previous unexpired item for the same resource id, or null
 */
public synchronized GoogleCloudStorageItemInfo putItem ( GoogleCloudStorageItemInfo item ) { StorageResourceId id = item . getResourceId ( ) ; PrefixKey key = new PrefixKey ( id . getBucketName ( ) , id . getObjectName ( ) ) ; CacheValue < GoogleCloudStorageItemInfo > value = new CacheValue < GoogleCloudStorageItemInfo > ( item , ticker . read ( ) ) ; CacheValue < GoogleCloudStorageItemInfo > oldValue = itemMap . put ( key , value ) ; if ( oldValue == null ) { return null ; } if ( isExpired ( oldValue ) ) { cleanupLists ( key ) ; return null ; } return oldValue . getValue ( ) ; }
Inserts an item into the cache . If an item with the same resource id is present it is overwritten by the new item .
27,151
/**
 * Removes and returns the cached item for the given resource id.
 *
 * <p>Returns null if no entry exists or the entry had already expired; an expired entry also
 * triggers invalidation of the cached lists that contained it.
 *
 * @param id resource id of the item to remove
 * @return the removed unexpired item, or null
 */
public synchronized GoogleCloudStorageItemInfo removeItem(StorageResourceId id) {
  PrefixKey key = new PrefixKey(id.getBucketName(), id.getObjectName());
  CacheValue<GoogleCloudStorageItemInfo> removed = itemMap.remove(key);
  if (removed == null) {
    return null;
  }
  if (!isExpired(removed)) {
    return removed.getValue();
  }
  // The entry outlived its TTL: drop any cached lists that referenced it.
  cleanupLists(key);
  return null;
}
Removes the item from the cache . If the item has expired associated lists are invalidated .
27,152
/**
 * Invalidates all cached items and cached lists that belong to the given bucket.
 *
 * @param bucket name of the bucket whose cache entries should be dropped
 */
public synchronized void invalidateBucket(String bucket) {
  // An empty object name makes the key a prefix of every entry in the bucket.
  PrefixKey bucketKey = new PrefixKey(bucket, "");
  getPrefixSubMap(itemMap, bucketKey).clear();
  getPrefixSubMap(prefixMap, bucketKey).clear();
}
Invalidates all cached items and lists associated with the given bucket .
27,153
/**
 * Removes expired cached-list entries whose prefix covers the given key; if any list was
 * removed, the items under the widest removed list prefix are also purged when expired.
 *
 * <p>Walks the head map (keys &lt;= {@code key}) in descending order so parent prefixes are
 * visited from most to least specific; only entries that are both expired and parents of
 * {@code key} are removed.
 *
 * @param key key whose containing lists should be checked for expiration
 */
private void cleanupLists ( PrefixKey key ) { NavigableMap < PrefixKey , CacheValue < Object > > head = prefixMap . headMap ( key , true ) . descendingMap ( ) ; Iterator < Entry < PrefixKey , CacheValue < Object > > > headItr = head . entrySet ( ) . iterator ( ) ; Entry < PrefixKey , CacheValue < Object > > last = null ; while ( headItr . hasNext ( ) ) { Entry < PrefixKey , CacheValue < Object > > entry = headItr . next ( ) ; if ( isExpired ( entry . getValue ( ) ) && key . isParent ( entry . getKey ( ) ) ) { last = entry ; headItr . remove ( ) ; } } if ( last != null ) { SortedMap < PrefixKey , CacheValue < GoogleCloudStorageItemInfo > > prefix = getPrefixSubMap ( itemMap , last . getKey ( ) ) ; prefix . entrySet ( ) . removeIf ( entry -> isExpired ( entry . getValue ( ) ) ) ; } }
Removes expired list entries that contain the given key . If a list was removed , its contained items are checked for expiration too .
27,154
/**
 * Unwraps every {@link CacheValue} in the given map and returns the contained values.
 *
 * @param map map whose cache-wrapped values should be extracted
 * @return a new list containing the unwrapped values, in the map's iteration order
 */
private static <K, V> List<V> aggregateCacheValues(Map<K, CacheValue<V>> map) {
  List<V> result = new ArrayList<>(map.size());
  for (CacheValue<V> cacheValue : map.values()) {
    result.add(cacheValue.getValue());
  }
  return result;
}
Extracts all the cached values in a map .
27,155
/**
 * Obtains a delegation token from the filesystem for the given URL, adds it to the supplied
 * credentials, and returns it.
 *
 * <p>If {@code url} lacks the expected scheme it is prefixed with {@code SCHEME + "://"}.
 *
 * @param conf Hadoop configuration used to instantiate the filesystem
 * @param creds credentials to which the token is added
 * @param renewer renewer to pass to {@code getDelegationToken}
 * @param url filesystem URL, with or without scheme
 * @return the obtained token, never null
 * @throws DelegationTokenIOException if the filesystem does not issue delegation tokens
 */
public Token < ? > addDelegationTokens ( Configuration conf , Credentials creds , String renewer , String url ) throws Exception { if ( ! url . startsWith ( SCHEME ) ) { url = SCHEME + "://" + url ; } FileSystem fs = FileSystem . get ( URI . create ( url ) , conf ) ; Token < ? > token = fs . getDelegationToken ( renewer ) ; if ( token == null ) { throw new DelegationTokenIOException ( "Filesystem not generating Delegation Tokens: " + url ) ; } creds . addToken ( token . getService ( ) , token ) ; return token ; }
Obtains a delegation token via the FileSystem , adds it to the credentials , and returns it ; throws if the filesystem does not issue delegation tokens .
27,156
/**
 * Generates a credential from the {@code AccessTokenProvider} class named in the configuration.
 *
 * <p>The provider class is instantiated reflectively via its no-arg constructor and configured
 * with {@code setConf}. Returns null if no provider class is configured.
 *
 * @param providerClassFactory resolves the provider class from the configuration
 * @param config configuration passed to the provider
 * @param scopes OAuth scopes for the credential
 * @return the credential, or null if no provider class is configured
 * @throws IOException if the provider class cannot be instantiated
 */
public static Credential credential ( AccessTokenProviderClassFromConfigFactory providerClassFactory , Configuration config , Collection < String > scopes ) throws IOException , GeneralSecurityException { Class < ? extends AccessTokenProvider > clazz = providerClassFactory . getAccessTokenProviderClass ( config ) ; if ( clazz != null ) { logger . atFine ( ) . log ( "Using AccessTokenProvider (%s)" , clazz . getName ( ) ) ; try { AccessTokenProvider accessTokenProvider = clazz . getDeclaredConstructor ( ) . newInstance ( ) ; accessTokenProvider . setConf ( config ) ; return getCredentialFromAccessTokenProvider ( accessTokenProvider , scopes ) ; } catch ( ReflectiveOperationException ex ) { throw new IOException ( "Can't instantiate " + clazz . getName ( ) , ex ) ; } } return null ; }
Generate the credential .
27,157
/**
 * Constructs BigQuery credentials from the given configuration.
 *
 * <p>First tries a configured {@code AccessTokenProvider} (with the {@code mapred.bq} override
 * prefix); if none is configured, falls back to the standard Hadoop credential configuration
 * under the BigQuery prefix.
 *
 * @param config configuration to derive credentials from
 * @return a credential authorized for the BigQuery OAuth scopes
 */
public Credential createBigQueryCredential ( Configuration config ) throws GeneralSecurityException , IOException { Credential credential = CredentialFromAccessTokenProviderClassFactory . credential ( new AccessTokenProviderClassFromConfigFactory ( ) . withOverridePrefix ( "mapred.bq" ) , config , BIGQUERY_OAUTH_SCOPES ) ; if ( credential != null ) { return credential ; } return HadoopCredentialConfiguration . newBuilder ( ) . withConfiguration ( config ) . withOverridePrefix ( BIGQUERY_CONFIG_PREFIX ) . build ( ) . getCredential ( BIGQUERY_OAUTH_SCOPES ) ; }
Construct credentials from the passed Configuration .
27,158
/**
 * Constructs a Bigquery client using credentials derived from the given configuration.
 *
 * @param config configuration to derive credentials from
 * @return an authorized Bigquery client
 */
public Bigquery getBigQuery(Configuration config) throws GeneralSecurityException, IOException {
  logger.atInfo().log("Creating BigQuery from default credential.");
  return getBigQueryFromCredential(createBigQueryCredential(config), BQC_ID);
}
Constructs a BigQuery from the credential constructed from the environment .
27,159
/**
 * Constructs a Bigquery client from the given credential.
 *
 * <p>A null credential yields a client with no request initializer.
 *
 * @param credential credential to authorize requests with, or null
 * @param appName application name attached to requests
 * @return the constructed Bigquery client
 */
public Bigquery getBigQueryFromCredential(Credential credential, String appName) {
  logger.atInfo().log("Creating BigQuery from given credential.");
  Bigquery.Builder builder =
      credential == null
          ? new Bigquery.Builder(HTTP_TRANSPORT, JSON_FACTORY, null)
          : new Bigquery.Builder(
              HTTP_TRANSPORT, JSON_FACTORY, new RetryHttpInitializer(credential, appName));
  return builder.setApplicationName(appName).build();
}
Constructs a BigQuery from a given Credential .
27,160
/**
 * Polls a BigQuery job until it reaches the DONE state.
 *
 * <p>Each status fetch is itself retried on rate-limit errors with a fresh exponential backoff;
 * the outer polling loop uses a separate bounded backoff between polls and reports progress to
 * {@code progressable} after each wait. A DONE job with an error result raises an
 * {@link IOException}; an exhausted polling backoff raises an {@link IOException} with the
 * elapsed time.
 *
 * @param bigquery client used to query job status
 * @param projectId project owning the job
 * @param jobReference reference (id and location) of the job to poll
 * @param progressable progress callback invoked between polls
 * @throws IOException if the job fails or polling times out
 * @throws InterruptedException if interrupted while sleeping between polls
 */
public static void waitForJobCompletion ( Bigquery bigquery , String projectId , JobReference jobReference , Progressable progressable ) throws IOException , InterruptedException { Sleeper sleeper = Sleeper . DEFAULT ; BackOff pollBackOff = new ExponentialBackOff . Builder ( ) . setMaxIntervalMillis ( POLL_WAIT_INTERVAL_MAX_MILLIS ) . setInitialIntervalMillis ( POLL_WAIT_INITIAL_MILLIS ) . setMaxElapsedTimeMillis ( POLL_WAIT_MAX_ELAPSED_MILLIS ) . build ( ) ; long startTime = System . currentTimeMillis ( ) ; long elapsedTime = 0 ; boolean notDone = true ; while ( notDone ) { BackOff operationBackOff = new ExponentialBackOff . Builder ( ) . build ( ) ; Get get = bigquery . jobs ( ) . get ( projectId , jobReference . getJobId ( ) ) . setLocation ( jobReference . getLocation ( ) ) ; Job pollJob = ResilientOperation . retry ( ResilientOperation . getGoogleRequestCallable ( get ) , operationBackOff , RetryDeterminer . RATE_LIMIT_ERRORS , IOException . class , sleeper ) ; elapsedTime = System . currentTimeMillis ( ) - startTime ; logger . atFine ( ) . log ( "Job status (%s ms) %s: %s" , elapsedTime , jobReference . getJobId ( ) , pollJob . getStatus ( ) . getState ( ) ) ; if ( pollJob . getStatus ( ) . getState ( ) . equals ( "DONE" ) ) { notDone = false ; if ( pollJob . getStatus ( ) . getErrorResult ( ) != null ) { throw new IOException ( "Error during BigQuery job execution: " + pollJob . getStatus ( ) . getErrorResult ( ) ) ; } } else { long millisToWait = pollBackOff . nextBackOffMillis ( ) ; if ( millisToWait == BackOff . STOP ) { throw new IOException ( String . format ( "Job %s failed to complete after %s millis." , jobReference . getJobId ( ) , elapsedTime ) ) ; } Thread . sleep ( millisToWait ) ; progressable . progress ( ) ; } } }
Polls job until it is completed .
27,161
/**
 * Parses a JSON array of field definitions into a BigQuery table schema.
 *
 * <p>Each array element must be a JSON object with non-null {@code name} and {@code type}
 * entries; {@code mode} is optional. Elements of type {@code RECORD} must carry a
 * {@code fields} entry, which is parsed recursively.
 *
 * @param fields JSON array text describing the fields
 * @return the parsed field schemas, in input order
 * @throws IllegalArgumentException if an element is malformed
 */
public static List < TableFieldSchema > getSchemaFromString ( String fields ) { logger . atFine ( ) . log ( "getSchemaFromString('%s')" , fields ) ; JsonParser jsonParser = new JsonParser ( ) ; JsonArray json = jsonParser . parse ( fields ) . getAsJsonArray ( ) ; List < TableFieldSchema > fieldsList = new ArrayList < > ( ) ; for ( JsonElement jsonElement : json ) { checkArgument ( jsonElement . isJsonObject ( ) , "Expected JsonObject for element, got '%s'." , jsonElement ) ; JsonObject jsonObject = jsonElement . getAsJsonObject ( ) ; checkArgument ( jsonObject . get ( "name" ) != null , "Expected non-null entry for key 'name' in JsonObject '%s'" , jsonObject ) ; checkArgument ( jsonObject . get ( "type" ) != null , "Expected non-null entry for key 'type' in JsonObject '%s'" , jsonObject ) ; TableFieldSchema fieldDef = new TableFieldSchema ( ) ; fieldDef . setName ( jsonObject . get ( "name" ) . getAsString ( ) ) ; fieldDef . setType ( jsonObject . get ( "type" ) . getAsString ( ) ) ; if ( jsonObject . get ( "mode" ) != null ) { fieldDef . setMode ( jsonObject . get ( "mode" ) . getAsString ( ) ) ; } if ( jsonObject . get ( "type" ) . getAsString ( ) . equals ( "RECORD" ) ) { checkArgument ( jsonObject . get ( "fields" ) != null , "Expected non-null entry for key 'fields' in JsonObject of type RECORD: '%s'" , jsonObject ) ; fieldDef . setFields ( getSchemaFromString ( jsonObject . get ( "fields" ) . toString ( ) ) ) ; } fieldsList . add ( fieldDef ) ; } return fieldsList ; }
Parses the given JSON string and returns the extracted schema .
27,162
/**
 * Initializes this filesystem instance for the given URI and configuration.
 *
 * <p>Validates that the URI carries this filesystem's scheme, optionally runs the superclass
 * initialization (otherwise creates a standalone {@code Statistics} instance not registered in
 * the static FileSystem map), sets up delegation-token support, and applies the configuration.
 * Records INIT counters and timing.
 *
 * @param path URI to initialize against; scheme must match {@code getScheme()}
 * @param config configuration to apply
 * @param initSuperclass whether to invoke {@code super.initialize(path, config)}
 * @throws IOException if initialization fails
 */
public void initialize ( URI path , Configuration config , boolean initSuperclass ) throws IOException { long startTime = System . nanoTime ( ) ; Preconditions . checkArgument ( path != null , "path must not be null" ) ; Preconditions . checkArgument ( config != null , "config must not be null" ) ; Preconditions . checkArgument ( path . getScheme ( ) != null , "scheme of path must not be null" ) ; if ( ! path . getScheme ( ) . equals ( getScheme ( ) ) ) { throw new IllegalArgumentException ( "URI scheme not supported: " + path ) ; } initUri = path ; logger . atFine ( ) . log ( "GHFS.initialize: %s" , path ) ; if ( initSuperclass ) { super . initialize ( path , config ) ; } else { logger . atFine ( ) . log ( "Initializing 'statistics' as an instance not attached to the static FileSystem map" ) ; statistics = new Statistics ( getScheme ( ) ) ; } setConf ( config ) ; initializeDelegationTokenSupport ( config , path ) ; configure ( config ) ; long duration = System . nanoTime ( ) - startTime ; increment ( Counter . INIT ) ; increment ( Counter . INIT_TIME , duration ) ; }
Initializes this file system instance .
27,163
/**
 * Initializes delegation-token support for this filesystem.
 *
 * <p>Binds a {@code GcsDelegationTokens} instance to this filesystem under the service name
 * {@code scheme://authority} and initializes it from the configuration. An
 * {@link IllegalStateException} during init is logged and swallowed, leaving
 * {@code delegationTokens} possibly set but unbound — token support is best-effort here.
 *
 * @param config configuration used to initialize token support
 * @param path URI whose authority forms the token service name
 * @throws IOException if binding fails
 */
private void initializeDelegationTokenSupport ( Configuration config , URI path ) throws IOException { logger . atFine ( ) . log ( "GHFS.initializeDelegationTokenSupport" ) ; GcsDelegationTokens dts = new GcsDelegationTokens ( ) ; Text service = new Text ( getScheme ( ) + "://" + path . getAuthority ( ) ) ; dts . bindToFileSystem ( this , service ) ; try { dts . init ( config ) ; delegationTokens = dts ; if ( delegationTokens . isBoundToDT ( ) ) { logger . atFine ( ) . log ( "GHFS.initializeDelegationTokenSupport: Using existing delegation token." ) ; } } catch ( IllegalStateException e ) { logger . atInfo ( ) . log ( "GHFS.initializeDelegationTokenSupport: %s" , e . getMessage ( ) ) ; } }
Initialize the delegation token support for this filesystem .
27,164
/**
 * Returns the default port for this filesystem: always -1, indicating that ports are not used
 * by GCS URIs.
 *
 * @return -1
 */
protected int getDefaultPort ( ) { logger . atFine ( ) . log ( "GHFS.getDefaultPort:" ) ; int result = - 1 ; logger . atFine ( ) . log ( "GHFS.getDefaultPort:=> %s" , result ) ; return result ; }
The default port is listed as - 1 as an indication that ports are not used .
27,165
/**
 * Opens the given file for writing.
 *
 * <p>The output stream implementation (basic vs. syncable-composite) is selected by the
 * {@code GCS_OUTPUT_STREAM_TYPE} configuration key. The {@code bufferSize} argument is ignored;
 * {@code replication} and {@code blockSize} are only validated. Records CREATE counters and
 * timing.
 *
 * @param hadoopPath file to create
 * @param permission permissions for the file (unused by the GCS-backed stream)
 * @param overwrite whether to overwrite an existing file
 * @param bufferSize ignored
 * @param replication must be positive; otherwise unused
 * @param blockSize must be positive; otherwise unused
 * @param progress progress callback
 * @return a stream positioned at the start of the new file
 * @throws IOException if the configured stream type is unsupported or creation fails
 */
public FSDataOutputStream create ( Path hadoopPath , FsPermission permission , boolean overwrite , int bufferSize , short replication , long blockSize , Progressable progress ) throws IOException { long startTime = System . nanoTime ( ) ; Preconditions . checkArgument ( hadoopPath != null , "hadoopPath must not be null" ) ; Preconditions . checkArgument ( replication > 0 , "replication must be a positive integer: %s" , replication ) ; Preconditions . checkArgument ( blockSize > 0 , "blockSize must be a positive integer: %s" , blockSize ) ; checkOpen ( ) ; logger . atFine ( ) . log ( "GHFS.create: %s, overwrite: %s, bufferSize: %d (ignored)" , hadoopPath , overwrite , bufferSize ) ; URI gcsPath = getGcsPath ( hadoopPath ) ; OutputStreamType type = GCS_OUTPUT_STREAM_TYPE . get ( getConf ( ) , getConf ( ) :: getEnum ) ; OutputStream out ; switch ( type ) { case BASIC : out = new GoogleHadoopOutputStream ( this , gcsPath , statistics , new CreateFileOptions ( overwrite ) ) ; break ; case SYNCABLE_COMPOSITE : out = new GoogleHadoopSyncableOutputStream ( this , gcsPath , statistics , new CreateFileOptions ( overwrite ) ) ; break ; default : throw new IOException ( String . format ( "Unsupported output stream type given for key '%s': '%s'" , GCS_OUTPUT_STREAM_TYPE . getKey ( ) , type ) ) ; } long duration = System . nanoTime ( ) - startTime ; increment ( Counter . CREATE ) ; increment ( Counter . CREATE_TIME , duration ) ; return new FSDataOutputStream ( out , null ) ; }
Opens the given file for writing .
27,166
/**
 * Concatenates existing source files into the target file.
 *
 * <p>Sources are processed in partitions of {@code MAX_COMPOSE_OBJECTS - 1}; each compose call
 * prepends the current target so the target accumulates content across partitions. The target
 * must not appear among the sources and at least one source is required.
 *
 * @param trg target file
 * @param psrcs source files; must be non-empty and must not contain the target
 * @throws IOException if a compose operation fails
 */
public void concat ( Path trg , Path [ ] psrcs ) throws IOException { logger . atFine ( ) . log ( "GHFS.concat: %s, %s" , trg , lazy ( ( ) -> Arrays . toString ( psrcs ) ) ) ; checkArgument ( psrcs . length > 0 , "psrcs must have at least one source" ) ; URI trgPath = getGcsPath ( trg ) ; List < URI > srcPaths = Arrays . stream ( psrcs ) . map ( this :: getGcsPath ) . collect ( toImmutableList ( ) ) ; checkArgument ( ! srcPaths . contains ( trgPath ) , "target must not be contained in sources" ) ; List < List < URI > > partitions = Lists . partition ( srcPaths , GoogleCloudStorage . MAX_COMPOSE_OBJECTS - 1 ) ; logger . atFine ( ) . log ( "GHFS.concat: %s, %d partitions" , trg , partitions . size ( ) ) ; for ( List < URI > partition : partitions ) { List < URI > sources = Lists . newArrayList ( trgPath ) ; sources . addAll ( partition ) ; logger . atFine ( ) . log ( "GHFS.concat compose: %s, %s" , trgPath , sources ) ; getGcsFs ( ) . compose ( sources , trgPath , CreateFileOptions . DEFAULT_CONTENT_TYPE ) ; } logger . atFine ( ) . log ( "GHFS.concat:=> " ) ; }
Concat existing files into one file .
27,167
/**
 * Renames {@code src} to {@code dst}.
 *
 * <p>Renaming the filesystem root is rejected by returning false. A failed underlying rename is
 * also reported as false rather than an exception, per the Hadoop {@code rename} contract; the
 * failure is logged (at INFO, rate-limited, when it has a cause and fine logging is off).
 * Records RENAME counters and timing on success.
 *
 * @param src source path; must not be null or the filesystem root
 * @param dst destination path; must not be null
 * @return true if the rename succeeded
 * @throws IOException if path conversion fails or the filesystem is closed
 */
public boolean rename ( Path src , Path dst ) throws IOException { if ( src . makeQualified ( this ) . equals ( getFileSystemRoot ( ) ) ) { logger . atFine ( ) . log ( "GHFS.rename: src is root: '%s'" , src ) ; return false ; } long startTime = System . nanoTime ( ) ; Preconditions . checkArgument ( src != null , "src must not be null" ) ; Preconditions . checkArgument ( dst != null , "dst must not be null" ) ; checkOpen ( ) ; URI srcPath = getGcsPath ( src ) ; URI dstPath = getGcsPath ( dst ) ; logger . atFine ( ) . log ( "GHFS.rename: %s -> %s" , src , dst ) ; try { getGcsFs ( ) . rename ( srcPath , dstPath ) ; } catch ( IOException e ) { ( logger . atFine ( ) . isEnabled ( ) || e . getCause ( ) == null ? logger . atFine ( ) : logger . atInfo ( ) . atMostEvery ( 5 , TimeUnit . MINUTES ) ) . withCause ( e ) . log ( "Failed GHFS.rename: %s -> %s" , src , dst ) ; return false ; } long duration = System . nanoTime ( ) - startTime ; increment ( Counter . RENAME ) ; increment ( Counter . RENAME_TIME , duration ) ; return true ; }
Renames src to dst . Src must not be equal to the filesystem root .
27,168
/**
 * Lists file status. For a directory path the statuses of its children are returned; for a file
 * path the status of that file is returned.
 *
 * <p>All statuses report the current UGI user as owner/group. Records LIST_STATUS counters and
 * timing.
 *
 * @param hadoopPath path to list
 * @return the statuses, one per listed entry
 * @throws FileNotFoundException if the path does not exist
 * @throws IOException on other listing failures
 */
public FileStatus [ ] listStatus ( Path hadoopPath ) throws IOException { long startTime = System . nanoTime ( ) ; Preconditions . checkArgument ( hadoopPath != null , "hadoopPath must not be null" ) ; checkOpen ( ) ; logger . atFine ( ) . log ( "GHFS.listStatus: %s" , hadoopPath ) ; URI gcsPath = getGcsPath ( hadoopPath ) ; List < FileStatus > status ; try { List < FileInfo > fileInfos = getGcsFs ( ) . listFileInfo ( gcsPath ) ; status = new ArrayList < > ( fileInfos . size ( ) ) ; String userName = getUgiUserName ( ) ; for ( FileInfo fileInfo : fileInfos ) { status . add ( getFileStatus ( fileInfo , userName ) ) ; } } catch ( FileNotFoundException fnfe ) { logger . atFine ( ) . withCause ( fnfe ) . log ( "Got fnfe: " ) ; throw new FileNotFoundException ( String . format ( "Path '%s' does not exist." , gcsPath ) ) ; } long duration = System . nanoTime ( ) - startTime ; increment ( Counter . LIST_STATUS ) ; increment ( Counter . LIST_STATUS_TIME , duration ) ; return status . toArray ( new FileStatus [ 0 ] ) ; }
Lists file status . If the given path points to a directory then the status of children is returned otherwise the status of the given file is returned .
27,169
/**
 * Sets the current working directory to the given path.
 *
 * <p>The path is normalized to directory form (trailing delimiter) before being stored. Records
 * SET_WD counters and timing.
 *
 * @param hadoopPath new working directory; must not be null
 */
public void setWorkingDirectory ( Path hadoopPath ) { long startTime = System . nanoTime ( ) ; Preconditions . checkArgument ( hadoopPath != null , "hadoopPath must not be null" ) ; logger . atFine ( ) . log ( "GHFS.setWorkingDirectory: %s" , hadoopPath ) ; URI gcsPath = FileInfo . convertToDirectoryPath ( pathCodec , getGcsPath ( hadoopPath ) ) ; Path newPath = getHadoopPath ( gcsPath ) ; workingDirectory = newPath ; logger . atFine ( ) . log ( "GHFS.setWorkingDirectory: => %s" , workingDirectory ) ; long duration = System . nanoTime ( ) - startTime ; increment ( Counter . SET_WD ) ; increment ( Counter . SET_WD_TIME , duration ) ; }
Sets the current working directory to the given path .
27,170
/**
 * Makes the given path and all non-existent parent directories, with Unix {@code mkdir -p}
 * semantics.
 *
 * <p>The {@code permission} argument is accepted for interface compatibility but not applied.
 * A NIO {@code FileAlreadyExistsException} from the underlying filesystem is translated to the
 * Hadoop {@code FileAlreadyExistsException} callers expect. Records MKDIRS counters and timing.
 *
 * @param hadoopPath directory path to create; must not be null
 * @param permission unused
 * @return always true on success
 * @throws FileAlreadyExistsException if the path exists as a file
 * @throws IOException on other failures
 */
public boolean mkdirs ( Path hadoopPath , FsPermission permission ) throws IOException { long startTime = System . nanoTime ( ) ; Preconditions . checkArgument ( hadoopPath != null , "hadoopPath must not be null" ) ; checkOpen ( ) ; logger . atFine ( ) . log ( "GHFS.mkdirs: %s, perm: %s" , hadoopPath , permission ) ; URI gcsPath = getGcsPath ( hadoopPath ) ; try { getGcsFs ( ) . mkdirs ( gcsPath ) ; } catch ( java . nio . file . FileAlreadyExistsException faee ) { throw ( FileAlreadyExistsException ) new FileAlreadyExistsException ( faee . getMessage ( ) ) . initCause ( faee ) ; } long duration = System . nanoTime ( ) - startTime ; increment ( Counter . MKDIRS ) ; increment ( Counter . MKDIRS_TIME , duration ) ; return true ; }
Makes the given path and all non - existent parents directories . Has the semantics of Unix mkdir - p .
27,171
/**
 * Gets the status of the given path item.
 *
 * <p>The owner reported in the status is the current UGI user. Records GET_FILE_STATUS counters
 * and timing.
 *
 * @param hadoopPath path to query; must not be null
 * @return the status for the path
 * @throws FileNotFoundException if the path does not exist
 * @throws IOException on other failures
 */
public FileStatus getFileStatus ( Path hadoopPath ) throws IOException { long startTime = System . nanoTime ( ) ; Preconditions . checkArgument ( hadoopPath != null , "hadoopPath must not be null" ) ; checkOpen ( ) ; logger . atFine ( ) . log ( "GHFS.getFileStatus: %s" , hadoopPath ) ; URI gcsPath = getGcsPath ( hadoopPath ) ; FileInfo fileInfo = getGcsFs ( ) . getFileInfo ( gcsPath ) ; if ( ! fileInfo . exists ( ) ) { logger . atFine ( ) . log ( "GHFS.getFileStatus: not found: %s" , gcsPath ) ; throw new FileNotFoundException ( ( fileInfo . isDirectory ( ) ? "Directory not found : " : "File not found : " ) + hadoopPath ) ; } String userName = getUgiUserName ( ) ; FileStatus status = getFileStatus ( fileInfo , userName ) ; long duration = System . nanoTime ( ) - startTime ; increment ( Counter . GET_FILE_STATUS ) ; increment ( Counter . GET_FILE_STATUS_TIME , duration ) ; return status ; }
Gets status of the given path item .
27,172
/**
 * Builds a {@link FileStatus} for the given {@link FileInfo}.
 *
 * <p>Uses the default replication factor and block size, the configured reported permissions,
 * and the given user as both owner and group. Access time is reported as the modification time
 * since GCS does not track access times separately here.
 *
 * @param fileInfo source file info
 * @param userName user to report as owner and group
 * @return the corresponding status
 */
private FileStatus getFileStatus ( FileInfo fileInfo , String userName ) throws IOException { FileStatus status = new FileStatus ( fileInfo . getSize ( ) , fileInfo . isDirectory ( ) , REPLICATION_FACTOR_DEFAULT , defaultBlockSize , fileInfo . getModificationTime ( ) , fileInfo . getModificationTime ( ) , reportedPermissions , userName , userName , getHadoopPath ( fileInfo . getPath ( ) ) ) ; logger . atFine ( ) . log ( "GHFS.getFileStatus: %s => %s" , fileInfo . getPath ( ) , lazy ( ( ) -> fileStatusToString ( status ) ) ) ; return status ; }
Gets FileStatus corresponding to the given FileInfo value .
27,173
/**
 * Returns statuses for paths matching the given pattern and accepted by the filter.
 *
 * <p>The pattern is round-tripped through GCS path encoding to normalize it, then dispatched to
 * one of three strategies: concurrent glob (flat and regular glob raced against each other),
 * flat glob alone, or the default superclass glob — depending on configuration flags and
 * whether the pattern is eligible for flat globbing.
 *
 * @param pathPattern glob pattern to match
 * @param filter additional user-supplied path filter
 * @return matching statuses
 * @throws IOException if globbing fails
 */
public FileStatus [ ] globStatus ( Path pathPattern , PathFilter filter ) throws IOException { checkOpen ( ) ; logger . atFine ( ) . log ( "GHFS.globStatus: %s" , pathPattern ) ; Path encodedPath = new Path ( pathPattern . toUri ( ) . toString ( ) ) ; Path encodedFixedPath = getHadoopPath ( getGcsPath ( encodedPath ) ) ; Path fixedPath = new Path ( URI . create ( encodedFixedPath . toString ( ) ) ) ; logger . atFine ( ) . log ( "GHFS.globStatus fixedPath: %s => %s" , pathPattern , fixedPath ) ; if ( enableConcurrentGlob && couldUseFlatGlob ( fixedPath ) ) { return concurrentGlobInternal ( fixedPath , filter ) ; } if ( enableFlatGlob && couldUseFlatGlob ( fixedPath ) ) { return flatGlobInternal ( fixedPath , filter ) ; } return super . globStatus ( fixedPath , filter ) ; }
Returns an array of FileStatus objects whose path names match pathPattern and is accepted by the user - supplied path filter . Results are sorted by their path names .
27,174
/**
 * Runs the flat glob and the default glob concurrently and returns whichever finishes first.
 *
 * <p>Both algorithms produce the same result, but either can be much faster depending on the
 * directory layout; {@code invokeAny} returns the first successful result and cancels the
 * other. The two-thread daemon pool is always shut down afterwards. An {@link IOException}
 * cause is unwrapped; other failures are wrapped in an {@link IOException}.
 *
 * @param fixedPath normalized glob pattern
 * @param filter user-supplied path filter
 * @return matching statuses from whichever algorithm completed first
 * @throws IOException if both algorithms fail or the wait is interrupted
 */
private FileStatus [ ] concurrentGlobInternal ( Path fixedPath , PathFilter filter ) throws IOException { ExecutorService executorService = Executors . newFixedThreadPool ( 2 , DAEMON_THREAD_FACTORY ) ; Callable < FileStatus [ ] > flatGlobTask = ( ) -> flatGlobInternal ( fixedPath , filter ) ; Callable < FileStatus [ ] > nonFlatGlobTask = ( ) -> super . globStatus ( fixedPath , filter ) ; try { return executorService . invokeAny ( Arrays . asList ( flatGlobTask , nonFlatGlobTask ) ) ; } catch ( InterruptedException | ExecutionException e ) { throw ( e . getCause ( ) instanceof IOException ) ? ( IOException ) e . getCause ( ) : new IOException ( e ) ; } finally { executorService . shutdownNow ( ) ; } }
Use 2 glob algorithms that return the same result but one of them could be significantly faster than another one depending on directory layout .
27,175
/**
 * Returns the home directory of the current user, resolved under the filesystem root.
 *
 * @return the user's home directory path
 */
public Path getHomeDirectory() {
  Path homeDirectory = new Path(getFileSystemRoot(), getHomeDirectorySubpath());
  logger.atFine().log("GHFS.getHomeDirectory:=> %s", homeDirectory);
  return homeDirectory;
}
Returns home directory of the current user .
27,176
/**
 * Renders the given {@link FileStatus} as a short diagnostic string (path, directory flag,
 * length, owner) for logging.
 *
 * @param stat status to render; must not be null (checked only when assertions are enabled)
 * @return the formatted string
 */
private static String fileStatusToString ( FileStatus stat ) { assert stat != null ; return String . format ( "path: %s, isDir: %s, len: %d, owner: %s" , stat . getPath ( ) . toString ( ) , stat . isDir ( ) , stat . getLen ( ) , stat . getOwner ( ) ) ; }
Converts the given FileStatus to its string representation .
27,177
/**
 * Copies the value of a deprecated configuration key to its replacement key, but only when the
 * deprecated key has a value and the new key does not.
 *
 * <p>Logs a deprecation warning whenever a copy is performed.
 *
 * @param config configuration to read and mutate
 * @param deprecatedKey old key whose value may be migrated
 * @param newKey replacement key to receive the value
 */
private static void copyIfNotPresent(Configuration config, String deprecatedKey, String newKey) {
  String deprecatedValue = config.get(deprecatedKey);
  if (deprecatedValue == null || config.get(newKey) != null) {
    return;
  }
  logger.atWarning().log(
      "Key %s is deprecated. Copying the value of key %s to new key %s",
      deprecatedKey, deprecatedKey, newKey);
  config.set(newKey, deprecatedValue);
}
Copy the value of the deprecated key to the new key if a value is present for the deprecated key but not the new key .
27,178
/**
 * Migrates values from deprecated configuration keys to their current equivalents.
 *
 * <p>Each service-account / client-auth setting is copied only if the deprecated key has a
 * value and the new key does not. Additionally, if no OAuth client file is configured, a
 * default of {@code ~/.credentials/storage.json} is set.
 *
 * @param config configuration to read and mutate in place
 */
private static void copyDeprecatedConfigurationOptions ( Configuration config ) { copyIfNotPresent ( config , GoogleHadoopFileSystemConfiguration . AUTH_SERVICE_ACCOUNT_ENABLE . getKey ( ) , AUTHENTICATION_PREFIX + HadoopCredentialConfiguration . ENABLE_SERVICE_ACCOUNTS_SUFFIX ) ; copyIfNotPresent ( config , GoogleHadoopFileSystemConfiguration . AUTH_SERVICE_ACCOUNT_KEY_FILE . getKey ( ) , AUTHENTICATION_PREFIX + HadoopCredentialConfiguration . SERVICE_ACCOUNT_KEYFILE_SUFFIX ) ; copyIfNotPresent ( config , GoogleHadoopFileSystemConfiguration . AUTH_SERVICE_ACCOUNT_EMAIL . getKey ( ) , AUTHENTICATION_PREFIX + HadoopCredentialConfiguration . SERVICE_ACCOUNT_EMAIL_SUFFIX ) ; copyIfNotPresent ( config , GoogleHadoopFileSystemConfiguration . AUTH_CLIENT_ID . getKey ( ) , AUTHENTICATION_PREFIX + HadoopCredentialConfiguration . CLIENT_ID_SUFFIX ) ; copyIfNotPresent ( config , GoogleHadoopFileSystemConfiguration . AUTH_CLIENT_SECRET . getKey ( ) , AUTHENTICATION_PREFIX + HadoopCredentialConfiguration . CLIENT_SECRET_SUFFIX ) ; String oauthClientFileKey = AUTHENTICATION_PREFIX + HadoopCredentialConfiguration . OAUTH_CLIENT_FILE_SUFFIX ; if ( config . get ( oauthClientFileKey ) == null ) { config . set ( oauthClientFileKey , System . getProperty ( "user.home" ) + "/.credentials/storage.json" ) ; } }
Copy deprecated configuration options to new keys if present .
27,179
/**
 * Configures GHFS using the supplied configuration.
 *
 * <p>Reads glob/checksum/block-size/permission options, then initializes the underlying
 * GoogleCloudStorageFileSystem either eagerly or lazily (memoized supplier) depending on
 * GCS_LAZY_INITIALIZATION_ENABLE. If a gcsFsSupplier already exists, only buckets and the
 * working directory are (re)configured.
 *
 * @param config Hadoop configuration to apply.
 * @throws IOException on failure to create the GCS file system (eager path only; the lazy
 *     path wraps creation failures in a RuntimeException at first access).
 */
private synchronized void configure(Configuration config) throws IOException {
  logger.atFine().log("GHFS.configure");
  logger.atFine().log("GHFS_ID = %s", GHFS_ID);
  overrideConfigFromFile(config);
  copyDeprecatedConfigurationOptions(config);
  setConf(config);
  enableFlatGlob = GCS_FLAT_GLOB_ENABLE.get(config, config::getBoolean);
  enableConcurrentGlob = GCS_CONCURRENT_GLOB_ENABLE.get(config, config::getBoolean);
  checksumType = GCS_FILE_CHECKSUM_TYPE.get(config, config::getEnum);
  defaultBlockSize = BLOCK_SIZE.get(config, config::getLong);
  reportedPermissions = new FsPermission(PERMISSIONS_TO_REPORT.get(config, config::get));
  if (gcsFsSupplier == null) {
    if (GCS_LAZY_INITIALIZATION_ENABLE.get(config, config::getBoolean)) {
      // Lazy path: defer GCS FS creation until first use. The supplier cannot throw a
      // checked exception, so IOException is wrapped in RuntimeException.
      gcsFsSupplier =
          Suppliers.memoize(
              () -> {
                try {
                  GoogleCloudStorageFileSystem gcsFs = createGcsFs(config);
                  pathCodec = gcsFs.getPathCodec();
                  configureBuckets(gcsFs);
                  configureWorkingDirectory(config);
                  gcsFsInitialized = true;
                  return gcsFs;
                } catch (IOException e) {
                  throw new RuntimeException("Failed to create GCS FS", e);
                }
              });
      // Set a codec immediately so path operations work before the supplier runs; the
      // supplier overwrites it with the FS's own codec once initialized.
      pathCodec = getPathCodec(config);
    } else {
      // Eager path: create and configure the GCS FS right away.
      setGcsFs(createGcsFs(config));
      configureBuckets(getGcsFs());
      configureWorkingDirectory(config);
    }
  } else {
    // Supplier already present (e.g. re-configure): just refresh bucket/working-dir config.
    configureBuckets(getGcsFs());
    configureWorkingDirectory(config);
  }
  logger.atFine().log("GHFS.configure: done");
}
Configures GHFS using the supplied configuration .
27,180
/**
 * Mimics the GCS list API's prefix/delimiter matching for an existing objectName.
 *
 * @param objectNamePrefix optional prefix the objectName must start with; a name exactly
 *     equal to a delimiter-terminated prefix does not match.
 * @param delimiter optional directory delimiter used to truncate past the prefix.
 * @param objectName name to test; must be non-null and non-empty.
 * @return null on no match; otherwise the objectName, possibly truncated just after the
 *     first delimiter following the prefix (the delimiter itself is retained).
 */
public static String matchListPrefix(String objectNamePrefix, String delimiter, String objectName) {
  Preconditions.checkArgument(
      !Strings.isNullOrEmpty(objectName),
      "objectName must not be null or empty, had args %s/%s/%s: ",
      objectNamePrefix, delimiter, objectName);
  int suffixStart = 0;
  String remainder = objectName;
  if (objectNamePrefix != null) {
    // An object whose name equals a directory-style prefix is the "directory" itself,
    // not a listing result.
    boolean isExactDirectoryMatch =
        objectName.equals(objectNamePrefix) && objectNamePrefix.endsWith(PATH_DELIMITER);
    if (!objectName.startsWith(objectNamePrefix) || isExactDirectoryMatch) {
      return null;
    }
    suffixStart = objectNamePrefix.length();
    remainder = objectName.substring(suffixStart);
  }
  if (Strings.isNullOrEmpty(delimiter) || !remainder.contains(delimiter)) {
    return objectName;
  }
  // Truncate just past the first delimiter occurring after the prefix, keeping the delimiter.
  int truncateAt = objectName.indexOf(delimiter, suffixStart) + delimiter.length();
  return objectName.substring(0, truncateAt);
}
Helper that mimics the GCS API behavior for taking an existing objectName and checking whether it matches a user-supplied prefix with an optional directory delimiter. On a match, either the full objectName is returned unmodified, or the return value is a prefix of the objectName that includes the matching prefix but truncates everything past the first appearance of the delimiter after the full prefix. The returned prefix includes the delimiter String at which the objectName was truncated.
27,181
/**
 * Sets this channel's position.
 *
 * @param newPosition new position; validated against [0, size) unless it equals the
 *     current position, in which case this is a no-op.
 * @return this channel, for chaining.
 * @throws IOException if the channel is closed or the position is invalid.
 */
public SeekableByteChannel position(long newPosition) throws IOException {
  throwIfNotOpen();
  // Seeking to the current position is a no-op; skip validation and logging.
  if (newPosition != currentPosition) {
    validatePosition(newPosition);
    logger.atFine().log(
        "Seek from %s to %s position for '%s'", currentPosition, newPosition, resourceIdString);
    currentPosition = newPosition;
  }
  return this;
}
Sets this channel s position .
27,182
/**
 * Validates that the given position is valid for this channel: non-negative and, when the
 * size is known, strictly less than the size.
 *
 * @param position position to validate.
 * @throws EOFException if the position is negative or at/past the known end of the object.
 */
protected void validatePosition(long position) throws IOException {
  if (position < 0) {
    throw new EOFException(
        String.format(
            "Invalid seek offset: position value (%d) must be >= 0 for '%s'",
            position, resourceIdString));
  }
  // A negative size means the size is unknown, so only the lower bound can be enforced.
  boolean sizeKnown = size >= 0;
  if (sizeKnown && position >= size) {
    throw new EOFException(
        String.format(
            "Invalid seek offset: position value (%d) must be between 0 and %d for '%s'",
            position, size, resourceIdString));
  }
}
Validates that the given position is valid for this channel .
27,183
/**
 * Writes {@code len} bytes of the given buffer, starting at {@code offset}, to the
 * underlying stream; updates byte-written statistics and the WRITE/WRITE_TIME counters.
 */
public void write(byte[] b, int offset, int len) throws IOException {
  long startNanos = System.nanoTime();
  out.write(b, offset, len);
  statistics.incrementBytesWritten(len);
  // Measure elapsed time before bumping counters, matching the accounting of the write itself.
  long elapsedNanos = System.nanoTime() - startNanos;
  ghfs.increment(GoogleHadoopFileSystemBase.Counter.WRITE);
  ghfs.increment(GoogleHadoopFileSystemBase.Counter.WRITE_TIME, elapsedNanos);
}
Writes to this output stream len bytes of the specified buffer starting at the given offset .
27,184
/**
 * Adds a modification-timestamp entry (current time per the supplied clock, encoded as a
 * big-endian long) to the given attributes map.
 */
public static void addModificationTimeToAttributes(Map<String, byte[]> attributes, Clock clock) {
  byte[] encodedTimestamp = Longs.toByteArray(clock.currentTimeMillis());
  attributes.put(FILE_MODIFICATION_TIMESTAMP_KEY, encodedTimestamp);
}
Add a key and value representing the current time as determined by the passed clock to the passed attributes dictionary .
27,185
/**
 * Converts the given object name to look like a file path by stripping a single trailing
 * directory delimiter. Null, empty, and already-file-like names are returned unchanged.
 */
public static String convertToFilePath(String objectName) {
  if (Strings.isNullOrEmpty(objectName) || !objectHasDirectoryPath(objectName)) {
    return objectName;
  }
  // Drop the trailing delimiter character.
  return objectName.substring(0, objectName.length() - 1);
}
Converts the given object name to look like a file path. If the object name already looks like a file path, this call is a no-op.
27,186
/**
 * Factory method for constructing a FileInfo from a GoogleCloudStorageItemInfo, returning
 * the shared ROOT_INFO singleton for the root item instead of allocating a new object.
 */
public static FileInfo fromItemInfo(PathCodec pathCodec, GoogleCloudStorageItemInfo itemInfo) {
  if (itemInfo.isRoot()) {
    return ROOT_INFO;
  }
  URI itemPath = pathCodec.getPath(itemInfo.getBucketName(), itemInfo.getObjectName(), true);
  return new FileInfo(itemPath, itemInfo);
}
Handy factory method for constructing a FileInfo from a GoogleCloudStorageItemInfo while potentially returning a singleton instead of really constructing an object for cases like ROOT .
27,187
/**
 * Factory method for converting a list of GoogleCloudStorageItemInfo into a list of
 * FileInfo, preserving order.
 */
public static List<FileInfo> fromItemInfos(
    PathCodec pathCodec, List<GoogleCloudStorageItemInfo> itemInfos) {
  // Presize to avoid resizing; result has exactly one entry per input item.
  List<FileInfo> result = new ArrayList<>(itemInfos.size());
  for (GoogleCloudStorageItemInfo info : itemInfos) {
    result.add(fromItemInfo(pathCodec, info));
  }
  return result;
}
Handy factory method for constructing a list of FileInfo from a list of GoogleCloudStorageItemInfo .
27,188
/**
 * Indicates whether the given path looks like a directory path, i.e. its string form ends
 * with the GCS path delimiter. Null paths are not directory paths.
 */
public static boolean isDirectoryPath(URI path) {
  if (path == null) {
    return false;
  }
  return path.toString().endsWith(GoogleCloudStorage.PATH_DELIMITER);
}
Indicates whether the given path looks like a directory path .
27,189
/**
 * Converts the given resourceId to look like a directory path by appending the delimiter to
 * its object name. Buckets/root and already-directory-like ids are returned unchanged.
 */
public static StorageResourceId convertToDirectoryPath(StorageResourceId resourceId) {
  // Only storage objects without a trailing delimiter need rewriting.
  if (resourceId.isStorageObject() && !objectHasDirectoryPath(resourceId.getObjectName())) {
    return new StorageResourceId(
        resourceId.getBucketName(), convertToDirectoryPath(resourceId.getObjectName()));
  }
  return resourceId;
}
Converts the given resourceId to look like a directory path. If the path already looks like a directory path, this call is a no-op.
27,190
/**
 * Converts the given path to look like a directory path. Paths that already look like
 * directories (and bucket/root paths) are returned unchanged.
 */
public static URI convertToDirectoryPath(PathCodec pathCodec, URI path) {
  StorageResourceId resourceId = pathCodec.validatePathAndGetId(path, true);
  // Only storage objects lacking a trailing delimiter need a rewritten path.
  if (resourceId.isStorageObject() && !objectHasDirectoryPath(resourceId.getObjectName())) {
    StorageResourceId directoryId = convertToDirectoryPath(resourceId);
    return pathCodec.getPath(directoryId.getBucketName(), directoryId.getObjectName(), false);
  }
  return path;
}
Converts the given path to look like a directory path. If the path already looks like a directory path, this call is a no-op.
27,191
/**
 * Serializes the table's field schema to its JSON representation.
 *
 * @throws IOException if JSON serialization fails.
 */
static String getTableFieldsJson(TableSchema tableSchema) throws IOException {
  JacksonFactory jsonFactory = JacksonFactory.getDefaultInstance();
  return jsonFactory.toString(tableSchema.getFields());
}
Gets the JSON representation of the table s fields .
27,192
/**
 * Called once at initialization to set up the RecordReader: validates that the split is a
 * FileSplit, then creates the JSON parser and delegates line reading to a LineRecordReader.
 *
 * @param genericSplit split to read; must be a FileSplit.
 * @param context task attempt context.
 * @throws IOException if the underlying line reader fails to initialize.
 */
public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
  if (logger.atFine().isEnabled()) {
    try {
      logger.atFine().log(
          "initialize('%s', '%s')",
          HadoopToStringUtil.toString(genericSplit), HadoopToStringUtil.toString(context));
    } catch (InterruptedException ie) {
      logger.atFine().withCause(ie).log("InterruptedException during HadoopToStringUtil.toString");
      // Fix: restore the interrupt status instead of silently swallowing it, so callers
      // further up the stack can still observe the interruption.
      Thread.currentThread().interrupt();
    }
  }
  Preconditions.checkArgument(
      genericSplit instanceof FileSplit,
      "InputSplit genericSplit should be an instance of FileSplit.");
  FileSplit fileSplit = (FileSplit) genericSplit;
  jsonParser = new JsonParser();
  lineReader = new LineRecordReader();
  lineReader.initialize(fileSplit, context);
}
Called once at initialization to initialize the RecordReader .
27,193
/**
 * Reads the next key/value pair: advances the underlying line reader, then parses the line
 * as a JSON object into currentValue.
 *
 * @return true if a pair was read; false when the input is exhausted.
 */
public boolean nextKeyValue() throws IOException {
  if (!lineReader.nextKeyValue()) {
    logger.atFine().log("All values read: record reader read %s key, value pairs.", count);
    return false;
  }
  currentKey.set(lineReader.getCurrentKey().get());
  Text rawLine = lineReader.getCurrentValue();
  currentValue = jsonParser.parse(rawLine.toString()).getAsJsonObject();
  count++;
  return true;
}
Reads the next key/value pair: gets the next line and parses it as a JSON object.
27,194
/**
 * Helper for creating a "found" GoogleCloudStorageItemInfo representing an inferred
 * directory for the given resource.
 *
 * @param resourceId resource the inferred directory corresponds to; must not be null.
 */
public static GoogleCloudStorageItemInfo createInferredDirectory(StorageResourceId resourceId) {
  checkArgument(resourceId != null, "resourceId must not be null");
  // NOTE(review): the zero/null positional args presumably denote a zero-length, existing
  // item with no metadata — confirm against GoogleCloudStorageItemInfo's constructor.
  return new GoogleCloudStorageItemInfo(resourceId, 0, 0, null, null);
}
Helper for creating a found GoogleCloudStorageItemInfo for an inferred directory .
27,195
/**
 * Helper for creating a "not found" GoogleCloudStorageItemInfo for a StorageResourceId.
 *
 * @param resourceId resource to mark as not found; must not be null.
 */
public static GoogleCloudStorageItemInfo createNotFound(StorageResourceId resourceId) {
  checkArgument(resourceId != null, "resourceId must not be null");
  // NOTE(review): -1 in the size position presumably signals "does not exist" (contrast with
  // createInferredDirectory, which passes 0) — confirm against the constructor.
  return new GoogleCloudStorageItemInfo(resourceId, 0, -1, null, null);
}
Helper for creating a not found GoogleCloudStorageItemInfo for a StorageResourceId .
27,196
/**
 * Checks that the configuration is valid, that the GCS output path does not already exist,
 * that compression is not requested, and that a BigQuery client can be created; finally
 * delegates to the wrapped FileOutputFormat's own check.
 *
 * @param job job context to validate.
 * @throws FileAlreadyExistsException per the delegate's contract.
 * @throws IOException if the output path exists, compression is enabled, or the BigQuery
 *     client cannot be created.
 */
public void checkOutputSpecs(JobContext job) throws FileAlreadyExistsException, IOException {
  Configuration conf = job.getConfiguration();
  BigQueryOutputConfiguration.validateConfiguration(conf);
  Path outputPath = BigQueryOutputConfiguration.getGcsOutputPath(conf);
  logger.atInfo().log("Using output path '%s'.", outputPath);
  FileSystem outputFileSystem = outputPath.getFileSystem(conf);
  if (outputFileSystem.exists(outputPath)) {
    throw new IOException("The output path '" + outputPath + "' already exists.");
  }
  if (FileOutputFormat.getCompressOutput(job)) {
    throw new IOException("Compression isn't supported for this OutputFormat.");
  }
  // Fail fast if BigQuery credentials/connectivity are broken, before any work is scheduled.
  try {
    new BigQueryFactory().getBigQueryHelper(conf);
  } catch (GeneralSecurityException gse) {
    throw new IOException("Failed to create BigQuery client", gse);
  }
  getDelegate(conf).checkOutputSpecs(job);
}
Checks that the configuration is valid, that the output path doesn't already exist, and that a connection to BigQuery can be established.
27,197
/**
 * Gets the cached OutputCommitter, lazily creating one on first call. Synchronized so the
 * committer is created at most once.
 */
public synchronized OutputCommitter getOutputCommitter(TaskAttemptContext context)
    throws IOException {
  if (committer != null) {
    return committer;
  }
  committer = createCommitter(context);
  return committer;
}
Gets the cached OutputCommitter, creating a new one if it doesn't exist.
27,198
/**
 * Gets the RecordWriter from the wrapped FileOutputFormat delegate.
 */
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context)
    throws IOException, InterruptedException {
  return getDelegate(context.getConfiguration()).getRecordWriter(context);
}
Gets the RecordWriter from the wrapped FileOutputFormat .
27,199
/**
 * Creates a new OutputCommitter for this OutputFormat by asking the delegate format.
 */
protected OutputCommitter createCommitter(TaskAttemptContext context) throws IOException {
  return getDelegate(context.getConfiguration()).getOutputCommitter(context);
}
Create a new OutputCommitter for this OutputFormat .