idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
34,000
/**
 * Prints an overall health report for the DistributedFileSystem to stdout:
 * safe-mode state, any distributed-upgrade status, capacity/usage figures,
 * block health counters, and a per-datanode report for live and dead nodes.
 * Does nothing when getDFS() returns null (the configured fs is not HDFS).
 *
 * NOTE(review): "DFS Used%" divides by presentCapacity (= used + remaining);
 * when both are 0 the double division prints "NaN%" rather than throwing —
 * confirm that output is acceptable.
 *
 * @throws IOException if any of the namenode RPCs fail
 */
public void report ( ) throws IOException { DistributedFileSystem dfs = getDFS ( ) ; if ( dfs != null ) { DiskStatus ds = dfs . getDiskStatus ( ) ; long capacity = ds . getCapacity ( ) ; long used = ds . getDfsUsed ( ) ; long remaining = ds . getRemaining ( ) ; long presentCapacity = used + remaining ; boolean mode = dfs . setSafeMode ( FSConstants . SafeModeAction . SAFEMODE_GET ) ; UpgradeStatusReport status = dfs . distributedUpgradeProgress ( UpgradeAction . GET_STATUS ) ; if ( mode ) { System . out . println ( "Safe mode is ON" ) ; } if ( status != null ) { System . out . println ( status . getStatusText ( false ) ) ; } System . out . println ( "Configured Capacity: " + capacity + " (" + StringUtils . byteDesc ( capacity ) + ")" ) ; System . out . println ( "Present Capacity: " + presentCapacity + " (" + StringUtils . byteDesc ( presentCapacity ) + ")" ) ; System . out . println ( "DFS Remaining: " + remaining + " (" + StringUtils . byteDesc ( remaining ) + ")" ) ; System . out . println ( "DFS Used: " + used + " (" + StringUtils . byteDesc ( used ) + ")" ) ; System . out . println ( "DFS Used%: " + StringUtils . limitDecimalTo2 ( ( ( 1.0 * used ) / presentCapacity ) * 100 ) + "%" ) ; System . out . println ( "Under replicated blocks: " + dfs . getUnderReplicatedBlocksCount ( ) ) ; System . out . println ( "Blocks with corrupt replicas: " + dfs . getCorruptBlocksCount ( ) ) ; System . out . println ( "Missing blocks: " + dfs . getMissingBlocksCount ( ) ) ; System . out . println ( ) ; System . out . println ( "-------------------------------------------------" ) ; DatanodeInfo [ ] live = dfs . getClient ( ) . datanodeReport ( DatanodeReportType . LIVE ) ; DatanodeInfo [ ] dead = dfs . getClient ( ) . datanodeReport ( DatanodeReportType . DEAD ) ; System . out . println ( "Datanodes available: " + live . length + " (" + ( live . length + dead . length ) + " total, " + dead . length + " dead)\n" ) ; for ( DatanodeInfo dn : live ) { System . out . println ( dn . 
getDatanodeReport ( ) ) ; System . out . println ( ) ; } for ( DatanodeInfo dn : dead ) { System . out . println ( dn . getDatanodeReport ( ) ) ; System . out . println ( ) ; } } }
Gives a report on how the FileSystem is doing.
34,001
/**
 * Handles the "-upgradeProgress" admin command. The single argument at
 * argv[idx] selects the UpgradeAction: "status" (brief), "details"
 * (detailed), or "force" (force the upgrade to proceed); matching is
 * case-insensitive. Prints usage and returns -1 when the fs is not HDFS,
 * when the argument count is wrong, or when the keyword is unknown.
 * Prints the status text (or a no-upgrades message) and returns 0 on success.
 *
 * @throws IOException if the namenode RPC fails
 */
public int upgradeProgress ( String [ ] argv , int idx ) throws IOException { DistributedFileSystem dfs = getDFS ( ) ; if ( dfs == null ) { System . out . println ( "FileSystem is " + getFS ( ) . getUri ( ) ) ; return - 1 ; } if ( idx != argv . length - 1 ) { printUsage ( "-upgradeProgress" ) ; return - 1 ; } UpgradeAction action ; if ( "status" . equalsIgnoreCase ( argv [ idx ] ) ) { action = UpgradeAction . GET_STATUS ; } else if ( "details" . equalsIgnoreCase ( argv [ idx ] ) ) { action = UpgradeAction . DETAILED_STATUS ; } else if ( "force" . equalsIgnoreCase ( argv [ idx ] ) ) { action = UpgradeAction . FORCE_PROCEED ; } else { printUsage ( "-upgradeProgress" ) ; return - 1 ; } UpgradeStatusReport status = dfs . distributedUpgradeProgress ( action ) ; String statusText = ( status == null ? "There are no upgrades in progress." : status . getStatusText ( action == UpgradeAction . DETAILED_STATUS ) ) ; System . out . println ( statusText ) ; return 0 ; }
Command to request the current distributed upgrade status, a detailed status, or to force the upgrade to proceed.
34,002
/**
 * Builds an RPC proxy speaking ClientDatanodeProtocol to a datanode.
 * When dnAddr is null the address is taken from DFS_DATANODE_IPC_ADDRESS_KEY
 * and the host is forced to "localhost" (only the port of the configured
 * value is used). Returns null when the address string contains no ':'
 * port separator.
 *
 * @throws IOException if the RPC proxy cannot be created
 */
private ClientDatanodeProtocol getClientDatanodeProtocol ( String dnAddr ) throws IOException { String hostname = null ; int port ; int index ; Configuration conf = getConf ( ) ; if ( dnAddr == null ) { dnAddr = conf . get ( FSConstants . DFS_DATANODE_IPC_ADDRESS_KEY ) ; hostname = "localhost" ; } index = dnAddr . indexOf ( ':' ) ; if ( index < 0 ) { return null ; } port = Integer . parseInt ( dnAddr . substring ( index + 1 ) ) ; if ( hostname == null ) { hostname = dnAddr . substring ( 0 , index ) ; } InetSocketAddress addr = NetUtils . createSocketAddr ( hostname + ":" + port ) ; if ( ClientDatanodeProtocol . LOG . isDebugEnabled ( ) ) { ClientDatanodeProtocol . LOG . debug ( "ClientDatanodeProtocol addr=" + addr ) ; } return ( ClientDatanodeProtocol ) RPC . getProxy ( ClientDatanodeProtocol . class , ClientDatanodeProtocol . versionID , addr , conf ) ; }
Gets a new ClientDataNodeProtocol object .
34,003
/**
 * Displays the file a block belongs to and the hostnames of the datanodes
 * holding it. The block id is parsed from argv[i].
 *
 * @param argv command-line arguments; argv[i] must be a decimal block id
 * @param i    index of the block-id argument
 * @return 0 on success, -1 when the block is unknown to the namenode
 * @throws IOException if the namenode RPC fails
 */
private int getBlockInfo(String[] argv, int i) throws IOException {
    // parseLong avoids the needless Long boxing of the original Long.valueOf.
    long blockId = Long.parseLong(argv[i++]);
    LocatedBlockWithFileName locatedBlock = getDFS().getClient().getBlockInfo(blockId);
    if (null == locatedBlock) {
        System.err.println("Could not find the block with id : " + blockId);
        return -1;
    }
    StringBuilder sb = new StringBuilder();
    sb.append("block: ").append(locatedBlock.getBlock()).append("\n")
      .append("filename: ").append(locatedBlock.getFileName()).append("\n")
      .append("locations: ");
    DatanodeInfo[] locs = locatedBlock.getLocations();
    for (int k = 0; k < locs.length; k++) {
        if (k > 0) {
            sb.append(" , ");
        }
        sb.append(locs[k].getHostName());
    }
    System.out.println(sb.toString());
    return 0;
}
Display the filename the block belongs to and its locations .
34,004
/**
 * Opens an HTTP connection to the namenode, used to read file data and
 * metadata. The URI constructor is used so that path and query are escaped
 * correctly.
 *
 * @param path  request path (unescaped)
 * @param query request query string (unescaped), may be null
 * @return an unconnected HttpURLConnection for the built URL
 * @throws IOException on connection failure or an invalid URI
 */
protected HttpURLConnection openConnection(String path, String query) throws IOException {
    try {
        final URL url = new URI("http", null, nnAddr.getAddress().getHostAddress(),
            nnAddr.getPort(), path, query, null).toURL();
        if (LOG.isTraceEnabled()) {
            LOG.trace("url=" + url);
        }
        return (HttpURLConnection) url.openConnection();
    } catch (URISyntaxException e) {
        // IOException(Throwable) preserves the cause without the
        // cast-and-initCause dance of the original.
        throw new IOException(e);
    }
}
Open an HTTP connection to the namenode to read file data and metadata .
34,005
/**
 * Creates a HarIndex from the archive's index file. The initializer path may
 * be the HAR directory itself or a part file inside it — in the latter case
 * its parent directory is used. The index input stream is always closed,
 * even when parsing fails.
 *
 * @throws IOException if the index file cannot be read
 */
public static HarIndex getHarIndex ( FileSystem fs , Path initializer ) throws IOException { if ( ! initializer . getName ( ) . endsWith ( HAR ) ) { initializer = initializer . getParent ( ) ; } InputStream in = null ; try { Path indexFile = new Path ( initializer , INDEX ) ; FileStatus indexStat = fs . getFileStatus ( indexFile ) ; in = fs . open ( indexFile ) ; HarIndex harIndex = new HarIndex ( in , indexStat . getLen ( ) ) ; harIndex . harDirectory = initializer ; return harIndex ; } finally { if ( in != null ) { in . close ( ) ; } } }
Creates a HarIndex object with the path to either the HAR or a part file in the HAR .
34,006
/**
 * Parses one line of the HAR index and, for file entries (not "dir"),
 * records an IndexEntry. The expected space-separated layout is:
 * encoded-name, type, encoded-part-name, start offset, length, and an
 * encoded properties field whose first token is the modification time.
 * Lines that do not match (directories, too few fields) are skipped.
 *
 * @param line one raw index line
 * @throws UnsupportedEncodingException never in practice — "UTF-8" is
 *         always supported
 */
void parseLine(String line) throws UnsupportedEncodingException {
    String[] splits = line.split(" ");
    // equals() is already boolean; the original's "? true : false" was redundant.
    boolean isDir = "dir".equals(splits[1]);
    if (!isDir && splits.length >= 6) {
        String name = URLDecoder.decode(splits[0], "UTF-8");
        String partName = URLDecoder.decode(splits[2], "UTF-8");
        long startIndex = Long.parseLong(splits[3]);
        long length = Long.parseLong(splits[4]);
        String[] newsplits = URLDecoder.decode(splits[5], "UTF-8").split(" ");
        if (newsplits != null && newsplits.length >= 5) {
            long mtime = Long.parseLong(newsplits[0]);
            IndexEntry entry = new IndexEntry(name, startIndex, length, mtime, partName);
            entries.add(entry);
        }
    }
}
Parses each line and extracts relevant information .
34,007
/**
 * Finds the index entry covering the given offset within a HAR part file.
 *
 * @param partName       name of the part file
 * @param partFileOffset absolute offset inside that part file
 * @return the entry whose [startOffset, startOffset + length) range contains
 *         the offset, or null if none matches
 */
public IndexEntry findEntry(String partName, long partFileOffset) {
    for (IndexEntry candidate : entries) {
        if (!partName.equals(candidate.partFileName)) {
            continue;
        }
        long start = candidate.startOffset;
        if (partFileOffset >= start && partFileOffset < start + candidate.length) {
            return candidate;
        }
    }
    return null;
}
Finds the index entry corresponding to a HAR partFile at an offset .
34,008
/**
 * Finds the index entry for a file stored in the archive.
 *
 * @param fileName the archived file's name
 * @return the matching entry, or null when the file is not in the index
 */
public IndexEntry findEntryByFileName(String fileName) {
    IndexEntry match = null;
    for (IndexEntry candidate : entries) {
        if (fileName.equals(candidate.fileName)) {
            match = candidate;
            break;
        }
    }
    return match;
}
Finds the index entry corresponding to a file in the archive
34,009
/**
 * Creates the parent ZNode under which available BookKeeper bookies register
 * themselves, including any missing intermediate ZNodes along the path.
 * Creation is asynchronous; this method blocks on a latch until the callback
 * reports OK or NODEEXISTS. On any other result the callback only logs and
 * never counts the latch down, so the await below times out (after the
 * ZooKeeper session timeout) and the failure surfaces as an IOException.
 * An interrupt is re-asserted on the thread before being rethrown.
 *
 * @throws IOException on timeout or interruption while waiting
 */
public static void prepareBookKeeperEnv ( final String availablePath , ZooKeeper zooKeeper ) throws IOException { final CountDownLatch availablePathLatch = new CountDownLatch ( 1 ) ; StringCallback cb = new StringCallback ( ) { public void processResult ( int rc , String path , Object ctx , String name ) { if ( Code . OK . intValue ( ) == rc || Code . NODEEXISTS . intValue ( ) == rc ) { availablePathLatch . countDown ( ) ; LOG . info ( "Successfully created bookie available path:" + availablePath ) ; } else { Code code = Code . get ( rc ) ; LOG . error ( "Failed to create available bookie path (" + availablePath + ")" , KeeperException . create ( code , path ) ) ; } } } ; ZkUtils . createFullPathOptimistic ( zooKeeper , availablePath , new byte [ 0 ] , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT , cb , null ) ; try { int timeoutMs = zooKeeper . getSessionTimeout ( ) ; if ( ! availablePathLatch . await ( timeoutMs , TimeUnit . MILLISECONDS ) ) { throw new IOException ( "Couldn't create the bookie available path : " + availablePath + ", timed out after " + timeoutMs + " ms." ) ; } } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; throw new IOException ( "Interrupted when creating the bookie available " + "path: " + availablePath , e ) ; } }
Create parent ZNode under which available BookKeeper bookie servers will register themselves . Will create parent ZNodes for that path as well .
34,010
/**
 * Initializes the journal's ZooKeeper metadata if no journal data exists
 * yet: creates the parent ZNode and then a format-info ZNode carrying
 * PROTO_VERSION and the given StorageInfo, serialized through the
 * thread-local FormatInfoWritable. ZooKeeper and interrupt failures are
 * translated by the keeperException/interruptedException helpers; any
 * resulting IOException is logged and rethrown.
 *
 * @param si storage info to record in the format ZNode
 * @throws IOException if metadata initialization fails
 */
private void createZkMetadataIfNotExists ( StorageInfo si ) throws IOException { try { if ( ! hasSomeJournalData ( ) ) { try { zk . create ( zkParentPath , new byte [ ] { '0' } , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; FormatInfoWritable writable = localFormatInfoWritable . get ( ) ; writable . set ( PROTO_VERSION , si ) ; byte [ ] data = WritableUtil . writableToByteArray ( writable ) ; zk . create ( formatInfoPath , data , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; } catch ( KeeperException e ) { keeperException ( "Unrecoverable ZooKeeper error initializing " + zkParentPath , e ) ; } catch ( InterruptedException e ) { interruptedException ( "Interrupted initializing " + zkParentPath + " in ZooKeeper" , e ) ; } } } catch ( IOException e ) { LOG . error ( "Unable to initialize metadata" , e ) ; throw e ; } }
If there is no metadata present in ZooKeeper create and populate the metadata with the right format information
34,011
/**
 * Checks whether a ZNode exists in ZooKeeper (without setting a watch).
 *
 * @param path the ZNode path to probe
 * @return true if the path exists, false otherwise
 * @throws IOException translated from unrecoverable ZooKeeper errors or
 *         interruption by the keeperException/interruptedException helpers
 */
private boolean zkPathExists(String path) throws IOException {
    boolean exists = false;
    try {
        exists = (zk.exists(path, false) != null);
    } catch (KeeperException e) {
        keeperException("Unrecoverable ZooKeeper error checking if " + path + " exists", e);
    } catch (InterruptedException e) {
        interruptedException("Interrupted checking if ZooKeeper path " + path + " exists", e);
    }
    return exists;
}
Check if a path exists in ZooKeeper
34,012
/**
 * Purges edit-log segments whose transactions all precede minTxIdToKeep:
 * for each such ledger the ZooKeeper metadata is deleted first and, only if
 * that delete succeeds, the backing BookKeeper ledger is deleted. A metadata
 * delete returning false means another process already purged the segment
 * (logged as a warning). BookKeeper and interrupt failures are translated by
 * the bkException/interruptedException helpers.
 *
 * @param minTxIdToKeep earliest transaction id that must be retained
 * @throws IOException if listing or deleting fails
 */
public void purgeLogsOlderThan ( long minTxIdToKeep ) throws IOException { checkEnv ( ) ; Collection < EditLogLedgerMetadata > ledgers = metadataManager . listLedgers ( false ) ; for ( EditLogLedgerMetadata ledger : ledgers ) { if ( ledger . getFirstTxId ( ) < minTxIdToKeep && ledger . getLastTxId ( ) < minTxIdToKeep ) { LOG . info ( "Purging edit log segment: " + ledger ) ; if ( ! metadataManager . deleteLedgerMetadata ( ledger , - 1 ) ) { LOG . warn ( ledger + " has already been purged!" ) ; } else { try { bookKeeperClient . deleteLedger ( ledger . getLedgerId ( ) ) ; } catch ( BKException e ) { bkException ( "Unrecoverable error deleting " + ledger + " from BookKeeper" , e ) ; } catch ( InterruptedException e ) { interruptedException ( "Interrupted deleting " + ledger + " from BookKeeper" , e ) ; } } } } }
For each edit log segment that contains only transactions with ids earlier than the earliest txid to be retained, remove the ZooKeeper-based metadata and the BookKeeper ledgers associated with it.
34,013
/**
 * Advances the JSON parser one token and verifies it is the expected one.
 *
 * @param parentFieldName field whose value is being read (used in the error)
 * @param expectedToken   token the stream must produce next
 * @throws IOException when the next token differs from expectedToken
 */
public void readToken(String parentFieldName, JsonToken expectedToken) throws IOException {
    final JsonToken currentToken = jsonParser.nextToken();
    if (currentToken == expectedToken) {
        return;
    }
    throw new IOException("Expected a " + expectedToken.toString()
        + " token when reading the value of the field: " + parentFieldName
        + " but found a " + currentToken.toString() + " token");
}
This is a helper method that reads a JSON token using a JsonParser instance and throws an exception if the next token is not the same as the token we expect .
34,014
/**
 * Returns the name of the field at the parser's current position.
 *
 * @return the current field name
 * @throws IOException when the current token is not a FIELD_NAME
 */
public String getFieldName() throws IOException {
    final JsonToken current = jsonParser.getCurrentToken();
    if (current != JsonToken.FIELD_NAME) {
        throw new IOException("Expected a field of type " + JsonToken.FIELD_NAME
            + ", but found a field of type " + current);
    }
    return jsonParser.getCurrentName();
}
If the current token is a field name this method returns the name of the field .
34,015
/**
 * Stores block b at slot 'index': writes its id, length (numBytes) and
 * generation stamp into the flat blockList array at the positions computed
 * by the index2Block* mapping helpers.
 * NOTE(review): blockList's declaration is not visible here; the stored
 * values are all longs, so presumably it is a long[] — confirm.
 */
void setBlock ( final int index , final Block b ) { blockList [ index2BlockId ( index ) ] = b . getBlockId ( ) ; blockList [ index2BlockLen ( index ) ] = b . getNumBytes ( ) ; blockList [ index2BlockGenStamp ( index ) ] = b . getGenerationStamp ( ) ; }
Set the indexTh block
34,016
/**
 * Entry point run on each TaskTracker: builds a Configuration, constructs
 * and starts a UtilizationReporter. Any Throwable is printed to stderr,
 * logged, and causes exit status -1.
 */
public static void main ( String argv [ ] ) throws Exception { try { Configuration conf = new Configuration ( ) ; UtilizationReporter utilizationReporter = new UtilizationReporter ( conf ) ; utilizationReporter . start ( ) ; } catch ( Throwable e ) { System . err . println ( e ) ; LOG . error ( StringUtils . stringifyException ( e ) ) ; System . exit ( - 1 ) ; } }
main program to run on each TaskTracker
34,017
/**
 * Resolves the namenode RPC address from the given configuration keys by
 * copying conf into a scratch Configuration, overriding fs.default.name,
 * the namenode RPC address key, and dfs.namenode.dn-address (when present),
 * then delegating to DataNode.getNameNodeAddress().
 * NOTE(review): if conf has no value for cname, fs is null and
 * newconf.set("fs.default.name", null) will throw — presumably callers
 * guarantee the key is set; confirm.
 */
static InetSocketAddress getNameNodeAddress ( Configuration conf , String cname , String rpcKey , String cname2 ) { String fs = conf . get ( cname ) ; String fs1 = conf . get ( rpcKey ) ; String fs2 = conf . get ( cname2 ) ; Configuration newconf = new Configuration ( conf ) ; newconf . set ( "fs.default.name" , fs ) ; if ( fs1 != null ) { newconf . set ( DFS_NAMENODE_RPC_ADDRESS_KEY , fs1 ) ; } if ( fs2 != null ) { newconf . set ( "dfs.namenode.dn-address" , fs2 ) ; } return DataNode . getNameNodeAddress ( newconf ) ; }
Returns the IP address of the namenode
34,018
/**
 * Serializes the checksum to XML: a tag named after the class, with
 * bytesPerCRC, crcPerBlock and md5 attributes when the object is non-null
 * (a null object produces an empty tag).
 *
 * @param xml  the XML sink
 * @param that checksum to write, may be null
 * @throws IOException if writing to the XML sink fails
 */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that) throws IOException {
    xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
    if (that != null) {
        xml.attribute("bytesPerCRC", String.valueOf(that.bytesPerCRC));
        xml.attribute("crcPerBlock", String.valueOf(that.crcPerBlock));
        xml.attribute("md5", String.valueOf(that.md5));
    }
    xml.endTag();
}
Write that object to xml output .
34,019
/**
 * Reconstructs a checksum from the XML attributes written by write().
 * Returns null when any of the three attributes is absent.
 *
 * @param attrs SAX attributes holding bytesPerCRC, crcPerBlock and md5
 * @return the parsed checksum, or null when attributes are missing
 * @throws SAXException wrapping any parse failure, with the offending
 *         attribute values in the message
 */
public static MD5MD5CRC32FileChecksum valueOf(Attributes attrs) throws SAXException {
    final String bytesPerCRC = attrs.getValue("bytesPerCRC");
    final String crcPerBlock = attrs.getValue("crcPerBlock");
    final String md5 = attrs.getValue("md5");
    if (bytesPerCRC == null || crcPerBlock == null || md5 == null) {
        return null;
    }
    try {
        // parseInt avoids the Integer boxing of the original valueOf calls,
        // which were immediately auto-unboxed by the int constructor params.
        return new MD5MD5CRC32FileChecksum(Integer.parseInt(bytesPerCRC),
            Integer.parseInt(crcPerBlock), new MD5Hash(md5));
    } catch (Exception e) {
        throw new SAXException("Invalid attributes: bytesPerCRC=" + bytesPerCRC
            + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5, e);
    }
}
Return the object represented in the attributes .
34,020
/**
 * Advances the XML stream to the next element, verifies it is named
 * wantedName, and returns its trimmed character content. Throws IOException
 * on end-of-document before the value, on an element with an unexpected
 * name, on a nested START_ELEMENT before the value's CHARACTERS event, or
 * on any unsupported event type; comments, whitespace, END_ELEMENT and
 * START_DOCUMENT events are skipped. XMLStreamException is wrapped in
 * IOException.
 *
 * @param wantedName the element name expected next in the stream
 * @return the element's trimmed text content
 * @throws IOException on any deviation from the expected structure
 */
private String getNextElementsValue ( String wantedName ) throws IOException { boolean gotSTART_ELEMENT = false ; try { int eventType = in . getEventType ( ) ; while ( true ) { switch ( eventType ) { case XMLStreamConstants . CHARACTERS : if ( gotSTART_ELEMENT ) { return in . getText ( ) . trim ( ) ; } break ; case XMLStreamConstants . END_DOCUMENT : throw new IOException ( "End of XML while looking for element [" + wantedName + "]" ) ; case XMLStreamConstants . START_ELEMENT : if ( gotSTART_ELEMENT ) { throw new IOException ( "START_ELEMENT [" + in . getName ( ) + " event when expecting CHARACTERS event for [" + wantedName + "]" ) ; } else if ( in . getName ( ) . toString ( ) . equals ( wantedName ) ) { gotSTART_ELEMENT = true ; } else { throw new IOException ( "unexpected element name [" + in . getName ( ) + "], was expecting [" + wantedName + "]" ) ; } break ; case XMLStreamConstants . COMMENT : case XMLStreamConstants . END_ELEMENT : case XMLStreamConstants . SPACE : case XMLStreamConstants . START_DOCUMENT : break ; case XMLStreamConstants . ATTRIBUTE : case XMLStreamConstants . CDATA : case XMLStreamConstants . DTD : case XMLStreamConstants . ENTITY_DECLARATION : case XMLStreamConstants . ENTITY_REFERENCE : case XMLStreamConstants . NAMESPACE : case XMLStreamConstants . NOTATION_DECLARATION : case XMLStreamConstants . PROCESSING_INSTRUCTION : default : throw new IOException ( "Unsupported event type [" + eventType + "] (see XMLStreamConstants)" ) ; } if ( ! in . hasNext ( ) ) { break ; } eventType = in . next ( ) ; } } catch ( XMLStreamException e ) { throw new IOException ( "Error reading XML stream" , e ) ; } throw new IOException ( "Error reading XML stream, should never reach this line, " + "most likely XML does not have elements we are loking for" ) ; }
Get the next element's value, checking that the element's name is wantedName.
34,021
/**
 * Points the current-in-progress ZNode at newPath, recording this writer's
 * identity (hostname + thread id) alongside the path. Uses the last-read
 * ZNode version (expectedZNodeVersion) as an optimistic-concurrency check:
 * a BadVersionException means another process updated the ZNode first and
 * is surfaced as StaleVersionException. Other ZooKeeper and interrupt
 * failures go through the keeperException/interruptedException helpers.
 * NOTE(review): the StaleVersionException message is missing a space
 * between the znode name and "has been updated".
 *
 * @param newPath path of the ZNode holding the in-progress segment metadata
 * @throws IOException on stale version or unrecoverable ZooKeeper error
 */
public void update ( String newPath ) throws IOException { String id = hostname + Thread . currentThread ( ) . getId ( ) ; CurrentInProgressMetadataWritable cip = localWritable . get ( ) ; cip . set ( id , newPath ) ; byte [ ] data = WritableUtil . writableToByteArray ( cip ) ; try { zooKeeper . setData ( fullyQualifiedZNode , data , expectedZNodeVersion . get ( ) ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Set " + fullyQualifiedZNode + " to point to " + newPath ) ; } } catch ( KeeperException . BadVersionException e ) { LOG . error ( fullyQualifiedZNode + " has been updated by another process" , e ) ; throw new StaleVersionException ( fullyQualifiedZNode + "has been updated by another process!" ) ; } catch ( KeeperException e ) { keeperException ( "Unrecoverable ZooKeeper error updating " + fullyQualifiedZNode , e ) ; } catch ( InterruptedException e ) { interruptedException ( "Interrupted updating " + fullyQualifiedZNode , e ) ; } }
Update the data in the ZNode to point to a the ZNode containing the metadata for the ledger containing the current in - progress edit log segment .
34,022
/**
 * Reads the path to the ZNode holding the metadata of the current
 * in-progress edit-log segment.
 *
 * @return that path, or null when no segment is in progress (the ZNode is
 *         clear)
 * @throws IOException propagated from readAndUpdateVersion
 */
public String read() throws IOException {
    CurrentInProgressMetadataWritable cip = localWritable.get();
    if (!readAndUpdateVersion(cip)) {
        if (LOG.isDebugEnabled()) {
            LOG.debug(fullyQualifiedZNode + " is currently clear.");
        }
        return null;
    }
    return cip.getPath();
}
Read the full path to the ZNode holding the metadata for the ledger containing the current in - progress edit log segment or null if no segment is currently in - progress
34,023
/**
 * Clears the ZNode's data to indicate that no edit-log segment is currently
 * in progress; the ZNode itself is not deleted. The last-read version
 * (expectedZNodeVersion) guards against concurrent writers: a
 * BadVersionException is surfaced as StaleVersionException. Other ZooKeeper
 * and interrupt failures go through the keeperException/interruptedException
 * helpers.
 *
 * @throws IOException on stale version or unrecoverable ZooKeeper error
 */
public void clear() throws IOException {
    try {
        zooKeeper.setData(fullyQualifiedZNode, null, expectedZNodeVersion.get());
    } catch (KeeperException.BadVersionException e) {
        LOG.error(fullyQualifiedZNode + " has been updated by another process", e);
        // BUG FIX: the original message concatenated the znode name directly
        // onto "has been updated" with no separating space.
        throw new StaleVersionException(fullyQualifiedZNode
            + " has been updated by another process!");
    } catch (KeeperException e) {
        keeperException("Unrecoverable ZooKeeper error clearing " + fullyQualifiedZNode, e);
    } catch (InterruptedException e) {
        interruptedException("Interrupted clearing " + fullyQualifiedZNode, e);
    }
}
Clear out the data in the ZNode specified in the constructor to indicate that no segment is currently in progress . This does not delete the actual ZNode .
34,024
/**
 * View size callback: recomputes the drawing bounds and paints for the new
 * dimensions and schedules a redraw. This hook is used instead of
 * onAttachedToWindow because it runs after measurement, so MATCH_PARENT and
 * WRAP_CONTENT sizes are already resolved.
 */
protected void onSizeChanged ( int w , int h , int oldw , int oldh ) { super . onSizeChanged ( w , h , oldw , oldh ) ; setupBounds ( w , h ) ; setupPaints ( ) ; invalidate ( ) ; }
Use onSizeChanged instead of onAttachedToWindow to get the dimensions of the view, because onSizeChanged is called after measuring and therefore sees resolved MATCH_PARENT and WRAP_CONTENT sizes. These dimensions are then used to set up the bounds and paints.
34,025
/**
 * Configures the two stroke paints used to draw the wheel: the bar (moving
 * progress arc) and the rim (static background circle), each with its own
 * color and stroke width, both anti-aliased.
 */
private void setupPaints() {
    // Bar: the moving progress arc.
    barPaint.setAntiAlias(true);
    barPaint.setColor(barColor);
    barPaint.setStyle(Style.STROKE);
    barPaint.setStrokeWidth(barWidth);
    // Rim: the static background circle.
    rimPaint.setAntiAlias(true);
    rimPaint.setColor(rimColor);
    rimPaint.setStyle(Style.STROKE);
    rimPaint.setStrokeWidth(rimWidth);
}
Set the properties of the paints we re using to draw the progress wheel
34,026
/**
 * Computes circleBounds from the laid-out size minus padding. When
 * fillRadius is false the circle diameter is capped at the smaller of the
 * padded area and circleRadius*2 - barWidth*2, and the circle is centered;
 * when fillRadius is true the bounds simply fill the padded area. In both
 * cases barWidth is inset on every edge so the stroke stays inside the view.
 *
 * @param layout_width  the view's laid-out width in pixels
 * @param layout_height the view's laid-out height in pixels
 */
private void setupBounds ( int layout_width , int layout_height ) { int paddingTop = getPaddingTop ( ) ; int paddingBottom = getPaddingBottom ( ) ; int paddingLeft = getPaddingLeft ( ) ; int paddingRight = getPaddingRight ( ) ; if ( ! fillRadius ) { int minValue = Math . min ( layout_width - paddingLeft - paddingRight , layout_height - paddingBottom - paddingTop ) ; int circleDiameter = Math . min ( minValue , circleRadius * 2 - barWidth * 2 ) ; int xOffset = ( layout_width - paddingLeft - paddingRight - circleDiameter ) / 2 + paddingLeft ; int yOffset = ( layout_height - paddingTop - paddingBottom - circleDiameter ) / 2 + paddingTop ; circleBounds = new RectF ( xOffset + barWidth , yOffset + barWidth , xOffset + circleDiameter - barWidth , yOffset + circleDiameter - barWidth ) ; } else { circleBounds = new RectF ( paddingLeft + barWidth , paddingTop + barWidth , layout_width - paddingRight - barWidth , layout_height - paddingBottom - barWidth ) ; } }
Set the bounds of the component
34,027
/**
 * Reads the styled XML attributes into the wheel's fields. The current
 * barWidth/rimWidth/circleRadius values (dp defaults) are converted to
 * pixels first so they serve as fallbacks for the dimension attributes.
 * spinSpeed is expressed in the XML as rotations per second and converted
 * back to degrees per second. Starts spinning immediately when
 * progressIndeterminate is set. Recycles the TypedArray when done.
 *
 * @param a the TypedArray obtained from the view's attribute set
 */
private void parseAttributes ( TypedArray a ) { DisplayMetrics metrics = getContext ( ) . getResources ( ) . getDisplayMetrics ( ) ; barWidth = ( int ) TypedValue . applyDimension ( TypedValue . COMPLEX_UNIT_DIP , barWidth , metrics ) ; rimWidth = ( int ) TypedValue . applyDimension ( TypedValue . COMPLEX_UNIT_DIP , rimWidth , metrics ) ; circleRadius = ( int ) TypedValue . applyDimension ( TypedValue . COMPLEX_UNIT_DIP , circleRadius , metrics ) ; circleRadius = ( int ) a . getDimension ( R . styleable . ProgressWheel_matProg_circleRadius , circleRadius ) ; fillRadius = a . getBoolean ( R . styleable . ProgressWheel_matProg_fillRadius , false ) ; barWidth = ( int ) a . getDimension ( R . styleable . ProgressWheel_matProg_barWidth , barWidth ) ; rimWidth = ( int ) a . getDimension ( R . styleable . ProgressWheel_matProg_rimWidth , rimWidth ) ; float baseSpinSpeed = a . getFloat ( R . styleable . ProgressWheel_matProg_spinSpeed , spinSpeed / 360.0f ) ; spinSpeed = baseSpinSpeed * 360 ; barSpinCycleTime = a . getInt ( R . styleable . ProgressWheel_matProg_barSpinCycleTime , ( int ) barSpinCycleTime ) ; barColor = a . getColor ( R . styleable . ProgressWheel_matProg_barColor , barColor ) ; rimColor = a . getColor ( R . styleable . ProgressWheel_matProg_rimColor , rimColor ) ; linearProgress = a . getBoolean ( R . styleable . ProgressWheel_matProg_linearProgress , false ) ; if ( a . getBoolean ( R . styleable . ProgressWheel_matProg_progressIndeterminate , false ) ) { spin ( ) ; } a . recycle ( ) ; }
Parse the attributes passed to the view from the XML
34,028
/**
 * Jumps the bar straight to the given progress without animating: 0..1 is
 * mapped to 0..360 degrees, indeterminate spinning is stopped, and both
 * current and target progress are set before a redraw is scheduled.
 * NOTE(review): a value > 1.0 has 1.0 subtracted (wrap-around) while
 * negatives are clamped to 0 — confirm the wrap-around is the intended API.
 *
 * @param progress fraction in [0, 1]
 */
public void setInstantProgress ( float progress ) { if ( isSpinning ) { mProgress = 0.0f ; isSpinning = false ; } if ( progress > 1.0f ) { progress -= 1.0f ; } else if ( progress < 0 ) { progress = 0 ; } if ( progress == mTargetProgress ) { return ; } mTargetProgress = Math . min ( progress * 360.0f , 360.0f ) ; mProgress = mTargetProgress ; lastTimeAnimated = SystemClock . uptimeMillis ( ) ; invalidate ( ) ; }
Set the progress to a specific value the bar will be set instantly to that value
34,029
/**
 * Sets a new target progress; the bar animates smoothly toward it. 0..1 is
 * mapped to 0..360 degrees, indeterminate spinning is stopped (invoking the
 * callback), and the animation clock is only reset when the bar had already
 * reached its previous target, so an in-flight animation is not restarted.
 * NOTE(review): like setInstantProgress, values > 1.0 wrap around by
 * subtracting 1.0 while negatives clamp to 0 — confirm intended.
 *
 * @param progress fraction in [0, 1]
 */
public void setProgress ( float progress ) { if ( isSpinning ) { mProgress = 0.0f ; isSpinning = false ; runCallback ( ) ; } if ( progress > 1.0f ) { progress -= 1.0f ; } else if ( progress < 0 ) { progress = 0 ; } if ( progress == mTargetProgress ) { return ; } if ( mProgress == mTargetProgress ) { lastTimeAnimated = SystemClock . uptimeMillis ( ) ; } mTargetProgress = Math . min ( progress * 360.0f , 360.0f ) ; invalidate ( ) ; }
Set the progress to a specific value the bar will smoothly animate until that value
34,030
/**
 * Mojo entry point: defaults the refresh port to 8080 when unset, pings the
 * target host, and runs the refresh only when the ping succeeds; otherwise
 * logs a warning and aborts quietly.
 *
 * @throws MojoExecutionException propagated from executeRefresh
 */
public void execute() throws MojoExecutionException {
    // Fall back to the default Tomcat port when none is configured.
    if (StringUtils.isBlank(this.refreshPort)) {
        this.refreshPort = "8080";
    }
    if (ping()) {
        executeRefresh();
        return;
    }
    getLog().warn("Connection failed to " + this.refreshHost + ":" + this.refreshPort
        + ", " + getAbortedMsg());
}
Mojo interface implementation
34,031
/**
 * POSTs to the Web Scripts console of the given webapp to refresh its Web
 * Scripts container. Called by the specific refresh mojo implementations.
 *
 * @param url base URL fragment used to build the console URL
 */
protected void refreshWebScripts(String url) {
    URL alfrescoTomcatUrl = buildFinalUrl(url);
    if (alfrescoTomcatUrl == null) {
        getLog().error("Could not build refresh URL for " + refreshWebappName
            + ", " + getAbortedMsg());
        // BUG FIX: the original fell through here and passed the null URL to
        // makePostCall, which dereferences it (NPE).
        return;
    }
    List<NameValuePair> postData = new ArrayList<NameValuePair>();
    postData.add(new BasicNameValuePair("reset", "on"));
    postData.add(new BasicNameValuePair("submit", "Refresh Web Scripts"));
    makePostCall(alfrescoTomcatUrl, postData, "Refresh Web Scripts");
}
Perform a Refresh of Web Scripts container in webapp . Called by specific refresh mojo implementation .
34,032
/**
 * POSTs a Clear Dependency Caches request to the webapp (currently only
 * applicable to Share). Called by the specific refresh mojo implementations.
 *
 * @param url base URL fragment used to build the console URL
 */
protected void clearDependencyCaches(String url) {
    URL alfrescoTomcatUrl = buildFinalUrl(url);
    if (alfrescoTomcatUrl == null) {
        getLog().error("Could not build clear dependency caches URL for "
            + refreshWebappName + ", " + getAbortedMsg());
        // BUG FIX: the original fell through here and passed the null URL to
        // makePostCall, which dereferences it (NPE).
        return;
    }
    makePostCall(alfrescoTomcatUrl, null, "Clear Dependency Caches");
}
Perform a Clear Dependency Caches call on Share webapp . Called by specific refresh mojo implementation currently only applicable to Share webapp .
34,033
/**
 * Makes an authenticated POST request to the Alfresco webapp console and
 * logs the outcome of the named operation. Basic credentials are taken from
 * refreshUsername/refreshPassword; the auth cache is primed so the request
 * authenticates preemptively. Client and response are always closed.
 *
 * @param alfrescoTomcatUrl full console URL (must be non-null)
 * @param postData          form fields to send, or null for an empty POST
 * @param operation         human-readable operation name for log messages
 */
private void makePostCall(URL alfrescoTomcatUrl, List<NameValuePair> postData, String operation) {
    CloseableHttpClient client = null;
    CloseableHttpResponse response = null;
    try {
        HttpHost targetHost = new HttpHost(alfrescoTomcatUrl.getHost(),
            alfrescoTomcatUrl.getPort(), alfrescoTomcatUrl.getProtocol());
        CredentialsProvider credsProvider = new BasicCredentialsProvider();
        credsProvider.setCredentials(
            new AuthScope(targetHost.getHostName(), targetHost.getPort()),
            new UsernamePasswordCredentials(refreshUsername, refreshPassword));
        client = HttpClients.custom().setDefaultCredentialsProvider(credsProvider).build();
        // Pre-populate the auth cache so Basic auth is sent preemptively.
        AuthCache authCache = new BasicAuthCache();
        BasicScheme basicAuth = new BasicScheme();
        authCache.put(targetHost, basicAuth);
        HttpClientContext localContext = HttpClientContext.create();
        localContext.setAuthCache(authCache);
        HttpPost httpPost = new HttpPost(alfrescoTomcatUrl.toURI());
        if (postData != null) {
            UrlEncodedFormEntity entity = new UrlEncodedFormEntity(postData, "UTF-8");
            httpPost.setEntity(entity);
        }
        httpPost.setHeader("Accept-Charset", "iso-8859-1,utf-8");
        httpPost.setHeader("Accept-Language", "en-us");
        // BUG FIX: the original executed the request TWICE — once before the
        // entity and headers were set (sending a bogus empty POST and leaking
        // its response) and once after. It also never passed localContext, so
        // the primed auth cache was unused. Execute once, with the context.
        response = client.execute(httpPost, localContext);
        if (response == null) {
            getLog().error("POST request failed to " + alfrescoTomcatUrl.toString()
                + ", " + getAbortedMsg());
            return;
        }
        int statusCode = response.getStatusLine().getStatusCode();
        if (statusCode == HttpStatus.SC_OK) {
            getLog().info("Successfull " + operation + " for " + refreshWebappName);
        } else {
            String reasonPhrase = response.getStatusLine().getReasonPhrase();
            getLog().warn("Failed to " + operation + " for " + refreshWebappName
                + ". Response status: " + statusCode + ", message: " + reasonPhrase);
        }
    } catch (Exception ex) {
        getLog().error("POST request failed to " + alfrescoTomcatUrl.toString()
            + ", " + getAbortedMsg());
        getLog().error("Exception Msg: " + ex.getMessage());
    } finally {
        closeQuietly(response);
        closeQuietly(client);
    }
}
Helper method to make a POST request to the Alfresco Webapp
34,034
/**
 * Returns the Tomcat port to use. The configured tomcatPort is the default,
 * but the legacy/external maven.tomcat.port property overrides it when set.
 *
 * @return the effective Tomcat port
 */
protected String getPort() {
    if (mavenTomcatPort == null) {
        return tomcatPort;
    }
    getLog().info("Tomcat Port overridden by property maven.tomcat.port");
    return mavenTomcatPort;
}
Get the Tomcat port . By default the port is changed by using the maven . alfresco . tomcat . port property but for legacy and external configuration purposes maven . tomcat . port will override if defined
34,035
/**
 * Checks whether Tomcat is already serving /alfresco on the configured port
 * by issuing a GET to localhost; any exception is treated as "not running".
 *
 * @return true when the GET succeeds, false otherwise
 */
protected boolean tomcatIsRunning() {
    CloseableHttpClient client = HttpClients.createDefault();
    CloseableHttpResponse response = null;
    try {
        HttpGet httpget = new HttpGet("http://localhost:" + getPort() + "/alfresco");
        response = client.execute(httpget);
        getLog().info("Tomcat is running on port " + getPort());
        return true;
    } catch (Exception ex) {
        getLog().info("Tomcat is not running on port " + getPort());
        return false;
    } finally {
        // BUG FIX: the original leaked both the response and the client;
        // close them the same way makePostCall does.
        closeQuietly(response);
        closeQuietly(client);
    }
}
Check if Tomcat is already running .
34,036
/**
 * Copies any Alfresco Enterprise license (*.lic) from src/test/license into
 * the exploded Platform WAR at WEB-INF/classes/alfresco/extension/license,
 * via an inline maven-resources-plugin copy-resources execution (no
 * filtering). Having the license only on the test classpath is not enough —
 * the server would start with a Trial license.
 *
 * @throws MojoExecutionException if the plugin execution fails
 */
protected void copyAlfrescoLicense ( ) throws MojoExecutionException { final String warOutputDir = getWarOutputDir ( PLATFORM_WAR_PREFIX_NAME ) ; final String licDestDir = warOutputDir + "/WEB-INF/classes/alfresco/extension/license" ; getLog ( ) . info ( "Copying Alfresco Enterprise license to: " + licDestDir ) ; executeMojo ( plugin ( groupId ( "org.apache.maven.plugins" ) , artifactId ( "maven-resources-plugin" ) , version ( MAVEN_RESOURCE_PLUGIN_VERSION ) ) , goal ( "copy-resources" ) , configuration ( element ( name ( "outputDirectory" ) , licDestDir ) , element ( name ( "resources" ) , element ( name ( "resource" ) , element ( name ( "directory" ) , "src/test/license" ) , element ( name ( "includes" ) , element ( name ( "include" ) , "*.lic" ) ) , element ( name ( "filtering" ) , "false" ) ) ) ) , execEnv ) ; }
Copy the Alfresco Enterprise license to its correct place in the Platform WAR, if it exists. It is not enough to have it on the test classpath; otherwise the server will start up with a Trial license.
34,037
/**
 * Copies the filtered Share config custom XML from src/test/resources/share
 * into the exploded Share WAR's web-extension directory. When the project
 * has not set alfresco.repo.url, it is defaulted to the local Tomcat port so
 * resource filtering resolves it into the copied XML.
 *
 * @throws MojoExecutionException if the plugin execution fails
 */
protected void copyShareConfigCustom ( ) throws MojoExecutionException { final String warOutputDir = getWarOutputDir ( SHARE_WAR_PREFIX_NAME ) ; final String distDir = warOutputDir + "/WEB-INF/classes/alfresco/web-extension/" ; String repoUrl = project . getProperties ( ) . getProperty ( "alfresco.repo.url" ) ; if ( repoUrl == null ) { project . getProperties ( ) . setProperty ( "alfresco.repo.url" , "http://localhost:" + getPort ( ) + "/alfresco" ) ; } getLog ( ) . info ( "Copying Share config custom to: " + distDir ) ; executeMojo ( plugin ( groupId ( "org.apache.maven.plugins" ) , artifactId ( "maven-resources-plugin" ) , version ( MAVEN_RESOURCE_PLUGIN_VERSION ) ) , goal ( "copy-resources" ) , configuration ( element ( name ( "outputDirectory" ) , distDir ) , element ( name ( "resources" ) , element ( name ( "resource" ) , element ( name ( "directory" ) , "src/test/resources/share" ) , element ( name ( "includes" ) , element ( name ( "include" ) , "*.xml" ) ) , element ( name ( "filtering" ) , "true" ) ) ) ) , execEnv ) ; }
Copy Share Config Custom in order to have global overrides for development and dynamic port
34,038
/**
 * Copies <warPrefix>-hotswap-agent.properties from the test output directory
 * into the exploded WAR's WEB-INF/classes as hotswap-agent.properties, via
 * the copy-rename-maven-plugin. No-op when copyHotswapAgentConfig is off.
 *
 * @param warPrefix war name prefix selecting the properties file
 * @throws MojoExecutionException if the plugin execution fails
 */
protected void copyHotswapAgentProperties(String warPrefix) throws MojoExecutionException {
    // "!flag" instead of the original "flag == false" comparison.
    if (!copyHotswapAgentConfig) {
        return;
    }
    final String warOutputDir = getWarOutputDir(warPrefix) + "/WEB-INF/classes/";
    getLog().info("Copying " + warPrefix + "-hotswap-agent.properties to " + warOutputDir);
    executeMojo(
        plugin(groupId("com.coderplus.maven.plugins"),
            artifactId("copy-rename-maven-plugin"), version("1.0")),
        goal("rename"),
        configuration(
            element(name("sourceFile"), project.getBuild().getTestOutputDirectory()
                + "/" + warPrefix + "-hotswap-agent.properties"),
            element(name("destinationFile"), warOutputDir + "hotswap-agent.properties")),
        execEnv);
}
Copy and Build hotswap - agent . properties
34,039
/**
 * Packages the exploded WAR directory for {@code warName} into a {@code .war} archive
 * and installs it into the local Maven repository under artifact id
 * {@code <project.artifactId>-<warName>}.
 *
 * @param warName logical name of the WAR (also the file name of the produced archive)
 * @return the artifact id the WAR was installed under
 * @throws MojoExecutionException if the maven-install-plugin invocation fails
 */
protected String packageAndInstallCustomWar(String warName) throws MojoExecutionException {
    final String warArtifactId = "${project.artifactId}-" + warName;
    final String warSourceDir = getWarOutputDir(warName);
    String warPath = project.getBuild().getDirectory() + "/" + warName + ".war";
    // Zip the exploded WAR directory into the target archive.
    ZipUtil.pack(new File(warSourceDir), new File(warPath));
    executeMojo(
        plugin(groupId("org.apache.maven.plugins"),
               artifactId("maven-install-plugin"),
               version(MAVEN_INSTALL_PLUGIN_VERSION)),
        goal("install-file"),
        configuration(
            element(name("file"), warPath),
            element(name("groupId"), "${project.groupId}"),
            element(name("artifactId"), warArtifactId),
            element(name("version"), "${project.version}"),
            element(name("packaging"), "war")),
        execEnv);
    return warArtifactId;
}
Package customized war file and install it in local maven repo .
34,040
/**
 * Builds a {@link FormatBundle} for {@link DatasetKeyOutputFormat}, seeding it with
 * every key/value pair from the supplied configuration.
 */
private static FormatBundle<DatasetKeyOutputFormat> outputBundle(Configuration conf) {
    final FormatBundle<DatasetKeyOutputFormat> outputBundle =
        FormatBundle.forOutput(DatasetKeyOutputFormat.class);
    // Configuration is iterable over its entries; copy each one into the bundle.
    for (Map.Entry<String, String> property : conf) {
        outputBundle.set(property.getKey(), property.getValue());
    }
    return outputBundle;
}
Builds a FormatBundle for DatasetKeyOutputFormat by copying a temp config .
34,041
/**
 * Returns true if the parser rooted at {@code symbol} contains any error symbol,
 * meaning parsing may fail for some inputs.
 */
private static boolean hasErrors(Symbol symbol) {
    switch (symbol.kind) {
    case ALTERNATIVE:
        // An alternative can fail if any of its branches can fail.
        return hasErrors(symbol, ((Symbol.Alternative) symbol).symbols);
    case EXPLICIT_ACTION:
        return false;
    case IMPLICIT_ACTION:
        // ErrorAction is the marker symbol for a resolution error.
        return symbol instanceof Symbol.ErrorAction;
    case REPEATER:
        Symbol.Repeater r = (Symbol.Repeater) symbol;
        // Check both the repeater's end symbol and its production.
        return hasErrors(r.end) || hasErrors(symbol, r.production);
    case ROOT:
    case SEQUENCE:
        return hasErrors(symbol, symbol.production);
    case TERMINAL:
        return false;
    default:
        throw new RuntimeException("unknown symbol kind: " + symbol.kind);
    }
}
Returns true if the Parser contains any Error symbol indicating that it may fail for some inputs .
34,042
/**
 * Infers an Avro schema from the single sample JSON file and writes it out.
 *
 * @return 0 on success; 1 when the sample contained no records
 * @throws IOException if the sample cannot be read or the output cannot be written
 */
public int run() throws IOException {
    Preconditions.checkArgument(samplePaths != null && !samplePaths.isEmpty(),
        "Sample JSON path is required");
    Preconditions.checkArgument(samplePaths.size() == 1,
        "Only one JSON sample can be given");
    Schema sampleSchema = JsonUtil.inferSchema(open(samplePaths.get(0)), recordName, numRecords);
    // Guard-clause form: bail out early on an empty sample.
    if (sampleSchema == null) {
        console.error("Sample file did not contain any records");
        return 1;
    }
    output(sampleSchema.toString(!minimize), console, outputPath);
    return 0;
}
Infer a schema from the sample JSON file; by default only the first 10 records are examined.
34,043
/**
 * Loads the Kite configuration from the (possibly multiple) paths listed under
 * {@code KITE_CONFIGURATION}. Paths starting with "hdfs" are read through the
 * HadoopAccessorService; all other paths are treated as local files. Each
 * configuration found is merged into {@code kiteConf} in order.
 * <p>
 * Refactored: the duplicated read/merge/close logic for the HDFS and local cases is
 * extracted into helpers; behavior is unchanged.
 *
 * @param services the Oozie services registry, used to obtain HDFS access
 * @throws IOException if reading or merging a configuration fails
 */
private void loadKiteConf(Services services) throws IOException {
    String[] paths = services.getConf().getStrings(KITE_CONFIGURATION);
    if (paths == null || paths.length == 0) {
        LOG.info("Kite Configuration not specified");
        return;
    }
    kiteConf = new Configuration(false);
    for (String path : paths) {
        if (path.startsWith("hdfs")) {
            loadKiteConfFromHdfs(services, path);
        } else {
            loadKiteConfFromLocalFile(path);
        }
    }
}

/** Reads one configuration from HDFS and merges it into {@code kiteConf}. */
private void loadKiteConfFromHdfs(Services services, String path) throws IOException {
    Path p = new Path(path);
    HadoopAccessorService has = services.get(HadoopAccessorService.class);
    try {
        FileSystem fs = has.createFileSystem(System.getProperty("user.name"), p.toUri(),
            has.createJobConf(p.toUri().getAuthority()));
        if (fs.exists(p)) {
            mergeKiteConf(fs.open(p), path);
        } else {
            LOG.warn("Kite Configuration could not be found at [" + path + "]");
        }
    } catch (HadoopAccessorException hae) {
        throw new IOException(hae);
    }
}

/** Reads one configuration from the local file system and merges it into {@code kiteConf}. */
private void loadKiteConfFromLocalFile(String path) throws IOException {
    File f = new File(path);
    if (f.exists()) {
        mergeKiteConf(new FileInputStream(f), path);
    } else {
        LOG.warn("Kite Configuration could not be found at [" + path + "]");
    }
}

/** Parses a partial configuration from {@code is}, merges it, and always closes the stream. */
private void mergeKiteConf(InputStream is, String path) throws IOException {
    try {
        Configuration partialConf = new XConfiguration(is);
        kiteConf = merge(kiteConf, partialConf);
    } finally {
        is.close();
    }
    LOG.info("Loaded Kite Configuration: " + path);
}
ability to specify multiple paths to configurations
34,044
/**
 * Executes a Get against HBase, first applying every registered GetModifier.
 * The pooled table handle is always returned, even on failure.
 */
public Result get(Get get) {
    HTableInterface table = pool.getTable(tableName);
    try {
        // Let each registered modifier rewrite the Get before it is executed.
        Get modified = get;
        for (GetModifier modifier : getModifiers) {
            modified = modifier.modifyGet(modified);
        }
        try {
            return table.get(modified);
        } catch (IOException e) {
            throw new DatasetIOException("Error performing get", e);
        }
    } finally {
        if (table != null) {
            try {
                table.close();
            } catch (IOException e) {
                throw new DatasetIOException("Error putting table back into pool", e);
            }
        }
    }
}
Execute a Get on HBase .
34,045
/**
 * Executes the Get, first applying {@code getModifier} when one is supplied.
 * A null modifier means the Get is executed unchanged.
 */
public Result get(Get get, GetModifier getModifier) {
    return get(getModifier == null ? get : getModifier.modifyGet(get));
}
Execute the get on HBase invoking the getModifier before executing the get if getModifier is not null .
34,046
/**
 * Executes a Put against HBase using a table handle borrowed from the pool.
 * The handle is always returned to the pool, even if the write fails.
 *
 * @return true if the write was applied; false on a version-check conflict
 */
public boolean put(PutAction putAction) {
    HTableInterface table = pool.getTable(tableName);
    try {
        return put(putAction, table);
    } finally {
        if (table != null) {
            try {
                table.close();
            } catch (IOException e) {
                throw new DatasetIOException("Error putting table back into pool", e);
            }
        }
    }
}
Execute a Put on HBase .
34,047
/**
 * Executes a Put using a pre-obtained table handle, applying every registered
 * PutActionModifier first. If the PutAction carries a version check, checkAndPut
 * against the version column is used so the write only succeeds when the stored row
 * version matches (optimistic concurrency); otherwise a plain put is issued.
 *
 * @return true if the write was applied; false if a checkAndPut version conflict occurred
 */
public boolean put(PutAction putAction, HTableInterface table) {
    for (PutActionModifier putActionModifier : putActionModifiers) {
        putAction = putActionModifier.modifyPutAction(putAction);
    }
    Put put = putAction.getPut();
    if (putAction.getVersionCheckAction() != null) {
        byte[] versionBytes = null;
        long version = putAction.getVersionCheckAction().getVersion();
        // NOTE(review): version 0 leaves versionBytes null — checkAndPut with a null
        // expected value matches an absent cell, so 0 presumably means "row must not
        // yet have a version"; confirm against the version-check contract.
        if (version != 0) {
            versionBytes = Bytes.toBytes(version);
        }
        try {
            return table.checkAndPut(put.getRow(), Constants.SYS_COL_FAMILY,
                Constants.VERSION_CHECK_COL_QUALIFIER, versionBytes, put);
        } catch (IOException e) {
            throw new DatasetIOException("Error putting row from table with checkAndPut", e);
        }
    } else {
        try {
            table.put(put);
            return true;
        } catch (IOException e) {
            throw new DatasetIOException("Error putting row from table", e);
        }
    }
}
Execute a Put on HBase using a pre-defined HTableInterface.
34,048
/**
 * Executes the put, first applying {@code putActionModifier} when one is supplied.
 * A null modifier means the PutAction is executed unchanged.
 */
public boolean put(PutAction putAction, PutActionModifier putActionModifier) {
    return put(putActionModifier == null
        ? putAction
        : putActionModifier.modifyPutAction(putAction));
}
Execute the put on HBase invoking the putModifier before executing the put if putModifier is not null .
34,049
/**
 * Executes a Put built by mapping {@code entity} with {@code entityMapper}.
 * Equivalent to {@code put(entity, null, entityMapper)} — no modifier applied.
 */
public <E> boolean put(E entity, EntityMapper<E> entityMapper) {
    return put(entity, null, entityMapper);
}
Execute a Put on HBase creating the Put by mapping the key and entity to a Put with the entityMapper .
34,050
/**
 * Executes a Put built by mapping {@code entity} with {@code entityMapper},
 * applying {@code putActionModifier} (when non-null) before execution.
 */
public <E> boolean put(E entity, PutActionModifier putActionModifier, EntityMapper<E> entityMapper) {
    // Map the entity straight into the delegating overload.
    return put(entityMapper.mapFromEntity(entity), putActionModifier);
}
Execute a Put on HBase creating the Put by mapping the key and entity to a Put with the entityMapper . putModifier will be invoked on this created Put before the Put is executed .
34,051
/**
 * Executes an increment on an entity field (the field type must support increments)
 * and returns the new value as a long.
 * <p>
 * Fix: the table handle borrowed from the pool is now returned in a finally block,
 * matching the other data-access methods in this class; previously it leaked.
 *
 * @return the field's value after the increment
 */
public <E> long increment(PartitionKey key, String fieldName, long amount, EntityMapper<E> entityMapper) {
    Increment increment = entityMapper.mapToIncrement(key, fieldName, amount);
    HTableInterface table = pool.getTable(tableName);
    Result result;
    try {
        result = table.increment(increment);
    } catch (IOException e) {
        throw new DatasetIOException("Error incrementing field.", e);
    } finally {
        if (table != null) {
            try {
                table.close();
            } catch (IOException e) {
                throw new DatasetIOException("Error putting table back into pool", e);
            }
        }
    }
    return entityMapper.mapFromIncrementResult(result, fieldName);
}
Execute an increment on an entity field . This field must be a type that supports increments . Returns the new increment value of type long .
34,052
/**
 * Executes a Delete against HBase, applying every registered DeleteActionModifier
 * first. If the DeleteAction carries a version check, checkAndDelete against the
 * version column is used so the delete only succeeds when the stored row version
 * matches; otherwise a plain delete is issued. The pooled table handle is always
 * returned.
 *
 * @return true if the delete was applied; false on a checkAndDelete version conflict
 */
public boolean delete(DeleteAction deleteAction) {
    HTableInterface table = pool.getTable(tableName);
    try {
        for (DeleteActionModifier deleteActionModifier : deleteActionModifiers) {
            deleteAction = deleteActionModifier.modifyDeleteAction(deleteAction);
        }
        Delete delete = deleteAction.getDelete();
        if (deleteAction.getVersionCheckAction() != null) {
            byte[] versionBytes = Bytes.toBytes(deleteAction.getVersionCheckAction().getVersion());
            try {
                return table.checkAndDelete(delete.getRow(), Constants.SYS_COL_FAMILY,
                    Constants.VERSION_CHECK_COL_QUALIFIER, versionBytes, delete);
            } catch (IOException e) {
                throw new DatasetIOException("Error deleteing row from table with checkAndDelete", e);
            }
        } else {
            try {
                table.delete(delete);
                return true;
            } catch (IOException e) {
                throw new DatasetIOException("Error deleteing row from table", e);
            }
        }
    } finally {
        if (table != null) {
            try {
                table.close();
            } catch (IOException e) {
                throw new DatasetIOException("Error putting table back into pool", e);
            }
        }
    }
}
Execute a Delete on HBase .
34,053
/**
 * Executes the delete, first applying {@code deleteActionModifier} when one is
 * supplied. A null modifier means the DeleteAction is executed unchanged.
 */
public boolean delete(DeleteAction deleteAction, DeleteActionModifier deleteActionModifier) {
    return delete(deleteActionModifier == null
        ? deleteAction
        : deleteActionModifier.modifyDeleteAction(deleteAction));
}
Execute the delete on HBase invoking the deleteModifier before executing the delete if deleteModifier is not null .
34,054
/**
 * Executes a Delete built from the key and the given set of columns; only the
 * columns in the set are removed from the row. Delegates with no
 * DeleteActionModifier.
 */
public boolean delete(PartitionKey key, Set<String> columns, VersionCheckAction checkAction, KeySerDe keySerDe) {
    return delete(key, columns, checkAction, null, keySerDe);
}
Execute a Delete on HBase creating the Delete from the key and the set of columns . Only the columns specified in this set will be deleted in the row .
34,055
/**
 * Executes a Delete built from the key and the given set of columns; only those
 * columns are removed from the row. Entries of the form "family:qualifier" delete
 * one column; entries of the form "family:" (or bare "family") delete the whole
 * family. {@code deleteActionModifier} (when non-null) is applied before execution.
 */
public boolean delete(PartitionKey key, Set<String> columns, VersionCheckAction checkAction, DeleteActionModifier deleteActionModifier, KeySerDe keySerDe) {
    byte[] keyBytes = keySerDe.serialize(key);
    Delete delete = new Delete(keyBytes);
    for (String requiredColumn : columns) {
        // "family:" splits to a single element because String.split drops trailing
        // empty strings, so whole-family entries take the deleteFamily branch.
        String[] familyAndColumn = requiredColumn.split(":");
        if (familyAndColumn.length == 1) {
            delete.deleteFamily(Bytes.toBytes(familyAndColumn[0]));
        } else {
            delete.deleteColumns(Bytes.toBytes(familyAndColumn[0]), Bytes.toBytes(familyAndColumn[1]));
        }
    }
    return delete(new DeleteAction(delete, checkAction), deleteActionModifier);
}
Execute a Delete on HBase creating the Delete from the key and the set of columns . Only the columns specified in this set will be deleted in the row . deleteModifier will be invoked on this created Delete before the Delete is executed .
34,056
/**
 * Returns an EntityScannerBuilder pre-populated with every registered ScanModifier,
 * which the client can use to build an EntityScanner.
 */
public <E> EntityScannerBuilder<E> getScannerBuilder(EntityMapper<E> entityMapper) {
    EntityScannerBuilder<E> scannerBuilder =
        new BaseEntityScanner.Builder<E>(pool, tableName, entityMapper);
    for (ScanModifier modifier : scanModifiers) {
        scannerBuilder.addScanModifier(modifier);
    }
    return scannerBuilder;
}
Get an EntityScannerBuilder that the client can use to build an EntityScanner .
34,057
/**
 * Creates an EntityBatch for writing batches of entities to this table.
 *
 * @param writeBufferSize client-side write buffer size (presumably bytes, per the
 *        HBase write-buffer convention — confirm against BaseEntityBatch)
 */
public <E> EntityBatch<E> createBatch(EntityMapper<E> entityMapper, long writeBufferSize) {
    return new BaseEntityBatch<E>(this, entityMapper, pool, tableName, writeBufferSize);
}
Create an EntityBatch that can be used to write batches of entities .
34,058
/**
 * Converts a dataset path into a StorageKey by mapping each directory name under the
 * root path to a partition value.
 * <p>
 * Path segments are collected leaf-first (Path only exposes getParent()) and then
 * reversed, so values are resolved root-first, matching the order of the partition
 * strategy's field partitioners.
 */
public StorageKey toKey(Path fromPath, StorageKey storage) {
    final List<FieldPartitioner> partitioners =
        Accessor.getDefault().getFieldPartitioners(storage.getPartitionStrategy());
    String truncatedPath = fromPath.toString();
    // Strip the dataset root so only partition directories remain.
    if (truncatedPath.startsWith(rootPath.toString())) {
        truncatedPath = truncatedPath.substring(rootPath.toString().length());
    }
    List<String> pathParts = new LinkedList<String>();
    if (!truncatedPath.isEmpty()) {
        Path currentPath = new Path(truncatedPath);
        while (currentPath != null) {
            String name = currentPath.getName();
            if (!name.isEmpty()) {
                pathParts.add(currentPath.getName());
            }
            currentPath = currentPath.getParent();
        }
        // Collected leaf-first; reverse to get root-first order.
        Collections.reverse(pathParts);
    }
    final List<Object> values = Lists.newArrayList(new Object[pathParts.size()]);
    for (int i = 0; i < pathParts.size(); i++) {
        values.set(i, valueForDirname((FieldPartitioner<?, ?>) partitioners.get(i), pathParts.get(i)));
    }
    storage.replaceValues(values);
    return storage;
}
Builds keys by assembling path parts from start to finish (root-first) rather than end to start.
34,059
/**
 * Returns an array containing all elements of this fluent iterable, in iteration
 * order.
 *
 * @param type the element class, required to create the runtime array type
 */
@GwtIncompatible("Array.newArray(Class, int)")
public final E[] toArray(Class<E> type) {
    return Iterables.toArray(iterable, type);
}
Returns an array containing all of the elements from this fluent iterable in iteration order .
34,060
/**
 * Wraps a single object in an Iterator that yields exactly that one element.
 */
private static <T> Iterator<T> start(T singleton) {
    // A one-element list's iterator is equivalent to a singleton set's iterator.
    return Collections.singletonList(singleton).iterator();
}
Convenience function to wrap some object in an Iterator .
34,061
/**
 * Returns the first non-null value in {@code values}, or null when every value is
 * null (or no values are given).
 */
@SafeVarargs // the varargs array is only read, never stored or exposed
private static <T> T coalesce(T... values) {
    for (T value : values) {
        if (value != null) {
            return value;
        }
    }
    return null;
}
Returns the first non - null value from the sequence or null if there is no non - null value .
34,062
/**
 * Writes an int in order-preserving form: the sign bit is flipped and the value is
 * written big-endian, so unsigned byte comparison matches signed int ordering.
 */
public void writeInt(int n) throws IOException {
    byte[] buf = new byte[4];
    buf[0] = (byte) ((n >>> 24) ^ 0x80); // flip the sign bit of the high byte
    buf[1] = (byte) (n >>> 16);
    buf[2] = (byte) (n >>> 8);
    buf[3] = (byte) n;
    out.write(buf);
}
An int is written by flipping the sign bit and writing it as a big endian int .
34,063
/**
 * Writes a long in order-preserving form: the sign bit is flipped and the value is
 * written big-endian, so unsigned byte comparison matches signed long ordering.
 */
public void writeLong(long n) throws IOException {
    byte[] buf = new byte[8];
    for (int i = 0; i < 8; i++) {
        buf[i] = (byte) (n >>> (56 - 8 * i)); // big-endian byte order
    }
    buf[0] ^= 0x80; // flip the sign bit of the high byte
    out.write(buf);
}
A long is written by flipping the sign bit and writing it as a big endian long .
34,064
/**
 * Prints every user profile to stdout by scanning the whole user-profile dataset.
 * (The method name keeps the original "printUserProfies" spelling because callers,
 * e.g. main, depend on it.)
 */
public void printUserProfies() {
    EntityScanner<UserProfileModel> scanner = userProfileDao.getScanner();
    scanner.initialize();
    try {
        for (UserProfileModel entity : scanner) {
            System.out.println(entity.toString());
        }
    } finally {
        // Always release the scanner's underlying resources.
        scanner.close();
    }
}
Print all user profiles .
34,065
/**
 * Prints the profile-and-actions record of every user with the given last name.
 * <p>
 * Fix: the scan start key is now built from the {@code lastName} parameter instead
 * of the literal string "lastName", so the scan starts at the requested users
 * (PartitionKey takes key values — see updateUserProfile, which passes
 * {@code new PartitionKey(lastName, firstName)}). The loop still stops at the first
 * entity whose last name differs, relying on rows being sorted by last name.
 */
public void printUserProfileActionsForLastName(String lastName) {
    PartitionKey startKey = new PartitionKey(lastName);
    EntityScanner<UserProfileActionsModel> scanner =
        userProfileActionsDao.getScanner(startKey, null);
    scanner.initialize();
    try {
        for (UserProfileActionsModel entity : scanner) {
            if (!entity.getUserProfileModel().getLastName().equals(lastName)) {
                break; // scanned past the block of rows with this last name
            }
            System.out.println(entity.toString());
        }
    } finally {
        scanner.close();
    }
}
Print the user profiles and actions for all users with the provided last name
34,066
/**
 * Creates a fresh user record: a profile (stamped with the creation time) plus an
 * actions map seeded with a "profile_created" entry, written together as one
 * UserProfileActionsModel row.
 */
public void create(String firstName, String lastName, boolean married) {
    long ts = System.currentTimeMillis();
    UserProfileModel profileModel = UserProfileModel.newBuilder()
        .setFirstName(firstName)
        .setLastName(lastName)
        .setMarried(married)
        .setCreated(ts)
        .build();
    UserActionsModel actionsModel = UserActionsModel.newBuilder()
        .setFirstName(firstName)
        .setLastName(lastName)
        .setActions(new HashMap<String, String>())
        .build();
    actionsModel.getActions().put("profile_created", Long.toString(ts));
    UserProfileActionsModel profileActionsModel = UserProfileActionsModel.newBuilder()
        .setUserProfileModel(profileModel)
        .setUserActionsModel(actionsModel)
        .build();
    // put() returns false when the optimistic write check fails.
    if (!userProfileActionsDao.put(profileActionsModel)) {
        System.out.println("Creating a new user profile failed due to a write conflict.");
    }
}
Create a fresh new user record .
34,067
/**
 * Updates the married flag on an existing user's profile via read-modify-write:
 * the row is fetched by its (lastName, firstName) key, rebuilt with the new married
 * value plus a "profile_updated" action entry, then written back. A failed put
 * indicates a concurrent-write conflict.
 */
public void updateUserProfile(String firstName, String lastName, boolean married) {
    long ts = System.currentTimeMillis();
    PartitionKey key = new PartitionKey(lastName, firstName);
    UserProfileActionsModel profileActionsModel = userProfileActionsDao.get(key);
    UserProfileActionsModel updatedProfileActionsModel = UserProfileActionsModel.newBuilder(profileActionsModel)
        .setUserProfileModel(UserProfileModel.newBuilder(profileActionsModel.getUserProfileModel())
            .setMarried(married)
            .build())
        .build();
    updatedProfileActionsModel.getUserActionsModel().getActions().put("profile_updated", Long.toString(ts));
    if (!userProfileActionsDao.put(updatedProfileActionsModel)) {
        System.out.println("Updating the user profile failed due to a write conflict");
    }
}
Update the married status of a new user record .
34,068
/**
 * Records a single action (type -> value) for the user identified by first and last
 * name, writing through the actions DAO.
 */
public void addAction(String firstName, String lastName, String actionType, String actionValue) {
    UserActionsModel actionsModel = UserActionsModel.newBuilder()
        .setLastName(lastName)
        .setFirstName(firstName)
        .setActions(new HashMap<String, String>())
        .build();
    actionsModel.getActions().put(actionType, actionValue);
    userActionsDao.put(actionsModel);
}
Add an action to the user profile .
34,069
/**
 * Uses SchemaTool to register the example schemas and create the required tables,
 * first dropping any pre-existing "kite_example_user_profiles" table so the example
 * starts from a clean slate.
 * <p>
 * Fix: the HBaseAdmin handle is now closed when the work is done; previously it
 * leaked.
 */
private void registerSchemas(Configuration conf, SchemaManager schemaManager) throws InterruptedException {
    HBaseAdmin admin;
    try {
        admin = new HBaseAdmin(conf);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    try {
        if (admin.tableExists("kite_example_user_profiles")) {
            // A table must be disabled before it can be deleted.
            admin.disableTable("kite_example_user_profiles");
            admin.deleteTable("kite_example_user_profiles");
        }
        SchemaTool tool = new SchemaTool(admin, schemaManager);
        tool.createOrMigrateSchemaDirectory("classpath:example-models", true);
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            admin.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
Uses SchemaTool to register the required schemas and create the required tables .
34,070
/**
 * Main driver for the example; requires no arguments. Creates a few users, records
 * some actions, and prints the datasets before and after a profile update.
 */
public static void main(String[] args) throws InterruptedException {
    UserProfileExample example = new UserProfileExample();
    example.create("John", "Doe", true);
    example.create("Jane", "Doe", false);
    example.create("Foo", "Bar", false);
    example.printUserProfies();
    example.addAction("Jane", "Doe", "last_login", "2013-07-30 00:00:00");
    example.addAction("Jane", "Doe", "ad_click", "example.com_ad_id");
    example.addAction("Foo", "Bar", "last_login", "2013-07-30 00:00:00");
    example.printUserProfileActionsForLastName("Doe");
    example.updateUserProfile("Jane", "Doe", true);
    example.printUserProfileActionsForLastName("Doe");
}
The main driver method. Doesn't require any arguments.
34,071
/**
 * Compiles the script source in memory and selects the class to run. Compilation and
 * class-loading failures surface as ScriptException. Selection order: the explicitly
 * configured main class if set (must have a main method); otherwise the first loaded
 * class with a main method; otherwise the first loaded class; or null when nothing
 * was compiled.
 */
private Class parse(String str, ScriptContext ctx) throws ScriptException {
    String fileName = getFileName(ctx);
    String sourcePath = getSourcePath(ctx);
    String classPath = getClassPath(ctx);
    // Compile in memory; diagnostics go to the context's error writer.
    Map<String, byte[]> classBytes = compiler.compile(fileName, str, ctx.getErrorWriter(), sourcePath, classPath);
    if (classBytes == null) {
        throw new ScriptException("compilation failed");
    }
    MemoryClassLoader loader = new MemoryClassLoader(classBytes, classPath, getParentLoader(ctx));
    String mainClassName = getMainClassName(ctx);
    if (mainClassName != null) {
        // An explicitly configured main class must exist and have a main method.
        try {
            Class clazz = loader.load(mainClassName);
            Method mainMethod = findMainMethod(clazz);
            if (mainMethod == null) {
                throw new ScriptException("no main method in " + mainClassName);
            }
            return clazz;
        } catch (ClassNotFoundException cnfe) {
            throw new ScriptException(cnfe);
        }
    }
    // No configured main class: load everything and pick a candidate.
    Iterable<Class> classes;
    try {
        classes = loader.loadAll();
    } catch (ClassNotFoundException exp) {
        throw new ScriptException(exp);
    }
    Class c = findMainClass(classes);
    if (c != null) {
        return c;
    } else {
        // Fall back to the first compiled class, or null when there is none.
        Iterator<Class> itr = classes.iterator();
        if (itr.hasNext()) {
            return itr.next();
        } else {
            return null;
        }
    }
}
Internals only below this point
34,072
/**
 * Returns an Avro Decoder for a column value. INT, LONG and STRING columns use the
 * order-preserving ColumnDecoder; every other type uses a standard binary decoder.
 */
private Decoder getColumnDecoder(Schema writtenFieldAvroSchema, InputStream in) {
    switch (writtenFieldAvroSchema.getType()) {
    case INT:
    case LONG:
    case STRING:
        return new ColumnDecoder(in);
    default:
        return DecoderFactory.get().binaryDecoder(in, null);
    }
}
Returns an Avro Decoder . The implementation it chooses will depend on the schema of the field .
34,073
/**
 * Returns an Avro Encoder for a column value. INT, LONG and STRING columns use the
 * order-preserving ColumnEncoder; every other type uses a standard binary encoder.
 */
private Encoder getColumnEncoder(Schema fieldAvroSchema, OutputStream out) {
    switch (fieldAvroSchema.getType()) {
    case INT:
    case LONG:
    case STRING:
        return new ColumnEncoder(out);
    default:
        return EncoderFactory.get().binaryEncoder(out, null);
    }
}
Returns an Avro Encoder . The implementation it chooses will depend on the schema of the field .
34,074
/**
 * Adds an equality filter to the scanner: rows whose field value is not equal to
 * {@code filterValue} are filtered out of the results.
 */
public EntityScannerBuilder<E> addEqualFilter(String fieldName, Object filterValue) {
    filterList.add(new SingleFieldEntityFilter(
        entityMapper.getEntitySchema(), entityMapper.getEntitySerDe(),
        fieldName, filterValue, CompareFilter.CompareOp.EQUAL).getFilter());
    return this;
}
Add an equality filter to the scanner; results not equal to the filter value are filtered out.
34,075
/**
 * Adds a regex-match filter to the scanner: rows whose field value does not match
 * {@code regexString} are filtered out of the results.
 */
public EntityScannerBuilder<E> addRegexMatchFilter(String fieldName, String regexString) {
    filterList.add(new RegexEntityFilter(
        entityMapper.getEntitySchema(), entityMapper.getEntitySerDe(),
        fieldName, regexString).getFilter());
    return this;
}
Add a regex-match filter to the scanner; results whose field does not match the regex are filtered out.
34,076
/**
 * Restricts the scan to rows where {@code fieldName} is missing.
 * <p>
 * HBase has no direct "column is absent" filter, so this compares the column for
 * equality against a sentinel value that is never expected to occur
 * ("++++NON_SHALL_PASS++++") and sets filterIfMissing(false): rows missing the
 * column pass through, while rows that have the column only pass if they equal the
 * sentinel — effectively never.
 */
public EntityScannerBuilder<E> addIsMissingFilter(String fieldName) {
    SingleFieldEntityFilter singleFieldEntityFilter =
        new SingleFieldEntityFilter(entityMapper.getEntitySchema(),
            entityMapper.getEntitySerDe(), fieldName, "++++NON_SHALL_PASS++++",
            CompareFilter.CompareOp.EQUAL);
    SingleColumnValueFilter filter = (SingleColumnValueFilter) singleFieldEntityFilter.getFilter();
    // false = rows lacking the column are NOT filtered out.
    filter.setFilterIfMissing(false);
    filterList.add(filter);
    return this;
}
Only include rows which are missing this field; comparing against a sentinel value was the only way to express this with HBase filters.
34,077
/**
 * Initializes AvroRecordBuilderFactories for every keyAsColumn-mapped field whose
 * Avro type is RECORD. Such records are spread across many columns, so the composer
 * needs a builder factory per field to reassemble them.
 */
private void initRecordBuilderFactories() {
    for (FieldMapping fieldMapping : avroSchema.getColumnMappingDescriptor().getFieldMappings()) {
        if (fieldMapping.getMappingType() == MappingType.KEY_AS_COLUMN) {
            String fieldName = fieldMapping.getFieldName();
            Schema fieldSchema = avroSchema.getAvroSchema().getField(fieldName).schema();
            Schema.Type fieldSchemaType = fieldSchema.getType();
            // Only record-typed keyAsColumn fields need reassembly via a builder.
            if (fieldSchemaType == Schema.Type.RECORD) {
                AvroRecordBuilderFactory<E> factory = buildAvroRecordBuilderFactory(fieldSchema);
                kacRecordBuilderFactories.put(fieldName, factory);
            }
        }
    }
}
Initialize the AvroRecordBuilderFactories for all keyAsColumn mapped fields that are record types . We need to be able to get record builders for these since the records are broken across many columns and need to be constructed by the composer .
34,078
/**
 * Returns the HBase columns this schema requires. Key mappings need no column;
 * keyAsColumn mappings require the whole family (encoded as "family:"); all other
 * mappings require the specific "family:qualifier" column.
 */
public Set<String> getRequiredColumns() {
    Set<String> requiredColumns = new HashSet<String>();
    for (FieldMapping fieldMapping : fieldMappings) {
        FieldMapping.MappingType mappingType = fieldMapping.getMappingType();
        if (FieldMapping.MappingType.KEY == mappingType) {
            continue; // keys live outside column families
        }
        if (FieldMapping.MappingType.KEY_AS_COLUMN == mappingType) {
            requiredColumns.add(fieldMapping.getFamilyAsString() + ":");
        } else {
            requiredColumns.add(fieldMapping.getFamilyAsString() + ":" + fieldMapping.getQualifierAsString());
        }
    }
    return requiredColumns;
}
Get the columns required by this schema .
34,079
/**
 * Returns the HBase column families required by this schema. Key mappings are
 * skipped because the key is not stored in a column family.
 */
public Set<String> getRequiredColumnFamilies() {
    Set<String> set = new HashSet<String>();
    for (FieldMapping mapping : fieldMappings) {
        // Braced single-statement if, per house style.
        if (FieldMapping.MappingType.KEY != mapping.getMappingType()) {
            set.add(mapping.getFamilyAsString());
        }
    }
    return set;
}
Get the column families required by this schema .
34,080
/**
 * Decides whether the local DFS cluster storage should be formatted: format unless
 * the location already exists as a directory and a clean start was not requested.
 */
private static boolean shouldFormatDFSCluster(String localDFSLocation, boolean clean) {
    File location = new File(localDFSLocation);
    // Reuse existing data only when it is present and no clean start was asked for.
    boolean reusableDataDir = location.exists() && location.isDirectory() && !clean;
    return !reusableDataDir;
}
Returns true if we should format the DFS cluster. We'll format if clean is true or if the dfsFsLocation does not exist.
34,081
/**
 * Configures the DFS mini-cluster before launch: binds the namenode and datanode
 * endpoints to {@code bindIP} on the given ports, disables the datanode
 * registration IP/hostname check, sets the local storage directory, and grants the
 * current OS user full proxy-user rights (test convenience).
 */
private static Configuration configureDFSCluster(Configuration config, String localDFSLocation, String bindIP, int namenodeRpcPort, int namenodeHttpPort, int datanodePort, int datanodeIpcPort, int datanodeHttpPort) {
    logger.info("HDFS force binding to ip: " + bindIP);
    config = new KiteCompatibleConfiguration(config, bindIP, namenodeRpcPort, namenodeHttpPort);
    config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + bindIP + ":" + namenodeRpcPort);
    config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, bindIP + ":" + datanodePort);
    config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, bindIP + ":" + datanodeIpcPort);
    config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, bindIP + ":" + datanodeHttpPort);
    // NOTE(review): presumably required because the forced bind IP may not resolve
    // to a hostname — confirm against the HDFS registration docs.
    config.setBoolean("dfs.namenode.datanode.registration.ip-hostname-check", false);
    config.set("hdfs.minidfs.basedir", localDFSLocation);
    String user = System.getProperty("user.name");
    // Allow the current user to proxy as any user from any host.
    config.set("hadoop.proxyuser." + user + ".groups", "*");
    config.set("hadoop.proxyuser." + user + ".hosts", "*");
    return config;
}
Configure the DFS Cluster before launching it .
34,082
/**
 * Filters the entities of {@code iterator}, keeping only those that satisfy these
 * constraints (converted to a predicate via the given accessor).
 */
public <E> Iterator<E> filter(Iterator<E> iterator, EntityAccessor<E> accessor) {
    return Iterators.filter(iterator, toEntityPredicate(accessor));
}
Filter the entities returned by a given iterator by these constraints .
34,083
/**
 * Returns true when the entities selected by these constraints align exactly to
 * partition boundaries (i.e. the constraints can be satisfied by selecting whole
 * partitions). Empty constraints trivially align; without a partition strategy
 * nothing aligns. For every other constrained field, at least one of its
 * partitioners must project the predicate identically in strict and permissive
 * modes; calendar-partitioned (time) fields are checked through the combined
 * TimeDomain instead of the individual partitioners.
 */
@SuppressWarnings("unchecked")
public boolean alignedWithBoundaries() {
    if (constraints.isEmpty()) {
        return true;
    } else if (strategy == null) {
        return false;
    }
    // Index partitioners by source field name and collect partition field names.
    Multimap<String, FieldPartitioner> partitioners = HashMultimap.create();
    Set<String> partitionFields = Sets.newHashSet();
    for (FieldPartitioner fp : Accessor.getDefault().getFieldPartitioners(strategy)) {
        partitioners.put(fp.getSourceName(), fp);
        partitionFields.add(fp.getName());
    }
    for (Map.Entry<String, Predicate> entry : constraints.entrySet()) {
        // Constraints directly on partition fields always align.
        if (partitionFields.contains(entry.getKey())) {
            continue;
        }
        Collection<FieldPartitioner> fps = partitioners.get(entry.getKey());
        if (fps.isEmpty()) {
            LOG.debug("No field partitioners for key {}", entry.getKey());
            return false;
        }
        Predicate predicate = entry.getValue();
        // Exists predicates impose no value restriction, so they always align.
        if (!(predicate instanceof Exists)) {
            boolean satisfied = false;
            for (FieldPartitioner fp : fps) {
                if (fp instanceof CalendarFieldPartitioner) {
                    // Time fields: check via the combined time domain, then stop.
                    TimeDomain domain = TimeDomain.get(strategy, entry.getKey());
                    Predicate strict = domain.projectStrict(predicate);
                    Predicate permissive = domain.project(predicate);
                    LOG.debug("Time predicate strict: {}", strict);
                    LOG.debug("Time predicate permissive: {}", permissive);
                    // Aligned when the strict and permissive projections coincide.
                    satisfied = strict != null && strict.equals(permissive);
                    break;
                } else {
                    Predicate strict = fp.projectStrict(predicate);
                    Predicate permissive = fp.project(predicate);
                    if (strict != null && strict.equals(permissive)) {
                        satisfied = true;
                        break;
                    }
                }
            }
            if (!satisfied) {
                LOG.debug("Predicate not satisfied: {}", predicate);
                return false;
            }
        }
    }
    return true;
}
If this returns true the entities selected by this set of constraints align to partition boundaries .
34,084
/**
 * Returns a normalized query map for these constraints: logically equivalent
 * constraint sets yield maps equal in both value and iteration order, because the
 * backing map is a sorted TreeMap.
 */
public Map<String, String> toNormalizedQueryMap() {
    return toQueryMap(Maps.<String, String>newTreeMap(), true);
}
Get a normalized query map for the constraints . A normalized query map will be equal in value and iteration order for any logically equivalent set of constraints .
34,085
/**
 * Returns true when the {@code left} array begins with every element of
 * {@code right}, compared in order.
 */
private static boolean startsWith(String[] left, List<String> right) {
    if (left.length < right.size()) {
        return false; // left is too short to contain the prefix
    }
    int i = 0;
    for (String expected : right) {
        if (!left[i].equals(expected)) {
            return false;
        }
        i++;
    }
    return true;
}
Returns true if left starts with right .
34,086
/**
 * Resolves the storage position of {@code fieldName} within keys of this KeySchema.
 *
 * @throws DatasetException if the field is not stored in the key
 */
public int position(String fieldName) {
    // Guard clause: unknown fields cannot be recovered from the key.
    if (!fieldPositions.containsKey(fieldName)) {
        throw new DatasetException("Cannot recover " + fieldName + " from key");
    }
    return fieldPositions.get(fieldName);
}
Resolves the storage position of a field in keys with this KeySchema .
34,087
/**
 * Serializes a PartitionStrategy into its expression form, e.g. for storage in an
 * Avro property when the strategy is passed as an object. A single partitioner is
 * emitted bare; multiple partitioners become a comma-separated, bracketed list.
 */
public static String toExpression(PartitionStrategy partitionStrategy) {
    List<FieldPartitioner> fieldPartitioners = partitionStrategy.getFieldPartitioners();
    if (fieldPartitioners.size() == 1) {
        return PartitionFunctions.toExpression(fieldPartitioners.get(0));
    }
    StringBuilder expression = new StringBuilder("[");
    boolean first = true;
    for (FieldPartitioner fieldPartitioner : fieldPartitioners) {
        if (!first) {
            expression.append(", ");
        }
        first = false;
        expression.append(PartitionFunctions.toExpression(fieldPartitioner));
    }
    return expression.append("]").toString();
}
Convert a PartitionStrategy into a serialized expression . This can be used to set a PartitionStrategy in an Avro property if the PartitionStrategy is passed as an object .
34,088
/**
 * Initializes the entity mapper used to convert the schema-version metadata stored
 * in each row into a ManagedSchemaEntityVersion record.
 */
private void initializeEntityVersionEntityMapper() {
    AvroEntitySchema avroEntitySchema = schemaParser.parseEntitySchema(managedSchemaEntityVersionSchema);
    // Align string types with those used by the generated specific class.
    avroEntitySchema = AvroUtils.mergeSpecificStringTypes(ManagedSchemaEntityVersion.class, avroEntitySchema);
    AvroEntityComposer<ManagedSchemaEntityVersion> entityComposer =
        new AvroEntityComposer<ManagedSchemaEntityVersion>(avroEntitySchema, true);
    // Reader and writer schemas are the same here.
    AvroEntitySerDe<ManagedSchemaEntityVersion> entitySerDe =
        new AvroEntitySerDe<ManagedSchemaEntityVersion>(entityComposer, avroEntitySchema, avroEntitySchema, true);
    this.managedSchemaEntityVersionEntityMapper =
        new BaseEntityMapper<ManagedSchemaEntityVersion>(avroEntitySchema, entitySerDe);
}
Initialize the entity mapper we'll use to convert the schema version metadata in each row to a ManagedSchemaEntityVersion record.
34,089
/**
 * Refreshes the wrapped entity-mapper map so it covers every entity schema version
 * currently known to the schemaManager; versions already mapped are left untouched.
 */
private void updateEntityMappers() {
    for (Entry<Integer, EntitySchema> entry : schemaManager.getEntitySchemas(tableName, entityName).entrySet()) {
        Integer schemaVersion = entry.getKey();
        if (entityMappers.containsKey(schemaVersion)) {
            continue; // already have a mapper for this version
        }
        AvroEntitySchema writtenSchema = (AvroEntitySchema) entry.getValue();
        entityMappers.put(schemaVersion,
            constructWrappedEntityMapper(keySchema, entitySchema, writtenSchema, entityClass));
    }
}
Update the map of wrapped entity mappers to reflect the most recent entity schema metadata returned by the schemaManager .
34,090
/**
 * Transforms a {@link Range} predicate into a closed (inclusive) range over
 * this partitioner's values, using {@code domain} to step just inside
 * endpoints that must be excluded.
 *
 * NOTE(review): the lower endpoint is always advanced with domain.next()
 * regardless of whether the lower bound is open or closed — this appears
 * intentional given the partitioner's semantics, but should be confirmed.
 *
 * @param range the source range; may be bounded or unbounded on either end
 * @return the equivalent closed range, or {@code null} if no value can fall
 *         inside the transformed range
 */
private Range<String> transformClosed(Range<String> range) {
  if (range.hasLowerBound()) {
    String lower = range.lowerEndpoint();
    // step past the lower endpoint's image; null means nothing lies above it
    String afterLower = domain.next(apply(lower));
    if (afterLower != null) {
      if (range.hasUpperBound()) {
        String upper = range.upperEndpoint();
        String upperImage = apply(upper);
        if (upper.equals(upperImage) && range.isUpperBoundClosed()) {
          return Ranges.closed(afterLower, upperImage);
        } else {
          String beforeUpper = domain.previous(upperImage);
          // fix: guard beforeUpper against null before compareTo — the
          // unbounded-lower branch below already performs this check, but
          // this branch previously risked a NullPointerException when
          // nothing lies below upperImage
          if (beforeUpper != null && afterLower.compareTo(beforeUpper) <= 0) {
            return Ranges.closed(afterLower, beforeUpper);
          }
        }
      } else {
        return Ranges.atLeast(afterLower);
      }
    }
  } else if (range.hasUpperBound()) {
    String upper = range.upperEndpoint();
    String upperImage = apply(upper);
    if (upper.equals(upperImage) && range.isUpperBoundClosed()) {
      return Ranges.atMost(upperImage);
    } else {
      String beforeUpper = domain.previous(upperImage);
      if (beforeUpper != null) {
        return Ranges.atMost(beforeUpper);
      }
    }
  }
  // empty or unrepresentable range
  return null;
}
Transforms a Range predicate into a closed range on this partitioner's values, handling open, unbounded, and empty-range edge cases.
34,091
/**
 * Walks up from {@code path} toward {@code root}, deleting each directory
 * found to be empty, and stops at the first non-empty ancestor.
 * FileNotFoundException is caught and ignored because a concurrent process
 * importing under the same root may have already deleted the directory.
 *
 * @param fs the file system to operate on
 * @param root the directory at which to stop walking up (never deleted)
 * @param path the path whose empty parents should be removed
 * @return true if at least one directory was deleted
 * @throws IOException on file system errors other than a missing path
 */
static boolean deleteParentDirectoriesIfEmpty(FileSystem fs, Path root, Path path)
    throws IOException {
  boolean deleted = false;
  try {
    // fix: check current != null first — path.getParent() may return null
    // (e.g. for a filesystem root), which previously caused a
    // NullPointerException on current.equals(root)
    for (Path current = path.getParent();
        current != null && !current.equals(root) && current.getParent() != null;
        current = current.getParent()) {
      final FileStatus[] stats = fs.listStatus(current);
      if (stats == null || stats.length == 0) {
        LOG.debug("Deleting empty path {}", current);
        deleted = fs.delete(current, true) || deleted;
      } else {
        // first non-empty ancestor reached; nothing above it can be empty
        break;
      }
    }
  } catch (FileNotFoundException e) {
    LOG.debug("Path does not exist it may have been deleted by another process.", e);
  }
  return deleted;
}
Deletes the empty parent directories of the specified path. The method catches and ignores FileNotFoundException because multiple parallel Kite instances may be importing into directories under the same root: one instance can find an empty directory that needs to be deleted, but by the time it tries to execute the delete command the directory is already gone because another Kite instance has deleted it.
34,092
/**
 * Determines whether the FileSystem identified by {@code fsUri} supports
 * efficient file renaming. S3N and S3A are assumed not to unless explicitly
 * overridden by the {@code SUPPORTS_RENAME_PROP} configuration property.
 *
 * @param fsUri the URI of the file system in question
 * @param conf the configuration, which may override the scheme-based default
 * @return true if the file system is considered rename-capable
 */
public static boolean supportsRename(URI fsUri, Configuration conf) {
  String scheme = fsUri.getScheme();
  boolean renameCapableByDefault =
      !(scheme.equalsIgnoreCase("s3n") || scheme.equalsIgnoreCase("s3a"));
  return conf.getBoolean(FileSystemProperties.SUPPORTS_RENAME_PROP,
      renameCapableByDefault);
}
Determine whether a FileSystem that supports efficient file renaming is being used . Two known FileSystem implementations that currently lack this feature are S3N and S3A .
34,093
/**
 * Renders {@code value} as a string according to the given Avro schema type.
 * Enum values are rendered as their ordinal position in the schema; union
 * values are resolved to the matching branch and rendered recursively.
 *
 * @param value the value to render; may be null
 * @param schema the Avro schema describing the value
 * @return the string form of the value, or null for null values or NULL schemas
 * @throws DatasetOperationException if the schema type is not supported
 */
private static String valueString(Object value, Schema schema) {
  if (value == null || schema.getType() == Schema.Type.NULL) {
    return null;
  }
  switch (schema.getType()) {
  case BOOLEAN:
  case FLOAT:
  case DOUBLE:
  case INT:
  case LONG:
  case STRING:
    return value.toString();
  case ENUM:
    // enum symbols are represented by their ordinal in the schema
    return String.valueOf(schema.getEnumOrdinal(value.toString()));
  case UNION:
    int index = ReflectData.get().resolveUnion(schema, value);
    return valueString(value, schema.getTypes().get(index));
  default:
    // fix: added the missing space after the colon in the error message
    throw new DatasetOperationException(
        "Unsupported field type: " + schema.getType());
  }
}
Returns the value rendered as a string using the first matching schema type, or null.
34,094
/**
 * Loads the temporary job dataset, creating it if it does not exist.
 * <p>
 * The job dataset may already exist if the ApplicationMaster was restarted;
 * in that case it is reused when its descriptor is compatible with the
 * target dataset's descriptor.
 *
 * @param jobContext the job context identifying the target dataset and repository
 * @return the temporary dataset for this job
 */
@SuppressWarnings("unchecked")
private static <E> Dataset<E> loadOrCreateJobDataset(JobContext jobContext) {
  Dataset<Object> dataset = load(jobContext).getDataset();
  String jobDatasetName = getJobDatasetName(jobContext);
  DatasetRepository repo = getDatasetRepository(jobContext);
  if (repo.exists(TEMP_NAMESPACE, jobDatasetName)) {
    Dataset<E> tempDataset = repo.load(TEMP_NAMESPACE, jobDatasetName,
        DatasetKeyOutputFormat.<E>getType(jobContext));
    try {
      Compatibility.checkCompatible(dataset.getDescriptor(),
          tempDataset.getDescriptor());
      return tempDataset;
    } catch (RuntimeException ex) {
      // intentionally ignored: an incompatible leftover dataset is not
      // reused; we fall through to create a fresh one below.
      // NOTE(review): if the leftover dataset still exists, the create()
      // call below may fail because the name is taken — confirm the
      // repository's semantics for this case.
    }
  }
  return repo.create(TEMP_NAMESPACE, jobDatasetName, copy(dataset.getDescriptor()),
      DatasetKeyOutputFormat.<E>getType(jobContext));
}
The job dataset may already exist if the ApplicationMaster was restarted
34,095
/**
 * Scans {@code schemaDirectory} for Avro schema definitions and creates or
 * migrates the HBase Common managed schemas accordingly. The directory may be
 * a plain filesystem path, or a classpath location (prefixed with
 * CLASSPATH_PREFIX) resolved from either an exploded directory or a jar.
 *
 * @param schemaDirectory the directory (or classpath location) holding the schemas
 * @param createTableAndFamilies if true, also create the HBase tables and
 *        column families required by schemas that needed migration
 * @throws InterruptedException if interrupted while waiting for table creation
 * @throws DatasetException if the schema location cannot be resolved or read
 * @throws ValidationException if a table has no entity schemas
 */
public void createOrMigrateSchemaDirectory(String schemaDirectory,
    boolean createTableAndFamilies) throws InterruptedException {
  // Step 1: resolve the schema strings from the filesystem, an exploded
  // classpath directory, or a jar entry.
  List<String> schemaStrings;
  if (schemaDirectory.startsWith(CLASSPATH_PREFIX)) {
    URL dirURL = getClass().getClassLoader().getResource(
        schemaDirectory.substring(CLASSPATH_PREFIX.length()));
    if (dirURL != null && dirURL.getProtocol().equals("file")) {
      try {
        schemaStrings = getSchemaStringsFromDir(new File(dirURL.toURI()));
      } catch (URISyntaxException e) {
        throw new DatasetException(e);
      }
    } else if (dirURL != null && dirURL.getProtocol().equals("jar")) {
      // strip the leading "file:" (5 chars) and the "!/entry" suffix to get
      // the jar's filesystem path
      String jarPath = dirURL.getPath().substring(5, dirURL.getPath().indexOf("!"));
      schemaStrings = getSchemaStringsFromJar(jarPath,
          schemaDirectory.substring(CLASSPATH_PREFIX.length()));
    } else {
      String msg = "Could not find classpath resource: " + schemaDirectory;
      LOG.error(msg);
      throw new DatasetException(msg);
    }
  } else {
    schemaStrings = getSchemaStringsFromDir(new File(schemaDirectory));
  }
  // Step 2: group schema strings by the table(s) they map to. A schema may
  // reference multiple tables.
  Map<String, List<String>> tableEntitySchemaMap = new HashMap<String, List<String>>();
  for (String schemaString : schemaStrings) {
    List<String> tables = getTablesFromSchemaString(schemaString);
    for (String table : tables) {
      if (tableEntitySchemaMap.containsKey(table)) {
        tableEntitySchemaMap.get(table).add(schemaString);
      } else {
        List<String> entityList = new ArrayList<String>();
        entityList.add(schemaString);
        tableEntitySchemaMap.put(table, entityList);
      }
    }
  }
  // Step 3: validate that every table has at least one entity schema.
  // NOTE(review): by construction above every entry has at least one schema,
  // so this check appears to be defensive only.
  for (Entry<String, List<String>> entry : tableEntitySchemaMap.entrySet()) {
    String table = entry.getKey();
    List<String> entitySchemas = entry.getValue();
    if (entitySchemas.size() == 0) {
      String msg = "Table requested, but no entity schemas for Table: " + table;
      LOG.error(msg);
      throw new ValidationException(msg);
    }
  }
  // Step 4: create or migrate each managed schema; collect table descriptors
  // only for schemas that actually required migration.
  Collection<HTableDescriptor> tableDescriptors = Lists.newArrayList();
  for (Entry<String, List<String>> entry : tableEntitySchemaMap.entrySet()) {
    String table = entry.getKey();
    for (String entitySchemaString : entry.getValue()) {
      boolean migrationRequired = prepareManagedSchema(table, entitySchemaString);
      if (migrationRequired) {
        tableDescriptors.add(prepareTableDescriptor(table, entitySchemaString));
      }
    }
  }
  // Step 5: optionally create the HBase tables and their column families.
  if (createTableAndFamilies) {
    createTables(tableDescriptors);
  }
}
Scans the schemaDirectory for Avro schemas and creates or migrates the HBase Common managed schemas managed by this instance's entity manager.
34,096
/**
 * Creates or migrates the managed schema for the given entity schema string.
 * Key schemas must not change across migrations; an unchanged entity schema
 * is a no-op.
 *
 * @param tableName the HBase table the schema maps to
 * @param entitySchemaString the raw Avro entity schema definition
 * @return true if the schema was created or migrated, false if it was
 *         unchanged and nothing was done
 * @throws ValidationException if the key schema differs from the managed one
 */
private boolean prepareManagedSchema(String tableName, String entitySchemaString) {
  String entityName = getEntityNameFromSchemaString(entitySchemaString);
  AvroEntitySchema entitySchema = parser.parseEntitySchema(entitySchemaString);
  AvroKeySchema keySchema = parser.parseKeySchema(entitySchemaString);
  if (schemaManager.hasManagedSchema(tableName, entityName)) {
    // key schemas are immutable across migrations
    KeySchema currentKeySchema = schemaManager.getKeySchema(tableName, entityName);
    if (!keySchema.equals(currentKeySchema)) {
      String msg = "Migrating schema with different keys. Current: "
          + currentKeySchema.getRawSchema() + " New: " + keySchema.getRawSchema();
      LOG.error(msg);
      throw new ValidationException(msg);
    }
    if (!schemaManager.hasSchemaVersion(tableName, entityName, entitySchema)) {
      LOG.info("Migrating Schema: (" + tableName + ", " + entityName + ")");
      schemaManager.migrateSchema(tableName, entityName, entitySchemaString);
    } else {
      LOG.info("Schema hasn't changed, not migrating: (" + tableName + ", "
          + entityName + ")");
      return false;
    }
  } else {
    LOG.info("Creating Schema: (" + tableName + ", " + entityName + ")");
    // fix: reuse the already-parsed entitySchema instead of re-parsing the
    // schema string. The result is still discarded; the call is kept since
    // it may validate the column mapping — TODO confirm it can be dropped.
    entitySchema.getColumnMappingDescriptor().getRequiredColumnFamilies();
    schemaManager.createSchema(tableName, entityName, entitySchemaString,
        "org.kitesdk.data.hbase.avro.AvroKeyEntitySchemaParser",
        "org.kitesdk.data.hbase.avro.AvroKeySerDe",
        "org.kitesdk.data.hbase.avro.AvroEntitySerDe");
  }
  return true;
}
Prepare managed schema for this entitySchema
34,097
/**
 * Builds the HTableDescriptor for the given table, containing every column
 * family required by the entity schema plus the system and observable
 * families.
 *
 * @param tableName the HBase table name
 * @param entitySchemaString the raw Avro entity schema definition
 * @return a descriptor containing all required column families
 */
private HTableDescriptor prepareTableDescriptor(String tableName,
    String entitySchemaString) {
  HTableDescriptor descriptor = new HTableDescriptor(Bytes.toBytes(tableName));
  AvroEntitySchema entitySchema = parser.parseEntitySchema(entitySchemaString);
  Set<String> requiredFamilies =
      entitySchema.getColumnMappingDescriptor().getRequiredColumnFamilies();
  // every table also carries the system and observable families
  requiredFamilies.add(new String(Constants.SYS_COL_FAMILY));
  requiredFamilies.add(new String(Constants.OBSERVABLE_COL_FAMILY));
  for (String family : requiredFamilies) {
    if (!descriptor.hasFamily(family.getBytes())) {
      descriptor.addFamily(new HColumnDescriptor(family));
    }
  }
  return descriptor;
}
Prepare the Table descriptor for the given entity Schema
34,098
/**
 * Creates the given tables asynchronously via the HBase admin client, then
 * polls (up to MAX_SECOND_WAIT_FOR_TABLE_CREATION seconds, once per second)
 * until each table is available. When several descriptors target the same
 * table, the first one wins the create and the rest are applied as
 * modifications once the table comes up.
 *
 * NOTE(review): if a table never becomes available within the wait window,
 * its pending modifications are silently skipped — confirm this is intended.
 *
 * @param tableDescriptors the descriptors of the tables to create
 * @throws InterruptedException if interrupted while sleeping between polls
 * @throws DatasetException wrapping any IOException from the admin client
 */
private void createTables(Collection<HTableDescriptor> tableDescriptors)
    throws InterruptedException {
  try {
    Set<String> tablesCreated = Sets.newHashSet();
    // extra descriptors for tables whose create was already issued; applied
    // via modifyTable once the table is available
    Multimap<String, HTableDescriptor> pendingTableUpdates = ArrayListMultimap.create();
    for (HTableDescriptor tableDescriptor : tableDescriptors) {
      String tableName = Bytes.toString(tableDescriptor.getName());
      if (tablesCreated.contains(tableName)) {
        pendingTableUpdates.put(tableName, tableDescriptor);
      } else {
        LOG.info("Creating table " + tableName);
        // empty split-key array: create with a single region
        hbaseAdmin.createTableAsync(tableDescriptor, new byte[][] {});
        tablesCreated.add(tableName);
      }
    }
    // poll until every created table is available or the wait limit is hit
    for (int waitCount = 0; waitCount < MAX_SECOND_WAIT_FOR_TABLE_CREATION; waitCount++) {
      Iterator<String> iterator = tablesCreated.iterator();
      while (iterator.hasNext()) {
        String table = iterator.next();
        if (hbaseAdmin.isTableAvailable(table)) {
          if (pendingTableUpdates.containsKey(table)) {
            for (HTableDescriptor tableDescriptor : pendingTableUpdates.get(table)) {
              modifyTable(table, tableDescriptor);
            }
          }
          iterator.remove();
        }
      }
      if (tablesCreated.isEmpty()) {
        break;
      }
      Thread.sleep(1000);
    }
  } catch (IOException e) {
    throw new DatasetException(e);
  }
}
Creates the tables asynchronously via the HBase admin client, waiting for each to become available before applying any pending modifications.
34,099
/**
 * Adds to {@code tableName} any column families present in
 * {@code newDescriptor} but missing from the live table. The table is
 * disabled while families are added and re-enabled afterwards, even if an
 * addColumn call fails.
 *
 * @param tableName the table to update
 * @param newDescriptor the descriptor whose families the table should contain
 * @throws DatasetException wrapping any IOException from the admin client
 */
private void modifyTable(String tableName, HTableDescriptor newDescriptor) {
  LOG.info("Modifying table " + tableName);
  HColumnDescriptor[] requestedFamilies = newDescriptor.getColumnFamilies();
  try {
    HTableDescriptor liveDescriptor =
        hbaseAdmin.getTableDescriptor(Bytes.toBytes(tableName));
    // collect only the families the live table does not yet have
    List<HColumnDescriptor> missingFamilies = Lists.newArrayList();
    for (HColumnDescriptor requested : requestedFamilies) {
      if (!liveDescriptor.hasFamily(requested.getName())) {
        missingFamilies.add(new HColumnDescriptor(requested.getName()));
      }
    }
    if (!missingFamilies.isEmpty()) {
      // schema changes require the table to be disabled
      hbaseAdmin.disableTable(tableName);
      try {
        for (HColumnDescriptor missing : missingFamilies) {
          hbaseAdmin.addColumn(tableName, missing);
        }
      } finally {
        hbaseAdmin.enableTable(tableName);
      }
    }
  } catch (IOException e) {
    throw new DatasetException(e);
  }
}
Adds the column families that are not already present to the given table.