| idx (int64, 0-41.2k) | question (string, length 73-5.81k) | target (string, length 5-918) |
|---|---|---|
34,000 | public void report ( ) throws IOException { DistributedFileSystem dfs = getDFS ( ) ; if ( dfs != null ) { DiskStatus ds = dfs . getDiskStatus ( ) ; long capacity = ds . getCapacity ( ) ; long used = ds . getDfsUsed ( ) ; long remaining = ds . getRemaining ( ) ; long presentCapacity = used + remaining ; boolean mode = d... | Gives a report on how the FileSystem is doing . |
34,001 | public int upgradeProgress ( String [ ] argv , int idx ) throws IOException { DistributedFileSystem dfs = getDFS ( ) ; if ( dfs == null ) { System . out . println ( "FileSystem is " + getFS ( ) . getUri ( ) ) ; return - 1 ; } if ( idx != argv . length - 1 ) { printUsage ( "-upgradeProgress" ) ; return - 1 ; } UpgradeAc... | Command to request current distributed upgrade status , a detailed status , or to force the upgrade to proceed . |
34,002 | private ClientDatanodeProtocol getClientDatanodeProtocol ( String dnAddr ) throws IOException { String hostname = null ; int port ; int index ; Configuration conf = getConf ( ) ; if ( dnAddr == null ) { dnAddr = conf . get ( FSConstants . DFS_DATANODE_IPC_ADDRESS_KEY ) ; hostname = "localhost" ; } index = dnAddr . inde... | Gets a new ClientDataNodeProtocol object . |
34,003 | private int getBlockInfo ( String [ ] argv , int i ) throws IOException { long blockId = Long . valueOf ( argv [ i ++ ] ) ; LocatedBlockWithFileName locatedBlock = getDFS ( ) . getClient ( ) . getBlockInfo ( blockId ) ; if ( null == locatedBlock ) { System . err . println ( "Could not find the block with id : " + block... | Display the filename the block belongs to and its locations . |
34,004 | protected HttpURLConnection openConnection ( String path , String query ) throws IOException { try { final URL url = new URI ( "http" , null , nnAddr . getAddress ( ) . getHostAddress ( ) , nnAddr . getPort ( ) , path , query , null ) . toURL ( ) ; if ( LOG . isTraceEnabled ( ) ) { LOG . trace ( "url=" + url ) ; } retu... | Open an HTTP connection to the namenode to read file data and metadata . |
34,005 | public static HarIndex getHarIndex ( FileSystem fs , Path initializer ) throws IOException { if ( ! initializer . getName ( ) . endsWith ( HAR ) ) { initializer = initializer . getParent ( ) ; } InputStream in = null ; try { Path indexFile = new Path ( initializer , INDEX ) ; FileStatus indexStat = fs . getFileStatus (... | Creates a HarIndex object with the path to either the HAR or a part file in the HAR . |
34,006 | void parseLine ( String line ) throws UnsupportedEncodingException { String [ ] splits = line . split ( " " ) ; boolean isDir = "dir" . equals ( splits [ 1 ] ) ? true : false ; if ( ! isDir && splits . length >= 6 ) { String name = URLDecoder . decode ( splits [ 0 ] , "UTF-8" ) ; String partName = URLDecoder . decode (... | Parses each line and extracts relevant information . |
34,007 | public IndexEntry findEntry ( String partName , long partFileOffset ) { for ( IndexEntry e : entries ) { boolean nameMatch = partName . equals ( e . partFileName ) ; boolean inRange = ( partFileOffset >= e . startOffset ) && ( partFileOffset < e . startOffset + e . length ) ; if ( nameMatch && inRange ) { return e ; } ... | Finds the index entry corresponding to a HAR partFile at an offset . |
34,008 | public IndexEntry findEntryByFileName ( String fileName ) { for ( IndexEntry e : entries ) { if ( fileName . equals ( e . fileName ) ) { return e ; } } return null ; } | Finds the index entry corresponding to a file in the archive |
34,009 | public static void prepareBookKeeperEnv ( final String availablePath , ZooKeeper zooKeeper ) throws IOException { final CountDownLatch availablePathLatch = new CountDownLatch ( 1 ) ; StringCallback cb = new StringCallback ( ) { public void processResult ( int rc , String path , Object ctx , String name ) { if ( Code . ... | Create parent ZNode under which available BookKeeper bookie servers will register themselves . Will create parent ZNodes for that path as well . |
34,010 | private void createZkMetadataIfNotExists ( StorageInfo si ) throws IOException { try { if ( ! hasSomeJournalData ( ) ) { try { zk . create ( zkParentPath , new byte [ ] { '0' } , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; FormatInfoWritable writable = localFormatInfoWritable . get ( ) ; writable . set ( PROTO_... | If there is no metadata present in ZooKeeper create and populate the metadata with the right format information |
34,011 | private boolean zkPathExists ( String path ) throws IOException { try { return zk . exists ( path , false ) != null ; } catch ( KeeperException e ) { keeperException ( "Unrecoverable ZooKeeper error checking if " + path + " exists" , e ) ; } catch ( InterruptedException e ) { interruptedException ( "Interrupted checkin... | Check if a path exists in ZooKeeper |
34,012 | public void purgeLogsOlderThan ( long minTxIdToKeep ) throws IOException { checkEnv ( ) ; Collection < EditLogLedgerMetadata > ledgers = metadataManager . listLedgers ( false ) ; for ( EditLogLedgerMetadata ledger : ledgers ) { if ( ledger . getFirstTxId ( ) < minTxIdToKeep && ledger . getLastTxId ( ) < minTxIdToKeep )... | For edit log segments that contain transactions with ids earlier than the earliest txid to be retained , remove the ZooKeeper - based metadata and BookKeeper ledgers associated with these segments . |
34,013 | public void readToken ( String parentFieldName , JsonToken expectedToken ) throws IOException { JsonToken currentToken = jsonParser . nextToken ( ) ; if ( currentToken != expectedToken ) { throw new IOException ( "Expected a " + expectedToken . toString ( ) + " token when reading the value of the field: " + parentField... | This is a helper method that reads a JSON token using a JsonParser instance and throws an exception if the next token is not the same as the token we expect . |
34,014 | public String getFieldName ( ) throws IOException { if ( jsonParser . getCurrentToken ( ) != JsonToken . FIELD_NAME ) { throw new IOException ( "Expected a field of type " + JsonToken . FIELD_NAME + ", but found a field of type " + jsonParser . getCurrentToken ( ) ) ; } return jsonParser . getCurrentName ( ) ; } | If the current token is a field name this method returns the name of the field . |
34,015 | void setBlock ( final int index , final Block b ) { blockList [ index2BlockId ( index ) ] = b . getBlockId ( ) ; blockList [ index2BlockLen ( index ) ] = b . getNumBytes ( ) ; blockList [ index2BlockGenStamp ( index ) ] = b . getGenerationStamp ( ) ; } | Set the indexTh block |
34,016 | public static void main ( String argv [ ] ) throws Exception { try { Configuration conf = new Configuration ( ) ; UtilizationReporter utilizationReporter = new UtilizationReporter ( conf ) ; utilizationReporter . start ( ) ; } catch ( Throwable e ) { System . err . println ( e ) ; LOG . error ( StringUtils . stringifyE... | main program to run on each TaskTracker |
34,017 | static InetSocketAddress getNameNodeAddress ( Configuration conf , String cname , String rpcKey , String cname2 ) { String fs = conf . get ( cname ) ; String fs1 = conf . get ( rpcKey ) ; String fs2 = conf . get ( cname2 ) ; Configuration newconf = new Configuration ( conf ) ; newconf . set ( "fs.default.name" , fs ) ;... | Returns the IP address of the namenode |
34,018 | public static void write ( XMLOutputter xml , MD5MD5CRC32FileChecksum that ) throws IOException { xml . startTag ( MD5MD5CRC32FileChecksum . class . getName ( ) ) ; if ( that != null ) { xml . attribute ( "bytesPerCRC" , "" + that . bytesPerCRC ) ; xml . attribute ( "crcPerBlock" , "" + that . crcPerBlock ) ; xml . att... | Write that object to xml output . |
34,019 | public static MD5MD5CRC32FileChecksum valueOf ( Attributes attrs ) throws SAXException { final String bytesPerCRC = attrs . getValue ( "bytesPerCRC" ) ; final String crcPerBlock = attrs . getValue ( "crcPerBlock" ) ; final String md5 = attrs . getValue ( "md5" ) ; if ( bytesPerCRC == null || crcPerBlock == null || md5 ... | Return the object represented in the attributes . |
34,020 | private String getNextElementsValue ( String wantedName ) throws IOException { boolean gotSTART_ELEMENT = false ; try { int eventType = in . getEventType ( ) ; while ( true ) { switch ( eventType ) { case XMLStreamConstants . CHARACTERS : if ( gotSTART_ELEMENT ) { return in . getText ( ) . trim ( ) ; } break ; case XML... | Get the next element's value and check that the element's name is wantedName . |
34,021 | public void update ( String newPath ) throws IOException { String id = hostname + Thread . currentThread ( ) . getId ( ) ; CurrentInProgressMetadataWritable cip = localWritable . get ( ) ; cip . set ( id , newPath ) ; byte [ ] data = WritableUtil . writableToByteArray ( cip ) ; try { zooKeeper . setData ( fullyQualifie... | Update the data in the ZNode to point to the ZNode containing the metadata for the ledger containing the current in - progress edit log segment . |
34,022 | public String read ( ) throws IOException { CurrentInProgressMetadataWritable cip = localWritable . get ( ) ; if ( readAndUpdateVersion ( cip ) ) { return cip . getPath ( ) ; } else { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( fullyQualifiedZNode + " is currently clear." ) ; } } return null ; } | Read the full path to the ZNode holding the metadata for the ledger containing the current in - progress edit log segment or null if no segment is currently in - progress |
34,023 | public void clear ( ) throws IOException { try { zooKeeper . setData ( fullyQualifiedZNode , null , expectedZNodeVersion . get ( ) ) ; } catch ( KeeperException . BadVersionException e ) { LOG . error ( fullyQualifiedZNode + " has been updated by another process" , e ) ; throw new StaleVersionException ( fullyQualified... | Clear out the data in the ZNode specified in the constructor to indicate that no segment is currently in progress . This does not delete the actual ZNode . |
34,024 | protected void onSizeChanged ( int w , int h , int oldw , int oldh ) { super . onSizeChanged ( w , h , oldw , oldh ) ; setupBounds ( w , h ) ; setupPaints ( ) ; invalidate ( ) ; } | Use onSizeChanged instead of onAttachedToWindow to get the dimensions of the view , because this method is called after measuring the dimensions of MATCH_PARENT & WRAP_CONTENT . Use these dimensions to set up the bounds and paints . |
34,025 | private void setupPaints ( ) { barPaint . setColor ( barColor ) ; barPaint . setAntiAlias ( true ) ; barPaint . setStyle ( Style . STROKE ) ; barPaint . setStrokeWidth ( barWidth ) ; rimPaint . setColor ( rimColor ) ; rimPaint . setAntiAlias ( true ) ; rimPaint . setStyle ( Style . STROKE ) ; rimPaint . setStrokeWidth ... | Set the properties of the paints we're using to draw the progress wheel |
34,026 | private void setupBounds ( int layout_width , int layout_height ) { int paddingTop = getPaddingTop ( ) ; int paddingBottom = getPaddingBottom ( ) ; int paddingLeft = getPaddingLeft ( ) ; int paddingRight = getPaddingRight ( ) ; if ( ! fillRadius ) { int minValue = Math . min ( layout_width - paddingLeft - paddingRight ... | Set the bounds of the component |
34,027 | private void parseAttributes ( TypedArray a ) { DisplayMetrics metrics = getContext ( ) . getResources ( ) . getDisplayMetrics ( ) ; barWidth = ( int ) TypedValue . applyDimension ( TypedValue . COMPLEX_UNIT_DIP , barWidth , metrics ) ; rimWidth = ( int ) TypedValue . applyDimension ( TypedValue . COMPLEX_UNIT_DIP , ri... | Parse the attributes passed to the view from the XML |
34,028 | public void setInstantProgress ( float progress ) { if ( isSpinning ) { mProgress = 0.0f ; isSpinning = false ; } if ( progress > 1.0f ) { progress -= 1.0f ; } else if ( progress < 0 ) { progress = 0 ; } if ( progress == mTargetProgress ) { return ; } mTargetProgress = Math . min ( progress * 360.0f , 360.0f ) ; mProgr... | Set the progress to a specific value ; the bar will be set instantly to that value |
34,029 | public void setProgress ( float progress ) { if ( isSpinning ) { mProgress = 0.0f ; isSpinning = false ; runCallback ( ) ; } if ( progress > 1.0f ) { progress -= 1.0f ; } else if ( progress < 0 ) { progress = 0 ; } if ( progress == mTargetProgress ) { return ; } if ( mProgress == mTargetProgress ) { lastTimeAnimated = ... | Set the progress to a specific value ; the bar will smoothly animate until that value |
34,030 | public void execute ( ) throws MojoExecutionException { if ( StringUtils . isBlank ( this . refreshPort ) ) { this . refreshPort = "8080" ; } if ( ! ping ( ) ) { getLog ( ) . warn ( "Connection failed to " + this . refreshHost + ":" + this . refreshPort + ", " + getAbortedMsg ( ) ) ; return ; } executeRefresh ( ) ; } | Mojo interface implementation |
34,031 | protected void refreshWebScripts ( String url ) { URL alfrescoTomcatUrl = buildFinalUrl ( url ) ; if ( alfrescoTomcatUrl == null ) { getLog ( ) . error ( "Could not build refresh URL for " + refreshWebappName + ", " + getAbortedMsg ( ) ) ; } List < NameValuePair > postData = new ArrayList < NameValuePair > ( ) ; postDa... | Perform a Refresh of Web Scripts container in webapp . Called by specific refresh mojo implementation . |
34,032 | protected void clearDependencyCaches ( String url ) { URL alfrescoTomcatUrl = buildFinalUrl ( url ) ; if ( alfrescoTomcatUrl == null ) { getLog ( ) . error ( "Could not build clear dependency caches URL for " + refreshWebappName + ", " + getAbortedMsg ( ) ) ; } makePostCall ( alfrescoTomcatUrl , null , "Clear Dependenc... | Perform a Clear Dependency Caches call on the Share webapp . Called by a specific refresh mojo implementation ; currently only applicable to the Share webapp . |
34,033 | private void makePostCall ( URL alfrescoTomcatUrl , List < NameValuePair > postData , String operation ) { CloseableHttpClient client = null ; CloseableHttpResponse response = null ; try { HttpHost targetHost = new HttpHost ( alfrescoTomcatUrl . getHost ( ) , alfrescoTomcatUrl . getPort ( ) , alfrescoTomcatUrl . getPro... | Helper method to make a POST request to the Alfresco Webapp |
34,034 | protected String getPort ( ) { String port = tomcatPort ; if ( mavenTomcatPort != null ) { port = mavenTomcatPort ; getLog ( ) . info ( "Tomcat Port overridden by property maven.tomcat.port" ) ; } return port ; } | Get the Tomcat port . By default the port is set via the maven . alfresco . tomcat . port property , but for legacy and external configuration purposes maven . tomcat . port will override it if defined |
34,035 | protected boolean tomcatIsRunning ( ) { CloseableHttpClient client = HttpClients . createDefault ( ) ; CloseableHttpResponse response = null ; try { HttpGet httpget = new HttpGet ( "http://localhost:" + getPort ( ) + "/alfresco" ) ; response = client . execute ( httpget ) ; getLog ( ) . info ( "Tomcat is running on por... | Check if Tomcat is already running . |
34,036 | protected void copyAlfrescoLicense ( ) throws MojoExecutionException { final String warOutputDir = getWarOutputDir ( PLATFORM_WAR_PREFIX_NAME ) ; final String licDestDir = warOutputDir + "/WEB-INF/classes/alfresco/extension/license" ; getLog ( ) . info ( "Copying Alfresco Enterprise license to: " + licDestDir ) ; execu... | Copy the Alfresco Enterprise license to its correct place in the Platform WAR if it exists . It is not enough to have it on the test classpath , as then it will start up as a Trial license ... |
34,037 | protected void copyShareConfigCustom ( ) throws MojoExecutionException { final String warOutputDir = getWarOutputDir ( SHARE_WAR_PREFIX_NAME ) ; final String distDir = warOutputDir + "/WEB-INF/classes/alfresco/web-extension/" ; String repoUrl = project . getProperties ( ) . getProperty ( "alfresco.repo.url" ) ; if ( re... | Copy Share Config Custom in order to have global overrides for development and dynamic port |
34,038 | protected void copyHotswapAgentProperties ( String warPrefix ) throws MojoExecutionException { if ( copyHotswapAgentConfig == false ) { return ; } final String warOutputDir = getWarOutputDir ( warPrefix ) + "/WEB-INF/classes/" ; getLog ( ) . info ( "Copying " + warPrefix + "-hotswap-agent.properties to " + warOutputDir... | Copy and Build hotswap - agent . properties |
34,039 | protected String packageAndInstallCustomWar ( String warName ) throws MojoExecutionException { final String warArtifactId = "${project.artifactId}-" + warName ; final String warSourceDir = getWarOutputDir ( warName ) ; String warPath = project . getBuild ( ) . getDirectory ( ) + "/" + warName + ".war" ; ZipUtil . pack ... | Package customized war file and install it in local maven repo . |
34,040 | private static FormatBundle < DatasetKeyOutputFormat > outputBundle ( Configuration conf ) { FormatBundle < DatasetKeyOutputFormat > bundle = FormatBundle . forOutput ( DatasetKeyOutputFormat . class ) ; for ( Map . Entry < String , String > entry : conf ) { bundle . set ( entry . getKey ( ) , entry . getValue ( ) ) ; ... | Builds a FormatBundle for DatasetKeyOutputFormat by copying a temp config . |
34,041 | private static boolean hasErrors ( Symbol symbol ) { switch ( symbol . kind ) { case ALTERNATIVE : return hasErrors ( symbol , ( ( Symbol . Alternative ) symbol ) . symbols ) ; case EXPLICIT_ACTION : return false ; case IMPLICIT_ACTION : return symbol instanceof Symbol . ErrorAction ; case REPEATER : Symbol . Repeater ... | Returns true if the Parser contains any Error symbol indicating that it may fail for some inputs . |
34,042 | public int run ( ) throws IOException { Preconditions . checkArgument ( samplePaths != null && ! samplePaths . isEmpty ( ) , "Sample JSON path is required" ) ; Preconditions . checkArgument ( samplePaths . size ( ) == 1 , "Only one JSON sample can be given" ) ; Schema sampleSchema = JsonUtil . inferSchema ( open ( samp... | By default we look at the first 10 records |
34,043 | private void loadKiteConf ( Services services ) throws IOException { String [ ] paths = services . getConf ( ) . getStrings ( KITE_CONFIGURATION ) ; if ( paths != null && paths . length != 0 ) { kiteConf = new Configuration ( false ) ; for ( String path : paths ) { if ( path . startsWith ( "hdfs" ) ) { Path p = new Pat... | ability to specify multiple paths to configurations |
34,044 | public Result get ( Get get ) { HTableInterface table = pool . getTable ( tableName ) ; try { for ( GetModifier getModifier : getModifiers ) { get = getModifier . modifyGet ( get ) ; } try { return table . get ( get ) ; } catch ( IOException e ) { throw new DatasetIOException ( "Error performing get" , e ) ; } } finall... | Execute a Get on HBase . |
34,045 | public Result get ( Get get , GetModifier getModifier ) { if ( getModifier != null ) { get = getModifier . modifyGet ( get ) ; } return get ( get ) ; } | Execute the get on HBase invoking the getModifier before executing the get if getModifier is not null . |
34,046 | public boolean put ( PutAction putAction ) { HTableInterface table = pool . getTable ( tableName ) ; try { return put ( putAction , table ) ; } finally { if ( table != null ) { try { table . close ( ) ; } catch ( IOException e ) { throw new DatasetIOException ( "Error putting table back into pool" , e ) ; } } } } | Execute a Put on HBase . |
34,047 | public boolean put ( PutAction putAction , HTableInterface table ) { for ( PutActionModifier putActionModifier : putActionModifiers ) { putAction = putActionModifier . modifyPutAction ( putAction ) ; } Put put = putAction . getPut ( ) ; if ( putAction . getVersionCheckAction ( ) != null ) { byte [ ] versionBytes = null... | Execute a Put on HBase using a pre - defined HTableInterface |
34,048 | public boolean put ( PutAction putAction , PutActionModifier putActionModifier ) { if ( putActionModifier != null ) { putAction = putActionModifier . modifyPutAction ( putAction ) ; } return put ( putAction ) ; } | Execute the put on HBase invoking the putModifier before executing the put if putModifier is not null . |
34,049 | public < E > boolean put ( E entity , EntityMapper < E > entityMapper ) { return put ( entity , null , entityMapper ) ; } | Execute a Put on HBase creating the Put by mapping the key and entity to a Put with the entityMapper . |
34,050 | public < E > boolean put ( E entity , PutActionModifier putActionModifier , EntityMapper < E > entityMapper ) { PutAction putAction = entityMapper . mapFromEntity ( entity ) ; return put ( putAction , putActionModifier ) ; } | Execute a Put on HBase creating the Put by mapping the key and entity to a Put with the entityMapper . putModifier will be invoked on this created Put before the Put is executed . |
34,051 | public < E > long increment ( PartitionKey key , String fieldName , long amount , EntityMapper < E > entityMapper ) { Increment increment = entityMapper . mapToIncrement ( key , fieldName , amount ) ; HTableInterface table = pool . getTable ( tableName ) ; Result result ; try { result = table . increment ( increment ) ... | Execute an increment on an entity field . This field must be a type that supports increments . Returns the new increment value of type long . |
34,052 | public boolean delete ( DeleteAction deleteAction ) { HTableInterface table = pool . getTable ( tableName ) ; try { for ( DeleteActionModifier deleteActionModifier : deleteActionModifiers ) { deleteAction = deleteActionModifier . modifyDeleteAction ( deleteAction ) ; } Delete delete = deleteAction . getDelete ( ) ; if ... | Execute a Delete on HBase . |
34,053 | public boolean delete ( DeleteAction deleteAction , DeleteActionModifier deleteActionModifier ) { if ( deleteActionModifier != null ) { deleteAction = deleteActionModifier . modifyDeleteAction ( deleteAction ) ; } return delete ( deleteAction ) ; } | Execute the delete on HBase invoking the deleteModifier before executing the delete if deleteModifier is not null . |
34,054 | public boolean delete ( PartitionKey key , Set < String > columns , VersionCheckAction checkAction , KeySerDe keySerDe ) { return delete ( key , columns , checkAction , null , keySerDe ) ; } | Execute a Delete on HBase creating the Delete from the key and the set of columns . Only the columns specified in this set will be deleted in the row . |
34,055 | public boolean delete ( PartitionKey key , Set < String > columns , VersionCheckAction checkAction , DeleteActionModifier deleteActionModifier , KeySerDe keySerDe ) { byte [ ] keyBytes = keySerDe . serialize ( key ) ; Delete delete = new Delete ( keyBytes ) ; for ( String requiredColumn : columns ) { String [ ] familyA... | Execute a Delete on HBase creating the Delete from the key and the set of columns . Only the columns specified in this set will be deleted in the row . deleteModifier will be invoked on this created Delete before the Delete is executed . |
34,056 | public < E > EntityScannerBuilder < E > getScannerBuilder ( EntityMapper < E > entityMapper ) { EntityScannerBuilder < E > builder = new BaseEntityScanner . Builder < E > ( pool , tableName , entityMapper ) ; for ( ScanModifier scanModifier : scanModifiers ) { builder . addScanModifier ( scanModifier ) ; } return build... | Get an EntityScannerBuilder that the client can use to build an EntityScanner . |
34,057 | public < E > EntityBatch < E > createBatch ( EntityMapper < E > entityMapper , long writeBufferSize ) { return new BaseEntityBatch < E > ( this , entityMapper , pool , tableName , writeBufferSize ) ; } | Create an EntityBatch that can be used to write batches of entities . |
34,058 | public StorageKey toKey ( Path fromPath , StorageKey storage ) { final List < FieldPartitioner > partitioners = Accessor . getDefault ( ) . getFieldPartitioners ( storage . getPartitionStrategy ( ) ) ; String truncatedPath = fromPath . toString ( ) ; if ( truncatedPath . startsWith ( rootPath . toString ( ) ) ) { trunc... | Supposed to build keys from start to finish vs end to start |
34,059 | @ GwtIncompatible ( "Array.newArray(Class, int)" ) public final E [ ] toArray ( Class < E > type ) { return Iterables . toArray ( iterable , type ) ; } | Returns an array containing all of the elements from this fluent iterable in iteration order . |
34,060 | private static < T > Iterator < T > start ( T singleton ) { return Collections . singleton ( singleton ) . iterator ( ) ; } | Convenience function to wrap some object in an Iterator . |
34,061 | private static < T > T coalesce ( T ... values ) { for ( T value : values ) { if ( value != null ) { return value ; } } return null ; } | Returns the first non - null value from the sequence or null if there is no non - null value . |
34,062 | public void writeInt ( int n ) throws IOException { byte [ ] intBytes = new byte [ ] { ( byte ) ( ( n >>> 24 ) ^ 0x80 ) , ( byte ) ( n >>> 16 ) , ( byte ) ( n >>> 8 ) , ( byte ) n } ; out . write ( intBytes ) ; } | An int is written by flipping the sign bit and writing it as a big endian int . |
34,063 | public void writeLong ( long n ) throws IOException { byte [ ] intBytes = new byte [ ] { ( byte ) ( ( n >>> 56 ) ^ 0x80 ) , ( byte ) ( n >>> 48 ) , ( byte ) ( n >>> 40 ) , ( byte ) ( n >>> 32 ) , ( byte ) ( n >>> 24 ) , ( byte ) ( n >>> 16 ) , ( byte ) ( n >>> 8 ) , ( byte ) n } ; out . write ( intBytes ) ; } | A long is written by flipping the sign bit and writing it as a big endian long . |
34,064 | public void printUserProfies ( ) { EntityScanner < UserProfileModel > scanner = userProfileDao . getScanner ( ) ; scanner . initialize ( ) ; try { for ( UserProfileModel entity : scanner ) { System . out . println ( entity . toString ( ) ) ; } } finally { scanner . close ( ) ; } } | Print all user profiles . |
34,065 | public void printUserProfileActionsForLastName ( String lastName ) { PartitionKey startKey = new PartitionKey ( "lastName" ) ; EntityScanner < UserProfileActionsModel > scanner = userProfileActionsDao . getScanner ( startKey , null ) ; scanner . initialize ( ) ; try { for ( UserProfileActionsModel entity : scanner ) { ... | Print the user profiles and actions for all users with the provided last name |
34,066 | public void create ( String firstName , String lastName , boolean married ) { long ts = System . currentTimeMillis ( ) ; UserProfileModel profileModel = UserProfileModel . newBuilder ( ) . setFirstName ( firstName ) . setLastName ( lastName ) . setMarried ( married ) . setCreated ( ts ) . build ( ) ; UserActionsModel a... | Create a fresh new user record . |
34,067 | public void updateUserProfile ( String firstName , String lastName , boolean married ) { long ts = System . currentTimeMillis ( ) ; PartitionKey key = new PartitionKey ( lastName , firstName ) ; UserProfileActionsModel profileActionsModel = userProfileActionsDao . get ( key ) ; UserProfileActionsModel updatedProfileAct... | Update the married status of a new user record . |
34,068 | public void addAction ( String firstName , String lastName , String actionType , String actionValue ) { UserActionsModel actionsModel = UserActionsModel . newBuilder ( ) . setLastName ( lastName ) . setFirstName ( firstName ) . setActions ( new HashMap < String , String > ( ) ) . build ( ) ; actionsModel . getActions (... | Add an action to the user profile . |
34,069 | private void registerSchemas ( Configuration conf , SchemaManager schemaManager ) throws InterruptedException { HBaseAdmin admin ; try { admin = new HBaseAdmin ( conf ) ; if ( admin . tableExists ( "kite_example_user_profiles" ) ) { admin . disableTable ( "kite_example_user_profiles" ) ; admin . deleteTable ( "kite_exa... | Uses SchemaTool to register the required schemas and create the required tables . |
34,070 | public static void main ( String [ ] args ) throws InterruptedException { UserProfileExample example = new UserProfileExample ( ) ; example . create ( "John" , "Doe" , true ) ; example . create ( "Jane" , "Doe" , false ) ; example . create ( "Foo" , "Bar" , false ) ; example . printUserProfies ( ) ; example . addAction... | The main driver method . Doesn't require any arguments . |
34,071 | private Class parse ( String str , ScriptContext ctx ) throws ScriptException { String fileName = getFileName ( ctx ) ; String sourcePath = getSourcePath ( ctx ) ; String classPath = getClassPath ( ctx ) ; Map < String , byte [ ] > classBytes = compiler . compile ( fileName , str , ctx . getErrorWriter ( ) , sourcePath... | Internals only below this point |
34,072 | private Decoder getColumnDecoder ( Schema writtenFieldAvroSchema , InputStream in ) { if ( writtenFieldAvroSchema . getType ( ) == Type . INT || writtenFieldAvroSchema . getType ( ) == Type . LONG || writtenFieldAvroSchema . getType ( ) == Type . STRING ) { return new ColumnDecoder ( in ) ; } else { return DecoderFacto... | Returns an Avro Decoder . The implementation it chooses will depend on the schema of the field . |
34,073 | private Encoder getColumnEncoder ( Schema fieldAvroSchema , OutputStream out ) { if ( fieldAvroSchema . getType ( ) == Type . INT || fieldAvroSchema . getType ( ) == Type . LONG || fieldAvroSchema . getType ( ) == Type . STRING ) { return new ColumnEncoder ( out ) ; } else { return EncoderFactory . get ( ) . binaryEnco... | Returns an Avro Encoder . The implementation it chooses will depend on the schema of the field . |
34,074 | public EntityScannerBuilder < E > addEqualFilter ( String fieldName , Object filterValue ) { SingleFieldEntityFilter singleFieldEntityFilter = new SingleFieldEntityFilter ( entityMapper . getEntitySchema ( ) , entityMapper . getEntitySerDe ( ) , fieldName , filterValue , CompareFilter . CompareOp . EQUAL ) ; filterList... | Add an Equality Filter to the Scanner Will Filter Results Not Equal to the Filter Value |
34,075 | public EntityScannerBuilder < E > addRegexMatchFilter ( String fieldName , String regexString ) { RegexEntityFilter regexEntityFilter = new RegexEntityFilter ( entityMapper . getEntitySchema ( ) , entityMapper . getEntitySerDe ( ) , fieldName , regexString ) ; filterList . add ( regexEntityFilter . getFilter ( ) ) ; re... | Add a Regex Equality Filter to the Scanner Will Filter Results Not Equal to the Filter Value |
34,076 | public EntityScannerBuilder < E > addIsMissingFilter ( String fieldName ) { SingleFieldEntityFilter singleFieldEntityFilter = new SingleFieldEntityFilter ( entityMapper . getEntitySchema ( ) , entityMapper . getEntitySerDe ( ) , fieldName , "++++NON_SHALL_PASS++++" , CompareFilter . CompareOp . EQUAL ) ; SingleColumnVa... | Only include rows which are missing this field ; this was the only possible way to do it . |
34,077 | private void initRecordBuilderFactories ( ) { for ( FieldMapping fieldMapping : avroSchema . getColumnMappingDescriptor ( ) . getFieldMappings ( ) ) { if ( fieldMapping . getMappingType ( ) == MappingType . KEY_AS_COLUMN ) { String fieldName = fieldMapping . getFieldName ( ) ; Schema fieldSchema = avroSchema . getAvroS... | Initialize the AvroRecordBuilderFactories for all keyAsColumn mapped fields that are record types . We need to be able to get record builders for these since the records are broken across many columns and need to be constructed by the composer . |
34,078 | public Set < String > getRequiredColumns ( ) { Set < String > set = new HashSet < String > ( ) ; for ( FieldMapping fieldMapping : fieldMappings ) { if ( FieldMapping . MappingType . KEY == fieldMapping . getMappingType ( ) ) { continue ; } else if ( FieldMapping . MappingType . KEY_AS_COLUMN == fieldMapping . getMappi... | Get the columns required by this schema . |
34,079 | public Set < String > getRequiredColumnFamilies ( ) { Set < String > set = new HashSet < String > ( ) ; for ( FieldMapping mapping : fieldMappings ) { if ( FieldMapping . MappingType . KEY != mapping . getMappingType ( ) ) set . add ( mapping . getFamilyAsString ( ) ) ; } return set ; } | Get the column families required by this schema . |
34,080 | private static boolean shouldFormatDFSCluster ( String localDFSLocation , boolean clean ) { boolean format = true ; File f = new File ( localDFSLocation ) ; if ( f . exists ( ) && f . isDirectory ( ) && ! clean ) { format = false ; } return format ; } | Returns true if we should format the DFS Cluster . We'll format if clean is true or if the dfsFsLocation does not exist . |
34,081 | private static Configuration configureDFSCluster ( Configuration config , String localDFSLocation , String bindIP , int namenodeRpcPort , int namenodeHttpPort , int datanodePort , int datanodeIpcPort , int datanodeHttpPort ) { logger . info ( "HDFS force binding to ip: " + bindIP ) ; config = new KiteCompatibleConfigur... | Configure the DFS Cluster before launching it . |
34,082 | public < E > Iterator < E > filter ( Iterator < E > iterator , EntityAccessor < E > accessor ) { return Iterators . filter ( iterator , toEntityPredicate ( accessor ) ) ; } | Filter the entities returned by a given iterator by these constraints . |
34,083 | @ SuppressWarnings ( "unchecked" ) public boolean alignedWithBoundaries ( ) { if ( constraints . isEmpty ( ) ) { return true ; } else if ( strategy == null ) { return false ; } Multimap < String , FieldPartitioner > partitioners = HashMultimap . create ( ) ; Set < String > partitionFields = Sets . newHashSet ( ) ; for ... | If this returns true , the entities selected by this set of constraints align to partition boundaries . |
34,084 | public Map < String , String > toNormalizedQueryMap ( ) { Map < String , String > query = Maps . newTreeMap ( ) ; return toQueryMap ( query , true ) ; } | Get a normalized query map for the constraints . A normalized query map will be equal in value and iteration order for any logically equivalent set of constraints . |
34,085 | private static boolean startsWith ( String [ ] left , List < String > right ) { if ( left . length < right . size ( ) ) { return false ; } for ( int i = 0 ; i < right . size ( ) ; i += 1 ) { if ( ! left [ i ] . equals ( right . get ( i ) ) ) { return false ; } } return true ; } | Returns true if left starts with right . |
34,086 | public int position ( String fieldName ) { if ( fieldPositions . containsKey ( fieldName ) ) { return fieldPositions . get ( fieldName ) ; } else { throw new DatasetException ( "Cannot recover " + fieldName + " from key" ) ; } } | Resolves the storage position of a field in keys with this KeySchema . |
34,087 | public static String toExpression ( PartitionStrategy partitionStrategy ) { List < FieldPartitioner > fieldPartitioners = partitionStrategy . getFieldPartitioners ( ) ; if ( fieldPartitioners . size ( ) == 1 ) { return PartitionFunctions . toExpression ( fieldPartitioners . get ( 0 ) ) ; } StringBuilder sb = new String... | Convert a PartitionStrategy into a serialized expression . This can be used to set a PartitionStrategy in an Avro property if the PartitionStrategy is passed as an object . |
34,088 | private void initializeEntityVersionEntityMapper ( ) { AvroEntitySchema avroEntitySchema = schemaParser . parseEntitySchema ( managedSchemaEntityVersionSchema ) ; avroEntitySchema = AvroUtils . mergeSpecificStringTypes ( ManagedSchemaEntityVersion . class , avroEntitySchema ) ; AvroEntityComposer < ManagedSchemaEntityV... | Initialize the entity mapper we'll use to convert the schema version metadata in each row to a ManagedSchemaEntityVersion record . |
34,089 | private void updateEntityMappers ( ) { for ( Entry < Integer , EntitySchema > entry : schemaManager . getEntitySchemas ( tableName , entityName ) . entrySet ( ) ) { if ( ! entityMappers . containsKey ( entry . getKey ( ) ) ) { AvroEntitySchema writtenSchema = ( AvroEntitySchema ) entry . getValue ( ) ; EntityMapper < E... | Update the map of wrapped entity mappers to reflect the most recent entity schema metadata returned by the schemaManager . |
34,090 | private Range < String > transformClosed ( Range < String > range ) { if ( range . hasLowerBound ( ) ) { String lower = range . lowerEndpoint ( ) ; String afterLower = domain . next ( apply ( lower ) ) ; if ( afterLower != null ) { if ( range . hasUpperBound ( ) ) { String upper = range . upperEndpoint ( ) ; String upp... | Transforms a Range predicate to a closed range on this partitioner s upper bounds . Handles edge cases correctly . |
34,091 | static boolean deleteParentDirectoriesIfEmpty ( FileSystem fs , Path root , Path path ) throws IOException { boolean deleted = false ; try { for ( Path current = path . getParent ( ) ; ! current . equals ( root ) && ! ( current . getParent ( ) == null ) ; current = current . getParent ( ) ) { final FileStatus [ ] stats... | Deletes the empty parent directories of the specified path . The method catches and ignores FileNotFoundException as it is possible that multiple parallel Kite instances are importing into a directory under the same root directory and it can happen that a Kite instance finds an empty directory which needs to be delete... |
34,092 | public static boolean supportsRename ( URI fsUri , Configuration conf ) { String fsUriScheme = fsUri . getScheme ( ) ; return conf . getBoolean ( FileSystemProperties . SUPPORTS_RENAME_PROP , ! ( fsUriScheme . equalsIgnoreCase ( "s3n" ) || fsUriScheme . equalsIgnoreCase ( "s3a" ) ) ) ; } | Determine whether a FileSystem that supports efficient file renaming is being used . Two known FileSystem implementations that currently lack this feature are S3N and S3A . |
34,093 | private static String valueString ( Object value , Schema schema ) { if ( value == null || schema . getType ( ) == Schema . Type . NULL ) { return null ; } switch ( schema . getType ( ) ) { case BOOLEAN : case FLOAT : case DOUBLE : case INT : case LONG : case STRING : return value . toString ( ) ; case ENUM : return St... | Returns the value as the first matching schema type or null . |
34,094 | @ SuppressWarnings ( "unchecked" ) private static < E > Dataset < E > loadOrCreateJobDataset ( JobContext jobContext ) { Dataset < Object > dataset = load ( jobContext ) . getDataset ( ) ; String jobDatasetName = getJobDatasetName ( jobContext ) ; DatasetRepository repo = getDatasetRepository ( jobContext ) ; if ( repo... | The job dataset may already exist if the ApplicationMaster was restarted |
34,095 | public void createOrMigrateSchemaDirectory ( String schemaDirectory , boolean createTableAndFamilies ) throws InterruptedException { List < String > schemaStrings ; if ( schemaDirectory . startsWith ( CLASSPATH_PREFIX ) ) { URL dirURL = getClass ( ) . getClassLoader ( ) . getResource ( schemaDirectory . substring ( CLA... | Scans the schemaDirectory for avro schemas and creates or migrates HBase Common managed schemas managed by this instance's entity manager . |
34,096 | private boolean prepareManagedSchema ( String tableName , String entitySchemaString ) { String entityName = getEntityNameFromSchemaString ( entitySchemaString ) ; AvroEntitySchema entitySchema = parser . parseEntitySchema ( entitySchemaString ) ; AvroKeySchema keySchema = parser . parseKeySchema ( entitySchemaString ) ... | Prepare managed schema for this entitySchema |
34,097 | private HTableDescriptor prepareTableDescriptor ( String tableName , String entitySchemaString ) { HTableDescriptor descriptor = new HTableDescriptor ( Bytes . toBytes ( tableName ) ) ; AvroEntitySchema entitySchema = parser . parseEntitySchema ( entitySchemaString ) ; Set < String > familiesToAdd = entitySchema . getC... | Prepare the Table descriptor for the given entity Schema |
34,098 | private void createTables ( Collection < HTableDescriptor > tableDescriptors ) throws InterruptedException { try { Set < String > tablesCreated = Sets . newHashSet ( ) ; Multimap < String , HTableDescriptor > pendingTableUpdates = ArrayListMultimap . create ( ) ; for ( HTableDescriptor tableDescriptor : tableDescriptor... | Create the tables asynchronously with the HBase |
34,099 | private void modifyTable ( String tableName , HTableDescriptor newDescriptor ) { LOG . info ( "Modifying table " + tableName ) ; HColumnDescriptor [ ] newFamilies = newDescriptor . getColumnFamilies ( ) ; try { List < HColumnDescriptor > columnsToAdd = Lists . newArrayList ( ) ; HTableDescriptor currentFamilies = hbase... | add the column families which are not already present to the given table |
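A note on rows 34,062 and 34,063: their targets describe an order-preserving binary encoding, where flipping the sign bit before emitting big-endian bytes makes unsigned lexicographic byte comparison agree with signed numeric order (useful where encoded values are sorted as raw bytes, such as HBase row keys). Below is a minimal standalone sketch of the same trick; the class name `OrderPreservingEncoder` and the demo `main` are illustrative, not taken from the dataset:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Sketch of the sign-flip encoding shown in rows 34,062/34,063: flip the
// sign bit of the high byte, then write big-endian, so that comparing the
// output bytes as unsigned values matches signed numeric order.
public class OrderPreservingEncoder {
  private final OutputStream out;

  public OrderPreservingEncoder(OutputStream out) {
    this.out = out;
  }

  public void writeInt(int n) throws IOException {
    out.write(new byte[] {
      // Sign bit flipped: Integer.MIN_VALUE -> 0x00..., -1 -> 0x7F...,
      // 0 -> 0x80..., Integer.MAX_VALUE -> 0xFF...
      (byte) ((n >>> 24) ^ 0x80),
      (byte) (n >>> 16),
      (byte) (n >>> 8),
      (byte) n
    });
  }

  public static void main(String[] args) throws IOException {
    // The encoded byte sequences sort in the same order as the ints:
    // writeLong in row 34,063 applies the identical idea over 8 bytes.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    new OrderPreservingEncoder(buf).writeInt(0);
    for (byte b : buf.toByteArray()) {
      System.out.printf("%02X ", b & 0xFF); // prints: 80 00 00 00
    }
  }
}
```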