idx
int64
0
165k
question
stringlengths
73
5.81k
target
stringlengths
5
918
12,000
/**
 * Tests whether the given path begins with one of the registered
 * custom file system roots.
 */
private boolean startsWithCustomRoot(String path) {
    Enumeration<String> roots = customRoots.elements();
    // Preserve the original defensive null check on the enumeration.
    while (roots != null && roots.hasMoreElements()) {
        if (path.startsWith(roots.nextElement())) {
            return true;
        }
    }
    return false;
}
Tests whether path starts with a custom file system root .
12,001
/**
 * Returns true if the file is a directory, or a symbolic link whose
 * target resolves (via stat) to a directory.
 */
public boolean isDirectoryOrLinkedDirectory(SftpFile file) throws SftpStatusException, SshException {
    if (file.isDirectory()) {
        return true;
    }
    // Only stat the target when the entry is actually a link.
    return file.isLink() && stat(file.getAbsolutePath()).isDirectory();
}
Determine whether the file object is pointing to a symbolic link that is pointing to a directory .
12,002
/**
 * Downloads the remote file into the local file, delegating to the
 * four-argument overload with no progress listener.
 */
public SftpFileAttributes get(String remote, String local, boolean resume)
        throws FileNotFoundException, SftpStatusException, SshException, TransferCancelledException {
    // null = no FileTransferProgress callback.
    return get(remote, local, null, resume);
}
Download the remote file into the local file .
12,003
/**
 * Creates an InputStream for reading a remote file, starting at the
 * given byte position.
 */
public InputStream getInputStream(String remotefile, long position) throws SftpStatusException, SshException {
    String resolved = resolveRemotePath(remotefile);
    // Probe the attributes first so a missing file fails here rather
    // than on the first read.
    sftp.getAttributes(resolved);
    return new SftpFileInputStream(sftp.openFile(resolved, SftpSubsystemChannel.OPEN_READ), position);
}
Create an InputStream for reading a remote file .
12,004
/**
 * Downloads the remote file into the given OutputStream starting at the
 * given position, with no progress listener.
 */
public SftpFileAttributes get(String remote, OutputStream local, long position)
        throws SftpStatusException, SshException, TransferCancelledException {
    return get(remote, local, null, position);
}
Download the remote file into an OutputStream .
12,005
/**
 * Creates an OutputStream for writing to a remote file. The file is
 * created if absent and truncated if it already exists.
 */
public OutputStream getOutputStream(String remotefile) throws SftpStatusException, SshException {
    String resolved = resolveRemotePath(remotefile);
    int openFlags = SftpSubsystemChannel.OPEN_CREATE
            | SftpSubsystemChannel.OPEN_TRUNCATE
            | SftpSubsystemChannel.OPEN_WRITE;
    return new SftpFileOutputStream(sftp.openFile(resolved, openFlags));
}
Create an OutputStream for writing to a remote file .
12,006
/**
 * Uploads the contents of the InputStream to the remote path starting
 * at the given position, with no progress listener.
 */
public void put(InputStream in, String remote, long position)
        throws SftpStatusException, SshException, TransferCancelledException {
    put(in, remote, null, position);
}
Upload the contents of an InputStream to the remote computer .
12,007
/**
 * Removes a file or directory on the remote computer.
 *
 * @param path    path to remove
 * @param force   allow deletion of a non-empty directory's files
 * @param recurse descend into subdirectories and delete them too
 * @throws SftpStatusException if the directory is non-empty and force
 *         is false, or contains subdirectories and recurse is false
 */
public void rm(String path, boolean force, boolean recurse) throws SftpStatusException, SshException {
    String actual = resolveRemotePath(path);
    SftpFileAttributes attrs = sftp.getAttributes(actual);
    // Plain file: remove it directly.
    if (!attrs.isDirectory()) {
        sftp.removeFile(actual);
        return;
    }
    SftpFile[] list = ls(path);
    if (!force && list.length > 0) {
        // Fixed typo in the original message ("overide").
        throw new SftpStatusException(SftpStatusException.SSH_FX_FAILURE,
                "You cannot delete non-empty directory, use force=true to override");
    }
    for (SftpFile file : list) {
        if (file.isDirectory() && !file.getFilename().equals(".") && !file.getFilename().equals("..")) {
            if (recurse) {
                rm(file.getAbsolutePath(), force, recurse);
            } else {
                throw new SftpStatusException(SftpStatusException.SSH_FX_FAILURE,
                        "Directory has contents, cannot delete without recurse=true");
            }
        } else if (file.isFile()) {
            sftp.removeFile(file.getAbsolutePath());
        }
    }
    // All children gone; remove the (now empty) directory itself.
    sftp.removeDirectory(actual);
}
Remove a file or directory on the remote computer with options to force deletion of existing files and recursion .
12,008
/**
 * Resolves the given path locally and then asks the server for its
 * canonical absolute form.
 */
public String getAbsolutePath(String path) throws SftpStatusException, SshException {
    String resolved = resolveRemotePath(path);
    return sftp.getAbsolutePath(resolved);
}
Get the absolute path for a file .
12,009
/**
 * Uploads every local file matching the regular expression
 * {@code local}, delegating to putFileMatches.
 */
public void putFiles(String local, String remote, FileTransferProgress progress, boolean resume)
        throws FileNotFoundException, SftpStatusException, SshException, TransferCancelledException {
    putFileMatches(local, remote, progress, resume);
}
Upload every local file matching the regular expression given by local, delegating to putFileMatches, which calls put on each matching file.
12,010
// Runs the given authentication mechanism against this protocol instance.
// The mechanism signals its outcome by THROWING AuthenticationResult
// (deliberate exception-as-control-flow): reaching the disconnect/throw
// after readMessage() means the server sent something unexpected. On a
// COMPLETE result the transport is told authentication finished; the
// resulting state code is stored and returned.
public int authenticate ( AuthenticationClient auth , String servicename ) throws SshException { try { auth . authenticate ( this , servicename ) ; readMessage ( ) ; transport . disconnect ( TransportProtocol . PROTOCOL_ERROR , "Unexpected response received from Authentication Protocol" ) ; throw new SshException ( "Unexpected response received from Authentication Protocol" , SshException . PROTOCOL_VIOLATION ) ; } catch ( AuthenticationResult result ) { state = result . getResult ( ) ; if ( state == SshAuthentication . COMPLETE ) transport . completedAuthentication ( ) ; return state ; } }
Authenticate using the mechanism provided .
12,011
// Builds and sends an SSH_MSG_USERAUTH_REQUEST packet: message id, then
// username, service name, method name, followed by optional
// method-specific request data. IOExceptions from the in-memory writer are
// wrapped as SshException(INTERNAL_ERROR); the close() failure in the
// finally block is deliberately ignored (in-memory buffer, nothing to leak).
public void sendRequest ( String username , String servicename , String methodname , byte [ ] requestdata ) throws SshException { ByteArrayWriter msg = new ByteArrayWriter ( ) ; try { msg . write ( SSH_MSG_USERAUTH_REQUEST ) ; msg . writeString ( username ) ; msg . writeString ( servicename ) ; msg . writeString ( methodname ) ; if ( requestdata != null ) { msg . write ( requestdata ) ; } transport . sendMessage ( msg . toByteArray ( ) , true ) ; } catch ( IOException ex ) { throw new SshException ( ex , SshException . INTERNAL_ERROR ) ; } finally { try { msg . close ( ) ; } catch ( IOException e ) { } } }
Send an authentication request . This sends an SSH_MSG_USERAUTH_REQUEST message .
12,012
/**
 * Returns the sum of two unsigned 32-bit integers as a new
 * UnsignedInteger32.
 */
public static UnsignedInteger32 add(UnsignedInteger32 x, UnsignedInteger32 y) {
    long total = x.longValue() + y.longValue();
    return new UnsignedInteger32(total);
}
Add two unsigned integers together .
12,013
/**
 * Adds, replaces or removes an authentication method.
 *
 * @param methodId method identifier; must be within 0..255
 * @param method   handler to register, or null to remove the mapping
 * @return false when methodId is out of range, or when method is null
 *         and no mapping existed; true otherwise
 */
public boolean setAuthenticationMethod(int methodId, Authentication method) {
    if (methodId < 0 || methodId > 255) {
        return false;
    }
    // Integer.valueOf replaces the deprecated Integer(int) constructor
    // and uses the JDK's boxed-value cache.
    if (method == null) {
        return authMethods.remove(Integer.valueOf(methodId)) != null;
    }
    authMethods.put(Integer.valueOf(methodId), method);
    return true;
}
Adds another authentication method .
12,014
/**
 * Returns the authentication method registered under the given id, or
 * null when none is registered.
 */
public Authentication getAuthenticationMethod(int methodId) {
    // Integer.valueOf replaces the deprecated Integer(int) constructor.
    Object method = authMethods.get(Integer.valueOf(methodId));
    return method == null ? null : (Authentication) method;
}
Get authentication method which corresponds to given method id
12,015
/**
 * Installs the deprecated CBC-mode ciphers, registering each algorithm
 * only when the underlying JCE provider actually supports it.
 */
public void installCBCCiphers(ComponentFactory ciphers) {
    if (testJCECipher("3des-cbc", TripleDesCbc.class)) {
        ciphers.add("3des-cbc", TripleDesCbc.class);
    }
    if (testJCECipher("blowfish-cbc", BlowfishCbc.class)) {
        ciphers.add("blowfish-cbc", BlowfishCbc.class);
    }
    if (testJCECipher("aes128-cbc", AES128Cbc.class)) {
        ciphers.add("aes128-cbc", AES128Cbc.class);
    }
    if (testJCECipher("aes192-cbc", AES192Cbc.class)) {
        ciphers.add("aes192-cbc", AES192Cbc.class);
    }
    if (testJCECipher("aes256-cbc", AES256Cbc.class)) {
        ciphers.add("aes256-cbc", AES256Cbc.class);
    }
}
Install deprecated Cipher-Block-Chaining (CBC) mode ciphers .
12,016
/**
 * Installs the deprecated ArcFour (RC4) stream ciphers, registering each
 * variant only when the JCE provider supports it.
 */
public void installArcFourCiphers(ComponentFactory ciphers) {
    if (testJCECipher("arcfour", ArcFour.class)) {
        ciphers.add("arcfour", ArcFour.class);
    }
    if (testJCECipher("arcfour128", ArcFour128.class)) {
        ciphers.add("arcfour128", ArcFour128.class);
    }
    if (testJCECipher("arcfour256", ArcFour256.class)) {
        ciphers.add("arcfour256", ArcFour256.class);
    }
}
Install deprecated ArcFour ciphers
12,017
/**
 * Adds an attribute to the event, storing the literal string "null"
 * when the supplied value is null.
 *
 * @return this event, to allow call chaining
 */
public Event addAttribute(String key, Object value) {
    Object stored = (value == null) ? "null" : value;
    eventAttributes.put(key, stored);
    return this;
}
Add an attribute to the event
12,018
/**
 * Resolves the remote host name to an InetAddress, caching the result.
 * Returns null when the name cannot be resolved locally (for example
 * when resolution is delegated to a proxy).
 */
public InetAddress getInetAddress() {
    if (remoteIP != null) {
        return remoteIP;
    }
    try {
        remoteIP = InetAddress.getByName(remoteHost);
    } catch (UnknownHostException e) {
        return null;
    }
    return remoteIP;
}
Get remote host as InetAddress object might return null if addresses are resolved by proxy and it is not possible to resolve it locally
12,019
// Runs the indexer over the items: invokes the results object's prepare()
// hook, feeds each item (paired with the map function) through
// indexFunction, then invokes setDone(). Note: every item in the iterable
// is passed through — any "only re-index what changed" optimization must
// live inside indexFunction itself; none is visible here (TODO confirm).
public void run ( Iterable < Item > items , Context cx ) { Function prepareFunc = ( Function ) indexResults . getPrototype ( ) . get ( "prepare" , indexResults ) ; prepareFunc . call ( cx , scope , indexResults , NO_ARGS ) ; Object args [ ] = new Object [ ] { null , mapFunction } ; for ( Item item : items ) { args [ 0 ] = item ; indexFunction . call ( cx , scope , indexResults , args ) ; } Function doneFunc = ( Function ) indexResults . getPrototype ( ) . get ( "setDone" , indexResults ) ; doneFunc . call ( cx , scope , indexResults , NO_ARGS ) ; }
Run the indexer on the given iterable of items . This will attempt to apply some optimizations to ensure that only items which need re - indexing are actually passed to the map function .
12,020
/**
 * Creates a new Indexer from the given map-function source, entering
 * and exiting a Rhino context around construction.
 */
public static Indexer create(String mapTxt) {
    Context ctx = Context.enter();
    try {
        return new Indexer(mapTxt, ctx);
    } finally {
        // Always release the Rhino context, even if construction throws.
        Context.exit();
    }
}
Create a new indexer object
12,021
/**
 * Determines whether the given credentials allow access to the bucket.
 * Administrator credentials grant access to any bucket; otherwise the
 * username must match the bucket name with a matching password, or both
 * the bucket password and the supplied password must be empty.
 *
 * @return true when access is permitted; false otherwise (including
 *         when either credential is null)
 */
public boolean isAuthorizedForBucket(AuthContext ctx, Bucket bucket) {
    String username = ctx.getUsername();
    String password = ctx.getPassword();
    // Null credentials are rejected outright, consistent with
    // isAdministrator(); the original threw NullPointerException here.
    if (username == null || password == null) {
        return false;
    }
    if (username.equals(adminName)) {
        return password.equals(adminPass);
    }
    if (bucket.getName().equals(username)) {
        return bucket.getPassword().equals(password);
    }
    return bucket.getPassword().isEmpty() && password.isEmpty();
}
Determine if the given credentials allow access to the bucket
12,022
/**
 * Checks whether the given credentials identify the administrator.
 * Null username or password always fails.
 */
public boolean isAdministrator(AuthContext ctx) {
    String user = ctx.getUsername();
    String pass = ctx.getPassword();
    if (user == null || pass == null) {
        return false;
    }
    return user.equals(adminName) && pass.equals(adminPass);
}
Check if the given credentials allow administrative access
12,023
/**
 * Creates a response carrying the cluster (CCCP) configuration, or an
 * error response when the server does not support CCCP.
 *
 * @param errOk      status code for the successful config response
 * @param errNotSupp status code when CCCP is not enabled
 */
public static BinaryResponse create(BinaryCommand command, MemcachedServer server, ErrorCode errOk, ErrorCode errNotSupp) {
    if (!server.isCccpEnabled()) {
        return new BinaryResponse(command, errNotSupp);
    }
    String config = server.getBucket().getJSON();
    // Replace the literal hostname with the $HOST placeholder clients expect.
    config = config.replaceAll(Pattern.quote(server.getHostname()), Matcher.quoteReplacement("$HOST"));
    // Encode explicitly as UTF-8; the original used the platform default
    // charset, which is environment-dependent.
    byte[] jsBytes = config.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    ByteBuffer buf = create(command, errOk, Datatype.RAW.value(), 0, 0, jsBytes.length, 0);
    buf.put(jsBytes);
    buf.rewind();
    return new BinaryResponse(buf);
}
Create a new response which contains a cluster configuration if supported
12,024
/**
 * Loads the documents and design documents contained in the given zip
 * archive into the bucket, then reports the counts on stderr.
 *
 * @param docsFile path to the zip archive
 * @throws IOException if the archive cannot be opened or read
 */
public void loadDocuments(String docsFile) throws IOException {
    ZipFile zipFile = new ZipFile(docsFile);
    int numDocs = 0;
    int numDesigns = 0;
    try {
        Enumeration<? extends ZipEntry> entries = zipFile.entries();
        while (entries.hasMoreElements()) {
            ZipEntry ent = entries.nextElement();
            String fName = ent.getName();
            InputStream is = zipFile.getInputStream(ent);
            String contents;
            try {
                contents = ReaderUtils.fromStream(is);
            } finally {
                // Close each entry stream; the original leaked these.
                is.close();
            }
            Matcher mIsDoc = ptnDOCUMENT.matcher(fName);
            if (mIsDoc.matches()) {
                handleDocument(mIsDoc.group(1), contents);
                numDocs++;
                continue;
            }
            Matcher mIsDesign = ptnDESIGN.matcher(fName);
            if (mIsDesign.matches()) {
                handleDesign(mIsDesign.group(1), contents);
                numDesigns++;
            }
        }
    } finally {
        // The original never closed the archive, leaking a file handle.
        zipFile.close();
    }
    System.err.printf("Loaded %d documents. %d design documents%n", numDocs, numDesigns);
}
Load documents into the bucket
12,025
/**
 * Converts a zip bundle into an XZ-compressed serialized resource named
 * after the input with a ".serialized.xz" suffix.
 *
 * @param args args[0] is the input zip file path
 */
public static void main(String[] args) throws Exception {
    String input = args[0];
    File outputFile = new File(input.replace(".zip", "") + ".serialized.xz");
    LZMA2Options options = new LZMA2Options(9);
    BundleSerializer ml = new BundleSerializer();
    // Load before opening the output so a failed load does not leave a
    // truncated output file behind.
    ml.loadDocuments(input);
    // try-with-resources guarantees the stream chain is closed (and the
    // XZ container finished) even when serialization fails; the original
    // leaked all three streams on any exception.
    try (FileOutputStream fos = new FileOutputStream(outputFile);
            XZOutputStream xzo = new XZOutputStream(fos, options);
            ObjectOutputStream oos = new ObjectOutputStream(xzo)) {
        oos.writeObject(ml.toStore);
        oos.flush();
    }
}
Converts a zip file into a serialized, XZ-compressed resource .
12,026
/**
 * Processes one JSON request from the client and returns the JSON
 * response. Parse failures and dispatch failures are reported as "fail"
 * responses rather than propagating exceptions.
 */
public String processInput(String input) {
    JsonObject object;
    try {
        object = gs.fromJson(input, JsonObject.class);
    } catch (Throwable t) {
        return "{ \"status\" : \"fail\", \"error\" : \"Failed to parse input\" }";
    }
    // Guard inputs that parsed but carry no command; the original threw
    // a NullPointerException here, outside any fail-handling.
    if (object == null || !object.has("command")) {
        return "{ \"status\" : \"fail\", \"error\" : \"Missing command\" }";
    }
    String command = object.get("command").getAsString();
    // Missing payload defaults to an empty object.
    JsonObject payload = object.has("payload")
            ? object.get("payload").getAsJsonObject()
            : new JsonObject();
    CommandStatus status;
    try {
        status = dispatch(command, payload);
    } catch (Throwable t) {
        status = new CommandStatus();
        status.fail(t).setPayload(payload);
    }
    return status.toString();
}
Process the input sent from the client utilizing the mock server and return the response .
12,027
// Standalone entry point: builds 1024 vbuckets all owned by a single
// MemcachedServer listening on port 11211 and runs it like a regular
// memcached server. Fatal socket creation errors are logged at SEVERE
// and the method simply returns.
public static void main ( String [ ] args ) { try { VBucketInfo vbi [ ] = new VBucketInfo [ 1024 ] ; for ( int ii = 0 ; ii < vbi . length ; ++ ii ) { vbi [ ii ] = new VBucketInfo ( ) ; } MemcachedServer server = new MemcachedServer ( null , null , 11211 , vbi , false ) ; for ( VBucketInfo aVbi : vbi ) { aVbi . setOwner ( server ) ; } server . run ( ) ; } catch ( IOException e ) { Logger . getLogger ( MemcachedServer . class . getName ( ) ) . log ( Level . SEVERE , "Fatal error! failed to create socket: " , e ) ; } }
Program entry point that runs the memcached server as a standalone server just like any other memcached server ...
12,028
/**
 * Creates and loads a new design document with the given name.
 *
 * @throws DesignParseException when the body cannot be parsed
 */
public static DesignDocument create(String body, String name) throws DesignParseException {
    String documentId = "_design/" + name;
    DesignDocument doc = new DesignDocument(body);
    doc.id = documentId;
    doc.load();
    return doc;
}
Create a new design document
12,029
/**
 * Parses the given argument vector into option entries. Parsing stops
 * at "--", or at the first argument that does not look like an option;
 * optind is then set to the index of the first unconsumed argument, or
 * left at -1 when every argument was consumed.
 */
public List<Entry> parse(String[] argv) {
    optind = -1;
    List<Entry> ret = new ArrayList<Entry>();
    int idx = 0;
    while (idx < argv.length) {
        String arg = argv[idx];
        if (arg.equals("--")) {
            ++idx;
            break;
        }
        // An empty argument or one not starting with '-' ends option
        // parsing. The original called charAt(0) unguarded and threw
        // StringIndexOutOfBoundsException on an empty argument.
        if (arg.isEmpty() || arg.charAt(0) != '-') {
            break;
        }
        // charAt(0) == '-' is guaranteed here, so the argument is either
        // a long ("--x") or short ("-x") option; the original's trailing
        // else-break was unreachable.
        if (arg.startsWith("--")) {
            idx = parseLongOption(argv, ret, idx);
        } else {
            idx = parseShortOption(argv, ret, idx);
        }
        ++idx;
    }
    if (idx != argv.length) {
        optind = idx;
    }
    return ret;
}
Parse the given argument vector
12,030
// Factory for buckets by configured type (memcached vs couchbase).
// NOTE(review): an unknown type is reported via FileNotFoundException,
// which reads oddly for this purpose but may be relied upon by callers'
// catch clauses — confirm before changing the exception type.
public static Bucket create ( CouchbaseMock mock , BucketConfiguration config ) throws IOException { switch ( config . type ) { case MEMCACHED : return new MemcachedBucket ( mock , config ) ; case COUCHBASE : return new CouchbaseBucket ( mock , config ) ; default : throw new FileNotFoundException ( "I don't know about this type..." ) ; } }
Create a bucket .
12,031
/**
 * Builds the configuration map shared by Couchbase and Memcached
 * buckets: the replica count and a fixed 100 MB RAM quota.
 */
protected Map<String, Object> getCommonConfig() {
    Map<String, Object> config = new HashMap<String, Object>();
    config.put("replicaNumber", numReplicas);
    // Both quota keys report the same fixed 100 MB value.
    int quotaBytes = 1024 * 1024 * 100;
    Map<String, Object> quota = new HashMap<String, Object>();
    quota.put("rawRAM", quotaBytes);
    quota.put("ram", quotaBytes);
    config.put("quota", quota);
    return config;
}
Returns configuration information common to both Couchbase and Memcached buckets
12,032
// Re-adds a previously failed-over node: restarts the server at the given
// index (silently ignoring out-of-range indices) and triggers a rebalance,
// all under the configuration write lock. The config revision is bumped in
// the finally block so clients observe a configuration change even when
// startup or rebalance fails.
public void respawn ( int index ) { configurationRwLock . writeLock ( ) . lock ( ) ; try { if ( index >= 0 && index < servers . length ) { servers [ index ] . startup ( ) ; } rebalance ( ) ; } finally { Info . incrementConfigRevision ( ) ; configurationRwLock . writeLock ( ) . unlock ( ) ; } }
Re - Add a previously failed - over node
12,033
// Rebalances vbuckets across the currently active servers: each vbucket
// receives a randomly chosen owner (via shuffle) plus up to numReplicas
// replicas drawn from the remaining nodes. A single active node leaves
// vbuckets without replicas. NOTE(review): the config revision is
// incremented both at the start and in the finally block — possibly
// intentional (two observable revisions), but it looks redundant; confirm.
final void rebalance ( ) { configurationRwLock . writeLock ( ) . lock ( ) ; try { Info . incrementConfigRevision ( ) ; List < MemcachedServer > nodes = activeServers ( ) ; for ( int ii = 0 ; ii < numVBuckets ; ++ ii ) { Collections . shuffle ( nodes ) ; vbInfo [ ii ] . setOwner ( nodes . get ( 0 ) ) ; if ( nodes . size ( ) < 2 ) { continue ; } List < MemcachedServer > replicas = nodes . subList ( 1 , nodes . size ( ) ) ; if ( replicas . size ( ) > numReplicas ) { replicas = replicas . subList ( 0 , numReplicas ) ; } vbInfo [ ii ] . setReplicas ( replicas ) ; } } finally { Info . incrementConfigRevision ( ) ; configurationRwLock . writeLock ( ) . unlock ( ) ; } }
Issues a rebalance within the bucket . vBuckets which are mapped to failed - over nodes are relocated with their first replica being promoted to active .
12,034
/**
 * Parses a URL-encoded query string into a JsonObject whose values are
 * themselves parsed as JSON. Returns null when the URL carries no query
 * string.
 *
 * @throws MalformedURLException on a pair without '=' or an undecodable
 *         value
 */
public static JsonObject getJsonQuery(URL url) throws MalformedURLException {
    String query = url.getQuery();
    if (query == null) {
        return null;
    }
    JsonObject payload = new JsonObject();
    JsonParser parser = new JsonParser();
    for (String kv : query.split("&")) {
        // Split on the first '=' only, so values containing a literal
        // '=' survive; the original rejected such pairs.
        String[] parts = kv.split("=", 2);
        if (parts.length != 2) {
            throw new MalformedURLException();
        }
        JsonElement optVal;
        try {
            optVal = parser.parse(URLDecoder.decode(parts[1], "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            throw new MalformedURLException();
        }
        payload.add(parts[0], optVal);
    }
    return payload;
}
Parses a url-encoded query string and returns its parameters as a JsonObject whose values are parsed as JSON; returns null when the URL has no query string.
12,035
/**
 * Parses a traditional URL-encoded query string ("a=1&amp;b=2") into a
 * map of decoded keys to decoded values.
 *
 * @throws MalformedURLException on a pair without '=' or when UTF-8
 *         decoding is unavailable
 */
public static Map<String, String> getQueryParams(String s) throws MalformedURLException {
    Map<String, String> params = new HashMap<String, String>();
    for (String kv : s.split("&")) {
        // Split on the first '=' only so values containing a literal '='
        // are preserved; the original rejected such pairs.
        String[] parts = kv.split("=", 2);
        if (parts.length != 2) {
            throw new MalformedURLException();
        }
        try {
            String k = URLDecoder.decode(parts[0], "UTF-8");
            String v = URLDecoder.decode(parts[1], "UTF-8");
            params.put(k, v);
        } catch (UnsupportedEncodingException ex) {
            throw new MalformedURLException(ex.getMessage());
        }
    }
    return params;
}
Get traditional query parameters as a Java map
12,036
// Sets a plain-text string entity as the response body.
// NOTE(review): setContentEncoding controls the Content-Encoding header
// (gzip etc.), not the character set — "utf-8" here is likely a misuse;
// the charset belongs on the ContentType. Confirm before changing wire
// behavior, since clients may depend on the current header.
public static void makeStringResponse ( HttpResponse response , String s ) { StringEntity entity = new StringEntity ( s , ContentType . TEXT_PLAIN ) ; entity . setContentEncoding ( "utf-8" ) ; response . setEntity ( entity ) ; }
Sets a string as the response
12,037
/**
 * Sets both the status code and the plain-text body of the response.
 */
public static void makeResponse(HttpResponse response, String msg, int status) {
    response.setStatusCode(status);
    makeStringResponse(response, msg);
}
Sets the response body and status
12,038
/**
 * Sends an HTTP 400 (Bad Request) response carrying the given message.
 */
public static void make400Response(HttpResponse response, String msg) {
    makeResponse(response, msg, HttpStatus.SC_BAD_REQUEST);
}
Sets a 400 Bad Request response with a message
12,039
// Writes the response header and entity directly to the connection taken
// from the HTTP context, then flushes. NOTE(review): despite the name
// suggesting a terminal "bail", the connection is NOT closed here —
// confirm whether the caller is responsible for closing it.
public static void bailResponse ( HttpContext cx , HttpResponse response ) throws IOException , HttpException { HttpServerConnection conn = getConnection ( cx ) ; conn . sendResponseHeader ( response ) ; conn . sendResponseEntity ( response ) ; conn . flush ( ) ; }
Send and flush the response object over the current connection; the connection itself is not closed here .
12,040
// Returns the credentials for this exchange: the cached AuthContext from
// the HTTP context when present, otherwise one parsed from the
// Authorization header, or an empty context when no header was sent.
// NOTE(review): a freshly built context is not stored back into cx, so
// header parsing repeats on every call — confirm whether caching was
// intended.
public static AuthContext getAuth ( HttpContext cx , HttpRequest req ) throws IOException { AuthContext auth = ( AuthContext ) cx . getAttribute ( HttpServer . CX_AUTH ) ; if ( auth == null ) { Header authHdr = req . getLastHeader ( HttpHeaders . AUTHORIZATION ) ; if ( authHdr == null ) { auth = new AuthContext ( ) ; } else { auth = new AuthContext ( authHdr . getValue ( ) ) ; } } return auth ; }
Get any authorization credentials supplied over the connection . If no credentials were provided in the request an empty AuthContex is returned
12,041
/**
 * Creates a new Reducer from the given reduce-function source, entering
 * and exiting a Rhino context around construction.
 */
public static Reducer create(String txt) {
    Context ctx = Context.enter();
    try {
        return new Reducer(txt, ctx);
    } finally {
        // Always release the Rhino context, even if construction throws.
        Context.exit();
    }
}
Create a new Reducer object
12,042
// Shuts down the HTTP server: flags shouldRun off, closes the listener
// (ignoring errors), then loops interrupting workers and closing their
// sockets until allWorkers is observed empty, and finally closes the
// listener once more. NOTE(review): the loop spins without sleeping and
// the listener is closed twice; workers presumably remove themselves from
// allWorkers as they exit — confirm before restructuring this shutdown
// sequence.
public void stopServer ( ) { shouldRun = false ; try { listener . close ( ) ; } catch ( IOException ex ) { } while ( true ) { synchronized ( allWorkers ) { if ( allWorkers . isEmpty ( ) ) { break ; } for ( Worker w : allWorkers ) { w . stopSocket ( ) ; w . interrupt ( ) ; } } } try { listener . close ( ) ; } catch ( IOException ex ) { ex . printStackTrace ( ) ; } }
Shut down the HTTP server and all its workers and close the listener socket .
12,043
/**
 * Defines (PUTs) a design document on the mock's CAPI endpoint.
 *
 * @throws IOException when the server reports an error; the server's
 *         error body is printed to stderr before rethrowing
 */
public static void defineDesignDocument(CouchbaseMock mock, String designName, String contents, String bucketName) throws IOException {
    URL url = getDesignURL(mock, designName, bucketName);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    setAuthHeaders(mock, bucketName, conn);
    conn.setRequestMethod("PUT");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);
    conn.setDoInput(true);
    // Write the body as UTF-8 explicitly (the original used the platform
    // default charset) and always close the writer, even on failure.
    OutputStreamWriter osw = new OutputStreamWriter(conn.getOutputStream(), java.nio.charset.StandardCharsets.UTF_8);
    try {
        osw.write(contents);
        osw.flush();
    } finally {
        osw.close();
    }
    try {
        conn.getInputStream().close();
    } catch (IOException ex) {
        // Surface the server's error body to aid debugging, then rethrow.
        InputStream es = conn.getErrorStream();
        if (es != null) {
            System.err.printf("Problem creating view: %s%n", ReaderUtils.fromStream(es));
        } else {
            System.err.printf("Error stream is null!\n");
        }
        throw ex;
    }
}
Utility method to define a view
12,044
/**
 * Sends the indented help text as the response body with the supplied
 * status code.
 */
private static void sendHelpText(HttpResponse response, int code) throws IOException {
    String helpText = MockHelpCommandHandler.getIndentedHelp();
    HandlerUtil.makeStringResponse(response, helpText);
    response.setStatusCode(code);
}
Sends a help text with the provided code
12,045
/**
 * Writes the mutation coordinates (uuid at byte offset 24, seqno at
 * byte offset 32) into the response buffer. Does nothing when extra
 * mutation information is disabled.
 */
public void write(ByteBuffer bb, VBucketCoordinates coords) {
    if (!enabled) {
        return;
    }
    bb.putLong(24, coords.getUuid());
    bb.putLong(32, coords.getSeqno());
}
Write the appropriate mutation information into the output buffers . This method will do nothing if extra mutation information is not enabled .
12,046
// Connects the harakiri monitor to the given address and starts it. When
// terminate is true, the monitor's terminate action exits the JVM with
// status 1. Note: setTemrinateAction is the (misspelled) name of the
// monitor's external API and cannot be corrected from this call site.
public void startHarakiriMonitor ( InetSocketAddress address , boolean terminate ) throws IOException { if ( terminate ) { harakiriMonitor . setTemrinateAction ( new Callable ( ) { public Object call ( ) throws Exception { System . exit ( 1 ) ; return null ; } } ) ; } harakiriMonitor . connect ( address . getHostName ( ) , address . getPort ( ) ) ; harakiriMonitor . start ( ) ; }
Tell the harakiri monitor to connect to the given address .
12,047
/**
 * Builds the default Couchbase bucket configuration from command-line
 * parameters. Exists so the superclass constructor can remain the first
 * statement in the caller.
 */
private static BucketConfiguration createDefaultConfig(String hostname, int numNodes, int bucketStartPort, int numVBuckets, int numReplicas) {
    BucketConfiguration cfg = new BucketConfiguration();
    cfg.type = BucketType.COUCHBASE;
    cfg.hostname = hostname;
    cfg.numNodes = numNodes;
    cfg.bucketStartPort = bucketStartPort;
    cfg.numVBuckets = numVBuckets;
    // A negative replica count means "keep the built-in default".
    if (numReplicas > -1) {
        cfg.numReplicas = numReplicas;
    }
    return cfg;
}
Initializes the default configuration from the command line parameters . This is present in order to allow the super constructor to be the first statement
12,048
/**
 * Returns the carrier (memcached) port for the named bucket.
 *
 * @throws RuntimeException when the bucket does not exist
 */
public int getCarrierPort(String bucketName) {
    Bucket bucket = buckets.get(bucketName);
    if (bucket == null) {
        throw new RuntimeException("Bucket does not exist. Has the mock been started?");
    }
    return bucket.getCarrierPort();
}
Get the carrier port for a bucket .
12,049
// Creates, registers and starts a new bucket: validates the configuration,
// rejects duplicate names while holding the buckets lock, wires up an
// admin server, and for Couchbase-type buckets also a CAPI (views) server,
// then inserts the bucket into the registry and starts it — all inside the
// synchronized block so concurrent creations of the same name cannot race.
public void createBucket ( BucketConfiguration config ) throws BucketAlreadyExistsException , IOException { if ( ! config . validate ( ) ) { throw new IllegalArgumentException ( "Invalid bucket configuration" ) ; } synchronized ( buckets ) { if ( buckets . containsKey ( config . name ) ) { throw new BucketAlreadyExistsException ( config . name ) ; } Bucket bucket = Bucket . create ( this , config ) ; BucketAdminServer adminServer = new BucketAdminServer ( bucket , httpServer , this ) ; adminServer . register ( ) ; bucket . setAdminServer ( adminServer ) ; HttpAuthVerifier verifier = new HttpAuthVerifier ( bucket , authenticator ) ; if ( config . type == BucketType . COUCHBASE ) { CAPIServer capi = new CAPIServer ( bucket , verifier ) ; capi . register ( httpServer ) ; bucket . setCAPIServer ( capi ) ; } buckets . put ( config . name , bucket ) ; bucket . start ( ) ; } }
Create a new bucket and start it .
12,050
// Destroys the named bucket: removes it from the registry under the lock,
// then — outside the lock — shuts down its CAPI server and admin server
// (when present) and stops the bucket itself.
// Throws FileNotFoundException when no bucket of that name exists.
public void removeBucket ( String name ) throws FileNotFoundException { Bucket bucket ; synchronized ( buckets ) { if ( ! buckets . containsKey ( name ) ) { throw new FileNotFoundException ( "No such bucket: " + name ) ; } bucket = buckets . remove ( name ) ; } CAPIServer capi = bucket . getCAPIServer ( ) ; if ( capi != null ) { capi . shutdown ( ) ; } BucketAdminServer adminServer = bucket . getAdminServer ( ) ; if ( adminServer != null ) { adminServer . shutdown ( ) ; } bucket . stop ( ) ; }
Destroy a bucket
12,051
// Starts the mock: binds the HTTP server (choosing an ephemeral port when
// port == 0, optionally printing it in debug mode), creates every
// initially configured bucket, starts the HTTP server, loads seed
// documents from docsFile or the beer-sample data set, then either hooks
// up the harakiri monitor or — in debug mode — prints connection strings
// and wireshark display filters. Finally counts down startupLatch so a
// waiting caller knows every command-line-driven item is initialized.
// NOTE(review): bind failures log and call System.exit(-1) instead of
// throwing — confirm that is intended for embedded use.
private void start ( String docsFile , String monitorAddress , boolean useBeerSample ) throws IOException { try { if ( port == 0 ) { ServerSocketChannel ch = ServerSocketChannel . open ( ) ; ch . socket ( ) . bind ( new InetSocketAddress ( 0 ) ) ; port = ch . socket ( ) . getLocalPort ( ) ; if ( monitorAddress == null && debug ) { System . out . println ( "port=" + port ) ; } httpServer . bind ( ch ) ; } else { httpServer . bind ( new InetSocketAddress ( port ) ) ; } } catch ( IOException ex ) { Logger . getLogger ( CouchbaseMock . class . getName ( ) ) . log ( Level . SEVERE , null , ex ) ; System . exit ( - 1 ) ; } for ( BucketConfiguration config : initialConfigs . values ( ) ) { try { createBucket ( config ) ; } catch ( BucketAlreadyExistsException ex ) { throw new IOException ( ex ) ; } } httpServer . start ( ) ; if ( docsFile != null ) { DocumentLoader loader = new DocumentLoader ( this , "default" ) ; loader . loadDocuments ( docsFile ) ; } else if ( useBeerSample ) { RestAPIUtil . loadBeerSample ( this ) ; } if ( monitorAddress != null ) { startHarakiriMonitor ( monitorAddress , true ) ; } else if ( debug ) { StringBuilder wireshark = new StringBuilder ( "couchbase && (" ) ; System . out . println ( "\nConnection strings:" ) ; for ( Bucket bucket : getBuckets ( ) . values ( ) ) { System . out . println ( "couchbase://127.0.0.1:" + port + "=http/" + bucket . getName ( ) ) ; StringBuilder connstr = new StringBuilder ( "couchbase://" ) ; for ( MemcachedServer server : bucket . getServers ( ) ) { connstr . append ( server . getHostname ( ) ) . append ( ":" ) . append ( server . getPort ( ) ) . append ( "=mcd," ) ; wireshark . append ( "tcp.port == " ) . append ( server . getPort ( ) ) . append ( " || " ) ; } connstr . replace ( connstr . length ( ) - 1 , connstr . length ( ) , "" ) ; connstr . append ( "/" ) . append ( bucket . getName ( ) ) ; System . out . println ( connstr ) ; } wireshark . replace ( wireshark . length ( ) - 4 , wireshark . 
length ( ) , "" ) ; wireshark . append ( ")" ) ; System . out . println ( "\nWireshark filters:" ) ; System . out . println ( "http && tcp.port == " + port ) ; System . out . println ( wireshark ) ; } startupLatch . countDown ( ) ; }
Used for the command line this ensures that the CountDownLatch object is only set to 0 when all the command line parameters have been initialized ; so that when the monitor finally sends the port over the socket all the items will have already been initialized .
12,052
// Sends the command once, waits spec.getAfter(), then keeps retrying until
// spec.getMaxDuration() has elapsed, sleeping between attempts per the
// spec's constant / linear / exponential policy, capped at getCeil() when
// positive. The loop breaks early when the next sleep would overshoot the
// deadline. NOTE(review): the "exponential" branch computes
// interval^attempts via Math.pow rather than interval * 2^attempts —
// confirm this is the intended growth curve.
public void run ( ) throws Exception { client . sendRequest ( cmd ) ; long endTime = System . currentTimeMillis ( ) + spec . getMaxDuration ( ) ; Thread . sleep ( spec . getAfter ( ) ) ; int numAttempts = 0 ; long now = System . currentTimeMillis ( ) ; while ( now < endTime ) { client . sendRequest ( cmd ) ; now = System . currentTimeMillis ( ) ; numAttempts ++ ; long sleepTime = 0 ; if ( spec . isConstant ( ) ) { sleepTime = spec . getInterval ( ) ; } else if ( spec . isLinear ( ) ) { sleepTime = spec . getInterval ( ) * numAttempts ; } else if ( spec . isExponential ( ) ) { sleepTime = ( long ) Math . pow ( spec . getInterval ( ) , numAttempts ) ; } if ( spec . getCeil ( ) > 0 ) { sleepTime = Math . min ( spec . getCeil ( ) , sleepTime ) ; } if ( now + sleepTime > endTime ) { break ; } else { accuSleep ( sleepTime ) ; now = System . currentTimeMillis ( ) ; } } }
Runs until the retry duration is reached
12,053
// Attempts to process one command already buffered in `input` — no socket
// read happens here. Once the header is fully buffered, a command object
// is created lazily; when the command reports itself complete it is
// processed, handed to the protocol handler, and the buffer is rewound for
// the next command. Throws ClosedChannelException once the connection has
// been closed.
public void step ( ) throws IOException { if ( closed ) { throw new ClosedChannelException ( ) ; } if ( input . position ( ) == header . length ) { if ( command == null ) { command = CommandFactory . create ( input ) ; } if ( command . complete ( ) ) { command . process ( ) ; protocolHandler . execute ( command , this ) ; command = null ; input . rewind ( ) ; } } }
Attempt to process a single command from the input buffer . Note this does not actually read from the socket .
12,054
/**
 * Returns true when this connection has buffered response data that
 * still needs to be written to the socket.
 */
boolean hasOutput() {
    return pending != null
            && !pending.isEmpty()
            && pending.get(0).hasRemaining();
}
Determines whether this connection has pending responses to be sent
12,055
/**
 * Returns ownership of an output context's unsent buffers to this
 * connection, placing them ahead of anything already pending so the
 * original send order is preserved.
 */
public void returnOutputContext(OutputContext ctx) {
    List<ByteBuffer> remaining = ctx.releaseRemaining();
    if (pending == null) {
        pending = remaining;
        return;
    }
    List<ByteBuffer> previouslyPending = pending;
    pending = remaining;
    pending.addAll(previouslyPending);
}
Re - transfer ownership of a given output buffer to the connection
12,056
// Applies the feature flags negotiated via a HELLO command. Only the
// explicitly whitelisted features can be enabled; SNAPPY additionally
// requires server-side compression not to be DISABLED; any unrecognized or
// non-whitelisted feature index is forced off. Finally the mutation-info
// writer is toggled to mirror the MUTATION_SEQNO flag.
// Throws IllegalArgumentException when the input array length differs from
// the supported-features array.
void setSupportedFeatures ( boolean [ ] input ) { if ( input . length != supportedFeatures . length ) { throw new IllegalArgumentException ( "Bad features length!" ) ; } for ( int i = 0 ; i < input . length ; i ++ ) { BinaryHelloCommand . Feature feature = BinaryHelloCommand . Feature . valueOf ( i ) ; if ( feature == null ) { supportedFeatures [ i ] = false ; continue ; } switch ( feature ) { case MUTATION_SEQNO : case XERROR : case XATTR : case SELECT_BUCKET : case TRACING : supportedFeatures [ i ] = input [ i ] ; break ; case SNAPPY : supportedFeatures [ i ] = input [ i ] && server . getCompression ( ) != CompressionMode . DISABLED ; break ; default : supportedFeatures [ i ] = false ; break ; } } if ( supportedFeatures [ BinaryHelloCommand . Feature . MUTATION_SEQNO . getValue ( ) ] ) { miw . setEnabled ( true ) ; } else { miw . setEnabled ( false ) ; } }
Sets the supported features from a HELLO command .
12,057
/**
 * Returns the active buffers as an array suitable for vectored I/O,
 * reusing a preallocated one-element array for the common
 * single-buffer case to avoid per-call allocation.
 */
public ByteBuffer[] getIov() {
    int count = buffers.size();
    if (count == 1) {
        singleArray[0] = buffers.get(0);
        return singleArray;
    }
    return buffers.toArray(new ByteBuffer[count]);
}
Get an array of buffers representing all the active chunks
12,058
/**
 * Builds an OutputContext containing up to {@code limit} bytes copied
 * from the front of the current buffer list. Fully consumed source
 * buffers are removed; a partially consumed buffer has its position
 * advanced past the copied bytes.
 */
public OutputContext getSlice(int limit) {
    List<ByteBuffer> newBufs = new LinkedList<ByteBuffer>();
    ByteBuffer buf = ByteBuffer.allocate(limit);
    Iterator<ByteBuffer> iter = buffers.iterator();
    while (iter.hasNext() && buf.hasRemaining()) {
        ByteBuffer cur = iter.next();
        int space = buf.remaining();
        if (cur.remaining() <= space) {
            // Whole source buffer fits; drain it and drop it.
            buf.put(cur);
            iter.remove();
        } else {
            // Copy only what fits, relative to cur's current position;
            // the original sliced from the absolute limit and never
            // advanced cur, so the same bytes were re-sent.
            ByteBuffer slice = cur.duplicate();
            slice.limit(slice.position() + space);
            buf.put(slice);
            cur.position(cur.position() + space);
        }
    }
    // Bug fix: the original never added buf to newBufs, returning an
    // OutputContext over an always-empty list and discarding the data.
    if (buf.position() > 0) {
        buf.flip();
        newBufs.add(buf);
    }
    return new OutputContext(newBufs);
}
Get an OutputBuffer containing a subset of the current one
12,059
// Drops fully flushed buffers from the head of the queue, stopping at the
// first buffer that still has unread data. NOTE(review): the num parameter
// is unused — buffer positions are presumably advanced by the channel
// write itself, making the byte count redundant; confirm.
public void updateBytesSent ( long num ) { Iterator < ByteBuffer > iter = buffers . iterator ( ) ; while ( iter . hasNext ( ) ) { ByteBuffer cur = iter . next ( ) ; if ( cur . hasRemaining ( ) ) { break ; } iter . remove ( ) ; } }
Indicate that some data has been flushed to the network
12,060
/**
 * Releases and returns all remaining buffers, leaving this context
 * empty (the internal list reference becomes null).
 */
public List<ByteBuffer> releaseRemaining() {
    List<ByteBuffer> released = buffers;
    buffers = null;
    return released;
}
Truncate the output . This will empty the list of chunks
12,061
// Produces the coordinates for a new mutation: bumps the vbucket's
// sequence number and pairs it with the vbucket's current uuid.
// NOTE(review): only the array element read is synchronized; incrSeqno()
// runs outside the lock, so it must itself be atomic — confirm.
private MutationStatus incrCoords ( KeySpec ks ) { final StorageVBucketCoordinates curCoord ; synchronized ( vbCoords ) { curCoord = vbCoords [ ks . vbId ] ; } long seq = curCoord . incrSeqno ( ) ; long uuid = curCoord . getUuid ( ) ; VBucketCoordinates coord = new BasicVBucketCoordinates ( uuid , seq ) ; return new MutationStatus ( coord ) ; }
Increments the current coordinates for a new mutation .
12,062
/**
 * Forces storage of the item into the cache with the given coordinates.
 */
void forceStorageMutation(Item itm, VBucketCoordinates coords) {
    // Final argument false = store (not a deletion).
    forceMutation(itm.getKeySpec().vbId, itm, coords, false);
}
Force a storage of an item to the cache .
12,063
/**
 * Forces deletion of the item from the cache with the given coordinates.
 */
void forceDeleteMutation(Item itm, VBucketCoordinates coords) {
    // Final argument true = delete.
    forceMutation(itm.getKeySpec().vbId, itm, coords, true);
}
Forces the deletion of an item from the cache .
12,064
/**
 * Converts an expiration value to an absolute Unix timestamp, applying the
 * simulated clock offset.
 *
 * @param original 0 (no expiry, returned unchanged); a value greater than
 *                 THIRTY_DAYS (treated as already absolute — only the offset is
 *                 applied); otherwise a relative offset in seconds from "now"
 * @return the adjusted absolute expiry time in seconds
 */
public static int convertExpiryTime(int original) {
    if (original == 0) {
        return original; // 0 means "never expires"
    }
    if (original > THIRTY_DAYS) {
        // Already an absolute timestamp; just shift by the simulated offset.
        return original + (int) Info.getClockOffset();
    }
    // Relative expiry: anchor at the current wall clock.
    // System.currentTimeMillis() replaces the legacy 'new Date().getTime()'.
    long nowSeconds = System.currentTimeMillis() / 1000;
    return (int) (nowSeconds + original + Info.getClockOffset());
}
Converts an expiration value to an absolute Unix timestamp .
12,065
/**
 * Decodes a JSON string into an instance of the given class using the shared
 * Gson instance.
 *
 * @param json the JSON text to parse
 * @param cls  the target class
 * @return the decoded object
 */
public static <T> T decode(String json, Class<T> cls) {
    return GSON.fromJson(json, cls);
}
Attempt to decode a JSON string as a Java object
12,066
/**
 * Decodes a JSON string as a {@code Map<String, Object>}. The string must
 * represent a JSON object.
 *
 * @param json the JSON object text to parse
 * @return the decoded map
 */
@SuppressWarnings("unchecked")
public static Map<String, Object> decodeAsMap(String json) {
    return decode(json, HashMap.class);
}
Decode a JSON string as Java map . The string must represent a JSON Object
12,067
/**
 * Returns the raw JSON row at the given index.
 *
 * @param ix zero-based row index
 * @return the row, decoded as a map
 * @throws IndexOutOfBoundsException if {@code ix} is out of range
 */
@SuppressWarnings("unchecked") // elements of 'rows' are expected to be Map<String, Object>
public Map<String, Object> rowAt(int ix) {
    return (Map<String, Object>) rows.get(ix);
}
Get the raw JSON row at a given index
12,068
/**
 * Executes the view query with the given parameters and returns the raw JSON
 * result string. Runs the indexer over the items inside a Rhino context, then
 * invokes the query runner (with the optional reduce function), and finally
 * serializes the result object by hand: all non-"rows" properties first, then
 * the "rows" array.
 *
 * @param items  the items to index
 * @param config query configuration; a default Configuration is used when null
 * @return the JSON-encoded query result
 * @throws QueryExecutionException when the JS layer throws or fails to parse
 */
public String executeRaw(Iterable<Item> items, Configuration config) throws QueryExecutionException {
    if (config == null) {
        config = new Configuration();
    }
    Context cx = Context.enter();
    Scriptable scope = cx.initStandardObjects();
    NativeObject configObject = config.toNativeObject();
    Scriptable redFunc = null;
    if (reducer != null) {
        redFunc = reducer.getFunction();
    }
    try {
        indexer.run(items, cx);
        Scriptable indexResults = indexer.getLastResults();
        Scriptable resultObject;
        try {
            resultObject = jsRun.execute(configObject, indexResults, redFunc, cx);
        } catch (JavaScriptException ex) {
            // A JS-level throw: try to surface the thrown value as JSON; if
            // even stringification fails, fall back to the Ecma error message.
            Object thrownObject = ex.getValue();
            String jsonException;
            try {
                jsonException = (String) NativeJSON.stringify(cx, scope, thrownObject, null, null);
                throw new QueryExecutionException(jsonException);
            } catch (EcmaError ex2) {
                throw new QueryExecutionException(ex2.getErrorMessage());
            }
        } catch (EcmaError parseErr) {
            throw new QueryExecutionException(parseErr.getErrorMessage());
        }
        // Detach "rows" so the remaining properties can be emitted first.
        NativeArray rows = (NativeArray) resultObject.get("rows", resultObject);
        resultObject.delete("rows");
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        for (Object id : ((NativeObject) resultObject).getAllIds()) {
            if (!(id instanceof String)) {
                throw new RuntimeException("ARGH: " + id);
            }
            sb.append('"').append(id).append("\":");
            sb.append((String) NativeJSON.stringify(cx, scope, resultObject.get((String) id, resultObject), null, null));
            sb.append(","); // safe: "rows" is always appended after this loop
        }
        sb.append("\"rows\":[\n");
        for (int i = 0; i < rows.size(); i++) {
            Object o = rows.get(i, rows);
            sb.append((String) NativeJSON.stringify(cx, scope, o, null, null));
            if (i < rows.size() - 1) {
                sb.append(",");
            }
            sb.append("\n");
        }
        sb.append("]\n");
        sb.append("}\n");
        return sb.toString();
    } finally {
        Context.exit(); // always balance Context.enter()
    }
}
Executes the view query with the given parameters .
12,069
/**
 * Returns the Thrift client connection. In THRIFT_SMART mode a smart client is
 * lazily created and cached; otherwise a simple client is returned each call.
 */
public synchronized ThriftClient getThriftClient() {
    if (mode.api != ConnectionAPI.THRIFT_SMART) {
        return getSimpleThriftClient();
    }
    if (tclient == null) {
        tclient = getSmartThriftClient();
    }
    return tclient;
}
Thrift client connection
12,070
/**
 * Returns the raw topology information for an endpoint.
 *
 * @throws RuntimeException when the endpoint is unknown and no default is configured
 */
public String[] getEndpointInfo(InetAddress endpoint) {
    final String[] info = getRawEndpointInfo(endpoint);
    if (info != null) {
        return info;
    }
    throw new RuntimeException("Unknown host " + endpoint + " with no default configured");
}
Get the raw information about an end point
12,071
/** Returns the data center in which the given endpoint resides (slot 0 of its info). */
public String getDatacenter(InetAddress endpoint) {
    final String[] location = getEndpointInfo(endpoint);
    assert location != null : "No location defined for endpoint " + endpoint;
    return location[0];
}
Return the data center in which an endpoint resides
12,072
/** Returns the rack in which the given endpoint resides (slot 1 of its info). */
public String getRack(InetAddress endpoint) {
    final String[] location = getEndpointInfo(endpoint);
    assert location != null : "No location defined for endpoint " + endpoint;
    return location[1];
}
Return the rack in which an endpoint resides
12,073
/**
 * Persists the Pig partition filter into the UDF context so it survives to the
 * backend, serialized as a hex-encoded Thrift IndexClause.
 */
public void setPartitionFilter(Expression partitionFilter) throws IOException {
    Properties props = UDFContext.getUDFContext()
            .getUDFProperties(AbstractCassandraStorage.class);
    String serialized = indexExpressionsToString(filterToIndexExpressions(partitionFilter));
    props.setProperty(PARTITION_FILTER_SIGNATURE, serialized);
}
set partition filter
12,074
/**
 * Writes the next output row. The first tuple field is the row key; the second
 * must be either a tuple of columns or a bag of column tuples.
 *
 * @param t output tuple: (key, tuple-of-columns...) or (key, bag)
 * @throws IOException when the second field is neither a tuple nor a bag, or
 *                     when extra fields follow a bag
 */
public void putNext(Tuple t) throws IOException {
    if (t.size() < 1) {
        // Empty tuples are skipped rather than failed, by design.
        logger.warn("Empty output skipped, filter empty tuples to suppress this warning");
        return;
    }
    ByteBuffer key = objToBB(t.get(0));
    if (t.getType(1) == DataType.TUPLE)
        writeColumnsFromTuple(key, t, 1);
    else if (t.getType(1) == DataType.BAG) {
        if (t.size() > 2)
            throw new IOException("No arguments allowed after bag");
        writeColumnsFromBag(key, (DataBag) t.get(1));
    } else
        throw new IOException("Second argument in output must be a tuple or bag");
}
write next row
12,075
/**
 * Writes tuple data to Cassandra: inner tuples become column mutations, inner
 * bags are delegated to writeColumnsFromBag, and the accumulated mutations are
 * flushed once at the end.
 *
 * @param key    the row key
 * @param t      the source tuple
 * @param offset index of the first field to write (fields before it are skipped)
 * @throws IOException on a non-tuple/non-bag field
 *
 * NOTE(review): non-tuple/non-bag fields are silently skipped when
 * usePartitionFilter is set — presumably because the filter adds extra
 * fields; confirm against the load/store path.
 */
private void writeColumnsFromTuple(ByteBuffer key, Tuple t, int offset) throws IOException {
    ArrayList<Mutation> mutationList = new ArrayList<Mutation>();
    for (int i = offset; i < t.size(); i++) {
        if (t.getType(i) == DataType.BAG)
            writeColumnsFromBag(key, (DataBag) t.get(i));
        else if (t.getType(i) == DataType.TUPLE) {
            Tuple inner = (Tuple) t.get(i);
            if (inner.size() > 0) // empty inner tuples produce no mutation
                mutationList.add(mutationFromTuple(inner));
        } else if (!usePartitionFilter) {
            throw new IOException("Output type was not a bag or a tuple");
        }
    }
    if (mutationList.size() > 0)
        writeMutations(key, mutationList);
}
write tuple data to cassandra
12,076
/**
 * Composes a Cassandra Mutation from a (name, value) tuple. A null value
 * becomes a Deletion of that column (only when deletes are enabled); otherwise
 * a column insert is produced. Timestamps are taken at call time in
 * microseconds.
 *
 * @param t two-field tuple: column name at 0, column value (or null) at 1
 * @throws IOException when the value is null but deletes are disabled
 */
private Mutation mutationFromTuple(Tuple t) throws IOException {
    Mutation mutation = new Mutation();
    if (t.get(1) == null) {
        if (allow_deletes) {
            // Null value means "delete this column".
            mutation.deletion = new Deletion();
            mutation.deletion.predicate = new org.apache.cassandra.thrift.SlicePredicate();
            mutation.deletion.predicate.column_names = Arrays.asList(objToBB(t.get(0)));
            mutation.deletion.setTimestamp(FBUtilities.timestampMicros());
        } else
            throw new IOException("null found but deletes are disabled, set " + PIG_ALLOW_DELETES +
                    "=true in environment or allow_deletes=true in URL to enable");
    } else {
        org.apache.cassandra.thrift.Column column = new org.apache.cassandra.thrift.Column();
        column.setName(objToBB(t.get(0)));
        column.setValue(objToBB(t.get(1)));
        column.setTimestamp(FBUtilities.timestampMicros());
        mutation.column_or_supercolumn = new ColumnOrSuperColumn();
        mutation.column_or_supercolumn.column = column;
    }
    return mutation;
}
compose Cassandra mutation from tuple
12,077
/**
 * Writes bag data to Cassandra. Each bag element with a nested bag becomes a
 * SuperColumn (an empty nested bag becomes a SuperColumn deletion, if deletes
 * are enabled); plain pairs are delegated to mutationFromTuple. Mutations are
 * flushed in batches of 10 to bound memory use.
 *
 * @param key the row key
 * @param bag bag of (name, value-or-bag) tuples
 * @throws IOException on a disallowed delete or a write failure
 */
private void writeColumnsFromBag(ByteBuffer key, DataBag bag) throws IOException {
    List<Mutation> mutationList = new ArrayList<Mutation>();
    for (Tuple pair : bag) {
        Mutation mutation = new Mutation();
        if (DataType.findType(pair.get(1)) == DataType.BAG) {
            // Nested bag: build a SuperColumn from the sub-columns.
            SuperColumn sc = new SuperColumn();
            sc.setName(objToBB(pair.get(0)));
            List<org.apache.cassandra.thrift.Column> columns = new ArrayList<org.apache.cassandra.thrift.Column>();
            for (Tuple subcol : (DataBag) pair.get(1)) {
                org.apache.cassandra.thrift.Column column = new org.apache.cassandra.thrift.Column();
                column.setName(objToBB(subcol.get(0)));
                column.setValue(objToBB(subcol.get(1)));
                column.setTimestamp(FBUtilities.timestampMicros());
                columns.add(column);
            }
            if (columns.isEmpty()) {
                // An empty sub-bag means "delete the whole SuperColumn".
                if (allow_deletes) {
                    mutation.deletion = new Deletion();
                    mutation.deletion.super_column = objToBB(pair.get(0));
                    mutation.deletion.setTimestamp(FBUtilities.timestampMicros());
                } else
                    throw new IOException("SuperColumn deletion attempted with empty bag, but deletes are disabled, set " + PIG_ALLOW_DELETES +
                            "=true in environment or allow_deletes=true in URL to enable");
            } else {
                sc.columns = columns;
                mutation.column_or_supercolumn = new ColumnOrSuperColumn();
                mutation.column_or_supercolumn.super_column = sc;
            }
        } else
            mutation = mutationFromTuple(pair);
        mutationList.add(mutation);
        // Flush periodically so very large bags don't accumulate in memory.
        if (mutationList.size() >= 10) {
            writeMutations(key, mutationList);
            mutationList.clear();
        }
    }
    if (mutationList.size() > 0)
        writeMutations(key, mutationList);
}
write bag data to Cassandra
12,078
/**
 * Writes a batch of mutations for the given row key.
 *
 * @param key       the row key
 * @param mutations the mutations to apply
 * @throws IOException if the underlying writer is interrupted
 */
private void writeMutations(ByteBuffer key, List<Mutation> mutations) throws IOException {
    try {
        writer.write(key, mutations);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers further up the stack can
        // still observe the interruption (it would otherwise be swallowed).
        Thread.currentThread().interrupt();
        throw new IOException(e);
    }
}
write mutation to Cassandra
12,079
/**
 * Translates a Pig filter expression tree into a list of Cassandra
 * IndexExpressions. Comparison operators map one-to-one; AND recurses into
 * both sides; anything else is rejected.
 *
 * @param expression a binary Pig filter expression
 * @return the equivalent index expressions
 * @throws IOException on an unsupported operator
 */
private List<IndexExpression> filterToIndexExpressions(Expression expression) throws IOException {
    List<IndexExpression> indexExpressions = new ArrayList<IndexExpression>();
    Expression.BinaryExpression be = (Expression.BinaryExpression) expression;
    // Encode with an explicit charset: the bare getBytes() used the
    // platform-default encoding, which is not portable.
    ByteBuffer name = ByteBuffer.wrap(
            be.getLhs().toString().getBytes(java.nio.charset.StandardCharsets.UTF_8));
    ByteBuffer value = ByteBuffer.wrap(
            be.getRhs().toString().getBytes(java.nio.charset.StandardCharsets.UTF_8));
    switch (expression.getOpType()) {
        case OP_EQ:
            indexExpressions.add(new IndexExpression(name, IndexOperator.EQ, value));
            break;
        case OP_GE:
            indexExpressions.add(new IndexExpression(name, IndexOperator.GTE, value));
            break;
        case OP_GT:
            indexExpressions.add(new IndexExpression(name, IndexOperator.GT, value));
            break;
        case OP_LE:
            indexExpressions.add(new IndexExpression(name, IndexOperator.LTE, value));
            break;
        case OP_LT:
            indexExpressions.add(new IndexExpression(name, IndexOperator.LT, value));
            break;
        case OP_AND:
            // Conjunction: flatten both subtrees into the same list.
            indexExpressions.addAll(filterToIndexExpressions(be.getLhs()));
            indexExpressions.addAll(filterToIndexExpressions(be.getRhs()));
            break;
        default:
            throw new IOException("Unsupported expression type: " + expression.getOpType().name());
    }
    return indexExpressions;
}
get a list of Cassandra IndexExpression from Pig expression
12,080
/**
 * Serializes a list of index expressions to a hex string by wrapping them in a
 * Thrift IndexClause (with an empty start key) and Thrift-binary-encoding it.
 *
 * @param indexExpressions the expressions to serialize (must not be null)
 * @return hex-encoded Thrift serialization
 * @throws IOException if Thrift serialization fails
 */
private static String indexExpressionsToString(List<IndexExpression> indexExpressions) throws IOException {
    assert indexExpressions != null;
    // The IndexClause is just a serialization envelope for the expressions.
    IndexClause indexClause = new IndexClause();
    indexClause.setExpressions(indexExpressions);
    indexClause.setStart_key("".getBytes());
    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
    try {
        return Hex.bytesToHex(serializer.serialize(indexClause));
    } catch (TException e) {
        throw new IOException(e);
    }
}
convert a list of index expression to string
12,081
/**
 * Deserializes a hex-encoded Thrift IndexClause (as produced by
 * indexExpressionsToString) back into its list of index expressions.
 *
 * @param ie hex-encoded Thrift serialization (must not be null)
 * @return the decoded index expressions
 * @throws IOException if Thrift deserialization fails
 */
private static List<IndexExpression> indexExpressionsFromString(String ie) throws IOException {
    assert ie != null;
    TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
    IndexClause indexClause = new IndexClause();
    try {
        deserializer.deserialize(indexClause, Hex.hexToBytes(ie));
    } catch (TException e) {
        throw new IOException(e);
    }
    return indexClause.getExpressions();
}
convert string to a list of index expression
12,082
/**
 * Retrieves the partition filter's index expressions from the UDF context, or
 * null when no partition filter has been set.
 */
private List<IndexExpression> getIndexExpressions() throws IOException {
    Properties props = UDFContext.getUDFContext()
            .getUDFProperties(AbstractCassandraStorage.class);
    String serialized = props.getProperty(PARTITION_FILTER_SIGNATURE);
    if (serialized == null) {
        return null;
    }
    return indexExpressionsFromString(serialized);
}
get a list of index expression
12,083
/**
 * Returns the column metadata for the column family.
 *
 * NOTE(review): both boolean flags to getColumnMeta are hard-coded true —
 * their exact meaning isn't visible here; confirm against getColumnMeta.
 */
protected List<ColumnDef> getColumnMetadata(Cassandra.Client client) throws TException, CharacterCodingException, InvalidRequestException, ConfigurationException {
    return getColumnMeta(client, true, true);
}
get a list of column for the column family
12,084
/** Wraps a decoded row key in a fresh single-element tuple. */
private Tuple keyToTuple(ByteBuffer key, CfDef cfDef, AbstractType comparator) throws IOException {
    final Tuple result = TupleFactory.getInstance().newTuple(1);
    addKeyToTuple(result, key, cfDef, comparator);
    return result;
}
convert key to a tuple
12,085
/**
 * Sets position 0 of the tuple to the decoded row key: composite keys are
 * decomposed via the composite comparator; otherwise the key-validator
 * marshaller converts the raw bytes to a Java object.
 */
private void addKeyToTuple(Tuple tuple, ByteBuffer key, CfDef cfDef, AbstractType comparator) throws IOException {
    if (comparator instanceof AbstractCompositeType) {
        setTupleValue(tuple, 0, composeComposite((AbstractCompositeType) comparator, key));
    } else {
        setTupleValue(tuple, 0, cassandraToObj(getDefaultMarshallers(cfDef).get(MarshallerType.KEY_VALIDATOR), key));
    }
}
add key to a tuple
12,086
/**
 * Returns an iterator over the range tombstones, or an empty iterator when
 * none exist. Use sparingly — not the most efficient thing.
 */
public Iterator<RangeTombstone> rangeIterator() {
    if (ranges == null) {
        return Iterators.<RangeTombstone>emptyIterator();
    }
    return ranges.iterator();
}
Use sparingly ; not the most efficient thing .
12,087
/**
 * Adds the specified mandatory conditions, lazily creating the backing list.
 *
 * @return this builder, for chaining
 */
public BooleanConditionBuilder must(ConditionBuilder... conditionBuilders) {
    if (must == null) {
        must = new ArrayList<>(conditionBuilders.length);
    }
    for (int i = 0; i < conditionBuilders.length; i++) {
        must.add(conditionBuilders[i].build());
    }
    return this;
}
Returns this builder with the specified mandatory conditions .
12,088
/**
 * Adds the specified optional conditions, lazily creating the backing list.
 *
 * @return this builder, for chaining
 */
public BooleanConditionBuilder should(ConditionBuilder... conditionBuilders) {
    if (should == null) {
        should = new ArrayList<>(conditionBuilders.length);
    }
    for (int i = 0; i < conditionBuilders.length; i++) {
        should.add(conditionBuilders[i].build());
    }
    return this;
}
Returns this builder with the specified optional conditions .
12,089
/**
 * Adds the specified mandatory-not (exclusion) conditions, lazily creating the
 * backing list.
 *
 * @return this builder, for chaining
 */
public BooleanConditionBuilder not(ConditionBuilder... conditionBuilders) {
    if (not == null) {
        not = new ArrayList<>(conditionBuilders.length);
    }
    for (int i = 0; i < conditionBuilders.length; i++) {
        not.add(conditionBuilders[i].build());
    }
    return this;
}
Returns this builder with the specified mandatory not conditions .
12,090
/**
 * Loads a keyspace into this Schema: every column family definition first,
 * then the keyspace definition itself.
 *
 * @return this Schema, for chaining
 */
public Schema load(KSMetaData keyspaceDef) {
    for (CFMetaData cfm : keyspaceDef.cfMetaData().values()) {
        load(cfm);
    }
    setKeyspaceDefinition(keyspaceDef);
    return this;
}
Load specific keyspace into Schema
12,091
/**
 * Registers the given Keyspace instance with the schema.
 *
 * @throws IllegalArgumentException if an instance with that name already exists
 */
public void storeKeyspaceInstance(Keyspace keyspace) {
    final String name = keyspace.getName();
    if (keyspaceInstances.containsKey(name)) {
        throw new IllegalArgumentException(String.format("Keyspace %s was already initialized.", name));
    }
    keyspaceInstances.put(name, keyspace);
}
Store given Keyspace instance to the schema
12,092
/**
 * Looks up column family metadata by keyspace and column family name.
 * Returns null when either name does not resolve.
 */
public CFMetaData getCFMetaData(String keyspaceName, String cfName) {
    assert keyspaceName != null;
    KSMetaData ksm = keyspaces.get(keyspaceName);
    if (ksm == null) {
        return null;
    }
    return ksm.cfMetaData().get(cfName);
}
Given a keyspace name & column family name get the column family meta data . If the keyspace name or column family name is not valid this function returns null .
12,093
/** Looks up ColumnFamily metadata by its UUID identifier; null when unknown. */
public CFMetaData getCFMetaData(UUID cfId) {
    Pair<String, String> cf = getCF(cfId);
    if (cf == null) {
        return null;
    }
    return getCFMetaData(cf.left, cf.right);
}
Get ColumnFamily metadata by its identifier
12,094
/**
 * Returns the map of ColumnFamily metadata for the named keyspace. The
 * keyspace must be known; both preconditions are assertion-checked.
 */
public Map<String, CFMetaData> getKeyspaceMetaData(String keyspaceName) {
    assert keyspaceName != null;
    final KSMetaData ksm = keyspaces.get(keyspaceName);
    assert ksm != null;
    return ksm.cfMetaData();
}
Get metadata about keyspace inner ColumnFamilies
12,095
/** Evicts the ColumnFamily's id mapping from the schema and marks its metadata purged. */
public void purge(CFMetaData cfm) {
    final Pair<String, String> id = Pair.create(cfm.ksName, cfm.cfName);
    cfIdMap.remove(id);
    cfm.markPurged();
}
Used for ColumnFamily data eviction out from the schema
12,096
/**
 * Reads the serialized schema from the system keyspace, computes an MD5 digest
 * over every valid row, and stores the resulting name-based UUID as the
 * content-based schema version (also persisting it to the system keyspace).
 *
 * Deleted columns and tombstones are stripped before digesting so that the
 * version reflects live schema content only.
 *
 * @throws RuntimeException wrapping any failure (e.g. missing MD5 provider)
 */
public void updateVersion() {
    try {
        MessageDigest versionDigest = MessageDigest.getInstance("MD5");
        for (Row row : SystemKeyspace.serializedSchema()) {
            if (invalidSchemaRow(row) || ignoredSchemaRow(row))
                continue;
            // Normalize the row before hashing: drop deleted columns and
            // purge tombstones so transient state doesn't change the digest.
            ColumnFamilyStore.removeDeletedColumnsOnly(row.cf, Integer.MAX_VALUE, SecondaryIndexManager.nullUpdater);
            row.cf.purgeTombstones(Integer.MAX_VALUE);
            row.cf.updateDigest(versionDigest);
        }
        version = UUID.nameUUIDFromBytes(versionDigest.digest());
        SystemKeyspace.updateSchemaVersion(version);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Read schema from system keyspace and calculate MD5 digest of every row resulting digest will be converted into UUID which would act as content - based version of the schema .
12,097
/**
 * Signals at most one waiting thread: polls registered signals off the queue
 * until one is successfully delivered or the queue drains.
 *
 * @return false when there were no waiters or the queue drained without a
 *         successful delivery; true otherwise
 */
public boolean signal() {
    if (!hasWaiters())
        return false;
    while (true) {
        RegisteredSignal s = queue.poll();
        // Stop when the queue is drained (s == null) or a delivery succeeded
        // (s.signal() returned the woken thread); otherwise retry with the
        // next registered signal (e.g. the previous one was cancelled).
        if (s == null || s.signal() != null)
            return s != null;
    }
}
Signal one waiting thread
12,098
/**
 * Signals every waiting thread, removing each registered signal from the
 * queue as it is visited.
 *
 * The loop samples the signalled thread at exponentially growing intervals
 * (i == 5, 10, 20, ...) and bails out if the same thread shows up again —
 * presumably a guard against spinning forever when a woken thread keeps
 * re-registering itself concurrently; confirm against Signal's contract.
 */
public void signalAll() {
    if (!hasWaiters())
        return;
    int i = 0, s = 5; // i: successful deliveries so far; s: next sampling point
    Thread randomThread = null; // thread sampled at the last checkpoint
    Iterator<RegisteredSignal> iter = queue.iterator();
    while (iter.hasNext()) {
        RegisteredSignal signal = iter.next();
        Thread signalled = signal.signal();
        if (signalled != null) {
            if (signalled == randomThread)
                break; // saw a previously-sampled thread again: stop to avoid looping
            if (++i == s) {
                randomThread = signalled; // take a new sample
                s <<= 1;                  // double the interval to the next sample
            }
        }
        iter.remove();
    }
}
Signal all waiting threads
12,099
/** Returns the number of registered waiters that have not been cancelled. */
public int getWaiting() {
    if (!hasWaiters())
        return 0;
    int waiting = 0;
    for (Signal registered : queue) {
        if (!registered.isCancelled())
            waiting++;
    }
    return waiting;
}
Return how many threads are waiting