idx
int64
0
165k
question
stringlengths
73
5.81k
target
stringlengths
5
918
11,600
/**
 * Returns true if the source string is null/empty (per isEmpty) or consists
 * solely of whitespace characters; false otherwise.
 *
 * @param source the string to test; may be null
 * @return true when the string has no non-whitespace character
 */
public static boolean isBlank(final String source) {
    if (isEmpty(source)) {
        return true;
    }
    final int length = source.length();
    int index = 0;
    while (index < length) {
        if (!Character.isWhitespace(source.charAt(index))) {
            return false;
        }
        index++;
    }
    return true;
}
Returns true if the source string is null, empty, or every character in it is whitespace; false otherwise.
11,601
/**
 * Returns the index within the source string of the first occurrence of the
 * target string, ignoring case. First tries a cheap case-sensitive indexOf;
 * only on a miss does it pay for the lower-cased comparison.
 *
 * @param source the string to search in
 * @param target the string to search for
 * @return the first matching index, or INDEX_OF_NOT_FOUND if absent
 */
public static int indexOfIgnoreCase(final String source, final String target) {
    int targetIndex = source.indexOf(target);
    if (targetIndex == INDEX_OF_NOT_FOUND) {
        // Case-insensitive fallback. The original duplicated the return in
        // both branches of an if/else; a single return is equivalent.
        targetIndex = source.toLowerCase().indexOf(target.toLowerCase());
    }
    return targetIndex;
}
Returns the index within the given source string of the first occurrence of the specified target string, ignoring case.
11,602
/**
 * Returns true if the given text contains the given word as a whole word
 * (as tokenized by matchText); false otherwise or when either argument is null.
 */
public static boolean containsWord(final String text, final String word) {
    if (text == null || word == null) {
        return false;
    }
    // Cheap substring pre-check before the regex tokenization.
    if (!text.contains(word)) {
        return false;
    }
    Matcher matcher = matchText(text);
    while (matcher.find()) {
        if (word.equals(matcher.group(0))) {
            return true;
        }
    }
    return false;
}
Returns true if given text contains given word ; false otherwise .
11,603
/**
 * Case-insensitive variant of containsWord: returns true if the given text
 * contains the given word ignoring case; false when either argument is null.
 */
public static boolean containsWordIgnoreCase(final String text, final String word) {
    if (text == null || word == null) {
        return false;
    }
    final String lowerText = text.toLowerCase();
    final String lowerWord = word.toLowerCase();
    return containsWord(lowerText, lowerWord);
}
Returns true if given text contains given word ignore case ; false otherwise .
11,604
/**
 * Returns true if the source string starts with the target string, ignoring
 * case; false otherwise.
 *
 * Uses String.regionMatches(true, ...) instead of allocating a substring and
 * calling equalsIgnoreCase, which is both faster and garbage-free.
 *
 * @param source the string to examine (must not be null)
 * @param target the prefix to look for (must not be null)
 * @return true when source begins with target, case-insensitively
 */
public static boolean startsWithIgnoreCase(final String source, final String target) {
    if (source.startsWith(target)) {
        return true; // fast case-sensitive path
    }
    if (source.length() < target.length()) {
        return false;
    }
    return source.regionMatches(true, 0, target, 0, target.length());
}
Returns true if the given source string starts with the target string, ignoring case; false otherwise.
11,605
/**
 * Returns true if the source string ends with the target string, ignoring
 * case; false otherwise.
 *
 * Uses String.regionMatches(true, ...) instead of allocating a trailing
 * substring and calling equalsIgnoreCase.
 *
 * @param source the string to examine (must not be null)
 * @param target the suffix to look for (must not be null)
 * @return true when source ends with target, case-insensitively
 */
public static boolean endsWithIgnoreCase(final String source, final String target) {
    if (source.endsWith(target)) {
        return true; // fast case-sensitive path
    }
    if (source.length() < target.length()) {
        return false;
    }
    return source.regionMatches(true, source.length() - target.length(), target, 0, target.length());
}
Returns true if the given source string ends with the target string, ignoring case; false otherwise.
11,606
/**
 * Returns a random alphabetic string of the given length, built from
 * getRandomAlphabetic() characters.
 *
 * @param stringLength number of characters to generate
 */
public static String getRandomString(final int stringLength) {
    final StringBuilder builder = getStringBuild();
    int remaining = stringLength;
    while (remaining > 0) {
        builder.append(getRandomAlphabetic());
        remaining--;
    }
    return builder.toString();
}
Returns a random given length size alphabetic string .
11,607
/**
 * Re-encodes the given string: decodes it to bytes using sourceCharset, then
 * re-interprets those bytes using encodingCharset.
 *
 * @param source          the string to convert
 * @param sourceCharset   charset used to obtain the raw bytes
 * @param encodingCharset charset used to build the returned string
 * @return the re-encoded string
 * @throws IllegalArgumentException if either charset name is unsupported;
 *         the UnsupportedEncodingException is chained as the cause (the
 *         original dropped it, losing the stack trace)
 */
public static String encoding(final String source, final String sourceCharset,
                              final String encodingCharset) throws IllegalArgumentException {
    try {
        byte[] sourceBytes = source.getBytes(sourceCharset);
        return new String(sourceBytes, encodingCharset);
    } catch (UnsupportedEncodingException e) {
        throw new IllegalArgumentException(
                String.format("Unsupported encoding:%s or %s", sourceCharset, encodingCharset), e);
    }
}
Return encoded string by given charset .
11,608
/**
 * Computes bin border values for the given counts.
 *
 * WARNING: sorts the supplied counts list in place.
 *
 * @param numberOfBins desired number of bins
 * @param counts       values to bin; sorted in place as a side effect
 * @return the border values, or {0.0} when counts is empty
 */
public static double[] getBorders(int numberOfBins, List<Double> counts) {
    Collections.sort(counts);
    if (counts.isEmpty()) {
        return new double[] { 0.0 };
    }
    // Try the naive split first; fall back to repeated dividing when it fails.
    List<Integer> borderInds = findBordersNaive(numberOfBins, counts);
    if (borderInds == null) {
        borderInds = findBorderIndsByRepeatedDividing(numberOfBins, counts);
    }
    final double[] borders = new double[borderInds.size()];
    for (int i = 0; i < borders.length; i++) {
        borders[i] = counts.get(borderInds.get(i));
    }
    return borders;
}
Careful: this method sorts the counts list in place!
11,609
/**
 * Fetch the set of dependencies relevant to the compile that was just
 * performed, i.e. only dependencies for packages that were explicitly
 * compiled. Keys and values are package names prefixed with ":".
 *
 * BUG FIX: the original looked up an existing set with key
 * pkg.toString() but inserted it under ":" + pkg.toString(), so the lookup
 * could never find a previously created set. Both now use the same key.
 *
 * @return map from ":"-prefixed package name to its ":"-prefixed dependencies
 */
public Map<String, Set<String>> getDependencies() {
    Map<String, Set<String>> result = new HashMap<String, Set<String>>();
    if (explicitPackages == null) {
        return result;
    }
    for (Name pkg : explicitPackages) {
        Set<Name> pkgDeps = deps.get(pkg);
        if (pkgDeps == null) {
            continue; // nothing recorded for this package
        }
        String key = ":" + pkg.toString();
        Set<String> converted = result.get(key);
        if (converted == null) {
            converted = new HashSet<String>();
            result.put(key, converted);
        }
        for (Name d : pkgDeps) {
            converted.add(":" + d.toString());
        }
    }
    return result;
}
Fetch the set of dependencies that are relevant to the compile that has just been performed . I . e . we are only interested in dependencies for classes that were explicitly compiled .
11,610
/**
 * Visits the public API of a class, builds its pubapi string via
 * PubapiVisitor, and stores it in publicApiPerClass keyed by the class's
 * full name. Also records the class's package in explicitPackages.
 *
 * The assert enforces that a class is visited at most once; a non-empty
 * pubapi string is required before the entry is stored.
 * NOTE(review): asserts are disabled by default at runtime, so a duplicate
 * visit would silently overwrite nothing (the old entry is simply replaced
 * in the map) -- confirm whether this is intended.
 */
public void visitPubapi ( Element e ) { Name n = ( ( ClassSymbol ) e ) . fullname ; Name p = ( ( ClassSymbol ) e ) . packge ( ) . fullname ; StringBuffer sb = publicApiPerClass . get ( n ) ; assert ( sb == null ) ; sb = new StringBuffer ( ) ; PubapiVisitor v = new PubapiVisitor ( sb ) ; v . visit ( e ) ; if ( sb . length ( ) > 0 ) { publicApiPerClass . put ( n , sb ) ; } explicitPackages . add ( p ) ; }
Visit the api of a class and construct a pubapi string and store it into the pubapi_perclass map .
11,611
/**
 * Records a dependency: currPkg is marked as depending on depPkg.
 * Self-dependencies are ignored.
 */
public void collect(Name currPkg, Name depPkg) {
    if (currPkg.equals(depPkg)) {
        return; // a package never depends on itself
    }
    Set<Name> dependencies = deps.get(currPkg);
    if (dependencies == null) {
        dependencies = new HashSet<Name>();
        deps.put(currPkg, dependencies);
    }
    dependencies.add(depPkg);
}
Collect a dependency . curr_pkg is marked as depending on dep_pkg .
11,612
/**
 * Instantiates the given quantity of elements and immediately releases them
 * back into the pool, so the pool is pre-populated. All instances are
 * created first, then released, so none is handed out during preloading.
 *
 * @param quantity number of pooled instances to create
 */
@SuppressWarnings("unchecked")
public void preload(int quantity) {
    final Object[] created = new Object[quantity];
    for (int i = 0; i < quantity; i++) {
        created[i] = this.instantiate();
    }
    for (Object element : created) {
        this.release((T) element);
    }
}
Instantiate quantity numbers of elements from the Pool and set them in the pool right away
11,613
/**
 * Blocks the calling thread until cyclesToBlock clock cycles have elapsed,
 * i.e. until the shared cycles counter reaches its current value plus
 * cyclesToBlock. Registers a BlockedEntry whose semaphore is released by
 * tick(); the while loop re-checks the target on every wakeup so early
 * releases do not unblock prematurely.
 *
 * NOTE(review): an InterruptedException is logged as a spurious wakeup and
 * the thread's interrupt flag is NOT restored -- confirm interruption is
 * meant to be swallowed here.
 */
private void blockMe ( int cyclesToBlock ) { long unblock = cycles + cyclesToBlock ; BlockedEntry newEntry = new BlockedEntry ( unblock ) ; blockedCollection . add ( newEntry ) ; while ( unblock > cycles ) { try { newEntry . getSync ( ) . acquire ( ) ; } catch ( InterruptedException exc ) { log . error ( "[temporaryBlock] Spurious wakeup" , exc ) ; } } }
Blocks the calling thread until enough turns have elapsed
11,614
/**
 * Advances the clock by one cycle and releases the semaphore of every
 * blocked entry whose timeout has been reached.
 *
 * NOTE(review): the comparison is getTimeout() >= cycles, which releases an
 * entry on every tick up to and including its timeout, and entries are never
 * removed from blockedCollection here -- blockMe()'s while loop tolerates
 * the extra permits, but confirm entries are cleaned up elsewhere.
 */
public void tick ( ) { log . trace ( "Tick {}" , cycles ) ; cycles ++ ; for ( BlockedEntry entry : blockedCollection ) { if ( entry . getTimeout ( ) >= cycles ) { entry . getSync ( ) . release ( ) ; } } }
Signals that one cycle has elapsed.
11,615
/**
 * Blocks the calling thread for the specified number of clock cycles on
 * behalf of the given client.
 *
 * @param clientId id of a client previously registered with the clock
 * @param turns    number of cycles to block
 * @param reason   human-readable reason, used only for logging
 * @throws IllegalArgumentException if the client is unregistered, or is
 *         already waiting (one blocking call per client at a time)
 *
 * The waitingList add/remove are each synchronized, but blockMe() itself
 * runs outside the lock so other clients can wait concurrently.
 */
public void waitFor ( Integer clientId , int turns , String reason ) { if ( ! registered . contains ( clientId ) ) { throw new IllegalArgumentException ( "Unknown robot. All robots must first register with clock" ) ; } synchronized ( waitingList ) { if ( waitingList . contains ( clientId ) ) { throw new IllegalArgumentException ( "Client " + clientId + " is already waiting, no multithreading is allowed" ) ; } waitingList . add ( clientId ) ; } log . trace ( "[waitFor] Blocking {} for {} turns. Reason: {}" , clientId , turns , reason ) ; blockMe ( turns ) ; log . trace ( "[waitFor] Unblocked {} - {}" , clientId , reason ) ; synchronized ( waitingList ) { waitingList . remove ( clientId ) ; } }
Blocks the calling thread for the specified number of cycles
11,616
/**
 * Produces a random nonce of the given size in bytes, returned as a hex
 * string. This variant is stateless: the nonce is not stored for later proof.
 *
 * @param size nonce length in bytes (hex string is twice as long)
 */
public String produce(int size) {
    final byte[] buffer = new byte[size];
    _random.nextBytes(buffer);
    return Strings.toHexString(buffer);
}
Produces a nonce of an arbitrary size in bytes which is not to be proved later .
11,617
/**
 * Produces a nonce associated with the given state, to be proved within a
 * time limit. The nonce is stored under the key "state:value".
 *
 * @param state caller-supplied state to bind the nonce to
 * @param time  time-to-live; non-positive values fall back to the TTL default
 * @return the nonce value
 */
public String produce(String state, long time) {
    final long ttl = time > 0 ? time : TTL;
    final Nonce nonce = new Nonce(INSTANCE, ttl, SIZE, _random);
    _map.put(state + ':' + nonce.value, nonce);
    return nonce.value;
}
Produces a nonce that is associated with the given state and is to be proved within a time limit .
11,618
/**
 * Renews a nonce bound to the given state. Succeeds only if the nonce exists
 * and has not yet expired.
 *
 * @param value nonce value
 * @param state state the nonce was produced with
 * @param time  new time-to-live; non-positive values fall back to TTL
 * @return true if the nonce was renewed, false otherwise
 */
public boolean renew(String value, String state, long time) {
    final Nonce nonce = _map.get(state + ':' + value);
    if (nonce == null || nonce.hasExpired()) {
        return false;
    }
    nonce.renew(time > 0 ? time : TTL);
    return true;
}
Renews a nonce . The nonce must be valid and must have not expired .
11,619
/**
 * Proves a nonce and its associated state. The entry is removed from the map
 * unconditionally, so a nonce can be successfully proved at most once.
 *
 * @return true if the nonce existed and had not expired
 */
public boolean prove(String value, String state) {
    final Nonce nonce = _map.remove(state + ':' + value);
    if (nonce == null) {
        return false;
    }
    return !nonce.hasExpired();
}
Proves a nonce and its associated state . A nonce can only be successfully proved once .
11,620
/**
 * Returns true if this content has been categorized with a category whose
 * topic reference equals the given concept URI.
 */
public boolean hasCategory(final String conceptURI) {
    final Predicate<TopicAnnotation> matchesUri = new Predicate<TopicAnnotation>() {
        public boolean apply(TopicAnnotation ta) {
            return ta.getTopicReference().equals(conceptURI);
        }
    };
    return Iterables.any(getCategories(), matchesUri);
}
Returns true if the contents has been categorized with a category identified by the URI passed by parameter
11,621
/**
 * Creates a memory-backed SQLite database; its contents are destroyed when
 * the database is closed.
 *
 * @param factory cursor factory to use, or null for the default
 */
public static SQLiteDatabase create(CursorFactory factory) {
    final String memoryDbPath =
            com.couchbase.lite.internal.database.sqlite.SQLiteDatabaseConfiguration.MEMORY_DB_PATH;
    return openDatabase(memoryDbPath, factory, CREATE_IF_NECESSARY);
}
Create a memory backed SQLite database . Its contents will be destroyed when the database is closed .
11,622
/**
 * Convenience method for replacing a row in the database (INSERT with
 * ON CONFLICT REPLACE semantics).
 *
 * @param table          table to replace the row in
 * @param nullColumnHack column to set NULL when initialValues is empty
 * @param initialValues  column values for the row
 * @return the row ID of the newly inserted row, or -1 if an SQLException
 *         occurred (the error is logged, not rethrown)
 */
public long replace ( String table , String nullColumnHack , ContentValues initialValues ) { try { return insertWithOnConflict ( table , nullColumnHack , initialValues , CONFLICT_REPLACE ) ; } catch ( SQLException e ) { DLog . e ( TAG , "Error inserting " + initialValues , e ) ; return - 1 ; } }
Convenience method for replacing a row in the database .
11,623
/**
 * Decodes a "sha1-"-prefixed base64 digest string into a byte array suitable
 * for use as a blob key.
 *
 * @param base64Digest digest string, must start with "sha1-"
 * @return the decoded digest bytes
 * @throws IllegalArgumentException if the prefix is missing or the base64
 *         payload cannot be decoded
 */
private static byte[] decodeBase64Digest(String base64Digest) {
    final String expectedPrefix = "sha1-";
    if (!base64Digest.startsWith(expectedPrefix)) {
        throw new IllegalArgumentException(base64Digest + " did not start with " + expectedPrefix);
    }
    // substring() instead of replaceFirst(): the prefix is a literal, not a
    // regex, and we have already verified it sits at position 0. Also drops
    // the original's dead "new byte[0]" initializer.
    final String encoded = base64Digest.substring(expectedPrefix.length());
    try {
        return Base64.decode(encoded);
    } catch (IOException e) {
        throw new IllegalArgumentException(e);
    }
}
Decode a base64-encoded digest into a byte array that is suitable for use as a blob key.
11,624
/**
 * Fires a trigger into the replication state machine. The fire happens
 * asynchronously on the shared executor (guarded by synchronizing on it and
 * checking isShutdown, so nothing is submitted after shutdown). Exceptions
 * from stateMachine.fire are logged and rethrown as RuntimeException inside
 * the submitted task, where the executor will swallow them into the Future.
 */
protected void fireTrigger ( final ReplicationTrigger trigger ) { Log . d ( Log . TAG_SYNC , "%s [fireTrigger()] => " + trigger , this ) ; synchronized ( executor ) { if ( ! executor . isShutdown ( ) ) { executor . submit ( new Runnable ( ) { public void run ( ) { try { Log . d ( Log . TAG_SYNC , "firing trigger: %s" , trigger ) ; stateMachine . fire ( trigger ) ; } catch ( Exception e ) { Log . i ( Log . TAG_SYNC , "Error in StateMachine.fire(trigger): %s" , e . getMessage ( ) ) ; throw new RuntimeException ( e ) ; } } } ) ; } } }
Fire a trigger to the state machine
11,625
/**
 * Starts the replication process. Aborts immediately (STOP_IMMEDIATE) if the
 * database is not open. Otherwise registers this replication with the db,
 * initializes session id, batcher, authorizer and request workers, resets
 * lastSequence, and then: one-shot replications go online directly, while
 * continuous ones go online only if the network is reachable (offline
 * otherwise) and start the reachability manager either way.
 * Any exception is logged and swallowed -- the replication simply does not
 * progress in that case.
 */
protected void start ( ) { try { if ( ! db . isOpen ( ) ) { String msg = String . format ( Locale . ENGLISH , "Db: %s is not open, abort replication" , db ) ; parentReplication . setLastError ( new Exception ( msg ) ) ; fireTrigger ( ReplicationTrigger . STOP_IMMEDIATE ) ; return ; } db . addActiveReplication ( parentReplication ) ; this . authenticating = false ; initSessionId ( ) ; initBatcher ( ) ; initAuthorizer ( ) ; initializeRequestWorkers ( ) ; this . lastSequence = null ; if ( ! isContinuous ( ) ) goOnline ( ) ; else { if ( isNetworkReachable ( ) ) goOnline ( ) ; else triggerGoOffline ( ) ; startNetworkReachabilityManager ( ) ; } } catch ( Exception e ) { Log . e ( Log . TAG_SYNC , "%s: Exception in start()" , e , this ) ; } }
Start the replication process .
11,626
/**
 * Closes all resources associated with this replicator: cancels every
 * pending future (non-interrupting cancel) and its CancellableRunnable,
 * shuts down the remote request executor with a bounded wait, and evicts
 * all pooled HTTP connections.
 *
 * NOTE(review): cancellables entries are removed only when a runnable was
 * found for the future; futures without a runnable stay keyed in the map --
 * confirm that map is cleared elsewhere.
 */
protected void close ( ) { this . authenticating = false ; for ( Future future : pendingFutures ) { future . cancel ( false ) ; CancellableRunnable runnable = cancellables . get ( future ) ; if ( runnable != null ) { runnable . cancel ( ) ; cancellables . remove ( future ) ; } } if ( remoteRequestExecutor != null && ! remoteRequestExecutor . isShutdown ( ) ) { Utils . shutdownAndAwaitTermination ( remoteRequestExecutor , Replication . DEFAULT_MAX_TIMEOUT_FOR_SHUTDOWN , Replication . DEFAULT_MAX_TIMEOUT_FOR_SHUTDOWN ) ; } clientFactory . evictAllConnectionsInPool ( ) ; }
Close all resources associated with this replicator .
11,627
/**
 * Determines whether we have an active login session before doing anything
 * else. If an Authorizer is configured, it is given the remote URL and the
 * local database UUID; session-cookie authorizers are verified against the
 * "_session" endpoint, everything else goes straight to login().
 */
protected void checkSession ( ) { if ( getAuthenticator ( ) != null ) { Authorizer auth = ( Authorizer ) getAuthenticator ( ) ; auth . setRemoteURL ( remote ) ; auth . setLocalUUID ( db . publicUUID ( ) ) ; } if ( getAuthenticator ( ) != null && getAuthenticator ( ) instanceof SessionCookieAuthorizer ) { checkSessionAtPath ( "_session" ) ; } else { login ( ) ; } }
Before doing anything else determine whether we have an active login session .
11,628
/**
 * Reloads the remote checkpoint document (GET _local/<checkpoint id>) to
 * obtain its current _rev after a failed save. On success the refreshed
 * checkpoint replaces the cached one, lastSequenceChanged is set, and the
 * last sequence is re-saved. A NOT_FOUND error is tolerated (treated like a
 * successful empty fetch); other errors are only logged. The request future
 * is tracked in pendingFutures so stop()/close() can cancel it.
 */
private void refreshRemoteCheckpointDoc ( ) { Log . i ( Log . TAG_SYNC , "%s: Refreshing remote checkpoint to get its _rev..." , this ) ; Future future = sendAsyncRequest ( "GET" , "_local/" + remoteCheckpointDocID ( ) , null , new RemoteRequestCompletion ( ) { public void onCompletion ( RemoteRequest remoteRequest , Response httpResponse , Object result , Throwable e ) { if ( db == null ) { Log . w ( Log . TAG_SYNC , "%s: db == null while refreshing remote checkpoint. aborting" , this ) ; return ; } if ( e != null && Utils . getStatusFromError ( e ) != Status . NOT_FOUND ) { Log . e ( Log . TAG_SYNC , "%s: Error refreshing remote checkpoint" , e , this ) ; } else { Log . d ( Log . TAG_SYNC , "%s: Refreshed remote checkpoint: %s" , this , result ) ; remoteCheckpoint = ( Map < String , Object > ) result ; lastSequenceChanged = true ; saveLastSequence ( ) ; } } } ) ; pendingFutures . add ( future ) ; }
Variant of fetchRemoteCheckpointDoc that is used while replication is running, to reload the checkpoint and get its current revision number in case there was an error saving it.
11,629
/**
 * Performs the actual work of stopping the replication: clears the batcher,
 * downgrades the lifecycle to ONESHOT (so no retry loop restarts it),
 * cancels any scheduled retry, and drains pendingFutures, cancelling each
 * still-live future (with interruption) and its CancellableRunnable.
 */
protected void stop ( ) { this . authenticating = false ; batcher . clear ( ) ; setLifecycle ( Replication . Lifecycle . ONESHOT ) ; cancelRetryFuture ( ) ; while ( ! pendingFutures . isEmpty ( ) ) { Future future = pendingFutures . poll ( ) ; if ( future != null && ! future . isCancelled ( ) && ! future . isDone ( ) ) { future . cancel ( true ) ; CancellableRunnable runnable = cancellables . get ( future ) ; if ( runnable != null ) { runnable . cancel ( ) ; cancellables . remove ( future ) ; } } } }
Actual work of stopping the replication process .
11,630
/**
 * Notifies all registered change listeners of a ChangeEvent. With SYNC
 * notify style each listener is invoked inline and its exceptions are logged
 * and suppressed (one listener cannot break the others). Otherwise the
 * notification is submitted to the shared executor (skipped if already shut
 * down); there, a listener exception aborts notification of the remaining
 * listeners and is rethrown into the executor -- a behavioral difference
 * from the SYNC path worth noting.
 */
private void notifyChangeListeners ( final Replication . ChangeEvent changeEvent ) { if ( changeListenerNotifyStyle == ChangeListenerNotifyStyle . SYNC ) { for ( ChangeListener changeListener : changeListeners ) { try { changeListener . changed ( changeEvent ) ; } catch ( Exception e ) { Log . e ( Log . TAG_SYNC , "Unknown Error in changeListener.changed(changeEvent)" , e ) ; } } } else { synchronized ( executor ) { if ( ! executor . isShutdown ( ) ) { executor . submit ( new Runnable ( ) { public void run ( ) { try { for ( ChangeListener changeListener : changeListeners ) changeListener . changed ( changeEvent ) ; } catch ( Exception e ) { Log . e ( Log . TAG_SYNC , "Exception notifying replication listener: %s" , e , this ) ; throw new RuntimeException ( e ) ; } } } ) ; } } } }
Notify all change listeners of a ChangeEvent
11,631
/**
 * Schedules a retry (retryIfReady) to run after RETRY_DELAY_SECONDS on the
 * shared executor, storing the handle in retryFuture so it can be cancelled.
 * Skipped entirely if the executor is already shut down. Helper with no
 * counterpart in the iOS code.
 */
private void scheduleRetryFuture ( ) { Log . v ( Log . TAG_SYNC , "%s: Failed to xfer; will retry in %d sec" , this , RETRY_DELAY_SECONDS ) ; synchronized ( executor ) { if ( ! executor . isShutdown ( ) ) { this . retryFuture = executor . schedule ( new Runnable ( ) { public void run ( ) { retryIfReady ( ) ; } } , RETRY_DELAY_SECONDS , TimeUnit . SECONDS ) ; } } }
Helper function to schedule the retry future. Not in the iOS code.
11,632
/**
 * Cancels any scheduled retry (interrupting it if running) and clears the
 * handle. Helper with no counterpart in the iOS code.
 */
private void cancelRetryFuture ( ) { if ( retryFuture != null && ! retryFuture . isDone ( ) ) { retryFuture . cancel ( true ) ; } retryFuture = null ; }
helper function to cancel retry future . not in iOS code .
11,633
/**
 * Schedules a retry if the previous attempt ended with an error. Only acts
 * when the state machine is IDLE, the replication is continuous, and the
 * recorded error is transient; in that case any existing retry is cancelled
 * and a fresh one scheduled after the onBeforeScheduleRetry() hook.
 */
protected void retryReplicationIfError ( ) { Log . d ( TAG , "retryReplicationIfError() state=" + stateMachine . getState ( ) + ", error=" + this . error + ", isContinuous()=" + isContinuous ( ) + ", isTransientError()=" + Utils . isTransientError ( this . error ) ) ; if ( ! stateMachine . getState ( ) . equals ( ReplicationState . IDLE ) ) return ; if ( this . error != null ) { if ( isContinuous ( ) ) { if ( Utils . isTransientError ( this . error ) ) { onBeforeScheduleRetry ( ) ; cancelRetryFuture ( ) ; scheduleRetryFuture ( ) ; } } } }
Retry replication if previous attempt ends with error
11,634
/**
 * Dumps debugging information about this connection. "Unsafe" because the
 * caller might not actually own the connection, so fields are read without
 * synchronization. The raw native pointer is printed only in verbose mode.
 *
 * @param printer destination for the dump
 * @param verbose whether to include the native pointer and verbose
 *                recent-operation details
 */
void dumpUnsafe ( Printer printer , boolean verbose ) { printer . println ( "Connection #" + mConnectionId + ":" ) ; if ( verbose ) { printer . println ( " connectionPtr: 0x" + Long . toHexString ( mConnectionPtr ) ) ; } printer . println ( " isPrimaryConnection: " + mIsPrimaryConnection ) ; printer . println ( " onlyAllowReadOnlyOperations: " + mOnlyAllowReadOnlyOperations ) ; mRecentOperations . dump ( printer , verbose ) ; }
Dumps debugging information about this connection in the case where the caller might not actually own the connection .
11,635
/**
 * Collects memory-usage statistics for the main database into the supplied
 * list. "Unsafe" because the caller might not own the connection; the stats
 * are read without synchronization.
 */
void collectDbStatsUnsafe ( ArrayList < com . couchbase . lite . internal . database . sqlite . SQLiteDebug . DbStats > dbStatsList ) { dbStatsList . add ( getMainDbStatsUnsafe ( 0 , 0 , 0 ) ) ; }
Collects statistics about database connection memory usage in the case where the caller might not actually own the connection .
11,636
/**
 * Executes the remote request synchronously against the factory's OkHttp
 * client, logging entry and exit for tracing.
 */
protected void execute ( ) { Log . v ( Log . TAG_SYNC , "%s: RemoteRequest execute() called, url: %s" , this , url ) ; executeRequest ( factory . getOkHttpClient ( ) , request ( ) ) ; Log . v ( Log . TAG_SYNC , "%s: RemoteRequest execute() finished, url: %s" , this , url ) ; }
Execute remote request
11,637
/**
 * Builds a gzip-compressed JSON request body from the given bytes.
 * Returns null when compression is not worthwhile: the payload is below
 * MIN_JSON_LENGTH_TO_COMPRESS, compression failed, or the compressed form
 * is not strictly smaller than the original.
 */
protected RequestBody setCompressedBody(byte[] bodyBytes) {
    if (bodyBytes.length < MIN_JSON_LENGTH_TO_COMPRESS) {
        return null; // too small for gzip to pay off
    }
    final byte[] compressed = Utils.compressByGzip(bodyBytes);
    final boolean worthwhile = compressed != null && compressed.length < bodyBytes.length;
    return worthwhile ? RequestBody.create(JSON, compressed) : null;
}
Generate gzipped body
11,638
/**
 * Finds the first occurrence of pattern in data[dataOffset..dataLength)
 * using the Knuth-Morris-Pratt algorithm.
 *
 * @param data       haystack bytes
 * @param dataLength number of valid bytes in data (may be less than
 *                   data.length)
 * @param pattern    needle bytes
 * @param dataOffset index to start searching from
 * @return index of the first match, or -1 if not found
 */
int indexOf(byte[] data, int dataLength, byte[] pattern, int dataOffset) {
    final int patternLength = pattern.length;
    // BUG FIX: an empty pattern made the loop read pattern[0] out of bounds;
    // by the usual indexOf convention an empty pattern matches immediately.
    if (patternLength == 0) {
        return dataOffset;
    }
    if (data.length == 0) {
        return -1;
    }
    int[] failure = computeFailure(pattern);
    int j = 0;
    for (int i = dataOffset; i < dataLength; i++) {
        // Fall back along the failure function until a byte matches.
        while (j > 0 && pattern[j] != data[i]) {
            j = failure[j - 1];
        }
        if (pattern[j] == data[i]) {
            j++;
        }
        if (j == patternLength) {
            return i - patternLength + 1;
        }
    }
    return -1;
}
Finds the first occurrence of the pattern in the text .
11,639
/**
 * Computes the KMP failure (prefix) function by matching the pattern
 * against itself: failure[i] is the length of the longest proper prefix of
 * pattern[0..i] that is also a suffix of it.
 */
private static int[] computeFailure(byte[] pattern) {
    final int n = pattern.length;
    final int[] failure = new int[n];
    int matched = 0;
    for (int i = 1; i < n; i++) {
        while (matched > 0 && pattern[matched] != pattern[i]) {
            matched = failure[matched - 1];
        }
        if (pattern[matched] == pattern[i]) {
            matched++;
        }
        failure[i] = matched;
    }
    return failure;
}
Computes the failure function using a boot - strapping process where the pattern is matched against itself .
11,640
/**
 * Returns the names of all existing databases, sorted, as an unmodifiable
 * list. Names are derived from files ending in the database extension, with
 * the extension stripped and ':' mapped back to '/'.
 *
 * BUG FIX: File.list() returns null when the directory does not exist or is
 * unreadable; the original then threw a NullPointerException. An empty list
 * is returned instead.
 */
public List<String> getAllDatabaseNames() {
    String[] databaseFiles = directoryFile.list(new FilenameFilter() {
        public boolean accept(File dir, String filename) {
            return filename.endsWith(Manager.kDBExtension);
        }
    });
    if (databaseFiles == null) {
        return Collections.unmodifiableList(new ArrayList<String>());
    }
    List<String> result = new ArrayList<String>();
    for (String databaseFile : databaseFiles) {
        String trimmed = databaseFile.substring(
                0, databaseFile.length() - Manager.kDBExtension.length());
        result.add(trimmed.replace(':', '/'));
    }
    Collections.sort(result);
    return Collections.unmodifiableList(result);
}
An array of the names of all existing databases .
11,641
/**
 * Releases all resources used by this Manager and closes all its databases.
 * Runs under lockDatabases; the open databases are snapshotted into an array
 * first so closing them cannot interfere with iteration. Also stops network
 * reachability listening and shuts down the work executor with a bounded
 * wait.
 */
public void close ( ) { synchronized ( lockDatabases ) { Log . d ( Database . TAG , "Closing " + this ) ; Database [ ] openDbs = databases . values ( ) . toArray ( new Database [ databases . size ( ) ] ) ; for ( Database database : openDbs ) database . close ( ) ; databases . clear ( ) ; context . getNetworkReachabilityManager ( ) . stopListening ( ) ; if ( workExecutor != null && ! workExecutor . isShutdown ( ) ) Utils . shutdownAndAwaitTermination ( workExecutor ) ; Log . d ( Database . TAG , "Closed " + this ) ; } }
Releases all resources used by the Manager instance and closes all its databases .
11,642
/**
 * Replaces or installs a database from a directory on disk: validates that
 * the source exists and is a directory, deletes any existing destination,
 * copies the source folder over, then opens the database and replaces its
 * UUIDs so it is treated as a distinct local copy.
 *
 * @param databaseName name of the database to (re)install
 * @param databaseDir  path to the source database directory
 * @return true on success; false on any failure (each failure path is
 *         logged, never thrown)
 *
 * NOTE(review): on a copy failure the old destination has already been
 * deleted, leaving no database behind -- confirm callers tolerate that.
 */
public boolean replaceDatabase ( String databaseName , String databaseDir ) { Database db = getDatabase ( databaseName , false ) ; if ( db == null ) return false ; File dir = new File ( databaseDir ) ; if ( ! dir . exists ( ) ) { Log . w ( Database . TAG , "Database file doesn't exist at path : %s" , databaseDir ) ; return false ; } if ( ! dir . isDirectory ( ) ) { Log . w ( Database . TAG , "Database file is not a directory. " + "Use -replaceDatabaseNamed:withDatabaseFilewithAttachments:error: instead." ) ; return false ; } File destDir = new File ( db . getPath ( ) ) ; File srcDir = new File ( databaseDir ) ; if ( destDir . exists ( ) ) { if ( ! FileDirUtils . deleteRecursive ( destDir ) ) { Log . w ( Database . TAG , "Failed to delete file/directly: " + destDir ) ; return false ; } } try { FileDirUtils . copyFolder ( srcDir , destDir ) ; } catch ( IOException e ) { Log . w ( Database . TAG , "Failed to copy directly from " + srcDir + " to " + destDir , e ) ; return false ; } try { db . open ( ) ; } catch ( CouchbaseLiteException e ) { Log . w ( Database . TAG , "Failed to open database" , e ) ; return false ; } if ( ! db . replaceUUIDs ( ) ) { Log . w ( Database . TAG , "Failed to replace UUIDs" ) ; db . close ( ) ; return false ; } db . close ( ) ; return true ; }
Replaces or installs a database from a file .
11,643
/**
 * Asynchronously dispatches the given task on a background thread, passing
 * it the named Database instance. The database is resolved synchronously, so
 * a missing database fails fast with CouchbaseLiteException.
 *
 * @throws CouchbaseLiteException if the database cannot be obtained
 */
public Future runAsync(String databaseName, final AsyncTask function) throws CouchbaseLiteException {
    final Database database = getDatabase(databaseName);
    final Runnable task = new Runnable() {
        public void run() {
            function.run(database);
        }
    };
    return runAsync(task);
}
Asynchronously dispatches a callback to run on a background thread. The callback will be passed the Database instance. There is not currently a known reason to use it, and it may not make sense on the Android API, but it was added for the purpose of having a consistent API with iOS.
11,644
/**
 * Called when a multipart part's headers have been parsed, before its data.
 * Starts a fresh MultipartDocumentReader for the new document.
 *
 * @param headers the part's headers
 * @throws IllegalStateException if a previous part is still being read
 *         (_docReader must have been cleared by finishedPart())
 */
public void startedPart ( Map headers ) { if ( _docReader != null ) throw new IllegalStateException ( "_docReader is already defined" ) ; Log . v ( TAG , "%s: Starting new document; headers =%s" , this , headers ) ; _docReader = new MultipartDocumentReader ( db ) ; _docReader . setHeaders ( headers ) ; _docReader . startedPart ( headers ) ; }
This method is called when a part's headers have been parsed, before its data is parsed.
11,645
/**
 * Called when a multipart part is complete. Finalizes the current document
 * reader, hands the parsed document (properties + size) to the _onDocument
 * callback, and clears _docReader so the next part can start.
 *
 * @throws IllegalStateException if no part was started (startedPart() not
 *         called first)
 */
public void finishedPart ( ) { if ( _docReader == null ) throw new IllegalStateException ( "_docReader is not defined" ) ; _docReader . finish ( ) ; _onDocument . onDocument ( _docReader . getDocumentProperties ( ) , _docReader . getDocumentSize ( ) ) ; _docReader = null ; Log . v ( TAG , "%s: Finished document" , this ) ; }
This method is called when a part is complete .
11,646
/**
 * Returns the document this row was mapped from, with its current revision
 * loaded from this row, or null when the row has no document id (e.g. when
 * grouping was enabled, so rows don't correspond to individual documents).
 *
 * NOTE(review): the assert on database is a no-op when assertions are
 * disabled; a null database would then NPE on getDocument().
 */
public Document getDocument ( ) { if ( getDocumentId ( ) == null ) { return null ; } assert ( database != null ) ; Document document = database . getDocument ( getDocumentId ( ) ) ; document . loadCurrentRevisionFrom ( this ) ; return document ; }
The document this row was mapped from . This will be nil if a grouping was enabled in the query because then the result rows don t correspond to individual documents .
11,647
/**
 * Returns the ID of the document described by this view row. This is not
 * necessarily the document that caused the row to be emitted; see
 * sourceDocumentID. Resolution order: the row's document revision, then an
 * "_id" property embedded in the emitted value, then the source doc id.
 */
public String getDocumentId() {
    String docID = (documentRevision != null) ? documentRevision.getDocID() : null;
    if (docID == null && value instanceof Map) {
        @SuppressWarnings("unchecked")
        Map<String, Object> props = (Map<String, Object>) value;
        docID = (String) props.get("_id");
    }
    return (docID != null) ? docID : sourceDocID;
}
The ID of the document described by this view row . This is not necessarily the same as the document that caused this row to be emitted ; see the discussion of the . sourceDocumentID property for details .
11,648
/**
 * Returns the revision ID of the document this row was mapped from:
 * the revision attached to the row if present, otherwise the "_rev"
 * (or legacy "rev") property of the emitted value; null if neither exists.
 */
public String getDocumentRevisionId() {
    if (documentRevision != null) {
        final String revFromDoc = documentRevision.getRevID();
        if (revFromDoc != null) {
            return revFromDoc;
        }
    }
    if (!(value instanceof Map)) {
        return null;
    }
    @SuppressWarnings("unchecked")
    Map<String, Object> mapValue = (Map<String, Object>) value;
    String rev = (String) mapValue.get("_rev");
    if (rev == null) {
        rev = (String) mapValue.get("rev");
    }
    return rev;
}
The revision ID of the document this row was mapped from .
11,649
/**
 * Adds an atomic action as a step of this composite action. If the action is
 * itself a composite Action, its step lists are spliced in directly;
 * otherwise its perform/backout/cleanup methods are wrapped as blocks.
 */
public void add(final AtomicAction action) {
    if (action instanceof Action) {
        final Action composite = (Action) action;
        peforms.addAll(composite.peforms);
        backouts.addAll(composite.backouts);
        cleanUps.addAll(composite.cleanUps);
        return;
    }
    final ActionBlock performBlock = new ActionBlock() {
        public void execute() throws ActionException {
            action.perform();
        }
    };
    final ActionBlock backoutBlock = new ActionBlock() {
        public void execute() throws ActionException {
            action.backout();
        }
    };
    final ActionBlock cleanupBlock = new ActionBlock() {
        public void execute() throws ActionException {
            action.cleanup();
        }
    };
    add(performBlock, backoutBlock, cleanupBlock);
}
Adds an action as a step of this one.
11,650
/**
 * Adds an action as a step of this one. Each of the three components is
 * optional; a null component is replaced by the shared no-op block so the
 * three lists stay index-aligned.
 */
public void add(ActionBlock perform, ActionBlock backout, ActionBlock cleanup) {
    if (perform == null) {
        perform = nullAction;
    }
    if (backout == null) {
        backout = nullAction;
    }
    if (cleanup == null) {
        cleanup = nullAction;
    }
    peforms.add(perform);
    backouts.add(backout);
    cleanUps.add(cleanup);
}
Adds an action as a step of this one . The action has three components each optional .
11,651
/**
 * Runs the composite action: performs all steps, then cleans up. On success
 * lastError is cleared; on failure lastError records the exception and it is
 * rethrown. (Backout of already-performed steps is presumably handled inside
 * perform() -- not visible here; confirm against the class.)
 *
 * NOTE(review): an ActionException thrown during cleanup() is deliberately
 * swallowed so cleanup failures don't mask a successful perform -- confirm
 * that is the intended contract.
 */
public void run ( ) throws ActionException { try { perform ( ) ; try { cleanup ( ) ; } catch ( ActionException e ) { } lastError = null ; } catch ( ActionException e ) { lastError = e ; throw e ; } }
Performs all the actions in order . If any action fails backs out the previously performed actions in reverse order . If the actions succeeded cleans them up in reverse order . The lastError property is set to the exception thrown by the failed perform block . The failedStep property is set to the index of the failed perform block .
11,652
/**
 * Executes the block at index nextStep from one of the three step lists
 * (performs, backouts, or cleanUps). ActionExceptions propagate unchanged
 * (the narrow catch-and-rethrow exists so they are NOT wrapped by the
 * broader catch below); any other exception is wrapped in an
 * ActionException that records the failing step index.
 */
private void doAction ( List < ActionBlock > actions ) throws ActionException { try { actions . get ( nextStep ) . execute ( ) ; } catch ( ActionException e ) { throw e ; } catch ( Exception e ) { throw new ActionException ( "Exception raised by step: " + nextStep , e ) ; } }
Subroutine that calls an action block from either performs backOuts or cleanUps .
11,653
/**
 * Updates the stored version of the view. A version change means the map
 * block's semantics changed, so the index must be invalidated. If the view
 * row does not exist yet it is inserted (with total_docs 0) and the index is
 * created, returning true. Otherwise the row is updated -- resetting
 * lastSequence and total_docs to 0 -- only when the stored version differs
 * ("name=? AND version!=?"), so the return value is true exactly when the
 * version actually changed. Returns false on a query error.
 */
public boolean setVersion ( String version ) { SQLiteStorageEngine storage = store . getStorageEngine ( ) ; boolean hasView ; Cursor cursor = null ; try { String sql = "SELECT name, version FROM views WHERE name=?" ; String [ ] args = { name } ; cursor = storage . rawQuery ( sql , args ) ; hasView = cursor . moveToNext ( ) ; } catch ( SQLException e ) { Log . e ( Log . TAG_VIEW , "Error querying existing view name " + name , e ) ; return false ; } finally { if ( cursor != null ) cursor . close ( ) ; } if ( ! hasView ) { ContentValues insertValues = new ContentValues ( ) ; insertValues . put ( "name" , name ) ; insertValues . put ( "version" , version ) ; insertValues . put ( "total_docs" , 0 ) ; storage . insert ( "views" , null , insertValues ) ; createIndex ( ) ; return true ; } ContentValues updateValues = new ContentValues ( ) ; updateValues . put ( "version" , version ) ; updateValues . put ( "lastSequence" , 0 ) ; updateValues . put ( "total_docs" , 0 ) ; String [ ] whereArgs = { name , version } ; int rowsAffected = storage . update ( "views" , updateValues , "name=? AND version!=?" , whereArgs ) ; return ( rowsAffected > 0 ) ; }
Updates the version of the view . A change in version means the delegate s map block has changed its semantics so the _index should be deleted .
11,654
/**
 * Returns the last sequence number that has been indexed for this view,
 * read from the views table, or -1 if the view row is missing or the query
 * fails (the error is logged, not thrown). The cursor is always closed.
 */
public long getLastSequenceIndexed ( ) { String sql = "SELECT lastSequence FROM views WHERE name=?" ; String [ ] args = { name } ; Cursor cursor = null ; long result = - 1 ; try { cursor = store . getStorageEngine ( ) . rawQuery ( sql , args ) ; if ( cursor . moveToNext ( ) ) { result = cursor . getLong ( 0 ) ; } } catch ( Exception e ) { Log . e ( Log . TAG_VIEW , "Error getting last sequence indexed" , e ) ; } finally { if ( cursor != null ) { cursor . close ( ) ; } } return result ; }
The last sequence number that has been indexed .
11,655
/**
 * Decides whether key1 and key2 fall into the same group at the given
 * groupLevel. Non-list keys (or groupLevel 0) are grouped by plain equality.
 * List keys are grouped when their first min(groupLevel, sizes) elements are
 * pairwise equal; lists shorter than groupLevel only group with lists of the
 * exact same size.
 */
private static boolean groupTogether(Object key1, Object key2, int groupLevel) {
    if (groupLevel == 0 || !(key1 instanceof List) || !(key2 instanceof List)) {
        return key1.equals(key2);
    }
    @SuppressWarnings("unchecked")
    List<Object> list1 = (List<Object>) key1;
    @SuppressWarnings("unchecked")
    List<Object> list2 = (List<Object>) key2;
    final boolean eitherShorterThanLevel =
            list1.size() < groupLevel || list2.size() < groupLevel;
    if (eitherShorterThanLevel && list1.size() != list2.size()) {
        return false;
    }
    final int end = Math.min(groupLevel, Math.min(list1.size(), list2.size()));
    for (int i = 0; i < end; ++i) {
        final Object a = list1.get(i);
        final Object b = list2.get(i);
        // null-safe element comparison
        if (a == null ? b != null : !a.equals(b)) {
            return false;
        }
    }
    return true;
}
Are key1 and key2 grouped together at this groupLevel?
11,656
/**
 * Returns the prefix of the key to use in a result row at the given
 * groupLevel: for a list key longer than groupLevel (with groupLevel > 0),
 * the first groupLevel elements (as a sublist view); otherwise the key
 * itself, unchanged.
 */
@SuppressWarnings("unchecked")
public static Object groupKey(Object key, int groupLevel) {
    if (groupLevel <= 0) {
        return key;
    }
    if (!(key instanceof List)) {
        return key;
    }
    final List<Object> listKey = (List<Object>) key;
    return (listKey.size() > groupLevel) ? listKey.subList(0, groupLevel) : key;
}
Returns the prefix of the key to use in the result row at this groupLevel
11,657
/**
 * Returns the total number of rows in the view, updating the view's index
 * first if needed. An index-update failure is logged and swallowed, in which
 * case the (possibly stale) current row count is still returned.
 */
public int getTotalRows ( ) { try { updateIndex ( ) ; } catch ( CouchbaseLiteException e ) { Log . e ( Log . TAG_VIEW , "Update index failed when getting the total rows" , e ) ; } return getCurrentTotalRows ( ) ; }
Get total number of rows in the view . The view s index will be updated if needed before returning the value .
11,658
/**
 * Utility for reduce blocks: sums all Number elements of the list as
 * doubles. Non-numeric elements are skipped with a warning.
 */
public static double totalValues(List<Object> values) {
    double total = 0;
    for (Object candidate : values) {
        if (candidate instanceof Number) {
            total += ((Number) candidate).doubleValue();
        } else {
            Log.w(Log.TAG_VIEW, "Warning non-numeric value found in totalValues: %s", candidate);
        }
    }
    return total;
}
Utility function to use in reduce blocks . Totals an array of Numbers .
11,659
/**
 * Updates multiple view indexes at once by collecting each view's backing
 * store and delegating to this view's store.
 *
 * @throws CouchbaseLiteException on index update failure
 */
protected Status updateIndexes(List<View> views) throws CouchbaseLiteException {
    final List<ViewStore> stores = new ArrayList<ViewStore>(views.size());
    for (View view : views) {
        stores.add(view.viewStore);
    }
    return viewStore.updateIndexes(stores);
}
Update multiple view indexes at once .
11,660
/**
 * Queries the view. Does NOT update the index first. A null options argument
 * is replaced by defaults; grouped/reduced queries go through reducedQuery,
 * everything else through regularQuery.
 */
public List<QueryRow> query(QueryOptions options) throws CouchbaseLiteException {
    final QueryOptions effective = (options != null) ? options : new QueryOptions();
    return groupOrReduce(effective)
            ? viewStore.reducedQuery(effective)
            : viewStore.regularQuery(effective);
}
Queries the view . Does NOT first update the index .
11,661
/**
 * Creates a PrefixPrinter that prepends the given prefix to each line, or
 * returns the printer unchanged when there is no prefix to apply.
 *
 * @param printer printer to wrap
 * @param prefix  prefix for each line; null or empty means no wrapping
 */
public static Printer create(Printer printer, String prefix) {
    // isEmpty() instead of equals("") — clearer intent, no object comparison.
    if (prefix == null || prefix.isEmpty()) {
        return printer;
    }
    return new PrefixPrinter(printer, prefix);
}
Creates a new PrefixPrinter .
11,662
/**
 * Non-standard helper: removes the cookie from the in-memory store and from
 * persistent storage, keyed by the cookie's name.
 */
public void deleteCookie(Cookie cookie) {
    final String cookieName = cookie.name();
    cookies.remove(cookieName);
    deletePersistedCookie(cookieName);
}
Non - standard helper method to delete cookie
11,663
/**
 * Returns the default page size to use when creating a database: the block
 * size of the /data filesystem (queried reflectively via android.os.StatFs,
 * so this also compiles off-Android), falling back to 1024 when the query
 * fails or is unavailable.
 *
 * BUG FIX: the original returned the reflected value without storing it in
 * sDefaultPageSize, so the reflection ran again on every call. The value is
 * now cached; returned values are unchanged.
 */
public static int getDefaultPageSize() {
    synchronized (sLock) {
        if (sDefaultPageSize == 0) {
            try {
                Class clazz = Class.forName("android.os.StatFs");
                Method m = clazz.getMethod("getBlockSize");
                Object statFsObj = clazz.getConstructor(String.class).newInstance("/data");
                Integer value = (Integer) m.invoke(statFsObj, (Object[]) null);
                if (value != null) {
                    sDefaultPageSize = value.intValue();
                }
            } catch (Exception e) {
                // Not running on Android, or reflection failed: fall through
                // to the default below.
            }
        }
        if (sDefaultPageSize == 0) {
            sDefaultPageSize = 1024;
        }
        return sDefaultPageSize;
    }
}
Gets the default page size to use when creating a database .
11,664
/**
 * Converts a byte array to its lowercase hex representation, two characters
 * per byte. Kept super basic so we don't have to rely on any large Base64
 * libraries.
 */
private static String byteArrayToHexString(byte[] bytes) {
    StringBuilder hex = new StringBuilder(bytes.length * 2);
    for (byte b : bytes) {
        int unsigned = b & 0xff;
        if (unsigned < 16) {
            hex.append('0');
        }
        hex.append(Integer.toHexString(unsigned));
    }
    return hex.toString();
}
Uses some very basic byte array <-> hex conversions so we don't have to rely on any large Base64 libraries. Can be overridden if you like!
11,665
/**
 * Creates and saves a new revision with the given properties. Fails with a
 * 412 error if the receiver is not the current revision of the document.
 */
public SavedRevision createRevision(Map<String, Object> properties) throws CouchbaseLiteException {
    final boolean allowConflict = false;
    return document.putProperties(properties, revisionInternal.getRevID(), allowConflict);
}
Creates and saves a new revision with the given properties . This will fail with a 412 error if the receiver is not the current revision of the document .
11,666
/**
 * The contents of this revision of the document, or null if they are not
 * available. Keys beginning with "_" (such as "_id" and "_rev") contain
 * CouchbaseLite metadata. The returned map is unmodifiable.
 */
public Map<String, Object> getProperties() {
    Map<String, Object> properties = revisionInternal.getProperties();
    if (!checkedProperties) {
        // Lazily load the revision body the first time properties are requested.
        if (properties == null && loadProperties()) {
            properties = revisionInternal.getProperties();
        }
        checkedProperties = true;
    }
    return properties != null ? Collections.unmodifiableMap(properties) : null;
}
The contents of this revision of the document . Any keys in the dictionary that begin with _ such as _id and _rev contain CouchbaseLite metadata .
11,667
/**
 * Lazily parses the raw JSON bytes into a Java object the first time it is
 * requested, caching the result for subsequent calls.
 *
 * @return the parsed object, or null if there is no JSON or parsing failed
 */
public Object jsonObject() {
    if (json == null) {
        return null;
    }
    if (cached == null) {
        Object tmp = null;
        // Peek at the first byte to choose a lazy container without a full parse.
        if (json[0] == '{') {
            tmp = new LazyJsonObject<String, Object>(json);
        } else if (json[0] == '[') {
            tmp = new LazyJsonArray<Object>(json);
        } else {
            try {
                // Some byte arrays carry a trailing NUL terminator; exclude it
                // from the bytes handed to the parser.
                if (json.length > 0 && json[json.length - 1] == 0) {
                    tmp = Manager.getObjectMapper().readValue(json, 0, json.length - 1, Object.class);
                } else {
                    tmp = Manager.getObjectMapper().readValue(json, Object.class);
                }
            } catch (Exception e) {
                // Parse failure leaves cached == null, so the caller gets null back.
                Log.w(Database.TAG, "Exception parsing json", e);
            }
        }
        cached = tmp;
    }
    return cached;
}
Lazily parses the raw JSON bytes into a Java object the first time its values are requested, caching the result.
11,668
/**
 * Adds multiple objects to the queue and schedules batch processing.
 * When the inbox has reached capacity and a batch is already ready or
 * running, briefly blocks the caller as back-pressure.
 */
public void queueObjects(List<T> objects) {
    if (objects == null || objects.size() == 0)
        return;
    boolean readyToProcess = false;
    synchronized (mutex) {
        Log.v(Log.TAG_BATCHER, "%s: queueObjects called with %d objects (current inbox size = %d)",
                this, objects.size(), inbox.size());
        inbox.addAll(objects);
        // Notify any threads waiting on mutex for inbox-state changes
        // (presumably flush/wait helpers — not visible in this chunk).
        mutex.notifyAll();
        if (isFlushing) {
            // flushAll() is draining the inbox itself; don't schedule a competing batch.
            return;
        }
        scheduleBatchProcess(false);
        if (inbox.size() >= capacity && isPendingFutureReadyOrInProcessing())
            readyToProcess = true;
    }
    if (readyToProcess) {
        // Back-pressure: give the in-flight batch up to 5 ms to make progress.
        synchronized (processMutex) {
            try {
                processMutex.wait(5);
            } catch (InterruptedException e) {
                // Best-effort wait only; fall through and return.
            }
        }
    }
}
Adds multiple objects to the queue .
11,669
/**
 * Sends _all_ the queued objects at once to the processor block, repeating
 * until the inbox is empty. After this method returns all inbox objects will
 * have been submitted for processing.
 *
 * @param waitForAllToFinish when true, block until each scheduled batch completes
 */
public void flushAll(boolean waitForAllToFinish) {
    Log.v(Log.TAG_BATCHER, "%s: flushing all objects (wait=%b)", this, waitForAllToFinish);
    synchronized (mutex) {
        // Mark flushing so queueObjects() stops scheduling regular batches,
        // and cancel any batch that is already scheduled.
        isFlushing = true;
        unschedule();
    }
    while (true) {
        ScheduledFuture future = null;
        synchronized (mutex) {
            if (inbox.size() == 0)
                break;
            // Drain the entire inbox into one batch, ignoring capacity.
            final List<T> toProcess = new ArrayList<T>(inbox);
            inbox.clear();
            mutex.notifyAll();
            synchronized (workExecutor) {
                if (!workExecutor.isShutdown()) {
                    // Schedule immediately (0 ms) on the work executor.
                    future = workExecutor.schedule(new Runnable() {
                        public void run() {
                            processor.process(toProcess);
                            synchronized (mutex) {
                                lastProcessedTime = System.currentTimeMillis();
                            }
                        }
                    }, 0, TimeUnit.MILLISECONDS);
                }
            }
        }
        if (waitForAllToFinish) {
            if (future != null && !future.isDone() && !future.isCancelled()) {
                try {
                    future.get();
                } catch (Exception e) {
                    Log.e(Log.TAG_BATCHER, "%s: Error while waiting for pending future "
                            + "when flushing all items", e, this);
                }
            }
        }
    }
    synchronized (mutex) {
        isFlushing = false;
    }
}
Sends _all_ the queued objects at once to the processor block . After this method returns all inbox objects will be processed .
11,670
/**
 * Schedules batch processing based on the inbox size, the capacity, and how
 * long ago the last batch was processed.
 *
 * @param immediate when true, schedule with zero delay regardless of inbox size
 */
private void scheduleBatchProcess(boolean immediate) {
    synchronized (mutex) {
        if (inbox.size() == 0)
            return;
        long suggestedDelay = 0;
        // A partially-filled inbox waits for more items — unless we've been
        // idle long enough, in which case a smaller post-pause delay is used.
        if (!immediate && inbox.size() < capacity) {
            if (System.currentTimeMillis() - lastProcessedTime < delay)
                suggestedDelay = delay;
            else {
                suggestedDelay = Math.min(SMALL_DELAY_AFTER_LONG_PAUSE, delay);
            }
        }
        scheduleWithDelay(suggestedDelay);
    }
}
Schedule batch process based on capacity inbox size and last processed time .
11,671
/**
 * Schedules batch processing after the given delay. If a batch is already
 * scheduled with a longer delay it is rescheduled sooner — unless that batch
 * is already ready or in process, in which case this request is ignored.
 */
private void scheduleWithDelay(long delay) {
    synchronized (mutex) {
        if (scheduled && delay < scheduledDelay) {
            if (isPendingFutureReadyOrInProcessing()) {
                Log.v(Log.TAG_BATCHER, "%s: scheduleWithDelay: %d ms, ignored as current batch "
                        + "is ready or in process", this, delay);
                return;
            }
            // Cancel the later-scheduled batch so we can reschedule sooner.
            unschedule();
        }
        if (!scheduled) {
            scheduled = true;
            scheduledDelay = delay;
            Log.v(Log.TAG_BATCHER, "%s: scheduleWithDelay %d ms, scheduled ...", this, delay);
            synchronized (workExecutor) {
                if (!workExecutor.isShutdown()) {
                    pendingFuture = workExecutor.schedule(new Runnable() {
                        public void run() {
                            // NOTE(review): "this" here refers to the Runnable, not the
                            // Batcher, so these two log lines print the Runnable's identity.
                            Log.v(Log.TAG_BATCHER, "%s: call processNow ...", this);
                            processNow();
                            Log.v(Log.TAG_BATCHER, "%s: call processNow done", this);
                        }
                    }, scheduledDelay, TimeUnit.MILLISECONDS);
                }
            }
        } else
            Log.v(Log.TAG_BATCHER, "%s: scheduleWithDelay %d ms, ignored", this, delay);
    }
}
Schedule the batch processing with the delay . If there is one batch currently in processing the schedule will be ignored as after the processing is done the next batch will be rescheduled .
11,672
/** Cancels any pending, not-yet-started batch and clears the scheduled flag. */
private void unschedule() {
    synchronized (mutex) {
        boolean cancellable = pendingFuture != null
                && !pendingFuture.isDone()
                && !pendingFuture.isCancelled();
        if (cancellable) {
            Log.v(Log.TAG_BATCHER, "%s: cancelling the pending future ...", this);
            // false: do not interrupt a batch that is already running.
            pendingFuture.cancel(false);
        }
        scheduled = false;
    }
}
Unschedule the scheduled batch processing .
11,673
/**
 * Returns true when a live (not done, not cancelled) pending batch exists
 * whose scheduled delay has elapsed — i.e. it is ready to run or running.
 */
private boolean isPendingFutureReadyOrInProcessing() {
    synchronized (mutex) {
        if (pendingFuture == null || pendingFuture.isDone() || pendingFuture.isCancelled()) {
            return false;
        }
        return pendingFuture.getDelay(TimeUnit.MILLISECONDS) <= 0;
    }
}
Check if the current pending future is ready to be processed or in processing .
11,674
/**
 * Runs one batch on the work executor: drains up to {@code capacity} items
 * from the inbox, hands them to the processor, and reschedules (immediately,
 * if items were left behind).
 */
private void processNow() {
    List<T> toProcess;
    boolean scheduleNextBatchImmediately = false;
    synchronized (mutex) {
        int count = inbox.size();
        Log.v(Log.TAG_BATCHER, "%s: processNow() called, inbox size: %d", this, count);
        if (count == 0) {
            return;
        } else if (count <= capacity) {
            toProcess = new ArrayList<T>(inbox);
            inbox.clear();
        } else {
            toProcess = new ArrayList<T>(inbox.subList(0, capacity));
            // Remove the drained prefix in one operation; the original called
            // remove(0) capacity times, which is O(capacity^2) on an ArrayList.
            inbox.subList(0, capacity).clear();
            scheduleNextBatchImmediately = true;
        }
        // Wake threads waiting on mutex for the inbox to drain.
        mutex.notifyAll();
    }
    synchronized (processMutex) {
        if (toProcess != null && toProcess.size() > 0) {
            Log.v(Log.TAG_BATCHER, "%s: invoking processor %s with %d items",
                    this, processor, toProcess.size());
            processor.process(toProcess);
        } else {
            Log.v(Log.TAG_BATCHER, "%s: nothing to process", this);
        }
        synchronized (mutex) {
            lastProcessedTime = System.currentTimeMillis();
            scheduled = false;
            scheduleBatchProcess(scheduleNextBatchImmediately);
            // Fixed: the original format string had a single %s but passed three
            // arguments; consume them all explicitly.
            Log.v(Log.TAG_BATCHER, "%s: invoking processor %s done, %d items",
                    this, processor, toProcess.size());
        }
        // Release any producer blocked in queueObjects() back-pressure wait.
        processMutex.notifyAll();
    }
}
This method is called by the work executor to do the batch process . The inbox items up to the batcher capacity will be taken out to process . The next batch will be rescheduled if there are still some items left in the inbox .
11,675
/**
 * Returns the request's Content-Type header value, stripped of any
 * parameters (e.g. ";charset=...") and surrounding whitespace, or null
 * if the header is absent.
 */
private String getRequestHeaderContentType() {
    String contentType = getRequestHeaderValue("Content-Type");
    if (contentType == null) {
        return null;
    }
    int semicolon = contentType.indexOf(';');
    if (semicolon > 0) {
        contentType = contentType.substring(0, semicolon);
    }
    return contentType.trim();
}
Gets the Content-Type header from the URLConnection, stripped of any parameters.
11,676
/**
 * Sets the response's "Location" header to the URL's path, with any query
 * string trimmed off.
 */
private void setResponseLocation(URL url) {
    String location = url.getPath();
    String query = url.getQuery();
    // Defensive: getPath() normally excludes the query, but strip it if present.
    int queryStart = (query == null) ? -1 : location.indexOf(query);
    if (queryStart > 0) {
        location = location.substring(0, queryStart);
    }
    connection.getResHeader().add("Location", location);
}
Router + Handlers
11,677
/**
 * Hack: there is currently no custom serializer for QueryRow, so replace the
 * "rows" entry in place with plain Maps that a generic serializer can handle.
 */
private static void convertCBLQueryRowsToMaps(Map<String, Object> allDocsResult) {
    List<QueryRow> rows = (List<QueryRow>) allDocsResult.get("rows");
    List<Map<String, Object>> plainRows = new ArrayList<Map<String, Object>>();
    if (rows != null) {
        for (QueryRow row : rows) {
            plainRows.add(row.asJSONDictionary());
        }
    }
    allDocsResult.put("rows", plainRows);
}
This is a hack to deal with the fact that there is currently no custom serializer for QueryRow . Instead just convert everything to generic Maps .
11,678
/**
 * Database.ChangeListener implementation backing the _changes feed
 * (longpoll and continuous modes).
 */
public void changed(Database.ChangeEvent event) {
    synchronized (changesLock) {
        if (isTimeout)
            return;
        lastChangesTimestamp = System.currentTimeMillis();
        stopTimeout();
        if (!filled) {
            // First notification: catch up on everything since the client's
            // requested sequence before streaming individual change events.
            filled = true;
            RevisionList changes = db.changesSince(changesSince, changesOptions, changesFilter, changesFilterParams);
            if (changes.size() > 0) {
                sendLongpollChanges(changes, changesSince);
                return;
            }
        }
        List<RevisionInternal> revs = new ArrayList<RevisionInternal>();
        List<DocumentChange> changes = event.getChanges();
        for (DocumentChange change : changes) {
            RevisionInternal rev = change.getAddedRevision();
            if (rev == null)
                continue;
            String winningRevID = change.getWinningRevisionID();
            if (!this.changesIncludesConflicts) {
                // When conflicts are excluded, only report winning revisions;
                // substitute the winner (keeping this change's sequence) when
                // the added revision lost.
                if (winningRevID == null)
                    continue;
                else if (!winningRevID.equals(rev.getRevID())) {
                    RevisionInternal mRev = db.getDocument(rev.getDocID(), winningRevID, changesIncludesDocs);
                    mRev.setSequence(rev.getSequence());
                    rev = mRev;
                }
            }
            // Skip revisions rejected by the client's filter function.
            if (!event.getSource().runFilter(changesFilter, changesFilterParams, rev))
                continue;
            if (longpoll) {
                // Longpoll: accumulate and send one response at the end.
                revs.add(rev);
            } else {
                // Continuous: stream each change as its own chunk.
                Log.d(TAG, "Router: Sending continuous change chunk");
                sendContinuousChange(rev);
            }
            timeoutLastSeqence = rev.getSequence();
        }
        if (longpoll && revs.size() > 0)
            sendLongpollChanges(revs, changesSince);
        else
            startTimeout();
    }
}
Implementation of ChangeListener
11,679
/**
 * Cancels the operation and signals the cancellation listener exactly once.
 * If the operation has not yet started, it will be canceled as soon as it does.
 */
public void cancel() {
    final OnCancelListener listener;
    synchronized (this) {
        if (mIsCanceled) {
            return; // already canceled; listener was (or is being) notified
        }
        mIsCanceled = true;
        // Mark the callback in progress so setOnCancelListener() can wait
        // for it to finish before swapping listeners.
        mCancelInProgress = true;
        listener = mOnCancelListener;
    }
    try {
        // Invoke the listener outside the lock to avoid deadlocks with listener code.
        if (listener != null) {
            listener.onCancel();
        }
    } finally {
        synchronized (this) {
            mCancelInProgress = false;
            // Wake threads blocked waiting for the cancel callback to complete.
            notifyAll();
        }
    }
}
Cancels the operation and signals the cancellation listener . If the operation has not yet started then it will be canceled as soon as it does .
11,680
/**
 * Sets the cancellation listener to be called when canceled. If cancellation
 * has already occurred, the new listener is invoked immediately (outside the
 * lock).
 */
public void setOnCancelListener(OnCancelListener listener) {
    synchronized (this) {
        // Don't swap listeners while a cancel callback is mid-flight.
        waitForCancelFinishedLocked();
        if (mOnCancelListener == listener) {
            return;
        }
        mOnCancelListener = listener;
        if (!mIsCanceled || listener == null) {
            return;
        }
    }
    // Already canceled: notify the new listener now, outside the lock.
    listener.onCancel();
}
Sets the cancellation listener to be called when canceled .
11,681
/**
 * Blocks until the initial async query finishes. After this call either the
 * rows or the error will be non-null.
 *
 * @throws CouchbaseLiteException if the query failed
 */
public void waitForRows() throws CouchbaseLiteException {
    start();
    boolean interrupted = false;
    try {
        while (true) {
            try {
                queryFuture.get();
                break;
            } catch (InterruptedException e) {
                // Keep waiting for the result, but remember the interrupt so the
                // thread's interrupted status can be restored instead of silently
                // swallowed (the original dropped it entirely).
                interrupted = true;
            } catch (Exception e) {
                lastError = e;
                throw new CouchbaseLiteException(e, Status.INTERNAL_SERVER_ERROR);
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
}
Blocks until the initial async query finishes. After this call, either the rows or the error will be non-null.
11,682
/**
 * Gets the results of the Query. The value will be null until the initial
 * Query completes.
 */
public QueryEnumerator getRows() {
    start();
    return (rows == null) ? null : new QueryEnumerator(rows);
}
Gets the results of the Query . The value will be null until the initial Query completes .
11,683
/** Returns the document's current revision, fetching and caching it on first access. */
public SavedRevision getCurrentRevision() {
    if (currentRevision == null) {
        currentRevision = getRevision(null);
    }
    return currentRevision;
}
Get the current revision
11,684
/**
 * The contents of the current revision of the document; shorthand for
 * {@code getCurrentRevision().getProperties()}. Keys beginning with "_"
 * (such as "_id" and "_rev") contain CouchbaseLite metadata. Returns null
 * when there is no current revision.
 */
public Map<String, Object> getProperties() {
    // Call getCurrentRevision() once: it lazily loads and caches the revision,
    // so the original's double call did redundant work on first access.
    SavedRevision current = getCurrentRevision();
    return current == null ? null : current.getProperties();
}
The contents of the current revision of the document . This is shorthand for self . currentRevision . properties . Any keys in the dictionary that begin with _ such as _id and _rev contain CouchbaseLite metadata .
11,685
/**
 * Deletes this document by adding a deletion revision; the deletion will be
 * replicated to other databases.
 *
 * @return true if a deletion revision was created, false if there is no current revision
 * @throws CouchbaseLiteException on failure
 */
public boolean delete() throws CouchbaseLiteException {
    // Single call to the lazy getter (the original called it twice).
    SavedRevision current = getCurrentRevision();
    return current != null && current.deleteDocument() != null;
}
Deletes this document by adding a deletion revision . This will be replicated to other databases .
11,686
/**
 * Purges this document from the database: unlike deletion, the database
 * forgets about it entirely, and the purge is NOT replicated to other
 * databases.
 */
public void purge() throws CouchbaseLiteException {
    // "*" means purge every revision of this document.
    List<String> allRevs = new ArrayList<String>();
    allRevs.add("*");
    Map<String, List<String>> docsToRevs = new HashMap<String, List<String>>();
    docsToRevs.put(documentId, allRevs);
    database.purgeRevisions(docsToRevs);
    database.removeDocumentFromCache(this);
}
Purges this document from the database ; this is more than deletion it forgets entirely about it . The purge will NOT be replicated to other databases .
11,687
/**
 * The revision with the specified ID; returns the cached current revision
 * when the ID matches it.
 */
public SavedRevision getRevision(String revID) {
    boolean matchesCurrent = revID != null
            && currentRevision != null
            && revID.equals(currentRevision.getId());
    if (matchesCurrent) {
        return currentRevision;
    }
    return getRevisionFromRev(database.getDocument(getId(), revID, true));
}
The revision with the specified ID .
11,688
/**
 * Gets the length in bytes of the contents, or 0 if the metadata has no
 * "length" entry.
 */
public long getLength() {
    Number length = (Number) metadata.get("length");
    return (length == null) ? 0 : length.longValue();
}
Get the length in bytes of the contents .
11,689
/**
 * Goes through an _attachments dictionary and replaces any Attachment values
 * with proper JSON metadata dicts: new bodies are registered with the
 * database's blob store, and the "length", "digest" and "follows" metadata
 * are set accordingly.
 *
 * @throws CouchbaseLiteException with ATTACHMENT_ERROR if a body cannot be written
 */
protected static Map<String, Object> installAttachmentBodies(Map<String, Object> attachments, Database database) throws CouchbaseLiteException {
    Map<String, Object> updatedAttachments = new HashMap<String, Object>();
    for (String name : attachments.keySet()) {
        Object value = attachments.get(name);
        if (value instanceof Attachment) {
            Attachment attachment = (Attachment) value;
            Map<String, Object> metadataMutable = new HashMap<String, Object>();
            metadataMutable.putAll(attachment.getMetadata());
            InputStream body = attachment.getBodyIfNew();
            if (body != null) {
                // New body: write it to the blob store and record its metadata.
                BlobStoreWriter writer;
                try {
                    writer = blobStoreWriterForBody(body, database);
                } catch (Exception e) {
                    // NOTE(review): only the message is kept; the original cause
                    // exception is dropped here.
                    throw new CouchbaseLiteException(e.getMessage(), Status.ATTACHMENT_ERROR);
                }
                metadataMutable.put("length", writer.getLength());
                metadataMutable.put("digest", writer.mD5DigestString());
                // "follows" marks the body as supplied out-of-band rather than inline.
                metadataMutable.put("follows", true);
                database.rememberAttachmentWriter(writer);
            }
            updatedAttachments.put(name, metadataMutable);
        } else if (value instanceof AttachmentInternal) {
            throw new IllegalArgumentException("AttachmentInternal objects not expected here. Could indicate a bug");
        } else if (value != null) {
            // Already plain metadata (or another JSON value): pass through unchanged.
            updatedAttachments.put(name, value);
        }
    }
    return updatedAttachments;
}
Goes through an _attachments dictionary and replaces any values that are Attachment objects with proper JSON metadata dicts . It registers the attachment bodies with the blob store and sets the metadata getDigest and follows properties accordingly .
11,690
/** Lazily (re)creates the single-threaded support executor used by push replication. */
private void initSupportExecutor() {
    if (supportExecutor != null && !supportExecutor.isShutdown()) {
        return;
    }
    supportExecutor = Executors.newSingleThreadExecutor(new ThreadFactory() {
        public Thread newThread(Runnable r) {
            // Sanitize the remote URL (masking credentials) before putting it
            // in the thread name.
            return new Thread(r, "CBLPusherSupportExecutor-" + URLUtils.sanitizeURL(remote));
        }
    });
}
Creates a single-threaded supportExecutor for push replication.
11,691
/** The names of all attachments on this revision (empty list if none). */
public List<String> getAttachmentNames() {
    Map<String, Object> meta = getAttachmentMetadata();
    List<String> names = new ArrayList<String>();
    if (meta != null) {
        names.addAll(meta.keySet());
    }
    return names;
}
The names of all attachments
11,692
/** All attachments on this revision as Attachment objects (empty list if none). */
public List<Attachment> getAttachments() {
    Map<String, Object> meta = getAttachmentMetadata();
    if (meta == null) {
        return new ArrayList<Attachment>();
    }
    List<Attachment> attachments = new ArrayList<Attachment>(meta.size());
    for (Map.Entry<String, Object> entry : meta.entrySet()) {
        Attachment attachment = toAttachment(entry.getKey(), entry.getValue());
        if (attachment != null) {
            attachments.add(attachment);
        }
    }
    return attachments;
}
All attachments as Attachment objects .
11,693
/**
 * Sets the userProperties of the Revision: replaces all properties except
 * those whose keys are prefixed with "_" (CouchbaseLite metadata), which are
 * preserved from the existing properties.
 */
public void setUserProperties(Map<String, Object> userProperties) {
    Map<String, Object> merged = new HashMap<String, Object>(userProperties);
    for (Map.Entry<String, Object> entry : properties.entrySet()) {
        if (entry.getKey().startsWith("_")) {
            merged.put(entry.getKey(), entry.getValue());
        }
    }
    properties = merged;
}
Sets the userProperties of the Revision . Set replaces all properties except for those with keys prefixed with _ .
11,694
/**
 * Creates or updates an attachment under the given name. The attachment data
 * will be written to the database when the revision is saved.
 */
protected void addAttachment(Attachment attachment, String name) {
    Map<String, Object> existing = (Map<String, Object>) properties.get("_attachments");
    Map<String, Object> attachments = (existing != null) ? existing : new HashMap<String, Object>();
    attachments.put(name, attachment);
    properties.put("_attachments", attachments);
    if (attachment != null) {
        attachment.setName(name);
    }
}
Creates or updates an attachment . The attachment data will be written to the database when the revision is saved .
11,695
/** Kicks off the actual replication work on the replicator's executor. */
protected void beginReplicating() {
    Log.v(TAG, "submit startReplicating()");
    executor.submit(new Runnable() {
        public void run() {
            if (!isRunning()) {
                return; // replication was stopped before this task ran
            }
            Log.v(TAG, "start startReplicating()");
            initPendingSequences();
            initDownloadsToInsert();
            startChangeTracker();
        }
    });
}
Actual work of starting the replication process .
11,696
/**
 * Processes a batch of remote revisions from the _changes feed: drops
 * revisions already present locally, then queues the rest for download
 * (bulk where possible, individually otherwise).
 */
protected void processInbox(RevisionList inbox) {
    Log.d(TAG, "processInbox called");
    if (db == null || !db.isOpen()) {
        // NOTE(review): db.getName() below throws NPE when db == null; the
        // message would only be logged safely for the closed-database case.
        Log.w(Log.TAG_SYNC, "%s: Database is null or closed. Unable to continue. db name is %s.", this, db.getName());
        return;
    }
    // _bulk_get support is probed once (Sync Gateway >= 0.81).
    if (canBulkGet == null) {
        canBulkGet = serverIsSyncGatewayVersion("0.81");
    }
    String lastInboxSequence = ((PulledRevision) inbox.get(inbox.size() - 1)).getRemoteSequenceID();
    int numRevisionsRemoved = 0;
    try {
        // Removes from the inbox any revisions we already have locally.
        numRevisionsRemoved = db.findMissingRevisions(inbox);
    } catch (SQLException e) {
        Log.e(TAG, String.format(Locale.ENGLISH, "%s failed to look up local revs", this), e);
        inbox = null;
    }
    int inboxCount = 0;
    if (inbox != null) {
        inboxCount = inbox.size();
    }
    if (numRevisionsRemoved > 0) {
        // Already-known revisions won't be pulled; shrink the progress total.
        Log.v(TAG, "%s: processInbox() setting changesCount to: %s", this, getChangesCount().get() - numRevisionsRemoved);
        addToChangesCount(-1 * numRevisionsRemoved);
    }
    if (inboxCount == 0) {
        // Nothing to fetch: still checkpoint the last remote sequence.
        Log.d(TAG, "%s no new remote revisions to fetch. add lastInboxSequence (%s) to pendingSequences (%s)", this, lastInboxSequence, pendingSequences);
        long seq = pendingSequences.addValue(lastInboxSequence);
        pendingSequences.removeSequence(seq);
        setLastSequence(pendingSequences.getCheckpointedValue());
        pauseOrResume();
        return;
    }
    Log.v(TAG, "%s: fetching %s remote revisions...", this, inboxCount);
    for (int i = 0; i < inbox.size(); i++) {
        PulledRevision rev = (PulledRevision) inbox.get(i);
        // Bulk-fetch when _bulk_get is supported, or for simple generation-1,
        // non-deleted, non-conflicted revisions (safe via _all_docs).
        if (canBulkGet || (rev.getGeneration() == 1 && !rev.isDeleted() && !rev.isConflicted())) {
            bulkRevsToPull.add(rev);
        } else {
            queueRemoteRevision(rev);
        }
        rev.setSequence(pendingSequences.addValue(rev.getRemoteSequenceID()));
    }
    pullRemoteRevisions();
    pauseOrResume();
}
Process a bunch of remote revisions from the _changes feed at once
11,697
/**
 * Gets a bunch of revisions in one bulk request, using _bulk_get when the
 * server supports it; otherwise falls back to pullBulkWithAllDocs().
 */
protected void pullBulkRevisions(List<RevisionInternal> bulkRevs) {
    int nRevs = bulkRevs.size();
    if (nRevs == 0) {
        return;
    }
    Log.d(TAG, "%s bulk-fetching %d remote revisions...", this, nRevs);
    Log.d(TAG, "%s bulk-fetching remote revisions: %s", this, bulkRevs);
    if (!canBulkGet) {
        pullBulkWithAllDocs(bulkRevs);
        return;
    }
    Log.v(TAG, "%s: POST _bulk_get", this);
    // Revisions we asked for but have not yet received.
    final List<RevisionInternal> remainingRevs = new ArrayList<RevisionInternal>(bulkRevs);
    ++httpConnectionCount;
    final RemoteBulkDownloaderRequest downloader;
    try {
        downloader = new RemoteBulkDownloaderRequest(clientFactory, remote, true, bulkRevs, db, this.requestHeaders,
                new RemoteBulkDownloaderRequest.BulkDownloaderDocument() {
                    // Invoked once per document in the streamed response.
                    public void onDocument(Map<String, Object> props, long size) {
                        RevisionInternal rev;
                        if (props.get("_id") != null) {
                            // Full document body received.
                            rev = new RevisionInternal(props, size);
                        } else {
                            // Error stub: only "id"/"rev" are present.
                            rev = new RevisionInternal((String) props.get("id"), (String) props.get("rev"), false);
                        }
                        int pos = remainingRevs.indexOf(rev);
                        if (pos > -1) {
                            // Carry over the local sequence assigned in processInbox().
                            rev.setSequence(remainingRevs.get(pos).getSequence());
                            remainingRevs.remove(pos);
                        } else {
                            Log.w(TAG, "%s : Received unexpected rev rev", this);
                        }
                        if (props.get("_id") != null) {
                            queueDownloadedRevision(rev);
                        } else {
                            Status status = statusFromBulkDocsResponseItem(props);
                            Throwable err = new CouchbaseLiteException(status);
                            revisionFailed(rev, err);
                        }
                    }
                },
                new RemoteRequestCompletion() {
                    public void onCompletion(RemoteRequest remoteRequest, Response httpResponse, Object result, Throwable e) {
                        if (e != null) {
                            setError(e);
                            // Whatever never arrived is counted as completed so
                            // progress totals stay balanced.
                            completedChangesCount.addAndGet(remainingRevs.size());
                        }
                        --httpConnectionCount;
                        // Start another request if revisions are still waiting.
                        pullRemoteRevisions();
                        if (cancellables != null && cancellables.values() != null && remoteRequest != null)
                            cancellables.values().remove(remoteRequest);
                    }
                });
    } catch (Exception e) {
        Log.e(TAG, "%s: pullBulkRevisions Exception: %s", this, e);
        return;
    }
    downloader.setAuthenticator(getAuthenticator());
    downloader.setCompressedRequest(canSendCompressedRequests());
    synchronized (remoteRequestExecutor) {
        if (!remoteRequestExecutor.isShutdown()) {
            Future future = remoteRequestExecutor.submit(downloader);
            pendingFutures.add(future);
            cancellables.put(future, downloader);
        }
    }
}
Get a bunch of revisions in one bulk request . Will use _bulk_get if possible .
11,698
/**
 * Invokes the transformation block (if one is installed) on a downloaded
 * revision, then queues the resulting revision for batched insertion into
 * the database, flushing synchronously when queued bodies exceed the memory cap.
 */
private void queueDownloadedRevision(RevisionInternal rev) {
    if (revisionBodyTransformationBlock != null) {
        // Expose temporary "file" paths so the transformer can read attachment bodies.
        Map<String, Map<String, Object>> attachments =
                (Map<String, Map<String, Object>>) rev.getProperties().get("_attachments");
        if (attachments != null) { // fixed: revisions without attachments caused an NPE here
            for (Map.Entry<String, Map<String, Object>> entry : attachments.entrySet()) {
                Map<String, Object> attachment = entry.getValue();
                attachment.remove("file");
                if (attachment.get("follows") != null && attachment.get("data") == null) {
                    String filePath = db.fileForAttachmentDict(attachment).getPath();
                    if (filePath != null)
                        attachment.put("file", filePath);
                }
            }
        }
        RevisionInternal xformed = transformRevision(rev);
        if (xformed == null) {
            // Transformer vetoed the revision: mark its sequence processed and bail.
            Log.v(TAG, "%s: Transformer rejected revision %s", this, rev);
            pendingSequences.removeSequence(rev.getSequence());
            lastSequence = pendingSequences.getCheckpointedValue();
            pauseOrResume();
            return;
        }
        rev = xformed;
        // Strip the temporary "file" entries again before insertion.
        Map<String, Map<String, Object>> xformedAttachments =
                (Map<String, Map<String, Object>>) rev.getProperties().get("_attachments");
        if (xformedAttachments != null) { // fixed: same NPE guard after transformation
            for (Map.Entry<String, Map<String, Object>> entry : xformedAttachments.entrySet()) {
                entry.getValue().remove("file");
            }
        }
    }
    if (rev.getBody() != null)
        queuedMemorySize.addAndGet(rev.getBody().getSize());
    downloadsToInsert.queueObject(rev);
    // Back-pressure: flush synchronously once queued bodies exceed the memory cap.
    if (queuedMemorySize.get() > MAX_QUEUE_MEMORY_SIZE) {
        Log.d(TAG, "Flushing queued memory size at: " + queuedMemorySize);
        downloadsToInsert.flushAllAndWait();
    }
}
This invokes the transformation block, if one is installed, and queues the resulting CBL_Revision for insertion.
11,699
/**
 * CouchDB-compatible bulk fetch via {@code _all_docs?include_docs=true}.
 * Only works for generation-1 revisions without attachments; anything the
 * bulk response cannot deliver is fetched individually afterwards.
 */
protected void pullBulkWithAllDocs(final List<RevisionInternal> bulkRevs) {
    ++httpConnectionCount;
    final RevisionList remainingRevs = new RevisionList(bulkRevs);
    // The _all_docs request is keyed by document ID.
    Collection<String> keys = CollectionUtils.transform(bulkRevs, new CollectionUtils.Functor<RevisionInternal, String>() {
        public String invoke(RevisionInternal rev) {
            return rev.getDocID();
        }
    });
    Map<String, Object> body = new HashMap<String, Object>();
    body.put("keys", keys);
    Future future = sendAsyncRequest("POST", "_all_docs?include_docs=true", body, new RemoteRequestCompletion() {
        public void onCompletion(RemoteRequest remoteRequest, Response httpResponse, Object result, Throwable e) {
            Map<String, Object> res = (Map<String, Object>) result;
            if (e != null) {
                setError(e);
            } else {
                List<Map<String, Object>> rows = (List<Map<String, Object>>) res.get("rows");
                Log.v(TAG, "%s checking %d bulk-fetched remote revisions", this, rows.size());
                for (Map<String, Object> row : rows) {
                    Map<String, Object> doc = (Map<String, Object>) row.get("doc");
                    if (doc != null && doc.get("_attachments") == null) {
                        // Usable document: adopt the pending sequence and queue it.
                        RevisionInternal rev = new RevisionInternal(doc);
                        RevisionInternal removedRev = remainingRevs.removeAndReturnRev(rev);
                        if (removedRev != null) {
                            rev.setSequence(removedRev.getSequence());
                            queueDownloadedRevision(rev);
                        }
                    } else {
                        // Error row (or doc with attachments): mark the matching
                        // revision as failed when the row reports an error.
                        Status status = statusFromBulkDocsResponseItem(row);
                        if (status.isError() && row.containsKey("key") && row.get("key") != null) {
                            RevisionInternal rev = remainingRevs.revWithDocId((String) row.get("key"));
                            if (rev != null) {
                                remainingRevs.remove(rev);
                                revisionFailed(rev, new CouchbaseLiteException(status));
                            }
                        }
                    }
                }
            }
            // Revisions _all_docs could not deliver (wrong generation,
            // attachments, conflicts) are fetched one at a time.
            if (remainingRevs.size() > 0) {
                Log.v(TAG, "%s bulk-fetch didn't work for %d of %d revs; getting individually", this, remainingRevs.size(), bulkRevs.size());
                for (RevisionInternal rev : remainingRevs) {
                    queueRemoteRevision(rev);
                }
                pullRemoteRevisions();
            }
            --httpConnectionCount;
            pullRemoteRevisions();
        }
    });
    pendingFutures.add(future);
}
This is compatible with CouchDB but it only works for revs of generation 1 without attachments .