idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
6,000
/**
 * Removes {@code x} from the AA-tree rooted at {@code this.root}.
 *
 * <p>Uses the side-channel fields written by the recursive overload:
 * {@code deletedNode} is reset to {@code NULL_NODE} before descending, and the
 * recursion stores the removed payload in {@code deletedElement}. That field is
 * consumed and cleared here so repeated calls start from a clean state.
 *
 * @param x the region to remove
 * @return a copy of the removed region, or {@code null} if nothing was removed
 */
private Region remove ( Region x ) { this . deletedNode = NULL_NODE ; this . root = remove ( x , this . root ) ; Region d = this . deletedElement ; this . deletedElement = null ; if ( d == null ) { return null ; } else { return new Region ( d ) ; } }
Remove from the tree .
6,001
/**
 * Locates the node matching {@code x} in the tree via iterative descent.
 *
 * @param x the probe region, compared with {@code orderRelativeTo}
 * @return the matching node, or {@code null} if no node compares equal
 */
private Region find ( Region x ) {
  Region node = this . root ;
  while ( node != NULL_NODE ) {
    long cmp = x . orderRelativeTo ( node ) ;
    if ( cmp == 0 ) {
      return node ;
    }
    node = ( cmp < 0 ) ? node . left : node . right ;
  }
  return null ;
}
Find an item in the tree .
6,002
/**
 * Inserts {@code x} into the subtree rooted at {@code t} and returns the new
 * subtree root.
 *
 * <p>Standard AA-tree insertion: recurse to the proper leaf position, then
 * restore the level invariants on the way back up with {@code skew} followed
 * by {@code split}. Duplicate keys are rejected with an {@link AssertionError}
 * rather than silently replaced.
 *
 * @param x the new node to link in
 * @param t the current subtree root ({@code NULL_NODE} sentinel for empty)
 * @return the (possibly rotated) subtree root after insertion
 */
private Region insert ( Region x , Region t ) { if ( t == NULL_NODE ) { t = x ; } else if ( x . orderRelativeTo ( t ) < 0 ) { t . left ( insert ( x , t . left ) ) ; } else if ( x . orderRelativeTo ( t ) > 0 ) { t . right ( insert ( x , t . right ) ) ; } else { throw new AssertionError ( "Cannot insert " + x + " into " + this ) ; } t = skew ( t ) ; t = split ( t ) ; return t ; }
Internal method to insert into a subtree .
6,003
/**
 * Removes {@code x} from the subtree rooted at {@code t} (Andersson's
 * single-pass AA-tree deletion) and returns the new subtree root.
 *
 * <p>On the way down, {@code lastNode} tracks the deepest node visited and
 * {@code deletedNode} tracks the candidate whose key may match {@code x}
 * (every non-less comparison descends right, so the in-order successor ends
 * up at {@code lastNode}). At the bottom ({@code t == lastNode}) the candidate
 * is swapped with the leaf, recorded in {@code deletedElement}, and unlinked.
 * On the way back up, levels are decreased where a child sits two levels
 * below its parent, then the subtree is rebalanced with the usual
 * skew/skew/skew/split/split sequence.
 *
 * <p>NOTE(review): correctness depends on the exact statement order and the
 * {@code NULL_NODE} sentinel having level 0 — do not reorder.
 *
 * @param x the key to remove
 * @param t the current subtree root
 * @return the subtree root after removal and rebalancing
 */
private Region remove ( Region x , Region t ) { if ( t != NULL_NODE ) { this . lastNode = t ; if ( x . orderRelativeTo ( t ) < 0 ) { t . left ( remove ( x , t . left ) ) ; } else { this . deletedNode = t ; t . right ( remove ( x , t . right ) ) ; } if ( t == this . lastNode ) { if ( this . deletedNode != NULL_NODE && x . orderRelativeTo ( this . deletedNode ) == 0 ) { this . deletedNode . swap ( t ) ; this . deletedElement = t ; t = t . right ; } } else if ( t . left . level < t . level - 1 || t . right . level < t . level - 1 ) { if ( t . right . level > -- t . level ) { t . right . level = t . level ; } t = skew ( t ) ; t . right ( skew ( t . right ) ) ; t . right . right ( skew ( t . right . right ) ) ; t = split ( t ) ; t . right ( split ( t . right ) ) ; } } return t ; }
Internal method to remove from a subtree .
6,004
/**
 * AA-tree skew primitive: a horizontal left link (left child on the same
 * level) violates the AA invariant, so rotate right to turn it into a
 * horizontal right link.
 *
 * @param t subtree root
 * @return the subtree root after the (possible) rotation
 */
private static Region skew ( Region t ) {
  return ( t . left . level == t . level ) ? rotateWithLeftChild ( t ) : t ;
}
Skew primitive for AA - trees .
6,005
/**
 * AA-tree split primitive: two consecutive horizontal right links
 * ({@code t.right.right} on {@code t}'s level) are resolved by rotating left
 * and promoting the new root one level.
 *
 * @param t subtree root
 * @return the subtree root after the (possible) rotation
 */
private static Region split ( Region t ) {
  if ( t . right . right . level != t . level ) {
    return t ;
  }
  t = rotateWithRightChild ( t ) ;
  t . level ++ ;
  return t ;
}
Split primitive for AA - trees .
6,006
/**
 * Single right rotation: the left child becomes the new subtree root and the
 * old root becomes its right child.
 *
 * @param node current subtree root
 * @return the left child, now the subtree root
 */
private static Region rotateWithLeftChild ( Region node ) {
  Region pivot = node . left ;
  node . left ( pivot . right ) ;
  pivot . right ( node ) ;
  return pivot ;
}
Rotate binary tree node with left child .
6,007
/**
 * Single left rotation: the right child becomes the new subtree root and the
 * old root becomes its left child.
 *
 * @param node current subtree root
 * @return the right child, now the subtree root
 */
private static Region rotateWithRightChild ( Region node ) {
  Region pivot = node . right ;
  node . right ( pivot . left ) ;
  pivot . left ( node ) ;
  return pivot ;
}
Rotate binary tree node with right child .
6,008
/**
 * Orders this region relative to another {@code Region} or to a {@code Long}
 * point value.
 *
 * <p>Against a {@code Long}, the point is compared to the interval: -1 when
 * the point lies past {@code end}, 1 when before {@code start}, 0 when inside
 * {@code [start, end]}.
 *
 * <p>NOTE(review): the Region branch is asymmetric — it returns -1 based on
 * {@code start} but 1 based on {@code end}, so regions contained in one
 * another compare unequally depending on which endpoint differs. Confirm this
 * is the intended containment/nesting order and that it is transitive enough
 * for tree lookups; it looks like a latent bug.
 *
 * @throws AssertionError if {@code other} is neither a Region nor a Long
 */
public int compareTo ( Comparable < ? > other ) { if ( other instanceof Region ) { Region r = ( Region ) other ; if ( this . start < r . start ) { return - 1 ; } else if ( this . end > r . end ) { return 1 ; } else { return 0 ; } } else if ( other instanceof Long ) { Long l = ( Long ) other ; if ( l > end ) { return - 1 ; } else if ( l < start ) { return 1 ; } else { return 0 ; } } else { throw new AssertionError ( ) ; } }
Order this region relative to another .
6,009
/**
 * Collects the ids of every word form ({@code WF}) referenced by the terms of
 * the given document, in term order.
 *
 * @param kaf the NAF document whose terms are scanned
 * @return the WF ids of all terms, flattened into one list
 */
public List < String > getAllWFIdsFromTerms ( KAFDocument kaf ) {
  List < String > wfTermIds = new ArrayList < > ( ) ;
  for ( Term term : kaf . getTerms ( ) ) {
    for ( WF form : term . getWFs ( ) ) {
      wfTermIds . add ( form . getId ( ) ) ;
    }
  }
  return wfTermIds ;
}
Get all the WF ids for the terms contained in the KAFDocument .
6,010
/**
 * Checks that every WF id referenced by the entity spans is contained in the
 * term WF ids.
 *
 * <p>Fix: the original scanned {@code termWfIds} with {@code List.contains}
 * for each element of {@code wfIds}, i.e. O(n*m); a hash set makes the
 * membership test O(1) per id. Fully-qualified {@code java.util.HashSet} is
 * used so no new import is required.
 *
 * @param wfIds the WF ids referenced by spans (may be empty)
 * @param termWfIds the WF ids owned by terms
 * @return {@code true} iff every id in {@code wfIds} occurs in {@code termWfIds}
 */
public boolean checkTermsRefsIntegrity ( List < String > wfIds , List < String > termWfIds ) {
  java . util . Set < String > termWfIdSet = new java . util . HashSet < > ( termWfIds ) ;
  for ( String wfId : wfIds ) {
    if ( ! termWfIdSet . contains ( wfId ) ) {
      return false ;
    }
  }
  return true ;
}
Check that the references from the entity spans are actually contained in the term ids .
6,011
/**
 * Converts an entity class annotation to its CoNLL tag.
 *
 * <p>PERSON/ORGANIZATION/LOCATION (case-insensitive), and any tag that is
 * already three characters long, are truncated to their first three
 * characters; every other tag is returned unchanged.
 *
 * @param neType the named-entity type to convert
 * @return the CoNLL-style type
 */
public String convertToConLLTypes ( String neType ) {
  boolean coreType = neType . equalsIgnoreCase ( "PERSON" )
      || neType . equalsIgnoreCase ( "ORGANIZATION" )
      || neType . equalsIgnoreCase ( "LOCATION" ) ;
  if ( coreType || neType . length ( ) == 3 ) {
    return neType . substring ( 0 , 3 ) ;
  }
  return neType ;
}
Convert Entity class annotation to CoNLL formats .
6,012
/**
 * Main entry point of ixa-pipe-nerc: constructs the CLI front end and hands
 * it the raw arguments for parsing and dispatch.
 *
 * @param args command-line arguments, forwarded to {@code CLI.parseCLI}
 * @throws IOException propagated from the CLI pipeline
 * @throws JDOMException propagated from NAF/XML parsing
 */
public static void main ( final String [ ] args ) throws IOException , JDOMException { CLI cmdLine = new CLI ( ) ; cmdLine . parseCLI ( args ) ; }
Main entry point of ixa - pipe - nerc .
6,013
/**
 * Runs Named Entity tagging over a NAF document read from {@code inputStream}
 * and writes the annotated result to {@code outputStream}.
 *
 * <p>Pipeline: parse the NAF, pull model/format/lexer/dictionary options from
 * the parsed CLI arguments, resolve the language (warning — not failing — if
 * the CLI language disagrees with the NAF header), register an "entities"
 * linguistic processor with begin/end timestamps around the annotation, and
 * finally serialize to the requested output format (conll03, conll02,
 * opennlp, or NAF by default).
 *
 * <p>NOTE(review): the writer/reader are closed only on the success path; an
 * exception mid-method leaks both streams — consider try-with-resources.
 *
 * @param inputStream UTF-8 NAF input
 * @param outputStream UTF-8 annotated output
 * @throws IOException on stream failure
 * @throws JDOMException on NAF parse failure
 */
public final void annotate ( final InputStream inputStream , final OutputStream outputStream ) throws IOException , JDOMException { BufferedReader breader = new BufferedReader ( new InputStreamReader ( inputStream , UTF_8 ) ) ; BufferedWriter bwriter = new BufferedWriter ( new OutputStreamWriter ( outputStream , UTF_8 ) ) ; KAFDocument kaf = KAFDocument . createFromStream ( breader ) ; String model = parsedArguments . getString ( MODEL ) ; String outputFormat = parsedArguments . getString ( "outputFormat" ) ; String lexer = parsedArguments . getString ( "lexer" ) ; String dictTag = parsedArguments . getString ( "dictTag" ) ; String dictPath = parsedArguments . getString ( "dictPath" ) ; String clearFeatures = parsedArguments . getString ( "clearFeatures" ) ; String lang = null ; if ( parsedArguments . getString ( "language" ) != null ) { lang = parsedArguments . getString ( "language" ) ; if ( ! kaf . getLang ( ) . equalsIgnoreCase ( lang ) ) { System . err . println ( "Language parameter in NAF and CLI do not match!!" ) ; } } else { lang = kaf . getLang ( ) ; } Properties properties = setAnnotateProperties ( model , lang , lexer , dictTag , dictPath , clearFeatures ) ; KAFDocument . LinguisticProcessor newLp = kaf . addLinguisticProcessor ( "entities" , IXA_PIPE_NERC + Files . getNameWithoutExtension ( model ) , version + "-" + commit ) ; newLp . setBeginTimestamp ( ) ; Annotate annotator = new Annotate ( properties ) ; annotator . annotateNEsToKAF ( kaf ) ; newLp . setEndTimestamp ( ) ; String kafToString = null ; if ( outputFormat . equalsIgnoreCase ( "conll03" ) ) { kafToString = annotator . annotateNEsToCoNLL2003 ( kaf ) ; } else if ( outputFormat . equalsIgnoreCase ( "conll02" ) ) { kafToString = annotator . annotateNEsToCoNLL2002 ( kaf ) ; } else if ( outputFormat . equalsIgnoreCase ( "opennlp" ) ) { kafToString = annotator . annotateNEsToOpenNLP ( kaf ) ; } else { kafToString = kaf . toString ( ) ; } bwriter . write ( kafToString ) ; bwriter . 
close ( ) ; breader . close ( ) ; }
Main method to do Named Entity tagging .
6,014
/**
 * Builds the {@link Properties} bag consumed by the NER annotator from the
 * individual CLI parameter values.
 *
 * @return a new Properties instance with the model, language, lexer and
 *     dictionary options set
 */
private Properties setAnnotateProperties ( String model , String language , String lexer , String dictTag , String dictPath , String clearFeatures ) {
  Properties annotateProperties = new Properties ( ) ;
  // Key/value pairs kept in one table so additions stay in a single place.
  String [ ] [ ] entries = {
      { MODEL , model } ,
      { "language" , language } ,
      { "ruleBasedOption" , lexer } ,
      { "dictTag" , dictTag } ,
      { "dictPath" , dictPath } ,
      { "clearFeatures" , clearFeatures } ,
  } ;
  for ( String [ ] entry : entries ) {
    annotateProperties . setProperty ( entry [ 0 ] , entry [ 1 ] ) ;
  }
  return annotateProperties ;
}
Set a Properties object with the CLI parameters for NER annotation .
6,015
/**
 * Reads lines from the client until an end marker and returns them as one
 * newline-joined string.
 *
 * <p>Fix: the original used {@code String.matches(...)} — which compiles a
 * regex on every line — for what are literal, metacharacter-free markers;
 * {@code equals} is behaviorally identical and cheaper/clearer.
 *
 * @param inFromClient the client reader
 * @return everything read so far; the {@code <ENDOFDOCUMENT>} marker is not
 *     echoed, while the closing {@code </NAF>} tag is kept in the output
 */
private String getClientData ( BufferedReader inFromClient ) {
  StringBuilder stringFromClient = new StringBuilder ( ) ;
  try {
    String line ;
    while ( ( line = inFromClient . readLine ( ) ) != null ) {
      // Protocol sentinel: stop without echoing it.
      if ( line . equals ( "<ENDOFDOCUMENT>" ) ) {
        break ;
      }
      stringFromClient . append ( line ) . append ( "\n" ) ;
      // End of the NAF document: keep the tag, then stop.
      if ( line . equals ( "</NAF>" ) ) {
        break ;
      }
    }
  } catch ( IOException e ) {
    // Best-effort: log and return whatever was read before the failure.
    e . printStackTrace ( ) ;
  }
  return stringFromClient . toString ( ) ;
}
Read data from the client and output to a String .
6,016
/**
 * Writes the annotated document back to the client and closes the writer.
 *
 * <p>Note: if {@code write} throws, the writer is deliberately left to the
 * caller/socket teardown — wrapping this in try-with-resources would change
 * the error-path behavior.
 *
 * @param outToClient writer over the client socket
 * @param kafToString the serialized annotated document
 * @throws IOException if writing or closing fails
 */
private void sendDataToClient ( BufferedWriter outToClient , String kafToString ) throws IOException { outToClient . write ( kafToString ) ; outToClient . close ( ) ; }
Send data back to server after annotation .
6,017
/**
 * Annotates one client document with named entities and serializes it in the
 * configured output format.
 *
 * <p>NOTE(review): {@code newLp.setEndTimestamp()} is only called on the NAF
 * (default) branch — the conll03/conll02 outputs leave the linguistic
 * processor without an end timestamp. Confirm whether that is intentional
 * (the timestamp is irrelevant to CoNLL output) or an oversight.
 *
 * @param annotator the configured NE annotator
 * @param stringFromClient the raw NAF text received from the client
 * @return the annotated document serialized as conll03, conll02 or NAF
 * @throws JDOMException on NAF parse failure
 * @throws IOException on stream failure
 */
private String getAnnotations ( Annotate annotator , String stringFromClient ) throws JDOMException , IOException { BufferedReader clientReader = new BufferedReader ( new StringReader ( stringFromClient ) ) ; KAFDocument kaf = KAFDocument . createFromStream ( clientReader ) ; KAFDocument . LinguisticProcessor newLp = kaf . addLinguisticProcessor ( "entities" , "ixa-pipe-nerc-" + Files . getNameWithoutExtension ( model ) , version + "-" + commit ) ; newLp . setBeginTimestamp ( ) ; annotator . annotateNEsToKAF ( kaf ) ; String kafToString = null ; if ( outputFormat . equalsIgnoreCase ( "conll03" ) ) { kafToString = annotator . annotateNEsToCoNLL2003 ( kaf ) ; } else if ( outputFormat . equalsIgnoreCase ( "conll02" ) ) { kafToString = annotator . annotateNEsToCoNLL2002 ( kaf ) ; } else { newLp . setEndTimestamp ( ) ; kafToString = kaf . toString ( ) ; } return kafToString ; }
Named Entity annotator .
6,018
/**
 * Populates the current Toml instance with values parsed from {@code file}.
 *
 * <p>The file is decoded as UTF-8 and delegated to the Reader overload; any
 * failure (missing file, parse error) surfaces as a RuntimeException with the
 * original cause attached.
 *
 * @param file the TOML file to read
 * @return this instance, populated with the file's values
 */
public Toml read ( File file ) {
  try {
    InputStreamReader reader = new InputStreamReader ( new FileInputStream ( file ) , "UTF8" ) ;
    return read ( reader ) ;
  } catch ( Exception e ) {
    throw new RuntimeException ( e ) ;
  }
}
Populates the current Toml instance with values from file .
6,019
/**
 * Kicks off the delivery loop, wrapping it in error handling: any throwable
 * from {@code deliverUnsafe} cancels the inner controller, and — unless the
 * outer stream has already been finished — is forwarded to the outer
 * observer. The {@code finished} guard prevents a double terminal signal.
 */
private void deliver ( ) { try { deliverUnsafe ( ) ; } catch ( Throwable t ) { innerController . cancel ( ) ; if ( ! finished ) { outerResponseObserver . onError ( t ) ; } } }
Tries to kick off the delivery loop wrapping it in error handling .
6,020
/**
 * Completes the outer observer if the stream has reached a terminal state.
 *
 * <p>Two terminal paths, checked in order: (1) an external cancellation was
 * recorded — surface it as an error; (2) the upstream is done, no full frame
 * remains buffered and no inner call is outstanding — then either forward the
 * recorded upstream error, report a dangling partial frame as an
 * {@code IncompleteStreamException}, or complete normally. Sets
 * {@code finished} before signalling so later calls become no-ops.
 *
 * @return {@code true} if a terminal signal was emitted, {@code false} if the
 *     stream is still live
 */
private boolean maybeFinish ( ) { Throwable localError = this . cancellation . get ( ) ; if ( localError != null ) { finished = true ; outerResponseObserver . onError ( localError ) ; return true ; } if ( done && ! reframer . hasFullFrame ( ) && ! awaitingInner ) { finished = true ; if ( error != null ) { outerResponseObserver . onError ( error ) ; } else if ( reframer . hasPartialFrame ( ) ) { outerResponseObserver . onError ( new IncompleteStreamException ( ) ) ; } else { outerResponseObserver . onComplete ( ) ; } return true ; } return false ; }
Completes the outer observer if appropriate .
6,021
/**
 * Updates the specified company by wrapping it in an
 * {@code UpdateCompanyRequest} and delegating to the request-based overload.
 *
 * @param company the company resource to update
 * @return the updated company returned by the service
 */
public final Company updateCompany ( Company company ) {
  UpdateCompanyRequest . Builder requestBuilder = UpdateCompanyRequest . newBuilder ( ) ;
  requestBuilder . setCompany ( company ) ;
  return updateCompany ( requestBuilder . build ( ) ) ;
}
Updates specified company .
6,022
/**
 * Creates an entity type in the specified agent by assembling a
 * {@code CreateEntityTypeRequest} and delegating to the request overload.
 *
 * @param parent the agent resource name to create the entity type under
 * @param entityType the entity type to create
 * @param languageCode the language of the entity type's display names
 * @return the created entity type
 */
public final EntityType createEntityType ( String parent , EntityType entityType , String languageCode ) {
  CreateEntityTypeRequest . Builder requestBuilder = CreateEntityTypeRequest . newBuilder ( ) ;
  requestBuilder . setParent ( parent ) ;
  requestBuilder . setEntityType ( entityType ) ;
  requestBuilder . setLanguageCode ( languageCode ) ;
  return createEntityType ( requestBuilder . build ( ) ) ;
}
Creates an entity type in the specified agent .
6,023
/**
 * Updates or creates multiple entities in the specified entity type,
 * asynchronously via the long-running operation callable. Entities not
 * explicitly named in the request are left untouched.
 *
 * @param request the batch update request
 * @return a future tracking the operation (empty response, Struct metadata)
 */
@ BetaApi ( "The surface for long-running operations is not stable yet and may change in the future." ) public final OperationFuture < Empty , Struct > batchUpdateEntitiesAsync ( BatchUpdateEntitiesRequest request ) { return batchUpdateEntitiesOperationCallable ( ) . futureCall ( request ) ; }
Updates or creates multiple entities in the specified entity type. This method does not affect entities in the entity type that aren't explicitly specified in the request.
6,024
/**
 * Gets a field value by name, resolving the name to an index through the
 * schema and delegating to the index-based overload.
 *
 * @param name the field name to look up
 * @return the field value at the schema-resolved index
 * @throws UnsupportedOperationException if no schema is available to resolve
 *     names
 */
public FieldValue get ( String name ) {
  if ( schema != null ) {
    return get ( schema . getIndex ( name ) ) ;
  }
  throw new UnsupportedOperationException ( "Retrieving field value by name is not supported when there is no fields schema provided" ) ;
}
Gets a field value by name, resolving the name to an index through the schema.
6,025
/**
 * Returns a disk type identity for the given zone identity and disk type
 * name.
 *
 * @param zoneId the zone identity supplying project and zone
 * @param type the disk type name
 * @return the composed disk type identity
 */
public static DiskTypeId of ( ZoneId zoneId , String type ) {
  String project = zoneId . getProject ( ) ;
  String zone = zoneId . getZone ( ) ;
  return new DiskTypeId ( project , zone , type ) ;
}
Returns a disk type identity given the zone identity and the disk type name .
6,026
/**
 * Returns a disk type identity for the given zone and disk type names; the
 * project is left unset ({@code null}) and resolved later from options.
 *
 * @param zone the zone name
 * @param type the disk type name
 * @return the composed disk type identity without a project
 */
public static DiskTypeId of ( String zone , String type ) {
  ZoneId zoneId = ZoneId . of ( null , zone ) ;
  return of ( zoneId , type ) ;
}
Returns a disk type identity given the zone and disk type names .
6,027
/**
 * Returns a disk type identity for the given project, zone and disk type
 * names.
 *
 * @param project the project name
 * @param zone the zone name
 * @param type the disk type name
 * @return the composed disk type identity
 */
public static DiskTypeId of ( String project , String zone , String type ) {
  ZoneId zoneId = ZoneId . of ( project , zone ) ;
  return of ( zoneId , type ) ;
}
Returns a disk type identity given project disk zone and disk type names .
6,028
/**
 * Creates a DurationRule with the given maximum age, converting the
 * (value, unit) pair to a Duration and delegating to the Duration overload.
 *
 * @param maxAge the maximum age in {@code timeUnit} units
 * @param timeUnit the unit of {@code maxAge}
 * @return the configured rule
 */
public DurationRule maxAge ( long maxAge , TimeUnit timeUnit ) {
  // timeUnit.toNanos(x) is the documented equivalent of
  // TimeUnit.NANOSECONDS.convert(x, timeUnit).
  return maxAge ( Duration . ofNanos ( timeUnit . toNanos ( maxAge ) ) ) ;
}
Creates a new DurationRule instance with the given maximum age.
6,029
/**
 * Executes {@code callable}, retrying with exponential backoff on retryable
 * Spanner errors, and returns its result.
 *
 * <p>Each attempt is annotated on the current tracing span. A retryable
 * {@code SpannerException} sleeps either for the server-suggested delay
 * (when {@code getRetryDelayInMillis() != -1}) or per the local backoff
 * policy, then loops; a non-retryable one is rethrown as-is. Any other
 * checked exception is wrapped as an INTERNAL SpannerException (unchecked
 * ones propagate via {@code throwIfUnchecked}). The loop only exits by
 * returning or throwing.
 *
 * @param callable the work to run
 * @return the callable's result
 */
static < T > T runWithRetries ( Callable < T > callable ) { Span span = tracer . getCurrentSpan ( ) ; ExponentialBackOff backOff = newBackOff ( ) ; Context context = Context . current ( ) ; int attempt = 0 ; while ( true ) { attempt ++ ; try { span . addAnnotation ( "Starting operation" , ImmutableMap . of ( "Attempt" , AttributeValue . longAttributeValue ( attempt ) ) ) ; T result = callable . call ( ) ; return result ; } catch ( SpannerException e ) { if ( ! e . isRetryable ( ) ) { throw e ; } logger . log ( Level . FINE , "Retryable exception, will sleep and retry" , e ) ; long delay = e . getRetryDelayInMillis ( ) ; if ( delay != - 1 ) { backoffSleep ( context , delay ) ; } else { backoffSleep ( context , backOff ) ; } } catch ( Exception e ) { Throwables . throwIfUnchecked ( e ) ; throw newSpannerException ( ErrorCode . INTERNAL , "Unexpected exception thrown" , e ) ; } } }
Helper to execute some work retrying with backoff on retryable errors .
6,030
/**
 * Returns a request initializer that configures outgoing HTTP requests
 * according to the service options.
 *
 * <p>The initializer (an anonymous class capturing the computed state)
 * first delegates to a credentials adapter — unless the scoped credentials
 * are absent or the NoCredentials sentinel — then applies the enclosing
 * connect/read timeouts when non-negative, and finally stamps the merged
 * (internal + user) headers onto the request.
 *
 * @param serviceOptions source of credentials and header configuration
 * @return an initializer applying credentials, timeouts and headers
 */
public HttpRequestInitializer getHttpRequestInitializer ( final ServiceOptions < ? , ? > serviceOptions ) { Credentials scopedCredentials = serviceOptions . getScopedCredentials ( ) ; final HttpRequestInitializer delegate = scopedCredentials != null && scopedCredentials != NoCredentials . getInstance ( ) ? new HttpCredentialsAdapter ( scopedCredentials ) : null ; HeaderProvider internalHeaderProvider = getInternalHeaderProviderBuilder ( serviceOptions ) . build ( ) ; final HeaderProvider headerProvider = serviceOptions . getMergedHeaderProvider ( internalHeaderProvider ) ; return new HttpRequestInitializer ( ) { public void initialize ( HttpRequest httpRequest ) throws IOException { if ( delegate != null ) { delegate . initialize ( httpRequest ) ; } if ( connectTimeout >= 0 ) { httpRequest . setConnectTimeout ( connectTimeout ) ; } if ( readTimeout >= 0 ) { httpRequest . setReadTimeout ( readTimeout ) ; } HttpHeadersUtils . setHeaders ( httpRequest . getHeaders ( ) , headerProvider . getHeaders ( ) ) ; } } ; }
Returns a request initializer responsible for initializing requests according to service options .
6,031
/**
 * Splits this query into multiple queries that logically combine into this
 * query, for map-reduce style frameworks (e.g. Beam) to fan out across
 * workers.
 *
 * <p>Refuses to shard a query with a row limit (a limit cannot be divided
 * meaningfully). The row set is partitioned at the split points; each shard
 * clones this query's full builder state via {@code mergeFrom} and then
 * overwrites just the rows — the order of those two calls matters.
 *
 * @param splitPoints sorted row keys at which to cut the row set
 * @return one Query per resulting row-set partition
 */
public List < Query > shard ( SortedSet < ByteString > splitPoints ) { Preconditions . checkState ( builder . getRowsLimit ( ) == 0 , "Can't shard a query with a row limit" ) ; List < RowSet > shardedRowSets = RowSetUtil . shard ( builder . getRows ( ) , splitPoints ) ; List < Query > shards = Lists . newArrayListWithCapacity ( shardedRowSets . size ( ) ) ; for ( RowSet rowSet : shardedRowSets ) { Query queryShard = new Query ( tableId ) ; queryShard . builder . mergeFrom ( this . builder . build ( ) ) ; queryShard . builder . setRows ( rowSet ) ; shards . add ( queryShard ) ; } return shards ; }
Split this query into multiple queries that logically combine into this query . This is intended to be used by map reduce style frameworks like Beam to split a query across multiple workers .
6,032
/**
 * Returns a machine type identity for the given zone and type names; the
 * project is left unset ({@code null}).
 *
 * @param zone the zone name
 * @param type the machine type name
 * @return the composed machine type identity without a project
 */
public static MachineTypeId of ( String zone , String type ) {
  // Delegate to the three-argument factory with no project.
  return of ( null , zone , type ) ;
}
Returns a machine type identity given the zone and type names .
6,033
/**
 * Returns a machine type identity for the given project, zone and type
 * names.
 *
 * @param project the project name
 * @param zone the zone name
 * @param type the machine type name
 * @return the composed machine type identity
 */
public static MachineTypeId of ( String project , String zone , String type ) {
  return new MachineTypeId ( project , zone , type ) ;
}
Returns a machine type identity given project zone and type names .
6,034
/**
 * Returns the blob's MD5 hash rendered as a lowercase hex string.
 *
 * <p>The stored {@code md5} field is base64-encoded; it is decoded and each
 * byte formatted as two hex digits.
 *
 * @return the hex representation, or {@code null} when no MD5 is recorded
 */
public String getMd5ToHexString ( ) {
  if ( md5 == null ) {
    return null ;
  }
  byte [ ] md5Bytes = BaseEncoding . base64 ( ) . decode ( md5 ) ;
  StringBuilder hex = new StringBuilder ( md5Bytes . length * 2 ) ;
  for ( byte b : md5Bytes ) {
    hex . append ( String . format ( "%02x" , b & 0xff ) ) ;
  }
  return hex . toString ( ) ;
}
Returns the MD5 hash of the blob's data, decoded to a hex string.
6,035
/**
 * Returns the blob's user-provided metadata as an unmodifiable view, or
 * {@code null} when no metadata is set (including the API "null sentinel"
 * map recognized by {@code Data.isNull}).
 *
 * @return an unmodifiable metadata map, or {@code null}
 */
public Map < String , String > getMetadata ( ) {
  if ( metadata == null || Data . isNull ( metadata ) ) {
    return null ;
  }
  return Collections . unmodifiableMap ( metadata ) ;
}
Returns the blob's user-provided metadata.
6,036
/**
 * Updates a notification channel; fields outside {@code updateMask} remain
 * unchanged. Delegates to the request-based overload.
 *
 * @param updateMask the fields to update
 * @param notificationChannel the channel carrying the new field values
 * @return the updated notification channel
 */
public final NotificationChannel updateNotificationChannel ( FieldMask updateMask , NotificationChannel notificationChannel ) {
  UpdateNotificationChannelRequest . Builder requestBuilder = UpdateNotificationChannelRequest . newBuilder ( ) ;
  requestBuilder . setUpdateMask ( updateMask ) ;
  requestBuilder . setNotificationChannel ( notificationChannel ) ;
  return updateNotificationChannel ( requestBuilder . build ( ) ) ;
}
Updates a notification channel . Fields not specified in the field mask remain unchanged .
6,037
/**
 * Submits a job to a cluster by assembling a {@code SubmitJobRequest} and
 * delegating to the request-based overload.
 *
 * @param projectId the project owning the job
 * @param region the region to run in
 * @param job the job definition
 * @return the submitted job resource
 */
public final Job submitJob ( String projectId , String region , Job job ) {
  SubmitJobRequest . Builder requestBuilder = SubmitJobRequest . newBuilder ( ) ;
  requestBuilder . setProjectId ( projectId ) ;
  requestBuilder . setRegion ( region ) ;
  requestBuilder . setJob ( job ) ;
  return submitJob ( requestBuilder . build ( ) ) ;
}
Submits a job to a cluster .
6,038
/**
 * Gets the resource representation of a job in a project, delegating to the
 * request-based overload.
 *
 * @param projectId the project owning the job
 * @param region the region the job runs in
 * @param jobId the job identifier
 * @return the job resource
 */
public final Job getJob ( String projectId , String region , String jobId ) {
  GetJobRequest . Builder requestBuilder = GetJobRequest . newBuilder ( ) ;
  requestBuilder . setProjectId ( projectId ) ;
  requestBuilder . setRegion ( region ) ;
  requestBuilder . setJobId ( jobId ) ;
  return getJob ( requestBuilder . build ( ) ) ;
}
Gets the resource representation for a job in a project .
6,039
/**
 * Deletes the job from the project, delegating to the request-based
 * overload. If the job is still active the service rejects the delete with
 * FAILED_PRECONDITION.
 *
 * @param projectId the project owning the job
 * @param region the region the job runs in
 * @param jobId the job identifier
 */
public final void deleteJob ( String projectId , String region , String jobId ) {
  DeleteJobRequest . Builder requestBuilder = DeleteJobRequest . newBuilder ( ) ;
  requestBuilder . setProjectId ( projectId ) ;
  requestBuilder . setRegion ( region ) ;
  requestBuilder . setJobId ( jobId ) ;
  deleteJob ( requestBuilder . build ( ) ) ;
}
Deletes the job from the project . If the job is active the delete fails and the response returns FAILED_PRECONDITION .
6,040
/**
 * Returns the schema index of the field with the given name.
 *
 * @param name the field name to resolve
 * @return the field's index
 * @throws IllegalArgumentException if no field with that name exists
 */
public int getIndex ( String name ) {
  Integer index = nameIndex . get ( name ) ;
  if ( index != null ) {
    return index ;
  }
  throw new IllegalArgumentException ( "Field with name '" + name + "' was not found" ) ;
}
Gets a schema field's index by name.
6,041
/**
 * Returns a bound that performs reads and queries at exactly the given
 * timestamp; such reads are repeatable (same timestamp, same data), but may
 * block until the timestamp is reached if it lies in the future.
 *
 * @param timestamp the exact read timestamp; must not be null
 * @return a READ_TIMESTAMP bound
 */
public static TimestampBound ofReadTimestamp ( Timestamp timestamp ) {
  checkNotNull ( timestamp ) ;
  return new TimestampBound ( Mode . READ_TIMESTAMP , timestamp , null ) ;
}
Returns a timestamp bound that will perform reads and queries at the given timestamp. Unlike other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp always returns the same data. If the timestamp is in the future, the read will block until the specified timestamp, modulo the read's deadline.
6,042
/**
 * Returns a bound that performs reads and queries at an exact staleness
 * relative to a timestamp chosen shortly after the read starts.
 *
 * @param num the staleness amount; validated by {@code checkStaleness}
 * @param units the unit of {@code num}
 * @return an EXACT_STALENESS bound
 */
public static TimestampBound ofExactStaleness ( long num , TimeUnit units ) {
  checkStaleness ( num ) ;
  return new TimestampBound ( Mode . EXACT_STALENESS , null , createDuration ( num , units ) ) ;
}
Returns a timestamp bound that will perform reads and queries at an exact staleness . The timestamp is chosen soon after the read is started .
6,043
/**
 * Converts a Google Date (year / 1-based month / day fields) to a
 * {@code java.util.Date} at midnight in the default time zone.
 *
 * @param date the Google date to convert
 * @return the equivalent java.util.Date with the time-of-day zeroed
 */
public static java . util . Date toJavaUtilDate ( Date date ) {
  Calendar calendar = Calendar . getInstance ( ) ;
  // Google Date months are 1-based; Calendar months are 0-based.
  calendar . set ( date . year , date . month - 1 , date . dayOfMonth ) ;
  calendar . set ( Calendar . HOUR_OF_DAY , 0 ) ;
  calendar . set ( Calendar . MINUTE , 0 ) ;
  calendar . set ( Calendar . SECOND , 0 ) ;
  calendar . set ( Calendar . MILLISECOND , 0 ) ;
  return calendar . getTime ( ) ;
}
Convert a Google Date to a Java Util Date .
6,044
/**
 * Converts a {@code java.util.Date} to a Google Date (year / 1-based month /
 * day), interpreting the instant in the default time zone.
 *
 * @param date the java.util.Date to convert
 * @return the equivalent Google Date
 */
public static Date fromJavaUtilDate ( java . util . Date date ) {
  Calendar calendar = Calendar . getInstance ( ) ;
  calendar . setTime ( date ) ;
  calendar . set ( Calendar . HOUR_OF_DAY , 0 ) ;
  calendar . set ( Calendar . MINUTE , 0 ) ;
  calendar . set ( Calendar . SECOND , 0 ) ;
  calendar . set ( Calendar . MILLISECOND , 0 ) ;
  int year = calendar . get ( Calendar . YEAR ) ;
  // Calendar months are 0-based; Google Date months are 1-based.
  int month = calendar . get ( Calendar . MONTH ) + 1 ;
  int day = calendar . get ( Calendar . DAY_OF_MONTH ) ;
  return new Date ( year , month , day ) ;
}
Convert a Java Util Date to a Google Date .
6,045
/**
 * Creates a new note, delegating to the request-based overload.
 *
 * @param parent the owning project, or {@code null} to send no parent
 * @param noteId the id for the new note
 * @param note the note to create
 * @return the created note
 */
public final Note createNote ( ProjectName parent , String noteId , Note note ) {
  String parentName = ( parent == null ) ? null : parent . toString ( ) ;
  CreateNoteRequest request = CreateNoteRequest . newBuilder ( )
      . setParent ( parentName )
      . setNoteId ( noteId )
      . setNote ( note )
      . build ( ) ;
  return createNote ( request ) ;
}
Creates a new note .
6,046
/**
 * Creates new notes in batch, delegating to the request-based overload.
 *
 * @param parent the owning project, or {@code null} to send no parent
 * @param notes note id to note mappings to create
 * @return the batch creation response
 */
public final BatchCreateNotesResponse batchCreateNotes ( ProjectName parent , Map < String , Note > notes ) {
  String parentName = ( parent == null ) ? null : parent . toString ( ) ;
  BatchCreateNotesRequest request = BatchCreateNotesRequest . newBuilder ( )
      . setParent ( parentName )
      . putAllNotes ( notes )
      . build ( ) ;
  return batchCreateNotes ( request ) ;
}
Creates new notes in batch .
6,047
/**
 * Creates a new resource path from a database name and path segments.
 *
 * <p>Note the argument order: the generated {@code AutoValue_ResourcePath}
 * constructor takes (segments, databaseName), the reverse of this factory's
 * parameter order.
 *
 * @param databaseName the owning database
 * @param segments the path segments under the database
 * @return the composed resource path
 */
static ResourcePath create ( DatabaseRootName databaseName , ImmutableList < String > segments ) { return new AutoValue_ResourcePath ( segments , databaseName ) ; }
Creates a new Path .
6,048
/**
 * Creates a resource path from its string representation, assumed to look
 * like {@code projects/{p}/databases/{d}/documents/{segments...}}.
 *
 * <p>When the name has at least six slash-separated parts with the expected
 * "projects"/"databases" markers, everything after index 4 becomes the
 * segment list; otherwise the whole name is parsed as a bare database root.
 * NOTE(review): parts[4] (expected to be "documents") is never validated —
 * confirm malformed names cannot reach this path.
 *
 * @param resourceName the full resource name
 * @return the parsed resource path
 */
static ResourcePath create ( String resourceName ) { String [ ] parts = resourceName . split ( "/" ) ; if ( parts . length >= 6 && parts [ 0 ] . equals ( "projects" ) && parts [ 2 ] . equals ( "databases" ) ) { String [ ] path = Arrays . copyOfRange ( parts , 5 , parts . length ) ; return create ( DatabaseRootName . of ( parts [ 1 ] , parts [ 3 ] ) , ImmutableList . < String > builder ( ) . add ( path ) . build ( ) ) ; } return create ( DatabaseRootName . parse ( resourceName ) ) ; }
Create a new Path from its string representation .
6,049
/**
 * Returns the string representation of this path as expected by the
 * Firestore API: the database name plus "/documents", with the relative path
 * appended when non-empty.
 *
 * @return the fully-qualified document path
 */
String getName ( ) {
  String path = getPath ( ) ;
  String base = getDatabaseName ( ) + "/documents" ;
  return path . isEmpty ( ) ? base : base + "/" + getPath ( ) ;
}
String representation as expected by the Firestore API .
6,050
/**
 * Builds a joint batch callback used by both the get-zone and create-zone
 * operations.
 *
 * <p>On success a non-null response is converted to a {@code Zone} model
 * object (null stays null). On failure the JSON error is wrapped in a
 * {@code DnsException}; when {@code nullForNotFound} is set, an HTTP 404 is
 * reported as a successful {@code null} result instead of an error — the
 * "get" semantics — while other codes always surface as errors.
 *
 * @param serviceOptions used to attach the Dns service to the result Zone
 * @param result the batch result to complete
 * @param nullForNotFound whether 404 means "absent" rather than failure
 * @param idempotent whether the wrapped exception may be retried
 * @return the configured callback
 */
private RpcBatch . Callback < ManagedZone > createZoneCallback ( final DnsOptions serviceOptions , final DnsBatchResult < Zone > result , final boolean nullForNotFound , final boolean idempotent ) { return new RpcBatch . Callback < ManagedZone > ( ) { public void onSuccess ( ManagedZone response ) { result . success ( response == null ? null : Zone . fromPb ( serviceOptions . getService ( ) , response ) ) ; } public void onFailure ( GoogleJsonError googleJsonError ) { DnsException serviceException = new DnsException ( googleJsonError , idempotent ) ; if ( nullForNotFound && serviceException . getCode ( ) == HTTP_NOT_FOUND ) { result . success ( null ) ; } else { result . error ( serviceException ) ; } } } ; }
A joint callback for both get zone and create zone operations .
6,051
/**
 * Builds a joint batch callback for the get-change-request and
 * create-change-request operations.
 *
 * <p>On success a non-null response becomes a {@code ChangeRequest} bound to
 * {@code zoneName}. On failure, an HTTP 404 whose location or message points
 * at {@code parameters.changeId} is treated as "change not found" and
 * completed with {@code null}; every other error — including a 404 for the
 * zone itself — is propagated as a {@code DnsException}. Note that the
 * {@code nullForNotFound} parameter is accepted but not consulted here,
 * unlike in {@code createZoneCallback}.
 *
 * @param zoneName the zone the change request belongs to
 * @param result the batch result to complete
 * @param nullForNotFound accepted for symmetry; not used in this callback
 * @param idempotent whether the wrapped exception may be retried
 * @return the configured callback
 */
private RpcBatch . Callback < Change > createChangeRequestCallback ( final String zoneName , final DnsBatchResult < ChangeRequest > result , final boolean nullForNotFound , final boolean idempotent ) { return new RpcBatch . Callback < Change > ( ) { public void onSuccess ( Change response ) { result . success ( response == null ? null : ChangeRequest . fromPb ( options . getService ( ) , zoneName , response ) ) ; } public void onFailure ( GoogleJsonError googleJsonError ) { DnsException serviceException = new DnsException ( googleJsonError , idempotent ) ; if ( serviceException . getCode ( ) == HTTP_NOT_FOUND ) { if ( "entity.parameters.changeId" . equals ( serviceException . getLocation ( ) ) || ( serviceException . getMessage ( ) != null && serviceException . getMessage ( ) . contains ( "parameters.changeId" ) ) ) { result . success ( null ) ; return ; } } result . error ( serviceException ) ; } } ; }
A joint callback for both get change request and create change request operations .
6,052
/**
 * Computes the effective ordering used for DocumentSnapshot cursors.
 *
 * <p>Starting from the explicit order-by clauses: if there are none, the
 * first inequality filter (if any) contributes an implicit ascending order on
 * its field; if there are some, it only checks whether the document id is
 * already ordered. Unless the document id is already present, a final
 * order on {@code FieldPath.documentId()} is appended as a tie-breaker,
 * inheriting the direction of the last explicit order (ascending when there
 * is none) so cursor comparisons are total and deterministic.
 *
 * @return the explicit orders plus any implicit ones, never empty
 */
private List < FieldOrder > createImplicitOrderBy ( ) { List < FieldOrder > implicitOrders = new ArrayList < > ( options . fieldOrders ) ; boolean hasDocumentId = false ; if ( implicitOrders . isEmpty ( ) ) { for ( FieldFilter fieldFilter : options . fieldFilters ) { if ( ! fieldFilter . isEqualsFilter ( ) ) { implicitOrders . add ( new FieldOrder ( fieldFilter . fieldPath , Direction . ASCENDING ) ) ; break ; } } } else { for ( FieldOrder fieldOrder : options . fieldOrders ) { if ( fieldOrder . fieldPath . equals ( FieldPath . DOCUMENT_ID ) ) { hasDocumentId = true ; } } } if ( ! hasDocumentId ) { Direction lastDirection = implicitOrders . isEmpty ( ) ? Direction . ASCENDING : implicitOrders . get ( implicitOrders . size ( ) - 1 ) . direction ; implicitOrders . add ( new FieldOrder ( FieldPath . documentId ( ) , lastDirection ) ) ; } return implicitOrders ; }
Computes the backend ordering semantics for DocumentSnapshot cursors .
6,053
/**
 * Creates and returns a new Query additionally limited to return at most the
 * specified number of documents. This Query is not modified.
 *
 * @param limit the maximum number of documents to return
 * @return the new, limited Query
 */
public Query limit ( int limit ) {
  QueryOptions limitedOptions = new QueryOptions ( options ) ;
  limitedOptions . limit = limit ;
  return new Query ( firestore , path , limitedOptions ) ;
}
Creates and returns a new Query that's additionally limited to only return up to the specified number of documents.
6,054
/**
 * Creates and returns a new Query that skips the first {@code offset}
 * results. This Query is not modified.
 *
 * @param offset the number of leading results to skip
 * @return the new, offset Query
 */
public Query offset ( int offset ) {
  QueryOptions offsetOptions = new QueryOptions ( options ) ;
  offsetOptions . offset = offset ;
  return new Query ( firestore , path , offsetOptions ) ;
}
Creates and returns a new Query that skips the first n results .
6,055
/**
 * Creates and returns a new Query that starts at (inclusive) the provided
 * field values, which must match the query's order-by clauses in order.
 *
 * @param fieldValues the cursor position, one value per order-by clause
 * @return the new Query with a start cursor
 */
public Query startAt ( Object ... fieldValues ) {
  QueryOptions cursorOptions = new QueryOptions ( options ) ;
  // 'true' marks the cursor as "before", i.e. the position itself is included.
  cursorOptions . startCursor = createCursor ( cursorOptions . fieldOrders , fieldValues , true ) ;
  return new Query ( firestore , path , cursorOptions ) ;
}
Creates and returns a new Query that starts at the provided fields relative to the order of the query . The order of the field values must match the order of the order by clauses of the query .
6,056
/**
 * Creates and returns a new Query that starts after (exclusive) the provided
 * field values, which must match the query's order-by clauses in order.
 *
 * @param fieldValues the cursor position, one value per order-by clause
 * @return the new Query with an exclusive start cursor
 */
public Query startAfter ( Object ... fieldValues ) {
  QueryOptions cursorOptions = new QueryOptions ( options ) ;
  // 'false' makes the cursor exclusive: results begin after the position.
  cursorOptions . startCursor = createCursor ( cursorOptions . fieldOrders , fieldValues , false ) ;
  return new Query ( firestore , path , cursorOptions ) ;
}
Creates and returns a new Query that starts after the provided fields relative to the order of the query . The order of the field values must match the order of the order by clauses of the query .
6,057
/**
 * Creates and returns a new Query that ends before (exclusive) the provided
 * field values, which must match the query's order-by clauses in order.
 *
 * @param fieldValues the cursor position, one value per order-by clause
 * @return the new Query with an exclusive end cursor
 */
public Query endBefore ( Object ... fieldValues ) {
  QueryOptions cursorOptions = new QueryOptions ( options ) ;
  // 'true' marks the cursor as "before": the position itself is excluded
  // from an end cursor.
  cursorOptions . endCursor = createCursor ( cursorOptions . fieldOrders , fieldValues , true ) ;
  return new Query ( firestore , path , cursorOptions ) ;
}
Creates and returns a new Query that ends before the provided fields relative to the order of the query . The order of the field values must match the order of the order by clauses of the query .
6,058
/**
 * Creates and returns a new Query that ends at (inclusive) the provided
 * field values, which must match the query's order-by clauses in order.
 *
 * @param fieldValues the cursor position, one value per order-by clause
 * @return the new Query with an inclusive end cursor
 */
public Query endAt ( Object ... fieldValues ) {
  QueryOptions cursorOptions = new QueryOptions ( options ) ;
  // 'false' makes the end cursor inclusive of the position.
  cursorOptions . endCursor = createCursor ( cursorOptions . fieldOrders , fieldValues , false ) ;
  return new Query ( firestore , path , cursorOptions ) ;
}
Creates and returns a new Query that ends at the provided fields relative to the order of the query . The order of the field values must match the order of the order by clauses of the query .
6,059
/**
 * Builds the final StructuredQuery proto from this Query's options.
 *
 * <p>Assembly: the collection selector from this path's id; a single field
 * filter is inlined (as a field or unary filter) while multiple filters are
 * AND-ed into a composite filter; then explicit order-by clauses, field
 * projections, limit/offset (where -1 means "unset"), and the start/end
 * cursors when present.
 *
 * @return the populated StructuredQuery builder
 */
StructuredQuery . Builder buildQuery ( ) { StructuredQuery . Builder structuredQuery = StructuredQuery . newBuilder ( ) ; structuredQuery . addFrom ( StructuredQuery . CollectionSelector . newBuilder ( ) . setCollectionId ( path . getId ( ) ) ) ; if ( options . fieldFilters . size ( ) == 1 ) { Filter filter = options . fieldFilters . get ( 0 ) . toProto ( ) ; if ( filter . hasFieldFilter ( ) ) { structuredQuery . getWhereBuilder ( ) . setFieldFilter ( filter . getFieldFilter ( ) ) ; } else { Preconditions . checkState ( filter . hasUnaryFilter ( ) , "Expected a UnaryFilter or a FieldFilter." ) ; structuredQuery . getWhereBuilder ( ) . setUnaryFilter ( filter . getUnaryFilter ( ) ) ; } } else if ( options . fieldFilters . size ( ) > 1 ) { Filter . Builder filter = Filter . newBuilder ( ) ; StructuredQuery . CompositeFilter . Builder compositeFilter = StructuredQuery . CompositeFilter . newBuilder ( ) ; compositeFilter . setOp ( CompositeFilter . Operator . AND ) ; for ( FieldFilter fieldFilter : options . fieldFilters ) { compositeFilter . addFilters ( fieldFilter . toProto ( ) ) ; } filter . setCompositeFilter ( compositeFilter . build ( ) ) ; structuredQuery . setWhere ( filter . build ( ) ) ; } if ( ! options . fieldOrders . isEmpty ( ) ) { for ( FieldOrder order : options . fieldOrders ) { structuredQuery . addOrderBy ( order . toProto ( ) ) ; } } if ( ! options . fieldProjections . isEmpty ( ) ) { structuredQuery . getSelectBuilder ( ) . addAllFields ( options . fieldProjections ) ; } if ( options . limit != - 1 ) { structuredQuery . setLimit ( Int32Value . newBuilder ( ) . setValue ( options . limit ) ) ; } if ( options . offset != - 1 ) { structuredQuery . setOffset ( options . offset ) ; } if ( options . startCursor != null ) { structuredQuery . setStartAt ( options . startCursor ) ; } if ( options . endCursor != null ) { structuredQuery . setEndAt ( options . endCursor ) ; } return structuredQuery ; }
Build the final Firestore query .
6,060
/**
 * Executes the query and streams results to the given observer of
 * DocumentSnapshots.
 *
 * <p>Implemented by adapting the caller's observer into the internal
 * {@code QuerySnapshotObserver}: each callback is forwarded one-to-one
 * (QueryDocumentSnapshot is delivered as its DocumentSnapshot supertype).
 * The {@code null} second argument means no transaction/read-time constraint
 * is applied — presumably; confirm against the internal stream overload.
 *
 * @param responseObserver receiver for results, errors and completion
 */
public void stream ( final ApiStreamObserver < DocumentSnapshot > responseObserver ) { stream ( new QuerySnapshotObserver ( ) { public void onNext ( QueryDocumentSnapshot documentSnapshot ) { responseObserver . onNext ( documentSnapshot ) ; } public void onError ( Throwable throwable ) { responseObserver . onError ( throwable ) ; } public void onCompleted ( ) { responseObserver . onCompleted ( ) ; } } , null ) ; }
Executes the query and streams the results as a StreamObserver of DocumentSnapshots .
6,061
/**
 * Fetches the current status of this operation.
 *
 * <p>Once an operation is known to be done it is immutable, so this instance
 * is returned unchanged; otherwise the latest proto is fetched and wrapped in
 * a fresh Operation.
 *
 * @return this operation if already done, otherwise a refreshed copy
 * @throws SpannerException if the RPC fails
 */
public Operation < R , M > reload ( ) throws SpannerException {
  if ( isDone ) {
    return this ;
  }
  com . google . longrunning . Operation latest = rpc . getOperation ( name ) ;
  return Operation . < R , M > create ( rpc , latest , parser ) ;
}
Fetches the current status of this operation .
6,062
/**
 * Blocks until this operation completes, or until the configured maximum
 * wait time elapses.
 *
 * <p>Polls {@code rpc.getOperation} under the merged retry settings. The
 * retry predicate keeps polling while the operation proto reports not-done,
 * and also retries retryable SpannerExceptions other than NOT_FOUND.
 * Terminal handling: an interrupt is propagated as an interrupted
 * SpannerException; a NOT_FOUND failure yields {@code null} (the operation
 * vanished); poll timeout maps to DEADLINE_EXCEEDED; any other cause is
 * wrapped as a SpannerException.
 *
 * @param waitOptions overrides merged onto the default wait settings
 * @return the completed operation, or {@code null} if it no longer exists
 * @throws SpannerException on failure, interrupt, or timeout
 */
public Operation < R , M > waitFor ( RetryOption ... waitOptions ) throws SpannerException { if ( isDone ( ) ) { return this ; } RetrySettings waitSettings = RetryOption . mergeToSettings ( DEFAULT_OPERATION_WAIT_SETTINGS , waitOptions ) ; try { com . google . longrunning . Operation proto = RetryHelper . poll ( new Callable < com . google . longrunning . Operation > ( ) { public com . google . longrunning . Operation call ( ) throws Exception { return rpc . getOperation ( name ) ; } } , waitSettings , new BasicResultRetryAlgorithm < com . google . longrunning . Operation > ( ) { public boolean shouldRetry ( Throwable prevThrowable , com . google . longrunning . Operation prevResponse ) { if ( prevResponse != null ) { return ! prevResponse . getDone ( ) ; } if ( prevThrowable instanceof SpannerException ) { SpannerException spannerException = ( SpannerException ) prevThrowable ; return spannerException . getErrorCode ( ) != ErrorCode . NOT_FOUND && spannerException . isRetryable ( ) ; } return false ; } } , clock ) ; return Operation . create ( rpc , proto , parser ) ; } catch ( InterruptedException e ) { throw SpannerExceptionFactory . propagateInterrupt ( e ) ; } catch ( ExecutionException e ) { Throwable cause = e . getCause ( ) ; if ( cause instanceof SpannerException ) { SpannerException spannerException = ( SpannerException ) cause ; if ( spannerException . getErrorCode ( ) == ErrorCode . NOT_FOUND ) { return null ; } throw spannerException ; } if ( cause instanceof PollException ) { throw SpannerExceptionFactory . newSpannerException ( ErrorCode . DEADLINE_EXCEEDED , "Operation did not complete in the given time" ) ; } throw SpannerExceptionFactory . newSpannerException ( cause ) ; } }
Blocks till the operation is complete or maximum time if specified has elapsed .
6,063
/**
 * Creates a finding under the given source; the source must already exist
 * for creation to succeed. Delegates to the request-based overload.
 *
 * @param parent the owning source, or {@code null} to send no parent
 * @param findingId the id for the new finding
 * @param finding the finding to create
 * @return the created finding
 */
public final Finding createFinding ( SourceName parent , String findingId , Finding finding ) {
  String parentName = ( parent == null ) ? null : parent . toString ( ) ;
  CreateFindingRequest request = CreateFindingRequest . newBuilder ( )
      . setParent ( parentName )
      . setFindingId ( findingId )
      . setFinding ( finding )
      . build ( ) ;
  return createFinding ( request ) ;
}
Creates a finding . The corresponding source must exist for finding creation to succeed .
6,064
/**
 * Creates or updates a finding; the corresponding source must exist for a
 * creation to succeed. Delegates to the request-based overload.
 *
 * @param finding the finding carrying the new state
 * @return the created or updated finding
 */
public final Finding updateFinding ( Finding finding ) {
  UpdateFindingRequest . Builder requestBuilder = UpdateFindingRequest . newBuilder ( ) ;
  requestBuilder . setFinding ( finding ) ;
  return updateFinding ( requestBuilder . build ( ) ) ;
}
Creates or updates a finding . The corresponding source must exist for a finding creation to succeed .
6,065
/**
 * Updates an organization's settings.
 *
 * @param organizationSettings the new settings payload
 * @return the updated settings
 */
public final OrganizationSettings updateOrganizationSettings(
    OrganizationSettings organizationSettings) {
  UpdateOrganizationSettingsRequest.Builder builder =
      UpdateOrganizationSettingsRequest.newBuilder();
  builder.setOrganizationSettings(organizationSettings);
  return updateOrganizationSettings(builder.build());
}
Updates an organization's settings.
6,066
/**
 * Updates a source.
 *
 * @param source the source payload to apply
 * @return the updated source
 */
public final Source updateSource(Source source) {
  UpdateSourceRequest.Builder builder = UpdateSourceRequest.newBuilder();
  builder.setSource(source);
  return updateSource(builder.build());
}
Updates a source .
6,067
/**
 * Updates security marks.
 *
 * @param securityMarks the marks payload to apply
 * @return the updated security marks
 */
public final SecurityMarks updateSecurityMarks(SecurityMarks securityMarks) {
  UpdateSecurityMarksRequest.Builder builder = UpdateSecurityMarksRequest.newBuilder();
  builder.setSecurityMarks(securityMarks);
  return updateSecurityMarks(builder.build());
}
Updates security marks .
6,068
/**
 * Lists every session that belongs to the given database.
 *
 * @param database the database whose sessions are listed
 * @return a paged response over the sessions
 */
public final ListSessionsPagedResponse listSessions(String database) {
  ListSessionsRequest.Builder builder = ListSessionsRequest.newBuilder();
  builder.setDatabase(database);
  return listSessions(builder.build());
}
Lists all sessions in a given database .
6,069
/**
 * Serializes this key's proto to its text form and URL-encodes it (UTF-8) so the
 * result can be embedded in a URL.
 */
public String toUrlSafe() {
  final String textForm = TextFormat.printToString(toPb());
  try {
    return URLEncoder.encode(textForm, UTF_8.name());
  } catch (UnsupportedEncodingException e) {
    // UTF-8 is mandated by the JVM spec, so this branch is effectively unreachable.
    throw new IllegalStateException("Unexpected encoding exception", e);
  }
}
Returns the key in an encoded form that can be used as part of a URL .
6,070
/**
 * Demonstrates creating a PRODUCTION instance (one 3-node SSD cluster in
 * us-central1-f) when no instance with {@code instanceId} exists yet.
 */
public void createProdInstance() {
  if (adminClient.exists(instanceId)) {
    // Nothing to do; the demo only creates missing instances.
    return;
  }
  System.out.println("Instance does not exist, creating a PRODUCTION instance");
  CreateInstanceRequest request =
      CreateInstanceRequest.of(instanceId)
          .addCluster(clusterId, "us-central1-f", 3, StorageType.SSD)
          .setType(Instance.Type.PRODUCTION)
          .addLabel("department", "accounting");
  try {
    Instance created = adminClient.createInstance(request);
    System.out.printf("PRODUCTION type instance %s created successfully%n", created.getId());
  } catch (Exception e) {
    System.err.println("Failed to create instance: " + e.getMessage());
    throw e;
  }
}
Demonstrates how to create a Production instance within a provided project .
6,071
/**
 * Demonstrates listing all instances in the project, reporting partial results when
 * some zones are unavailable.
 */
public void listInstances() {
  System.out.println("\nListing Instances");
  try {
    for (Instance each : adminClient.listInstances()) {
      System.out.println(each.getId());
    }
  } catch (PartialListInstancesException e) {
    // Some zones could not be reached; report what was still listable.
    System.err.println("Failed to list instances: " + e.getMessage());
    System.err.println("The following zones are unavailable: " + e.getUnavailableZones());
    System.err.println("But the following instances are reachable: " + e.getInstances());
  }
}
Demonstrates how to list all instances within a project .
6,072
/**
 * Demonstrates fetching a single instance and printing its id, display name, labels,
 * state and type.
 *
 * @return the instance, or {@code null} if no instance with {@code instanceId} exists
 */
public Instance getInstance() {
  System.out.println("\nGet Instance");
  Instance instance = null;
  try {
    instance = adminClient.getInstance(instanceId);
    System.out.println("Instance ID: " + instance.getId());
    System.out.println("Display Name: " + instance.getDisplayName());
    System.out.print("Labels: ");
    // Iterate entries directly instead of keySet() + get() lookups.
    for (Map.Entry<String, String> label : instance.getLabels().entrySet()) {
      System.out.printf("%s - %s", label.getKey(), label.getValue());
    }
    System.out.println("\nState: " + instance.getState());
    System.out.println("Type: " + instance.getType());
  } catch (NotFoundException e) {
    System.err.println("Failed to get non-existent instance: " + e.getMessage());
  }
  return instance;
}
Demonstrates how to get an instance .
6,073
/** Demonstrates listing the clusters of the configured instance. */
public void listClusters() {
  System.out.println("\nListing Clusters");
  try {
    for (Cluster each : adminClient.listClusters(instanceId)) {
      System.out.println(each.getId());
    }
  } catch (NotFoundException e) {
    System.err.println(
        "Failed to list clusters from a non-existent instance: " + e.getMessage());
  }
}
Demonstrates how to list clusters within an instance .
6,074
/** Demonstrates deleting the configured instance, tolerating its absence. */
public void deleteInstance() {
  System.out.println("\nDeleting Instance");
  try {
    adminClient.deleteInstance(instanceId);
    System.out.println("Instance deleted: " + instanceId);
  } catch (NotFoundException e) {
    System.err.println("Failed to delete non-existent instance: " + e.getMessage());
  }
}
Demonstrates how to delete an instance .
6,075
/**
 * Demonstrates adding a 3-node SSD cluster in us-central1-c to the configured
 * instance, tolerating the cluster already existing.
 */
public void addCluster() {
  System.out.printf("%nAdding cluster: %s to instance: %s%n", CLUSTER, instanceId);
  CreateClusterRequest request =
      CreateClusterRequest.of(instanceId, CLUSTER)
          .setZone("us-central1-c")
          .setServeNodes(3)
          .setStorageType(StorageType.SSD);
  try {
    adminClient.createCluster(request);
    System.out.printf("Cluster: %s created successfully%n", CLUSTER);
  } catch (AlreadyExistsException e) {
    System.err.println("Failed to add cluster, already exists: " + e.getMessage());
  }
}
Demonstrates how to add a cluster to an instance .
6,076
/** Demonstrates removing a cluster from the configured instance, tolerating its absence. */
public void deleteCluster() {
  System.out.printf("%nDeleting cluster: %s from instance: %s%n", CLUSTER, instanceId);
  try {
    adminClient.deleteCluster(instanceId, CLUSTER);
    System.out.printf("Cluster: %s deleted successfully%n", CLUSTER);
  } catch (NotFoundException e) {
    System.err.println("Failed to delete a non-existent cluster: " + e.getMessage());
  }
}
Demonstrates how to delete a cluster from an instance .
6,077
/**
 * Creates a snapshot of this disk under the given name.
 *
 * @param snapshot the name for the new snapshot
 * @param options operation options
 * @return the operation tracking the snapshot creation
 */
public Operation createSnapshot(String snapshot, OperationOption... options) {
  SnapshotInfo info = SnapshotInfo.of(SnapshotId.of(snapshot), getDiskId());
  return compute.create(info, options);
}
Creates a snapshot for this disk given the snapshot's name.
6,078
/**
 * Creates a snapshot of this disk under the given name and description.
 *
 * @param snapshot the name for the new snapshot
 * @param description a human-readable description for the snapshot
 * @param options operation options
 * @return the operation tracking the snapshot creation
 */
public Operation createSnapshot(String snapshot, String description, OperationOption... options) {
  SnapshotInfo info =
      SnapshotInfo.newBuilder(SnapshotId.of(snapshot), getDiskId())
          .setDescription(description)
          .build();
  return compute.create(info, options);
}
Creates a snapshot for this disk given the snapshot's name and description.
6,079
/**
 * Creates an image from this disk under the given name.
 *
 * @param image the name for the new image
 * @param options operation options
 * @return the operation tracking the image creation
 */
public Operation createImage(String image, OperationOption... options) {
  DiskImageConfiguration config = DiskImageConfiguration.of(getDiskId());
  return compute.create(ImageInfo.of(ImageId.of(image), config), options);
}
Creates an image for this disk given the image s name .
6,080
/**
 * Creates an image from this disk under the given name and description.
 *
 * @param image the name for the new image
 * @param description a human-readable description for the image
 * @param options operation options
 * @return the operation tracking the image creation
 */
public Operation createImage(String image, String description, OperationOption... options) {
  DiskImageConfiguration config = DiskImageConfiguration.of(getDiskId());
  ImageInfo info =
      ImageInfo.newBuilder(ImageId.of(image), config).setDescription(description).build();
  return compute.create(info, options);
}
Creates an image for this disk given the image s name and description .
6,081
/**
 * Resizes this disk to {@code sizeGb}; the new size must exceed the current one.
 *
 * @param sizeGb the requested size, in GB
 * @param options operation options
 * @return the operation tracking the resize
 */
public Operation resize(long sizeGb, OperationOption... options) {
  return compute.resize(getDiskId(), sizeGb, options);
}
Resizes this disk to the requested size . The new size must be larger than the previous one .
6,082
/**
 * Returns a load-job configuration builder for the given destination table and
 * source URIs.
 */
public static Builder newBuilder(TableId destinationTable, List<String> sourceUris) {
  Builder builder = new Builder();
  builder.setDestinationTable(destinationTable);
  builder.setSourceUris(sourceUris);
  return builder;
}
Creates a builder for a BigQuery Load Job configuration given the destination table and source URIs .
6,083
/**
 * Returns a load-job configuration builder for the given destination table, source
 * URIs and source format.
 */
public static Builder newBuilder(
    TableId destinationTable, List<String> sourceUris, FormatOptions format) {
  Builder builder = newBuilder(destinationTable, sourceUris);
  return builder.setFormatOptions(format);
}
Creates a builder for a BigQuery Load Job configuration given the destination table format and source URIs .
6,084
/**
 * Returns a load-job configuration builder for the given destination table, a single
 * source URI and the source format.
 */
public static Builder newBuilder(
    TableId destinationTable, String sourceUri, FormatOptions format) {
  // Delegate to the multi-URI overload with a one-element list.
  return newBuilder(destinationTable, ImmutableList.of(sourceUri), format);
}
Creates a builder for a BigQuery Load Job configuration given the destination table format and source URI .
6,085
/** Returns a load-job configuration for the given destination table and source URIs. */
public static LoadJobConfiguration of(TableId destinationTable, List<String> sourceUris) {
  Builder builder = newBuilder(destinationTable, sourceUris);
  return builder.build();
}
Returns a BigQuery Load Job Configuration for the given destination table and source URIs .
6,086
/** Returns a load-job configuration for the given destination table and a single source URI. */
public static LoadJobConfiguration of(TableId destinationTable, String sourceUri) {
  // Delegate to the multi-URI overload with a one-element list.
  return of(destinationTable, ImmutableList.of(sourceUri));
}
Returns a BigQuery Load Job Configuration for the given destination table and source URI .
6,087
/**
 * Returns a load-job configuration for the given destination table, a single source
 * URI and the source format.
 */
public static LoadJobConfiguration of(
    TableId destinationTable, String sourceUri, FormatOptions format) {
  // Delegate to the multi-URI overload with a one-element list.
  return of(destinationTable, ImmutableList.of(sourceUri), format);
}
Returns a BigQuery Load Job Configuration for the given destination table format and source URI .
6,088
/**
 * Creates a new table in this dataset.
 *
 * @param tableId the id of the table to create (within this dataset)
 * @param definition the table definition
 * @param options table options
 * @return the created table
 */
public Table create(String tableId, TableDefinition definition, TableOption... options) {
  TableId qualified = TableId.of(getDatasetId().getDataset(), tableId);
  return bigquery.create(TableInfo.of(qualified, definition), options);
}
Creates a new table in this dataset .
6,089
/**
 * Starts a transaction on the server and records the returned transaction id. When a
 * previous attempt's id is known, it is forwarded so the server retries that
 * transaction.
 */
ApiFuture<Void> begin() {
  BeginTransactionRequest.Builder request = BeginTransactionRequest.newBuilder();
  request.setDatabase(firestore.getDatabaseName());
  if (previousTransactionId != null) {
    // Ask the server to continue the earlier transaction attempt.
    request.getOptionsBuilder().getReadWriteBuilder().setRetryTransaction(previousTransactionId);
  }
  ApiFuture<BeginTransactionResponse> beginFuture =
      firestore.sendRequest(request.build(), firestore.getClient().beginTransactionCallable());
  return ApiFutures.transform(
      beginFuture,
      new ApiFunction<BeginTransactionResponse, Void>() {
        public Void apply(BeginTransactionResponse response) {
          transactionId = response.getTransaction();
          pending = true;
          return null;
        }
      });
}
Starts a transaction and obtains the transaction id from the server .
6,090
/** Rolls the transaction back, releasing all read locks held by it. */
ApiFuture<Void> rollback() {
  pending = false;
  RollbackRequest.Builder request = RollbackRequest.newBuilder();
  request.setTransaction(transactionId);
  request.setDatabase(firestore.getDatabaseName());
  ApiFuture<Empty> rollbackFuture =
      firestore.sendRequest(request.build(), firestore.getClient().rollbackCallable());
  return ApiFutures.transform(
      rollbackFuture,
      new ApiFunction<Empty, Void>() {
        public Void apply(Empty unused) {
          return null;
        }
      });
}
Rolls a transaction back and releases all read locks .
6,091
/**
 * Updates a data transfer configuration. All fields covered by {@code updateMask} must
 * be set on {@code transferConfig} even when unchanged.
 *
 * @param transferConfig the configuration payload
 * @param updateMask the fields to update
 * @return the updated configuration
 */
public final TransferConfig updateTransferConfig(
    TransferConfig transferConfig, FieldMask updateMask) {
  UpdateTransferConfigRequest.Builder builder = UpdateTransferConfigRequest.newBuilder();
  builder.setTransferConfig(transferConfig);
  builder.setUpdateMask(updateMask);
  return updateTransferConfig(builder.build());
}
Updates a data transfer configuration . All fields must be set even if they are not updated .
6,092
/**
 * Routes a last-scanned-row marker from the server into the state machine.
 * On failure the machine is poisoned (state nulled) so no further input is accepted.
 */
void handleLastScannedRow(ByteString key) {
  try {
    currentState = currentState.handleLastScannedRow(key);
  } catch (RuntimeException failure) {
    currentState = null;
    throw failure;
  }
}
Handle last scanned row events from the server .
6,093
/**
 * Feeds one chunk into the state machine. An invalid chunk throws and poisons the
 * machine (state nulled), so it must not receive further input afterwards.
 */
void handleChunk(CellChunk chunk) {
  try {
    currentState = currentState.handleChunk(chunk);
  } catch (RuntimeException failure) {
    currentState = null;
    throw failure;
  }
}
Feeds a new chunk into the state machine. If the chunk is invalid, the state machine will throw an exception and should not be used for further input.
6,094
/**
 * Hands back the last completed row and resets the machine to await a new row.
 * Only legal while the machine is in the AWAITING_ROW_CONSUME state.
 */
RowT consumeRow() {
  Preconditions.checkState(currentState == AWAITING_ROW_CONSUME, "No row to consume");
  final RowT finished = completeRow;
  reset();
  return finished;
}
Returns the last completed row and transitions to awaiting a new row .
6,095
/** Builds the fully-qualified resource name for an annotated_dataset. */
public static final String formatAnnotatedDatasetName(
    String project, String dataset, String annotatedDataset) {
  return ANNOTATED_DATASET_PATH_TEMPLATE.instantiate(
      "project", project,
      "dataset", dataset,
      "annotated_dataset", annotatedDataset);
}
Formats a string containing the fully-qualified path to represent an annotated_dataset resource.
6,096
/** Builds the fully-qualified resource name for an annotation_spec_set. */
public static final String formatAnnotationSpecSetName(String project, String annotationSpecSet) {
  return ANNOTATION_SPEC_SET_PATH_TEMPLATE.instantiate(
      "project", project,
      "annotation_spec_set", annotationSpecSet);
}
Formats a string containing the fully-qualified path to represent an annotation_spec_set resource.
6,097
/** Builds the fully-qualified resource name for a data_item. */
public static final String formatDataItemName(String project, String dataset, String dataItem) {
  return DATA_ITEM_PATH_TEMPLATE.instantiate(
      "project", project,
      "dataset", dataset,
      "data_item", dataItem);
}
Formats a string containing the fully - qualified path to represent a data_item resource .
6,098
/** Builds the fully-qualified resource name for a dataset. */
public static final String formatDatasetName(String project, String dataset) {
  return DATASET_PATH_TEMPLATE.instantiate(
      "project", project,
      "dataset", dataset);
}
Formats a string containing the fully - qualified path to represent a dataset resource .
6,099
/** Builds the fully-qualified resource name for an example. */
public static final String formatExampleName(
    String project, String dataset, String annotatedDataset, String example) {
  return EXAMPLE_PATH_TEMPLATE.instantiate(
      "project", project,
      "dataset", dataset,
      "annotated_dataset", annotatedDataset,
      "example", example);
}
Formats a string containing the fully-qualified path to represent an example resource.