idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
1,000
/**
 * Returns the greatest difference between this version and another one.
 * Components are compared from most to least significant (major, minor,
 * patch, suffix, build) and the first mismatch determines the result.
 *
 * @param version the version to compare against
 * @return the most significant differing component, or NONE when equal
 */
public VersionDiff diff(Semver version) {
    if (!Objects.equals(this.major, version.getMajor())) {
        return VersionDiff.MAJOR;
    }
    if (!Objects.equals(this.minor, version.getMinor())) {
        return VersionDiff.MINOR;
    }
    if (!Objects.equals(this.patch, version.getPatch())) {
        return VersionDiff.PATCH;
    }
    if (!areSameSuffixes(version.getSuffixTokens())) {
        return VersionDiff.SUFFIX;
    }
    if (!Objects.equals(this.build, version.getBuild())) {
        return VersionDiff.BUILD;
    }
    return VersionDiff.NONE;
}
Returns the greatest difference between 2 versions. For example, if the current version is 1.2.3 and the compared version is 1.3.0, the biggest difference is the MINOR number.
1,001
/**
 * Builds a requirement following the rules of NPM.
 * An empty requirement string is interpreted as "any version" ("*").
 *
 * @param requirement the NPM requirement string
 * @return the parsed requirement
 */
public static Requirement buildNPM(String requirement) {
    // NPM semantics: an empty requirement matches everything.
    final String effective = requirement.isEmpty() ? "*" : requirement;
    return buildWithTokenizer(effective, Semver.SemverType.NPM);
}
Builds a requirement following the rules of NPM .
1,002
/**
 * Returns a parenthesized copy of the token list, giving the lowest
 * priority to the OR operator: the whole expression is wrapped in one
 * group and every OR closes the current group and opens the next one.
 *
 * @param tokens the infix token list
 * @return a new list with explicit grouping around each OR operand
 */
private static List<Token> addParentheses(List<Token> tokens) {
    final List<Token> grouped = new ArrayList<Token>();
    grouped.add(new Token(TokenType.OPENING, "("));
    for (final Token current : tokens) {
        if (current.type == TokenType.OR) {
            // Close the left-hand group, emit OR, open the right-hand group.
            grouped.add(new Token(TokenType.CLOSING, ")"));
            grouped.add(current);
            grouped.add(new Token(TokenType.OPENING, "("));
        } else {
            grouped.add(current);
        }
    }
    grouped.add(new Token(TokenType.CLOSING, ")"));
    return grouped;
}
Return parenthesized expression giving lowest priority to OR operator
1,003
/**
 * Adaptation of the shunting-yard algorithm: converts the parenthesized
 * infix token list into (reversed) polish notation for the evaluator.
 *
 * NOTE(review): unary operators and their operand are PUSHed onto the front
 * of the queue while all other tokens are appended; after the final
 * Collections.reverse this yields operator-before-operand order that the
 * evaluator relies on. Do not "normalize" push() to add().
 *
 * @param tokens the parenthesized infix tokens
 * @return the tokens in the evaluator's expected RPN order
 */
private static List<Tokenizer.Token> toReversePolishNotation(List<Tokenizer.Token> tokens) {
    LinkedList<Tokenizer.Token> queue = new LinkedList<Tokenizer.Token>();
    Stack<Tokenizer.Token> stack = new Stack<Tokenizer.Token>();
    for (int i = 0; i < tokens.size(); i++) {
        Tokenizer.Token token = tokens.get(i);
        switch (token.type) {
            case VERSION:
                // Operands go straight to the output queue.
                queue.add(token);
                break;
            case CLOSING:
                // Pop operators until the matching opening parenthesis.
                while (stack.peek().type != Tokenizer.TokenType.OPENING) {
                    queue.add(stack.pop());
                }
                stack.pop();
                // A unary operator directly before the group applies to it.
                if (stack.size() > 0 && stack.peek().type.isUnary()) {
                    queue.add(stack.pop());
                }
                break;
            default:
                if (token.type.isUnary()) {
                    // Emit the unary operator together with its operand,
                    // at the FRONT of the queue (see NOTE above).
                    queue.push(token);
                    i++;
                    queue.push(tokens.get(i));
                } else {
                    stack.push(token);
                }
                break;
        }
    }
    while (!stack.isEmpty()) {
        queue.add(stack.pop());
    }
    Collections.reverse(queue);
    return queue;
}
Adaptation of the shunting-yard algorithm.
1,004
/**
 * Evaluates a reverse polish notation token list into a Requirement tree.
 * The iterator is consumed recursively: a VERSION token is a leaf, a HYPHEN
 * or unary operator consumes its operand(s), and any other operator (AND/OR)
 * recursively evaluates its two sub-requirements.
 *
 * @param iterator the RPN tokens, positioned at the next token to evaluate
 * @param type     the semver dialect the requirement is parsed under
 * @return the evaluated requirement
 * @throws SemverException when the token stream is malformed or exhausted
 */
private static Requirement evaluateReversePolishNotation(Iterator<Tokenizer.Token> iterator, Semver.SemverType type) {
    try {
        Tokenizer.Token token = iterator.next();
        if (token.type == Tokenizer.TokenType.VERSION) {
            // "*" (and NPM's "latest") means any version: >=0.0.0.
            if ("*".equals(token.value) || (type == Semver.SemverType.NPM && "latest".equals(token.value))) {
                return new Requirement(new Range("0.0.0", Range.RangeOperator.GTE), null, null, null);
            }
            Semver version = new Semver(token.value, type);
            if (version.getMinor() != null && version.getPatch() != null) {
                // Fully specified version: exact match.
                Range range = new Range(version, Range.RangeOperator.EQ);
                return new Requirement(range, null, null, null);
            } else {
                // Partial version (e.g. "1.2"): treat like a tilde requirement.
                return tildeRequirement(version.getValue(), type);
            }
        } else if (token.type == Tokenizer.TokenType.HYPHEN) {
            // The RPN list was reversed, so the upper bound comes out first.
            Tokenizer.Token token3 = iterator.next();
            Tokenizer.Token token2 = iterator.next();
            return hyphenRequirement(token2.value, token3.value, type);
        } else if (token.type.isUnary()) {
            Tokenizer.Token token2 = iterator.next();
            Range.RangeOperator rangeOp;
            switch (token.type) {
                case EQ:
                    rangeOp = Range.RangeOperator.EQ;
                    break;
                case LT:
                    rangeOp = Range.RangeOperator.LT;
                    break;
                case LTE:
                    rangeOp = Range.RangeOperator.LTE;
                    break;
                case GT:
                    rangeOp = Range.RangeOperator.GT;
                    break;
                case GTE:
                    rangeOp = Range.RangeOperator.GTE;
                    break;
                case TILDE:
                    // Tilde/caret expand to composite requirements, not a single range.
                    return tildeRequirement(token2.value, type);
                case CARET:
                    return caretRequirement(token2.value, type);
                default:
                    throw new SemverException("Invalid requirement");
            }
            Range range = new Range(token2.value, rangeOp);
            return new Requirement(range, null, null, null);
        } else {
            // Binary operator (AND/OR): evaluate both sub-expressions.
            Requirement req1 = evaluateReversePolishNotation(iterator, type);
            Requirement req2 = evaluateReversePolishNotation(iterator, type);
            RequirementOperator requirementOp;
            switch (token.type) {
                case OR:
                    requirementOp = RequirementOperator.OR;
                    break;
                case AND:
                    requirementOp = RequirementOperator.AND;
                    break;
                default:
                    throw new SemverException("Invalid requirement");
            }
            return new Requirement(null, req1, requirementOp, req2);
        }
    } catch (NoSuchElementException e) {
        // Ran out of tokens mid-expression: the requirement string was invalid.
        throw new SemverException("Invalid requirement");
    }
}
Evaluates a reverse polish notation token list
1,005
/**
 * Creates a requirement that satisfies the NPM hyphen range
 * "x1.y1.z1 - x2.y2.z2": lower bound inclusive, upper bound inclusive when
 * fully specified, exclusive (with the missing component bumped) otherwise.
 *
 * @param lowerVersion the lower bound, possibly partial
 * @param upperVersion the upper bound, possibly partial
 * @param type         must be NPM; hyphen ranges are an NPM-only syntax
 * @return lower-AND-upper composite requirement
 * @throws SemverException when type is not NPM
 */
protected static Requirement hyphenRequirement(String lowerVersion, String upperVersion, Semver.SemverType type) {
    if (type != Semver.SemverType.NPM) {
        throw new SemverException("The hyphen requirements are only compatible with NPM.");
    }
    // Missing minor/patch on the lower bound default to 0 (">= 1" == ">= 1.0.0").
    Semver lower = extrapolateVersion(new Semver(lowerVersion, type));
    Semver upper = new Semver(upperVersion, type);
    Range.RangeOperator upperOperator = Range.RangeOperator.LTE;
    if (upper.getMinor() == null || upper.getPatch() == null) {
        // Partial upper bound: "- 1.2" means "< 1.3.0", "- 1" means "< 2.0.0".
        upperOperator = Range.RangeOperator.LT;
        if (upper.getMinor() == null) {
            upper = extrapolateVersion(upper).withIncMajor();
        } else {
            upper = extrapolateVersion(upper).withIncMinor();
        }
    }
    Requirement req1 = new Requirement(new Range(lower, Range.RangeOperator.GTE), null, null, null);
    Requirement req2 = new Requirement(new Range(upper, upperOperator), null, null, null);
    return new Requirement(null, req1, RequirementOperator.AND, req2);
}
Creates a requirement that satisfies the hyphen range "x1.y1.z1 - x2.y2.z2".
1,006
/**
 * Extrapolates the optional minor and patch numbers to zero:
 * "1" becomes "1.0.0", "1.2" becomes "1.2.0", "1.2.3" stays "1.2.3".
 * Suffix tokens and build metadata are carried over unchanged.
 *
 * @param semver the version to extrapolate
 * @return a new Semver of the same type with minor/patch defaulted to 0
 */
private static Semver extrapolateVersion(Semver semver) {
    final StringBuilder version = new StringBuilder();
    version.append(semver.getMajor())
            .append('.')
            .append(semver.getMinor() == null ? 0 : semver.getMinor())
            .append('.')
            .append(semver.getPatch() == null ? 0 : semver.getPatch());
    final String[] suffixes = semver.getSuffixTokens();
    for (int i = 0; i < suffixes.length; i++) {
        // First suffix token is introduced by '-', the rest are '.'-separated.
        version.append(i == 0 ? "-" : ".").append(suffixes[i]);
    }
    if (semver.getBuild() != null) {
        version.append('+').append(semver.getBuild());
    }
    return new Semver(version.toString(), semver.getType());
}
Extrapolates the optional minor and patch numbers: 1 becomes 1.0.0, 1.2 becomes 1.2.0, and 1.2.3 stays 1.2.3.
1,007
/**
 * Checks if the requirement is satisfied by a version.
 * A leaf requirement delegates to its range; an AND node collects all leaf
 * ranges and, for pre-release versions, additionally applies the NPM rule
 * that a suffixed version only matches when some range explicitly allows a
 * pre-release of the SAME major.minor.patch; an OR node is a plain disjunction.
 *
 * @param version the version to test
 * @return true when the version satisfies this requirement
 */
public boolean isSatisfiedBy(Semver version) {
    if (this.range != null) {
        // Leaf node: a single range decides.
        return this.range.isSatisfiedBy(version);
    } else {
        switch (this.op) {
            case AND:
                try {
                    List<Range> set = getAllRanges(this, new ArrayList<Range>());
                    for (Range range : set) {
                        if (!range.isSatisfiedBy(version)) {
                            return false;
                        }
                    }
                    if (version.getSuffixTokens().length > 0) {
                        // Pre-release version: only accepted when a range pins a
                        // pre-release with the same major/minor/patch triple.
                        for (Range range : set) {
                            if (range.version == null) {
                                continue;
                            }
                            if (range.version.getSuffixTokens().length > 0) {
                                Semver allowed = range.version;
                                if (Objects.equals(version.getMajor(), allowed.getMajor()) && Objects.equals(version.getMinor(), allowed.getMinor()) && Objects.equals(version.getPatch(), allowed.getPatch())) {
                                    return true;
                                }
                            }
                        }
                        return false;
                    }
                    return true;
                } catch (Exception e) {
                    // getAllRanges bails out on nested OR nodes; fall back to
                    // the straightforward recursive conjunction.
                    return this.req1.isSatisfiedBy(version) && this.req2.isSatisfiedBy(version);
                }
            case OR:
                return this.req1.isSatisfiedBy(version) || this.req2.isSatisfiedBy(version);
        }
        throw new RuntimeException("Code error. Unknown RequirementOperator: " + this.op);
    }
}
Checks if the requirement is satisfied by a version .
1,008
/**
 * Formats the object's attributes as GeoJSON: a JSON object carrying this
 * object's type under the JSON_TYPE key.
 *
 * @return the GeoJSON representation of this object
 * @throws JSONException if the value cannot be stored
 */
public JSONObject toJSON() throws JSONException {
    final JSONObject result = new JSONObject();
    result.put(JSON_TYPE, getType());
    return result;
}
Formats the object's attributes as GeoJSON.
1,009
/**
 * Sets the list of features contained within this feature collection.
 * All previously existing features are removed; passing null simply
 * leaves the collection empty.
 *
 * @param features the new features, may be null
 */
public void setFeatures(List<Feature> features) {
    mFeatures.clear();
    if (features == null) {
        return;
    }
    mFeatures.addAll(features);
}
Sets the list of features contained within this feature collection . All previously existing features are removed as a result of setting this property .
1,010
/**
 * Sets the list of geometries contained within this geometry collection.
 * All previously existing geometries are removed; passing null simply
 * leaves the collection empty.
 *
 * @param geometries the new geometries, may be null
 */
public void setGeometries(List<Geometry> geometries) {
    mGeometries.clear();
    if (geometries == null) {
        return;
    }
    mGeometries.addAll(geometries);
}
Sets the list of geometries contained within this geometry collection . All previously existing geometries are removed as a result of setting this property .
1,011
/**
 * Returns the parent tree node of a given node path, i.e. everything up to
 * (but excluding) the last '/' separator.
 *
 * @param node the node path, with '/' as separator
 * @return the parent path, "" for a top-level absolute node such as "/a",
 *         or null when the path contains no separator at all
 */
public static String parent(String node) {
    final int lastSeparator = node.lastIndexOf('/');
    return lastSeparator < 0 ? null : node.substring(0, lastSeparator);
}
Returns the parent tree node of a given node .
1,012
/**
 * Adds a service this service depends on. May only be called during
 * initialization; the link is registered on both sides so the dependency
 * graph can be walked in either direction.
 *
 * @param service the service this service depends on
 */
public void addDependency(Service service) {
    assertDuringInitialization();
    LOG.info("Adding a dependency on {} to {}", service.getName(), getName());
    addDependsOn(service);
    // Register the reverse edge on the dependency itself.
    service.addDependedBy(this);
}
Adds a service this service depends on .
1,013
/**
 * Sets the readiness of this service. If this service is ready and all of
 * its dependencies are available, then this service becomes available.
 *
 * @param ready the new readiness state
 */
protected void setReady(boolean ready) {
    synchronized (SERVICE_AVILABILITY_LOCK) {
        LOG.info("Service {} is now {}", getName(), ready ? "READY" : "NOT READY");
        this.ready = ready;
        // Re-evaluate availability while still holding the lock so readiness
        // and availability change atomically.
        checkAvailability();
    }
    // Deliberately outside the synchronized block — presumably to avoid
    // invoking callback code while holding the lock; confirm before moving.
    runAvailableMethod();
}
Sets the readiness of this service. If this service is ready and all its dependencies are available, then this service becomes available.
1,014
/**
 * Adds a new node to the tree by broadcasting a CREATE request, then blocks
 * until the node is observed locally. Parent nodes are created by the
 * receiving side if non-existent; creating an existing node is a no-op
 * (though its ephemeral status may change).
 *
 * @param fqn       the fully-qualified name of the node to create
 * @param ephemeral whether the node should be ephemeral
 */
public void create(String fqn, boolean ephemeral) {
    awaitRunning();
    try {
        LOG.trace("Creating {} {}", fqn, ephemeral ? "(ephemeral)" : "");
        channel.send(new Message(null, new Request(Request.CREATE, fqn, ephemeral)));
        // Wait until the update has been applied locally; the while loop
        // guards against spurious wakeups.
        synchronized (updateCondition) {
            while (!exists(fqn))
                updateCondition.wait();
        }
    } catch (InterruptedException ex) {
        // Preserve the interrupt status for callers higher up the stack
        // (the original swallowed it inside a blanket catch).
        Thread.currentThread().interrupt();
        LOG.error("failure bcasting CREATE request", ex);
    } catch (Exception ex) {
        // Fixed: this broadcasts a CREATE request, not a PUT request.
        LOG.error("failure bcasting CREATE request", ex);
    }
}
Adds a new node to the tree. If the node does not exist yet it will be created; parent nodes will also be created if non-existent. If the node already exists this is a no-op, though its ephemeral status may change.
1,015
/**
 * Finds a node by its fully-qualified name and returns a defensive copy of
 * the data associated with it. Returns null if the node was not found in
 * the tree or its data is null.
 *
 * @param fqn the fully-qualified node name
 * @return a copy of the node's data, or null
 */
public byte[] get(String fqn) {
    final Node node = findNode(fqn);
    if (node == null) {
        return null;
    }
    final byte[] data = node.getData();
    // Copy so callers cannot mutate the tree's internal buffer.
    return data == null ? null : Arrays.copyOf(data, data.length);
}
Finds a node given its name and returns the data associated with it . Returns null if the node was not found in the tree or the data is null .
1,016
/**
 * Returns the names of all children of a given node as a mutable snapshot,
 * or null when the node does not exist.
 *
 * @param fqn the fully-qualified node name
 * @return the child names, or null if the node was not found
 */
public List<String> getChildren(String fqn) {
    final Node node = findNode(fqn);
    if (node == null) {
        return null;
    }
    return new ArrayList<String>(node.getChildrenNames());
}
Returns all children of a given node .
1,017
/**
 * Writes this object's serialized form into the given buffer.
 * This method is NOT thread safe!
 *
 * @param buffer the destination buffer
 */
public void write(ByteBuffer buffer) {
    if (obj != null)
        buffer.put(getSerialized());
    // Drop the cached serialization buffer once it has been written out.
    tmpBuffer = null;
}
This method is not thread safe!
1,018
/**
 * Code copied from org.jboss.netty.handler.codec.oneone.OneToOneEncoder and
 * org.jboss.netty.handler.codec.oneone.OneToOneDecoder.
 * Non-message events and messages the encoder leaves untouched are forwarded
 * unchanged; a null encoding result intentionally drops the event.
 *
 * @param ctx the channel handler context
 * @param evt the downstream channel event
 * @throws Exception if encoding fails
 */
public void handleDownstream(ChannelHandlerContext ctx, ChannelEvent evt) throws Exception {
    if (!(evt instanceof MessageEvent)) {
        ctx.sendDownstream(evt);
        return;
    }
    final MessageEvent messageEvent = (MessageEvent) evt;
    final Object original = messageEvent.getMessage();
    final Object encoded = encode(ctx, messageEvent.getChannel(), original);
    if (original == encoded) {
        // Encoder made no change: forward the original event as-is.
        ctx.sendDownstream(evt);
    } else if (encoded != null) {
        ctx.sendDownstream(new DownstreamMessageEvent(
                ctx.getChannel(), messageEvent.getFuture(), encoded, messageEvent.getRemoteAddress()));
    }
    // encoded == null: the event is swallowed on purpose.
}
Code copied from org . jboss . netty . handler . codec . oneone . OneToOneEncoder and org . jboss . netty . handler . codec . oneone . OneToOneDecoder
1,019
/**
 * Cluster bootstrap: creates the control-tree skeleton, validates this
 * node's required properties, wires the branch helper and leader listeners,
 * publishes this node's info, and finally announces membership.
 *
 * This is perhaps ugly, but overriding implementations must call this method
 * at the end — or at least after calling setControlTree.
 *
 * @throws Exception if initialization fails
 */
protected void postInit() throws Exception {
    if (controlTree == null) {
        throw new RuntimeException("controlTree not set");
    }
    // Ensure the fixed top-level nodes exist (no-ops when already present).
    controlTree.create(NODES, false);
    controlTree.create(LEADERS, false);
    // Refuse to start when another node already registered under our name.
    if (controlTree.exists(myNodeInfo.treeNodePath)) {
        LOG.error("A node with the name " + myNodeInfo.getName() + " already exists!");
        throw new RuntimeException("Initialization failure");
    }
    LOG.info("Required peer node properties: {}", requiredPeerNodeProperties);
    LOG.info("Required server properties: {}", requiredServerProperties);
    // Node id 0 is the server; other ids are peers, each with their own
    // required property set.
    final Set<String> requiredProperties = (myId == 0 ? requiredServerProperties : requiredPeerNodeProperties);
    for (String property : requiredProperties) {
        if (!property.equals("id") && myNodeInfo.get(property) == null) {
            LOG.error("Required property {} not set!", property);
            throw new RuntimeException("Initialization failure");
        }
    }
    // A node only becomes visible through the branch helper once all of its
    // required properties have been written ("complete").
    branch = new DistributedBranchHelper(controlTree, NODES, false) {
        protected boolean isNodeComplete(String node, Set<String> properties) {
            if (!properties.contains("id")) {
                return false;
            }
            final short id = Short.parseShort(new String(controlTree.get(node + "/id"), Charsets.UTF_8));
            final Set<String> requiredProperties = (id == 0 ? requiredServerProperties : requiredPeerNodeProperties);
            final boolean success = properties.containsAll(requiredProperties);
            return success;
        }
    };
    branch.addListener(new DistributedTree.ListenerAdapter() {
        public void nodeChildAdded(String parentPath, String childName) {
            AbstractCluster.this.nodeAdded(childName);
        }
        public void nodeChildDeleted(String parentPath, String childName) {
            AbstractCluster.this.nodeRemoved(childName);
        }
    });
    branch.init();
    controlTree.addListener(LEADERS, new DistributedTree.ListenerAdapter() {
        public void nodeChildAdded(String parentPath, String childName) {
            AbstractCluster.this.leaderAdded(childName);
        }
        public void nodeChildDeleted(String parentPath, String childName) {
            AbstractCluster.this.leaderRemoved(childName);
        }
    });
    // Publish our own info only after listeners are in place.
    myNodeInfo.writeToTree();
    setReady(true);
    super.postInit();
    joined = true;
    fireJoinedCluster();
    // Announce all nodes that were already present before we joined.
    for (short id : masters.keySet()) {
        if (id != myId)
            fireNodeAdded(id);
    }
}
This is perhaps ugly, but overriding implementations must call this method at the end — or at least after calling setControlTree.
1,020
/**
 * Sends a message to a single node. Can block if the peer's buffer is full.
 * Failures other than interruption are logged and swallowed (best effort);
 * interruption is propagated as a RuntimeException.
 *
 * @param message the message to send
 * @param node    the target node id
 * @param address the target address (used for logging here)
 */
protected void sendToNode(Message message, short node, InetSocketAddress address) {
    try {
        if (LOG.isDebugEnabled())
            LOG.debug("Sending to node {} ({}): {}", new Object[]{node, address, message});
        // Clone buffers so the caller may reuse/mutate them after we return.
        message.cloneDataBuffers();
        final NodePeer peer = peers.get(node);
        if (peer == null)
            throw new NodeNotFoundException(node);
        peer.sendMessage(message);
        executor.submit(peer);
    } catch (InterruptedException ex) {
        LOG.error("InterruptedException", ex);
        // Fixed: restore the interrupt status before propagating so callers
        // up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new RuntimeException(ex);
    } catch (Exception ex) {
        LOG.error("Error while sending message " + message + " to node " + node, ex);
    }
}
Can block if buffer is full
1,021
/**
 * Unwraps several common wrapper exceptions and returns the underlying
 * cause: ExecutionException, InvocationTargetException, and instances of
 * exactly RuntimeException (not subclasses) that carry a cause.
 *
 * @param t the throwable to unwrap; must not be null and every unwrapped
 *          wrapper must have a non-null cause
 * @return the innermost non-wrapper throwable
 * @throws NullPointerException when t (or an intermediate cause) is null
 */
public static Throwable unwrap(Throwable t) {
    Throwable current = t;
    while (true) {
        if (current == null)
            throw new NullPointerException();
        final boolean isWrapper =
                current instanceof java.util.concurrent.ExecutionException
                || current instanceof java.lang.reflect.InvocationTargetException
                || (current.getClass().equals(RuntimeException.class) && current.getCause() != null);
        if (!isWrapper)
            return current;
        current = current.getCause();
    }
}
Unwraps several common wrapper exceptions and returns the underlying cause .
1,022
public void read ( ByteBuffer buffer ) { Persistables . persistable ( streamableNoBuffers ( ) ) . read ( buffer ) ; final int n = getNumDataBuffers ( ) ; int lengthsPosition = buffer . position ( ) ; buffer . position ( buffer . position ( ) + 2 * n ) ; for ( int i = 0 ; i < n ; i ++ ) { final int size = buffer . getShort ( lengthsPosition ) & 0xFFFF ; lengthsPosition += 2 ; final ByteBuffer b1 = Persistables . slice ( buffer , size ) ; setDataBuffer ( i , b1 ) ; } }
Note that you cannot use this method to read a buffer wrapping the array returned from toByteArray as the internal representation is different!
1,023
/**
 * Returns the length in bytes of a string's modified UTF-8 encoding, as
 * produced by DataOutputStream.writeUTF: NUL is encoded in 2 bytes, chars
 * up to 0x7F in 1 byte, up to 0x7FF in 2 bytes, and everything above —
 * including each half of a surrogate pair — in 3 bytes.
 *
 * @param str the string to measure
 * @return the encoded length in bytes
 */
public static int calcUtfLength(String str) {
    int byteCount = 0;
    for (int i = 0, n = str.length(); i < n; i++) {
        final char c = str.charAt(i);
        if (c >= 0x0001 && c <= 0x007F) {
            byteCount += 1; // plain ASCII (NUL excluded)
        } else if (c > 0x07FF) {
            byteCount += 3; // BMP chars above U+07FF, incl. surrogates
        } else {
            byteCount += 2; // NUL and U+0080..U+07FF
        }
    }
    return byteCount;
}
Returns the length in bytes of a string's UTF-8 encoding.
1,024
/**
 * Creates a bean factory whose instantiation strategy is wrapped so that
 * autowired constructor/factory-method arguments are captured and, when
 * both the bean and an argument are Services, registered as dependencies
 * on each other.
 *
 * @return the customized bean factory
 */
private static DefaultListableBeanFactory createBeanFactory() {
    return new DefaultListableBeanFactory() {
        // Instance initializer: swap in a delegating strategy that observes
        // every instantiation performed by the original one.
        {
            final InstantiationStrategy is = getInstantiationStrategy();
            setInstantiationStrategy(new InstantiationStrategy() {
                public Object instantiate(RootBeanDefinition beanDefinition, String beanName, BeanFactory owner) throws BeansException {
                    // No args involved: nothing to capture.
                    return is.instantiate(beanDefinition, beanName, owner);
                }
                public Object instantiate(RootBeanDefinition beanDefinition, String beanName, BeanFactory owner, Constructor<?> ctor, Object[] args) throws BeansException {
                    final Object bean = is.instantiate(beanDefinition, beanName, owner, ctor, args);
                    addDependencies(bean, args);
                    return bean;
                }
                public Object instantiate(RootBeanDefinition beanDefinition, String beanName, BeanFactory owner, Object factoryBean, Method factoryMethod, Object[] args) throws BeansException {
                    final Object bean = is.instantiate(beanDefinition, beanName, owner, factoryBean, factoryMethod, args);
                    addDependencies(bean, args);
                    return bean;
                }
            });
        }
        // Links every Service-typed constructor argument to the new Service
        // bean in both directions of the dependency graph.
        private void addDependencies(Object bean, Object[] args) {
            if (bean instanceof Service) {
                for (Object arg : args) {
                    if (arg instanceof Service) {
                        ((Service) arg).addDependedBy((Service) bean);
                        ((Service) bean).addDependsOn((Service) arg);
                    }
                }
            }
        }
    };
}
adds hooks to capture autowired constructor args and add them as dependencies
1,025
/**
 * Executes an op, possibly blocking until its result is available —
 * this one blocks!
 *
 * @param op the operation to run
 * @return the op's result, or null when the calling thread was interrupted
 * @throws TimeoutException      when the op does not complete in time
 * @throws CancellationException when the op was cancelled
 */
private Object doOp(Op op) throws TimeoutException {
    try {
        if (op.txn != null)
            op.txn.add(op);
        Object result = runOp(op);
        if (result == PENDING)
            // Not done yet: block (bounded) for the asynchronous result.
            return op.getResult(timeout, TimeUnit.MILLISECONDS);
        else if (result == null && op.isCancelled())
            throw new CancellationException();
        else
            return result;
    } catch (java.util.concurrent.TimeoutException e) {
        // Translate to the project's own TimeoutException type.
        throw new TimeoutException(e);
    } catch (InterruptedException e) {
        // Fixed: restore the interrupt status instead of silently swallowing
        // it; callers still observe null as "interrupted".
        Thread.currentThread().interrupt();
        return null;
    } catch (ExecutionException e) {
        Throwable ex = e.getCause();
        if (ex instanceof TimeoutException)
            throw (TimeoutException) ex;
        Throwables.propagateIfPossible(ex);
        throw Throwables.propagate(ex);
    }
}
This one blocks!
1,026
/**
 * Special handling for MSG (messenger) messages. Returns false when the
 * message is not a messenger message (so other handlers run); returns true
 * once it has been consumed here — even when dropped because no receiver
 * is registered.
 *
 * @param message the incoming message
 * @return true when the message was handled as a messenger message
 */
private boolean handleMessageMessengerMsg(Message.MSG message) {
    if (!message.isMessenger())
        return false;
    // No receiver registered: swallow the message but report it handled.
    if (receiver == null)
        return true;
    setOwnerClockPut(message);
    // Line -1 means the message is not bound to any cache line.
    if (message.getLine() == -1) {
        receiver.receive(message);
        if (message.isReplyRequired())
            send(Message.MSGACK(message));
        return true;
    }
    CacheLine line = getLine(message.getLine());
    if (line == null) {
        boolean res = handleMessageNoLine(message);
        assert res;
        return true;
    }
    synchronized (line) {
        // Ownership is checked and the receiver invoked under the line lock;
        // the ack below is deliberately sent outside it.
        if (handleNotOwner(message, line))
            return true;
        receiver.receive(message);
    }
    if (message.isReplyRequired())
        send(Message.MSGACK(message));
    return true;
}
Special handling for msg .
1,027
/**
 * Builds a predicate from a compare operation and a column name.
 *
 * @param p          the compare operation
 * @param columnName the name of the referenced column
 * @return a predicate comparing against the named column
 */
public static P<Object> propertyRef(Compare p, String columnName) {
    final PropertyReference reference = new PropertyReference(columnName);
    return new RefP(p, reference);
}
build a predicate from a compare operation and a column name
1,028
/**
 * Constructs a sql statement for one original path to a leaf node. As the
 * path contains the same label more than once, it has been split into a
 * list of stacks; each stack becomes a sub-select aliased a1, a2, ... and
 * consecutive sub-selects are INNER JOINed on their shared table.
 *
 * @param sqlgGraph           the graph providing the dialect
 * @param subQueryLinkedLists the path split into duplicate-free sections
 * @param leftJoinOn          tables the last section must left-join on
 * @return the assembled SELECT ... FROM (...) a1 INNER JOIN (...) a2 ... sql
 */
private String constructDuplicatePathSql(SqlgGraph sqlgGraph, List<LinkedList<SchemaTableTree>> subQueryLinkedLists, Set<SchemaTableTree> leftJoinOn) {
    StringBuilder singlePathSql = new StringBuilder("\nFROM (");
    // count is 1-based and doubles as the alias number (a1, a2, ...);
    // it is incremented inside the append chains below.
    int count = 1;
    SchemaTableTree lastOfPrevious = null;
    for (LinkedList<SchemaTableTree> subQueryLinkedList : subQueryLinkedLists) {
        SchemaTableTree firstOfNext = null;
        boolean last = count == subQueryLinkedLists.size();
        if (!last) {
            // Peek at the next section so the sub-select can expose the
            // column the following join needs.
            LinkedList<SchemaTableTree> nextList = subQueryLinkedLists.get(count);
            firstOfNext = nextList.getFirst();
        }
        SchemaTableTree firstSchemaTableTree = subQueryLinkedList.getFirst();
        String sql;
        if (last) {
            // Only the last section applies the left joins and drop flag.
            sql = constructSinglePathSql(sqlgGraph, true, subQueryLinkedList, lastOfPrevious, null, leftJoinOn, false);
        } else {
            sql = constructSinglePathSql(sqlgGraph, true, subQueryLinkedList, lastOfPrevious, firstOfNext);
        }
        singlePathSql.append(sql);
        if (count == 1) {
            singlePathSql.append("\n) a").append(count++).append(" INNER JOIN (");
        } else {
            singlePathSql.append("\n) a").append(count).append(" ON ");
            singlePathSql.append(constructSectionedJoin(sqlgGraph, lastOfPrevious, firstSchemaTableTree, count));
            if (count++ < subQueryLinkedLists.size()) {
                singlePathSql.append(" INNER JOIN (");
            }
        }
        lastOfPrevious = subQueryLinkedList.getLast();
    }
    singlePathSql.append(constructOuterOrderByClause(sqlgGraph, subQueryLinkedLists));
    String result = "SELECT\n\t" + constructOuterFromClause(subQueryLinkedLists);
    return result + singlePathSql;
}
Constructs a sql statement for one original path to a leaf node. As the path contains the same label more than once, it's been split into a list of stacks.
1,029
/**
 * Constructs a sql select statement from the SchemaTableTree call stack.
 * The SchemaTableTree is not used as a tree here, only as a SchemaTable
 * with a direction; lastOfPrevious/firstOfNextStack let adjoining
 * sub-selects expose the columns their joins need. Pure delegation to
 * constructSelectSinglePathSql.
 *
 * @param sqlgGraph            the graph providing the dialect
 * @param partOfDuplicateQuery whether this select is one section of a
 *                             duplicate-label path
 * @param distinctQueryStack   the duplicate-free section to render
 * @param lastOfPrevious       last element of the preceding section, or null
 * @param firstOfNextStack     first element of the following section, or null
 * @param leftJoinOn           tables to left-join on, or null
 * @param dropStep             whether this select feeds a drop step
 * @return the rendered select statement
 */
private String constructSinglePathSql(SqlgGraph sqlgGraph, boolean partOfDuplicateQuery, LinkedList<SchemaTableTree> distinctQueryStack, SchemaTableTree lastOfPrevious, SchemaTableTree firstOfNextStack, Set<SchemaTableTree> leftJoinOn, boolean dropStep) {
    return constructSelectSinglePathSql(sqlgGraph, partOfDuplicateQuery, distinctQueryStack, lastOfPrevious, firstOfNextStack, leftJoinOn, dropStep);
}
Constructs a sql select statement from the SchemaTableTree call stack. The SchemaTableTree is not used as a tree; it is used only as a SchemaTable with a direction. first and last are needed to facilitate generating the from statement. If both first and last are true then the gremlin does not contain duplicate labels in its path and can be executed in one sql statement. If first and last are not equal then the sql will join across many select statements: the previous select needs to join onto the subsequent select, and for this the from needs to select the appropriate field for the join.
1,030
/**
 * Checks if the stack contains the same SchemaTable more than once.
 *
 * @param distinctQueryStack the stack to inspect
 * @return true when any SchemaTable appears at least twice
 */
public boolean duplicatesInStack(LinkedList<SchemaTableTree> distinctQueryStack) {
    final Set<SchemaTable> seen = new HashSet<>();
    for (final SchemaTableTree schemaTableTree : distinctQueryStack) {
        // Set#add returns false when the element was already present.
        if (!seen.add(schemaTableTree.getSchemaTable())) {
            return true;
        }
    }
    return false;
}
Checks if the stack has the same element more than once .
1,031
/**
 * If emit is true then the edge id also needs to be printed. This is
 * required when there are multiple edges to the same vertex: only with the
 * edge id can one tell whether the vertex needs to be emitted. The first
 * element of the stack has no incoming edge and is skipped.
 *
 * @param distinctQueryStack the query stack being rendered
 * @param cols               the column list to append edge-id columns to
 */
private static void constructEmitFromClause(LinkedList<SchemaTableTree> distinctQueryStack, ColumnList cols) {
    boolean first = true;
    for (SchemaTableTree schemaTableTree : distinctQueryStack) {
        if (!first && !schemaTableTree.getSchemaTable().isEdgeTable() && schemaTableTree.isEmit()) {
            // Emitted vertex mid-path: expose the id of the edge leading to it.
            printEdgeId(schemaTableTree.parent, cols);
        }
        first = false;
    }
}
If emit is true then the edge id also needs to be printed . This is required when there are multiple edges to the same vertex . Only by having access to the edge id can on tell if the vertex needs to be emitted .
1,032
/**
 * Removes has containers that are not valid anymore and transforms has
 * containers that are equivalent to simpler statements:
 * label filters are dropped (handled elsewhere), NULL-existence checks on
 * unknown columns are dropped (trivially true), and "without(empty)" is
 * rewritten to a NOTNULL existence check.
 *
 * @param schemaTableTree the tree node whose hasContainers are normalized
 */
private void removeOrTransformHasContainers(final SchemaTableTree schemaTableTree) {
    Set<HasContainer> toRemove = new HashSet<>();
    Set<HasContainer> toAdd = new HashSet<>();
    for (HasContainer hasContainer : schemaTableTree.hasContainers) {
        // Label predicates are resolved during schema-table selection.
        if (hasContainer.getKey().equals(label.getAccessor())) {
            toRemove.add(hasContainer);
        }
        // hasNot on a column that does not exist is always satisfied.
        if (Existence.NULL.equals(hasContainer.getBiPredicate())) {
            if (!this.getFilteredAllTables().get(schemaTableTree.getSchemaTable().toString()).containsKey(hasContainer.getKey())) {
                toRemove.add(hasContainer);
            }
        }
        // without(<empty collection>) is equivalent to "value is not null".
        if (Contains.without.equals(hasContainer.getBiPredicate())) {
            Object o = hasContainer.getValue();
            if (o instanceof Collection && ((Collection<?>) o).size() == 0) {
                toRemove.add(hasContainer);
                toAdd.add(new HasContainer(hasContainer.getKey(), new P<>(Existence.NOTNULL, null)));
            }
        }
    }
    schemaTableTree.hasContainers.removeAll(toRemove);
    schemaTableTree.hasContainers.addAll(toAdd);
}
Removes has containers that are not valid anymore; transforms has containers that are equivalent to simpler statements.
1,033
/**
 * Verifies the has containers against the given schema table tree: returns
 * true when any predicate can never be satisfied for this table (so the
 * whole branch can be pruned). Id predicates are checked against the id's
 * schema table; "within" collections are narrowed in place to the matching
 * ids; unknown-column predicates invalidate unless they are NULL-existence
 * checks; full-text predicates are never used to invalidate.
 *
 * @param schemaTableTree the tree node whose hasContainers are validated
 * @return true when the node cannot possibly match and may be pruned
 */
@SuppressWarnings("unchecked")
private boolean invalidateByHas(SchemaTableTree schemaTableTree) {
    for (HasContainer hasContainer : schemaTableTree.hasContainers) {
        if (!hasContainer.getKey().equals(TopologyStrategy.TOPOLOGY_SELECTION_SQLG_SCHEMA) && !hasContainer.getKey().equals(TopologyStrategy.TOPOLOGY_SELECTION_GLOBAL_UNIQUE_INDEX)) {
            if (hasContainer.getKey().equals(label.getAccessor())) {
                // Label containers are stripped earlier; reaching here is a bug.
                Preconditions.checkState(false, "label hasContainers should have been removed by now.");
            } else if (hasContainer.getKey().equals(T.id.getAccessor())) {
                if (hasContainer.getBiPredicate().equals(Compare.eq)) {
                    Object value = hasContainer.getValue();
                    SchemaTable hasContainerLabelSchemaTable = getIDContainerSchemaTable(schemaTableTree, value);
                    // An id belonging to a different table can never match here.
                    if (!hasContainerLabelSchemaTable.equals(schemaTableTree.getSchemaTable())) {
                        return true;
                    }
                } else if (hasContainer.getBiPredicate().equals(Contains.within)) {
                    Collection<?> c = (Collection<?>) hasContainer.getPredicate().getValue();
                    Iterator<?> it = c.iterator();
                    Collection<Object> ok = new LinkedList<>();
                    while (it.hasNext()) {
                        Object value = it.next();
                        SchemaTable hasContainerLabelSchemaTable = getIDContainerSchemaTable(schemaTableTree, value);
                        if (hasContainerLabelSchemaTable.equals(schemaTableTree.getSchemaTable())) {
                            ok.add(value);
                        }
                    }
                    if (ok.isEmpty()) {
                        return true;
                    }
                    // Narrow the predicate to only the ids of this table.
                    ((P<Collection<Object>>) (hasContainer.getPredicate())).setValue(ok);
                }
            } else {
                // Full-text queries are evaluated by the database, not here.
                if (hasContainer.getBiPredicate() instanceof FullText && ((FullText) hasContainer.getBiPredicate()).getQuery() != null) {
                    return false;
                }
                // A predicate on a column this table lacks can only hold when
                // it is an "is null" existence check.
                if (!this.getFilteredAllTables().get(schemaTableTree.getSchemaTable().toString()).containsKey(hasContainer.getKey())) {
                    if (!Existence.NULL.equals(hasContainer.getBiPredicate())) {
                        return true;
                    }
                }
                // within(<empty>) can never match anything.
                if (hasEmptyWithin(hasContainer)) {
                    return true;
                }
            }
        } else {
            throw new IllegalStateException();
        }
    }
    return false;
}
verify the has containers we have are valid with the schema table tree given
1,034
/**
 * Decides whether the given property must be included in the select clause.
 * Everything is selected under eager loading or when no restriction set was
 * supplied; otherwise only explicitly restricted properties are selected.
 *
 * @param property the property name
 * @return true when the property should be selected
 */
public boolean shouldSelectProperty(String property) {
    // Fixed: the original tested getRoot().eagerLoad twice across two
    // if-statements; the three conditions collapse into one expression
    // with identical semantics.
    return getRoot().eagerLoad
            || restrictedProperties == null
            || restrictedProperties.contains(property);
}
should we select the given property?
1,035
/**
 * Calculates property restrictions from the explicit restrictions and the
 * properties the order-by comparators require: every property referenced by
 * an ElementValueComparator, or by an ordered ElementValue/Token traversal,
 * is added to the restricted set so it is fetched. No-op when no
 * restriction set exists (everything is selected anyway).
 */
private void calculatePropertyRestrictions() {
    if (restrictedProperties == null) {
        return;
    }
    for (org.javatuples.Pair<Traversal.Admin<?, ?>, Comparator<?>> comparator : this.getDbComparators()) {
        if (comparator.getValue1() instanceof ElementValueComparator) {
            restrictedProperties.add(((ElementValueComparator<?>) comparator.getValue1()).getPropertyKey());
        } else if ((comparator.getValue0() instanceof ElementValueTraversal<?> || comparator.getValue0() instanceof TokenTraversal<?, ?>) && comparator.getValue1() instanceof Order) {
            Traversal.Admin<?, ?> t = comparator.getValue0();
            String key;
            if (t instanceof ElementValueTraversal) {
                ElementValueTraversal<?> elementValueTraversal = (ElementValueTraversal<?>) t;
                key = elementValueTraversal.getPropertyKey();
            } else {
                TokenTraversal<?, ?> tokenTraversal = (TokenTraversal<?, ?>) t;
                // T.id is stored under the topology's ID column name.
                if (tokenTraversal.getToken().equals(T.id)) {
                    key = Topology.ID;
                } else {
                    key = tokenTraversal.getToken().getAccessor();
                }
            }
            if (key != null) {
                restrictedProperties.add(key);
            }
        }
    }
}
calculate property restrictions from explicit restrictions and required properties
1,036
/**
 * Detects the case where a SqlgVertexStep has a SelectOne step whose label
 * refers to an element on the path BEFORE the currently optimized steps —
 * i.e. an order-by select key that does not appear among the labels of the
 * linear path to the leaf node.
 *
 * @return true when an order-by's select key is not a label in this tree
 */
public boolean orderByHasSelectOneStepAndForLabelNotInTree() {
    Set<String> labels = new HashSet<>();
    for (ReplacedStep<?, ?> replacedStep : linearPathToLeafNode()) {
        // BUG FIX: the original iterated over the freshly created, empty
        // 'labels' set here, so no label was ever collected and every select
        // key appeared "not in tree". Collect the step's own labels instead.
        // (Assumes ReplacedStep exposes getLabels() — verify against the class.)
        for (String label : replacedStep.getLabels()) {
            labels.add(SqlgUtil.originalLabel(label));
        }
        for (Pair<Traversal.Admin<?, ?>, Comparator<?>> objects : replacedStep.getSqlgComparatorHolder().getComparators()) {
            Traversal.Admin<?, ?> traversal = objects.getValue0();
            if (traversal.getSteps().size() == 1 && traversal.getSteps().get(0) instanceof SelectOneStep) {
                SelectOneStep selectOneStep = (SelectOneStep) traversal.getSteps().get(0);
                Preconditions.checkState(selectOneStep.getScopeKeys().size() == 1,
                        "toOrderByClause expects the selectOneStep to have one scopeKey!");
                Preconditions.checkState(selectOneStep.getLocalChildren().size() == 1,
                        "toOrderByClause expects the selectOneStep to have one traversal!");
                Preconditions.checkState(selectOneStep.getLocalChildren().get(0) instanceof ElementValueTraversal
                                || selectOneStep.getLocalChildren().get(0) instanceof TokenTraversal,
                        "toOrderByClause expects the selectOneStep's traversal to be a ElementValueTraversal or a TokenTraversal!");
                String selectKey = (String) selectOneStep.getScopeKeys().iterator().next();
                if (!labels.contains(selectKey)) {
                    return true;
                }
            }
        }
    }
    return false;
}
This happens when a SqlgVertexStep has a SelectOne step where the label is for an element on the path that is before the current optimized steps .
1,037
/**
 * Updates sqlg_schema.V_graph's version to the new version and returns the
 * old version. The stored version, database product version, and
 * updated-on timestamp are only written when the version actually changed.
 *
 * @param sqlgGraph the graph whose topology vertex is updated
 * @param version   the new version string
 * @return the previously stored version
 */
public static String updateGraph(SqlgGraph sqlgGraph, String version) {
    Connection conn = sqlgGraph.tx().getConnection();
    try {
        DatabaseMetaData metadata = conn.getMetaData();
        GraphTraversalSource traversalSource = sqlgGraph.topology();
        List<Vertex> graphVertices = traversalSource.V().hasLabel(SQLG_SCHEMA + "." + Topology.SQLG_SCHEMA_GRAPH).toList();
        // The topology schema maintains exactly one graph vertex.
        Preconditions.checkState(graphVertices.size() == 1, "BUG: There can only ever be one graph vertex, found %s", graphVertices.size());
        Vertex graph = graphVertices.get(0);
        String oldVersion = graph.value(SQLG_SCHEMA_GRAPH_VERSION);
        if (!oldVersion.equals(version)) {
            graph.property(SQLG_SCHEMA_GRAPH_VERSION, version);
            graph.property(SQLG_SCHEMA_GRAPH_DB_VERSION, metadata.getDatabaseProductVersion());
            graph.property(UPDATED_ON, LocalDateTime.now());
        }
        return oldVersion;
    } catch (Exception e) {
        // SQLException from metadata access, among others, is wrapped.
        throw new RuntimeException(e);
    }
}
Updates sqlg_schema.V_graph's version to the new version and returns the old version.
1,038
public static void addIndex ( SqlgGraph sqlgGraph , String schema , String label , boolean vertex , String index , IndexType indexType , List < String > properties ) { BatchManager . BatchModeType batchModeType = flushAndSetTxToNone ( sqlgGraph ) ; try { GraphTraversalSource traversalSource = sqlgGraph . topology ( ) ; List < Vertex > abstractLabelVertexes ; if ( vertex ) { abstractLabelVertexes = traversalSource . V ( ) . hasLabel ( SQLG_SCHEMA + "." + SQLG_SCHEMA_SCHEMA ) . has ( SQLG_SCHEMA_SCHEMA_NAME , schema ) . out ( SQLG_SCHEMA_SCHEMA_VERTEX_EDGE ) . has ( "name" , label ) . toList ( ) ; } else { abstractLabelVertexes = traversalSource . V ( ) . hasLabel ( SQLG_SCHEMA + "." + SQLG_SCHEMA_SCHEMA ) . has ( SQLG_SCHEMA_SCHEMA_NAME , schema ) . out ( SQLG_SCHEMA_SCHEMA_VERTEX_EDGE ) . out ( SQLG_SCHEMA_OUT_EDGES_EDGE ) . has ( "name" , label ) . dedup ( ) . toList ( ) ; } Preconditions . checkState ( ! abstractLabelVertexes . isEmpty ( ) , "AbstractLabel %s.%s does not exists" , schema , label ) ; Preconditions . checkState ( abstractLabelVertexes . size ( ) == 1 , "BUG: multiple AbstractLabels found for %s.%s" , schema , label ) ; Vertex abstractLabelVertex = abstractLabelVertexes . get ( 0 ) ; boolean createdIndexVertex = false ; Vertex indexVertex = null ; int ix = 0 ; for ( String property : properties ) { List < Vertex > propertyVertexes = traversalSource . V ( abstractLabelVertex ) . out ( vertex ? SQLG_SCHEMA_VERTEX_PROPERTIES_EDGE : SQLG_SCHEMA_EDGE_PROPERTIES_EDGE ) . has ( "name" , property ) . toList ( ) ; if ( ! createdIndexVertex && ! propertyVertexes . isEmpty ( ) ) { createdIndexVertex = true ; indexVertex = sqlgGraph . addVertex ( T . label , SQLG_SCHEMA + "." + SQLG_SCHEMA_INDEX , SQLG_SCHEMA_INDEX_NAME , index , SQLG_SCHEMA_INDEX_INDEX_TYPE , indexType . toString ( ) , CREATED_ON , LocalDateTime . now ( ) ) ; if ( vertex ) { abstractLabelVertex . addEdge ( SQLG_SCHEMA_VERTEX_INDEX_EDGE , indexVertex ) ; } else { abstractLabelVertex . 
addEdge ( SQLG_SCHEMA_EDGE_INDEX_EDGE , indexVertex ) ; } } if ( ! propertyVertexes . isEmpty ( ) ) { Preconditions . checkState ( propertyVertexes . size ( ) == 1 , "BUG: multiple Properties %s found for AbstractLabels found for %s.%s" , property , schema , label ) ; Preconditions . checkState ( indexVertex != null ) ; Vertex propertyVertex = propertyVertexes . get ( 0 ) ; indexVertex . addEdge ( SQLG_SCHEMA_INDEX_PROPERTY_EDGE , propertyVertex , SQLG_SCHEMA_INDEX_PROPERTY_EDGE_SEQUENCE , ix ) ; } } } finally { sqlgGraph . tx ( ) . batchMode ( batchModeType ) ; } }
add an index from information schema
1,039
public static void addSubPartition ( SqlgGraph sqlgGraph , Partition partition ) { AbstractLabel abstractLabel = partition . getAbstractLabel ( ) ; addSubPartition ( sqlgGraph , partition . getParentPartition ( ) . getParentPartition ( ) != null , abstractLabel instanceof VertexLabel , abstractLabel . getSchema ( ) . getName ( ) , abstractLabel . getName ( ) , partition . getParentPartition ( ) . getName ( ) , partition . getName ( ) , partition . getPartitionType ( ) , partition . getPartitionExpression ( ) , partition . getFrom ( ) , partition . getTo ( ) , partition . getIn ( ) ) ; }
Adds the partition to a partition . A new Vertex with label Partition is added and in linked to its parent with the SQLG_SCHEMA_PARTITION_PARTITION_EDGE edge label .
1,040
SqlgVertex putVertexIfAbsent ( SqlgGraph sqlgGraph , String schema , String table , Long id ) { RecordId recordId = RecordId . from ( SchemaTable . of ( schema , table ) , id ) ; SqlgVertex sqlgVertex ; if ( this . cacheVertices ) { sqlgVertex = this . vertexCache . get ( recordId ) ; if ( sqlgVertex == null ) { sqlgVertex = new SqlgVertex ( sqlgGraph , id , schema , table ) ; this . vertexCache . put ( recordId , sqlgVertex ) ; return sqlgVertex ; } } else { sqlgVertex = new SqlgVertex ( sqlgGraph , id , schema , table ) ; } return sqlgVertex ; }
The recordId is not referenced in the SqlgVertex . It is important that the value of the WeakHashMap does not reference the key .
1,041
private JsonNode toJson ( ) { ObjectNode result = new ObjectNode ( Topology . OBJECT_MAPPER . getNodeFactory ( ) ) ; ArrayNode propertyArrayNode = new ArrayNode ( Topology . OBJECT_MAPPER . getNodeFactory ( ) ) ; for ( PropertyColumn property : this . properties ) { ObjectNode objectNode = property . toNotifyJson ( ) ; objectNode . put ( "schemaName" , property . getParentLabel ( ) . getSchema ( ) . getName ( ) ) ; objectNode . put ( "abstractLabelLabel" , property . getParentLabel ( ) . getLabel ( ) ) ; propertyArrayNode . add ( objectNode ) ; } result . put ( "name" , getName ( ) ) ; result . set ( "properties" , propertyArrayNode ) ; return result ; }
JSON representation of committed state
1,042
public static ReplacedStep from ( Topology topology ) { ReplacedStep replacedStep = new ReplacedStep < > ( ) ; replacedStep . step = null ; replacedStep . labels = new HashSet < > ( ) ; replacedStep . topology = topology ; replacedStep . fake = true ; return replacedStep ; }
Used for SqlgVertexStepStrategy . It is a fake ReplacedStep to simulate the incoming vertex from which the traversal continues .
1,043
private Map < SchemaTable , List < Multimap < BiPredicate , RecordId > > > groupIdsBySchemaTable ( ) { Map < SchemaTable , List < Multimap < BiPredicate , RecordId > > > result = new HashMap < > ( ) ; for ( HasContainer idHasContainer : this . idHasContainers ) { Map < SchemaTable , Boolean > newHasContainerMap = new HashMap < > ( ) ; @ SuppressWarnings ( "unchecked" ) P < Object > idPredicate = ( P < Object > ) idHasContainer . getPredicate ( ) ; BiPredicate biPredicate = idHasContainer . getBiPredicate ( ) ; if ( biPredicate == Compare . eq && idPredicate . getValue ( ) instanceof Collection && ( ( Collection ) idPredicate . getValue ( ) ) . size ( ) > 1 ) { biPredicate = Contains . within ; } Multimap < BiPredicate , RecordId > biPredicateRecordIdMultimap ; if ( idPredicate . getValue ( ) instanceof Collection ) { @ SuppressWarnings ( "unchecked" ) Collection < Object > ids = ( Collection < Object > ) idPredicate . getValue ( ) ; for ( Object id : ids ) { RecordId recordId = RecordId . from ( id ) ; List < Multimap < BiPredicate , RecordId > > biPredicateRecordIdList = result . get ( recordId . getSchemaTable ( ) ) ; Boolean newHasContainer = newHasContainerMap . get ( recordId . getSchemaTable ( ) ) ; if ( biPredicateRecordIdList == null ) { biPredicateRecordIdList = new ArrayList < > ( ) ; biPredicateRecordIdMultimap = LinkedListMultimap . create ( ) ; biPredicateRecordIdList . add ( biPredicateRecordIdMultimap ) ; result . put ( recordId . getSchemaTable ( ) , biPredicateRecordIdList ) ; newHasContainerMap . put ( recordId . getSchemaTable ( ) , false ) ; } else if ( newHasContainer == null ) { biPredicateRecordIdMultimap = LinkedListMultimap . create ( ) ; biPredicateRecordIdList . add ( biPredicateRecordIdMultimap ) ; newHasContainerMap . put ( recordId . getSchemaTable ( ) , false ) ; } biPredicateRecordIdMultimap = biPredicateRecordIdList . get ( biPredicateRecordIdList . size ( ) - 1 ) ; biPredicateRecordIdMultimap . 
put ( biPredicate , recordId ) ; } } else { Object id = idPredicate . getValue ( ) ; RecordId recordId = RecordId . from ( id ) ; List < Multimap < BiPredicate , RecordId > > biPredicateRecordIdList = result . computeIfAbsent ( recordId . getSchemaTable ( ) , k -> new ArrayList < > ( ) ) ; biPredicateRecordIdMultimap = LinkedListMultimap . create ( ) ; biPredicateRecordIdList . add ( biPredicateRecordIdMultimap ) ; biPredicateRecordIdMultimap . put ( biPredicate , recordId ) ; } } return result ; }
Groups the idHasContainers by SchemaTable . Each SchemaTable has a list representing the idHasContainers with the relevant BiPredicate and RecordId
1,044
private void captureLabels ( Step < ? , ? > step , Map < String , Object > stepsByLabel ) { for ( String s : step . getLabels ( ) ) { stepsByLabel . put ( s , step ) ; } if ( step instanceof SqlgGraphStep < ? , ? > ) { SqlgGraphStep < ? , ? > sgs = ( SqlgGraphStep < ? , ? > ) step ; for ( ReplacedStep < ? , ? > rs : sgs . getReplacedSteps ( ) ) { for ( String label : rs . getLabels ( ) ) { if ( label . contains ( BaseStrategy . PATH_LABEL_SUFFIX ) ) { stepsByLabel . put ( label . substring ( label . indexOf ( BaseStrategy . PATH_LABEL_SUFFIX ) + BaseStrategy . PATH_LABEL_SUFFIX . length ( ) ) , rs ) ; } else if ( label . contains ( BaseStrategy . EMIT_LABEL_SUFFIX ) ) { stepsByLabel . put ( label . substring ( label . indexOf ( BaseStrategy . EMIT_LABEL_SUFFIX ) + BaseStrategy . EMIT_LABEL_SUFFIX . length ( ) ) , rs ) ; } } } } }
add all labels for the step in the given map
1,045
private String createOrUpdateGraph ( String version ) { String oldVersion = null ; Connection conn = this . sqlgGraph . tx ( ) . getConnection ( ) ; try { DatabaseMetaData metadata = conn . getMetaData ( ) ; String [ ] types = new String [ ] { "TABLE" } ; try ( ResultSet vertexRs = metadata . getTables ( null , Schema . SQLG_SCHEMA , Topology . VERTEX_PREFIX + Topology . GRAPH , types ) ) { if ( ! vertexRs . next ( ) ) { try ( Statement statement = conn . createStatement ( ) ) { String sql = this . sqlDialect . sqlgCreateTopologyGraph ( ) ; statement . execute ( sql ) ; TopologyManager . addGraph ( this . sqlgGraph , version ) ; oldVersion = version ; } } else { try ( ResultSet columnRs = metadata . getColumns ( null , Schema . SQLG_SCHEMA , Topology . VERTEX_PREFIX + Topology . GRAPH , Topology . SQLG_SCHEMA_GRAPH_DB_VERSION ) ) { if ( ! columnRs . next ( ) ) { try ( Statement statement = conn . createStatement ( ) ) { statement . execute ( sqlDialect . addDbVersionToGraph ( metadata ) ) ; } } } GraphTraversalSource traversalSource = sqlgGraph . topology ( ) ; List < Vertex > graphVertices = traversalSource . V ( ) . hasLabel ( SQLG_SCHEMA + "." + Topology . SQLG_SCHEMA_GRAPH ) . toList ( ) ; if ( graphVertices . isEmpty ( ) ) { TopologyManager . addGraph ( this . sqlgGraph , version ) ; oldVersion = version ; } else { oldVersion = TopologyManager . updateGraph ( this . sqlgGraph , version ) ; } } return oldVersion ; } } catch ( SQLException e ) { throw new RuntimeException ( e ) ; } }
create or update the graph metadata
1,046
String getBuildVersion ( ) { if ( this . buildVersion == null ) { Properties prop = new Properties ( ) ; try { URL u = ClassLoader . getSystemResource ( SQLG_APPLICATION_PROPERTIES ) ; if ( u == null ) { u = getClass ( ) . getClassLoader ( ) . getResource ( SQLG_APPLICATION_PROPERTIES ) ; } if ( u != null ) { try ( InputStream is = u . openStream ( ) ) { prop . load ( is ) ; } this . buildVersion = prop . getProperty ( APPLICATION_VERSION ) ; } } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } return this . buildVersion ; }
get the build version
1,047
public String query ( String query ) { try { Connection conn = this . tx ( ) . getConnection ( ) ; ObjectNode result = this . mapper . createObjectNode ( ) ; ArrayNode dataNode = this . mapper . createArrayNode ( ) ; ArrayNode metaNode = this . mapper . createArrayNode ( ) ; Statement statement = conn . createStatement ( ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( query ) ; } ResultSet rs = statement . executeQuery ( query ) ; ResultSetMetaData rsmd = rs . getMetaData ( ) ; boolean first = true ; while ( rs . next ( ) ) { int numColumns = rsmd . getColumnCount ( ) ; ObjectNode obj = this . mapper . createObjectNode ( ) ; for ( int i = 1 ; i < numColumns + 1 ; i ++ ) { String columnName = rsmd . getColumnLabel ( i ) ; int type = rsmd . getColumnType ( i ) ; Object o = type == Types . ARRAY ? rs . getArray ( i ) : rs . getObject ( i ) ; this . sqlDialect . putJsonObject ( obj , columnName , type , o ) ; if ( first ) { this . sqlDialect . putJsonMetaObject ( this . mapper , metaNode , columnName , type , o ) ; } } first = false ; dataNode . add ( obj ) ; } result . set ( "data" , dataNode ) ; result . set ( "meta" , metaNode ) ; return result . toString ( ) ; } catch ( SQLException e ) { throw new RuntimeException ( e ) ; } finally { this . tx ( ) . rollback ( ) ; } }
This is executes a sql query and returns the result as a json string .
1,048
@ SuppressWarnings ( "unchecked" ) private E nextLazy ( ) { List < Emit < SqlgElement > > result = this . elements ; this . elements = null ; return ( E ) result ; }
return the next lazy results
1,049
@ SuppressWarnings ( "deprecation" ) private static Time shiftDST ( LocalTime lt ) { Time t = Time . valueOf ( lt ) ; int offset = Calendar . getInstance ( ) . get ( Calendar . DST_OFFSET ) / 1000 ; int m = t . getSeconds ( ) ; t . setSeconds ( m + offset ) ; return t ; }
Postgres gets confused by DST it sets the timezone badly and then reads the wrong value out so we convert the value to winter time
1,050
public PropertyType sqlTypeToPropertyType ( SqlgGraph sqlgGraph , String schema , String table , String column , int sqlType , String typeName , ListIterator < Triple < String , Integer , String > > metaDataIter ) { switch ( sqlType ) { case Types . BIT : return PropertyType . BOOLEAN ; case Types . SMALLINT : return PropertyType . SHORT ; case Types . INTEGER : return PropertyType . INTEGER ; case Types . BIGINT : return PropertyType . LONG ; case Types . REAL : return PropertyType . FLOAT ; case Types . DOUBLE : return PropertyType . DOUBLE ; case Types . VARCHAR : return PropertyType . STRING ; case Types . TIMESTAMP : return PropertyType . LOCALDATETIME ; case Types . DATE : return PropertyType . LOCALDATE ; case Types . TIME : return PropertyType . LOCALTIME ; case Types . OTHER : switch ( typeName ) { case "jsonb" : return PropertyType . JSON ; case "geometry" : return getPostGisGeometryType ( sqlgGraph , schema , table , column ) ; case "geography" : return getPostGisGeographyType ( sqlgGraph , schema , table , column ) ; default : throw new RuntimeException ( "Other type not supported " + typeName ) ; } case Types . BINARY : return BYTE_ARRAY ; case Types . ARRAY : return sqlArrayTypeNameToPropertyType ( typeName , sqlgGraph , schema , table , column , metaDataIter ) ; default : throw new IllegalStateException ( "Unknown sqlType " + sqlType ) ; } }
This is only used for upgrading from pre sqlg_schema sqlg to a sqlg_schema
1,051
private ListIterator < List < Emit < E > > > elements ( SchemaTable schemaTable , SchemaTableTree rootSchemaTableTree ) { this . sqlgGraph . tx ( ) . readWrite ( ) ; if ( this . sqlgGraph . getSqlDialect ( ) . supportsBatchMode ( ) && this . sqlgGraph . tx ( ) . getBatchManager ( ) . isStreaming ( ) ) { throw new IllegalStateException ( "streaming is in progress, first flush or commit before querying." ) ; } this . replacedStepTree . maybeAddLabelToLeafNodes ( ) ; rootSchemaTableTree . setParentIdsAndIndexes ( this . schemaTableParentIds . get ( schemaTable ) ) ; Set < SchemaTableTree > rootSchemaTableTrees = new HashSet < > ( ) ; rootSchemaTableTrees . add ( rootSchemaTableTree ) ; return new SqlgCompiledResultListIterator < > ( new SqlgCompiledResultIterator < > ( this . sqlgGraph , rootSchemaTableTrees , true ) ) ; }
Called from SqlgVertexStepCompiler which compiled VertexStep and HasSteps . This is only called when not in BatchMode
1,052
void removeColumn ( String schema , String table , String column ) { StringBuilder sql = new StringBuilder ( "ALTER TABLE " ) ; sql . append ( sqlgGraph . getSqlDialect ( ) . maybeWrapInQoutes ( schema ) ) ; sql . append ( "." ) ; sql . append ( sqlgGraph . getSqlDialect ( ) . maybeWrapInQoutes ( table ) ) ; sql . append ( " DROP COLUMN IF EXISTS " ) ; sql . append ( sqlgGraph . getSqlDialect ( ) . maybeWrapInQoutes ( column ) ) ; if ( sqlgGraph . getSqlDialect ( ) . supportsCascade ( ) ) { sql . append ( " CASCADE" ) ; } if ( sqlgGraph . getSqlDialect ( ) . needsSemicolon ( ) ) { sql . append ( ";" ) ; } if ( logger . isDebugEnabled ( ) ) { logger . debug ( sql . toString ( ) ) ; } Connection conn = sqlgGraph . tx ( ) . getConnection ( ) ; try ( Statement stmt = conn . createStatement ( ) ) { stmt . execute ( sql . toString ( ) ) ; } catch ( SQLException e ) { throw new RuntimeException ( e ) ; } }
remove a column from the table
1,053
void removeIndex ( Index idx , boolean preserveData ) { this . getSchema ( ) . getTopology ( ) . lock ( ) ; if ( ! uncommittedRemovedIndexes . contains ( idx . getName ( ) ) ) { uncommittedRemovedIndexes . add ( idx . getName ( ) ) ; TopologyManager . removeIndex ( this . sqlgGraph , idx ) ; if ( ! preserveData ) { idx . delete ( sqlgGraph ) ; } this . getSchema ( ) . getTopology ( ) . fire ( idx , "" , TopologyChangeAction . DELETE ) ; } }
remove a given index that was on this label
1,054
public static int setKeyValuesAsParameterUsingPropertyColumn ( SqlgGraph sqlgGraph , int i , PreparedStatement preparedStatement , Map < String , Pair < PropertyType , Object > > properties ) throws SQLException { i = setKeyValuesAsParameterUsingPropertyColumn ( sqlgGraph , true , i , preparedStatement , properties . values ( ) ) ; return i ; }
This is called for inserts
1,055
public static Triple < Map < String , PropertyType > , Map < String , Object > , Map < String , Object > > validateVertexKeysValues ( SqlDialect sqlDialect , Object [ ] keyValues ) { Map < String , Object > resultAllValues = new LinkedHashMap < > ( ) ; Map < String , Object > resultNotNullValues = new LinkedHashMap < > ( ) ; Map < String , PropertyType > keyPropertyTypeMap = new LinkedHashMap < > ( ) ; if ( keyValues . length % 2 != 0 ) throw Element . Exceptions . providedKeyValuesMustBeAMultipleOfTwo ( ) ; for ( int i = 0 ; i < keyValues . length ; i = i + 2 ) { if ( ! ( keyValues [ i ] instanceof String ) && ! ( keyValues [ i ] instanceof T ) ) { throw Element . Exceptions . providedKeyValuesMustHaveALegalKeyOnEvenIndices ( ) ; } if ( keyValues [ i ] . equals ( T . id ) ) { throw Vertex . Exceptions . userSuppliedIdsNotSupported ( ) ; } if ( ! keyValues [ i ] . equals ( T . label ) ) { String key = ( String ) keyValues [ i ] ; sqlDialect . validateColumnName ( key ) ; Object value = keyValues [ i + 1 ] ; ElementHelper . validateProperty ( key , value ) ; sqlDialect . validateProperty ( key , value ) ; if ( value != null ) { resultNotNullValues . put ( key , value ) ; keyPropertyTypeMap . put ( key , PropertyType . from ( value ) ) ; } else { keyPropertyTypeMap . put ( key , PropertyType . STRING ) ; } resultAllValues . put ( key , value ) ; } } return Triple . of ( keyPropertyTypeMap , resultAllValues , resultNotNullValues ) ; }
Validates the key values and converts it into a Triple with three maps . The left map is a map of keys together with their PropertyType . The middle map is a map of keys together with their values . The right map is a map of keys with values where the values are guaranteed not to be null .
1,056
public SchemaTableTree parse ( SchemaTable schemaTable , ReplacedStepTree replacedStepTree ) { ReplacedStep < ? , ? > rootReplacedStep = replacedStepTree . root ( ) . getReplacedStep ( ) ; Preconditions . checkArgument ( ! rootReplacedStep . isGraphStep ( ) , "Expected VertexStep, found GraphStep" ) ; Set < SchemaTableTree > schemaTableTrees = new HashSet < > ( ) ; SchemaTableTree rootSchemaTableTree = new SchemaTableTree ( this . sqlgGraph , schemaTable , 0 , replacedStepTree . getDepth ( ) ) ; rootSchemaTableTree . setOptionalLeftJoin ( rootReplacedStep . isLeftJoin ( ) ) ; rootSchemaTableTree . setEmit ( rootReplacedStep . isEmit ( ) ) ; rootSchemaTableTree . setUntilFirst ( rootReplacedStep . isUntilFirst ( ) ) ; rootSchemaTableTree . initializeAliasColumnNameMaps ( ) ; rootSchemaTableTree . setStepType ( schemaTable . isVertexTable ( ) ? SchemaTableTree . STEP_TYPE . VERTEX_STEP : SchemaTableTree . STEP_TYPE . EDGE_VERTEX_STEP ) ; schemaTableTrees . add ( rootSchemaTableTree ) ; replacedStepTree . walkReplacedSteps ( schemaTableTrees ) ; rootSchemaTableTree . removeNodesInvalidatedByHas ( ) ; rootSchemaTableTree . removeAllButDeepestAndAddCacheLeafNodes ( replacedStepTree . getDepth ( ) ) ; rootSchemaTableTree . setLocalStep ( true ) ; return rootSchemaTableTree ; }
This is only called for vertex steps . Constructs the label paths from the given schemaTable to the leaf vertex labels for the gremlin query . For each path Sqlg will executeRegularQuery a sql query . The union of the queries is the result the gremlin query . The vertex labels can be calculated from the steps .
1,057
private void createSchemaOnDb ( ) { StringBuilder sql = new StringBuilder ( ) ; sql . append ( topology . getSqlgGraph ( ) . getSqlDialect ( ) . createSchemaStatement ( this . name ) ) ; if ( this . sqlgGraph . getSqlDialect ( ) . needsSemicolon ( ) ) { sql . append ( ";" ) ; } if ( logger . isDebugEnabled ( ) ) { logger . debug ( sql . toString ( ) ) ; } Connection conn = this . sqlgGraph . tx ( ) . getConnection ( ) ; try ( Statement stmt = conn . createStatement ( ) ) { stmt . execute ( sql . toString ( ) ) ; } catch ( SQLException e ) { logger . error ( "schema creation failed " + this . sqlgGraph . toString ( ) , e ) ; throw new RuntimeException ( e ) ; } }
Creates a new schema on the database . i . e . CREATE SCHEMA ... sql statement .
1,058
Map < String , Map < String , PropertyType > > getAllTables ( ) { Map < String , Map < String , PropertyType > > result = new HashMap < > ( ) ; for ( Map . Entry < String , VertexLabel > vertexLabelEntry : this . vertexLabels . entrySet ( ) ) { String vertexQualifiedName = this . name + "." + VERTEX_PREFIX + vertexLabelEntry . getValue ( ) . getLabel ( ) ; result . put ( vertexQualifiedName , vertexLabelEntry . getValue ( ) . getPropertyTypeMap ( ) ) ; } if ( this . topology . isSqlWriteLockHeldByCurrentThread ( ) ) { for ( Map . Entry < String , VertexLabel > vertexLabelEntry : this . uncommittedVertexLabels . entrySet ( ) ) { String vertexQualifiedName = vertexLabelEntry . getKey ( ) ; VertexLabel vertexLabel = vertexLabelEntry . getValue ( ) ; result . put ( vertexQualifiedName , vertexLabel . getPropertyTypeMap ( ) ) ; } } for ( EdgeLabel edgeLabel : this . getEdgeLabels ( ) . values ( ) ) { String edgeQualifiedName = this . name + "." + EDGE_PREFIX + edgeLabel . getLabel ( ) ; result . put ( edgeQualifiedName , edgeLabel . getPropertyTypeMap ( ) ) ; } return result ; }
remove in favour of PropertyColumn
1,059
void loadVertexIndices ( GraphTraversalSource traversalSource , Vertex schemaVertex ) { List < Path > indices = traversalSource . V ( schemaVertex ) . out ( SQLG_SCHEMA_SCHEMA_VERTEX_EDGE ) . as ( "vertex" ) . out ( SQLG_SCHEMA_VERTEX_INDEX_EDGE ) . as ( "index" ) . outE ( SQLG_SCHEMA_INDEX_PROPERTY_EDGE ) . order ( ) . by ( SQLG_SCHEMA_INDEX_PROPERTY_EDGE_SEQUENCE ) . inV ( ) . as ( "property" ) . path ( ) . toList ( ) ; for ( Path vertexIndices : indices ) { Vertex vertexVertex = null ; Vertex vertexIndex = null ; Vertex propertyIndex = null ; List < Set < String > > labelsList = vertexIndices . labels ( ) ; for ( Set < String > labels : labelsList ) { for ( String label : labels ) { switch ( label ) { case "vertex" : vertexVertex = vertexIndices . get ( "vertex" ) ; break ; case "index" : vertexIndex = vertexIndices . get ( "index" ) ; break ; case "property" : propertyIndex = vertexIndices . get ( "property" ) ; break ; case BaseStrategy . SQLG_PATH_FAKE_LABEL : case BaseStrategy . SQLG_PATH_ORDER_RANGE_LABEL : case Schema . MARKER : break ; default : throw new IllegalStateException ( String . format ( "BUG: Only \"vertex\",\"index\" and \"property\" is expected as a label. Found %s" , label ) ) ; } } } Preconditions . checkState ( vertexVertex != null , "BUG: Topology vertex not found." ) ; String schemaName = schemaVertex . value ( SQLG_SCHEMA_SCHEMA_NAME ) ; String tableName = vertexVertex . value ( SQLG_SCHEMA_VERTEX_LABEL_NAME ) ; VertexLabel vertexLabel = this . vertexLabels . get ( schemaName + "." + VERTEX_PREFIX + tableName ) ; if ( vertexLabel == null ) { vertexLabel = new VertexLabel ( this , tableName ) ; this . vertexLabels . put ( schemaName + "." + VERTEX_PREFIX + tableName , vertexLabel ) ; } if ( vertexIndex != null ) { String indexName = vertexIndex . value ( SQLG_SCHEMA_INDEX_NAME ) ; Optional < Index > oidx = vertexLabel . getIndex ( indexName ) ; Index idx ; if ( oidx . isPresent ( ) ) { idx = oidx . 
get ( ) ; } else { idx = new Index ( indexName , IndexType . fromString ( vertexIndex . value ( SQLG_SCHEMA_INDEX_INDEX_TYPE ) ) , vertexLabel ) ; vertexLabel . addIndex ( idx ) ; } if ( propertyIndex != null ) { String propertyName = propertyIndex . value ( SQLG_SCHEMA_PROPERTY_NAME ) ; vertexLabel . getProperty ( propertyName ) . ifPresent ( ( PropertyColumn pc ) -> idx . addProperty ( pc ) ) ; } } } }
load indices for all vertices in schema
1,060
void removeEdgeLabel ( EdgeLabel edgeLabel , boolean preserveData ) { getTopology ( ) . lock ( ) ; String fn = this . name + "." + EDGE_PREFIX + edgeLabel . getName ( ) ; if ( ! uncommittedRemovedEdgeLabels . contains ( fn ) ) { uncommittedRemovedEdgeLabels . add ( fn ) ; TopologyManager . removeEdgeLabel ( this . sqlgGraph , edgeLabel ) ; for ( VertexLabel lbl : edgeLabel . getOutVertexLabels ( ) ) { lbl . removeOutEdge ( edgeLabel ) ; } for ( VertexLabel lbl : edgeLabel . getInVertexLabels ( ) ) { lbl . removeInEdge ( edgeLabel ) ; } if ( ! preserveData ) { edgeLabel . delete ( ) ; } getTopology ( ) . fire ( edgeLabel , "" , TopologyChangeAction . DELETE ) ; } }
remove a given edge label
1,061
void removeVertexLabel ( VertexLabel vertexLabel , boolean preserveData ) { getTopology ( ) . lock ( ) ; String fn = this . name + "." + VERTEX_PREFIX + vertexLabel . getName ( ) ; if ( ! uncommittedRemovedVertexLabels . contains ( fn ) ) { uncommittedRemovedVertexLabels . add ( fn ) ; TopologyManager . removeVertexLabel ( this . sqlgGraph , vertexLabel ) ; for ( EdgeRole er : vertexLabel . getOutEdgeRoles ( ) . values ( ) ) { er . remove ( preserveData ) ; } for ( EdgeRole er : vertexLabel . getInEdgeRoles ( ) . values ( ) ) { er . remove ( preserveData ) ; } if ( ! preserveData ) { vertexLabel . delete ( ) ; } getTopology ( ) . fire ( vertexLabel , "" , TopologyChangeAction . DELETE ) ; } }
remove a given vertex label
1,062
void removeGlobalUniqueIndex ( GlobalUniqueIndex index , boolean preserveData ) { getTopology ( ) . lock ( ) ; String fn = index . getName ( ) ; if ( ! uncommittedRemovedGlobalUniqueIndexes . contains ( fn ) ) { uncommittedRemovedGlobalUniqueIndexes . add ( fn ) ; TopologyManager . removeGlobalUniqueIndex ( sqlgGraph , fn ) ; if ( ! preserveData ) { getVertexLabel ( index . getName ( ) ) . ifPresent ( ( VertexLabel vl ) -> vl . remove ( false ) ) ; } getTopology ( ) . fire ( index , "" , TopologyChangeAction . DELETE ) ; } }
remove the given global unique index
1,063
void delete ( ) { StringBuilder sql = new StringBuilder ( ) ; sql . append ( "DROP SCHEMA " ) ; sql . append ( this . sqlgGraph . getSqlDialect ( ) . maybeWrapInQoutes ( this . name ) ) ; if ( this . sqlgGraph . getSqlDialect ( ) . supportsCascade ( ) ) { sql . append ( " CASCADE" ) ; } if ( this . sqlgGraph . getSqlDialect ( ) . needsSemicolon ( ) ) { sql . append ( ";" ) ; } if ( logger . isDebugEnabled ( ) ) { logger . debug ( sql . toString ( ) ) ; } Connection conn = this . sqlgGraph . tx ( ) . getConnection ( ) ; try ( Statement stmt = conn . createStatement ( ) ) { stmt . execute ( sql . toString ( ) ) ; } catch ( SQLException e ) { logger . error ( "schema deletion failed " + this . sqlgGraph . toString ( ) , e ) ; throw new RuntimeException ( e ) ; } }
delete schema in DB
1,064
public Optional < EdgeLabel > getOutEdgeLabel ( String edgeLabelName ) { EdgeLabel edgeLabel = getOutEdgeLabels ( ) . get ( this . schema . getName ( ) + "." + edgeLabelName ) ; if ( edgeLabel != null ) { return Optional . of ( edgeLabel ) ; } return Optional . empty ( ) ; }
Out EdgeLabels are always in the same schema as the this VertexLabel schema . So the edgeLabelName must not contain the schema prefix
1,065
void removeEdgeRole ( EdgeRole er , boolean preserveData ) { if ( er . getVertexLabel ( ) != this ) { throw new IllegalStateException ( "Trying to remove a EdgeRole from a non owner VertexLabel" ) ; } Collection < VertexLabel > ers ; switch ( er . getDirection ( ) ) { case BOTH : throw new IllegalStateException ( "BOTH is not a supported direction" ) ; case IN : ers = er . getEdgeLabel ( ) . getInVertexLabels ( ) ; break ; case OUT : ers = er . getEdgeLabel ( ) . getOutVertexLabels ( ) ; break ; default : throw new IllegalStateException ( "Unknown direction!" ) ; } if ( ! ers . contains ( this ) ) { throw new IllegalStateException ( "Trying to remove a EdgeRole from a non owner VertexLabel" ) ; } if ( ers . size ( ) == 1 ) { er . getEdgeLabel ( ) . remove ( preserveData ) ; } else { getSchema ( ) . getTopology ( ) . lock ( ) ; switch ( er . getDirection ( ) ) { case BOTH : throw new IllegalStateException ( "BOTH is not a supported direction" ) ; case IN : uncommittedRemovedInEdgeLabels . put ( er . getEdgeLabel ( ) . getFullName ( ) , EdgeRemoveType . ROLE ) ; er . getEdgeLabel ( ) . removeInVertexLabel ( this , preserveData ) ; break ; case OUT : uncommittedRemovedOutEdgeLabels . put ( er . getEdgeLabel ( ) . getFullName ( ) , EdgeRemoveType . ROLE ) ; er . getEdgeLabel ( ) . removeOutVertexLabel ( this , preserveData ) ; break ; } this . getSchema ( ) . getTopology ( ) . fire ( er , "" , TopologyChangeAction . DELETE ) ; } }
remove a given edge role
1,066
protected String escapeQuotes ( Object o ) { if ( o != null ) { return o . toString ( ) . replace ( "'" , "''" ) ; } return null ; }
escape quotes by doubling them when we need a string inside quotes
1,067
void delete ( ) { String schema = getSchema ( ) . getName ( ) ; String tableName = EDGE_PREFIX + getLabel ( ) ; SqlDialect sqlDialect = this . sqlgGraph . getSqlDialect ( ) ; sqlDialect . assertTableName ( tableName ) ; StringBuilder sql = new StringBuilder ( "DROP TABLE IF EXISTS " ) ; sql . append ( sqlDialect . maybeWrapInQoutes ( schema ) ) ; sql . append ( "." ) ; sql . append ( sqlDialect . maybeWrapInQoutes ( tableName ) ) ; if ( sqlDialect . supportsCascade ( ) ) { sql . append ( " CASCADE" ) ; } if ( logger . isDebugEnabled ( ) ) { logger . debug ( sql . toString ( ) ) ; } if ( sqlDialect . needsSemicolon ( ) ) { sql . append ( ";" ) ; } Connection conn = sqlgGraph . tx ( ) . getConnection ( ) ; try ( Statement stmt = conn . createStatement ( ) ) { stmt . execute ( sql . toString ( ) ) ; } catch ( SQLException e ) { throw new RuntimeException ( e ) ; } }
delete the table
1,068
void removeOutVertexLabel ( VertexLabel lbl , boolean preserveData ) { this . uncommittedRemovedOutVertexLabels . add ( lbl ) ; if ( ! preserveData ) { deleteColumn ( lbl . getFullName ( ) + Topology . OUT_VERTEX_COLUMN_END ) ; } }
remove a vertex label from the out collection
1,069
void removeInVertexLabel ( VertexLabel lbl , boolean preserveData ) { this . uncommittedRemovedInVertexLabels . add ( lbl ) ; if ( ! preserveData ) { deleteColumn ( lbl . getFullName ( ) + Topology . IN_VERTEX_COLUMN_END ) ; } }
remove a vertex label from the in collection
1,070
@ SuppressWarnings ( "unchecked" ) protected boolean doFirst ( ListIterator < Step < ? , ? > > stepIterator , Step < ? , ? > step , MutableInt pathCount ) { if ( step instanceof SelectOneStep ) { if ( stepIterator . hasNext ( ) ) { stepIterator . next ( ) ; return true ; } else { return false ; } } if ( ! ( step instanceof VertexStep || step instanceof EdgeVertexStep || step instanceof ChooseStep || step instanceof OptionalStep ) ) { return false ; } if ( step instanceof OptionalStep ) { if ( unoptimizableOptionalStep ( ( OptionalStep < ? > ) step ) ) { return false ; } } if ( step instanceof ChooseStep ) { if ( unoptimizableChooseStep ( ( ChooseStep < ? , ? , ? > ) step ) ) { return false ; } } this . sqlgStep = constructSqlgStep ( step ) ; TraversalHelper . insertBeforeStep ( this . sqlgStep , step , this . traversal ) ; return true ; }
EdgeOtherVertexStep can not be optimized as the direction information is lost .
1,071
/**
 * Counts entities matching a pre-populated criteria.
 * <p>
 * Resolves the entity's id attribute from the JPA metamodel and selects
 * {@code COUNT(DISTINCT id)} over the given criteria.
 *
 * @param criteria the criteria restricting which entities are counted
 * @return the number of distinct matching entities
 */
public Long count(Criteria<T, T> criteria) {
    SingularAttribute<? super T, PK> id = getEntityManager().getMetamodel().entity(entityClass).getId(entityKey);
    return criteria.select(Long.class, countDistinct(id)).getSingleResult();
}
Counts entities using a pre-populated criteria.
1,072
/**
 * Initializes the backing entity; called via preRenderView or viewAction.
 * <p>
 * Skipped on ajax requests. When an id is present but no entity is found,
 * the id is cleared and a fresh entity is initialized.
 */
public void init() {
    // Only run on full (non-ajax) requests.
    if (FacesContext.getCurrentInstance().getPartialViewContext().isAjaxRequest()) {
        return;
    }
    if (id == null || "".equals(id)) {
        return;
    }
    entity = crudService.findById(id);
    if (entity != null) {
        return;
    }
    log.info(String.format("Entity not found with id %s, a new one will be initialized.", id));
    id = null;
    entity = initEntity();
}
Called via preRenderView or viewAction.
1,073
/**
 * Converts a {@link List} to a comma-separated representation; any other
 * value is rendered via {@code String.valueOf} ({@code null} becomes "null").
 *
 * @param value the value to render; may be a list or any object
 * @return the string representation
 */
public String handle(final Object value) {
    if (!(value instanceof List)) {
        // Same null handling as StringBuilder.append(Object): null -> "null".
        return String.valueOf(value);
    }
    final StringBuilder joined = new StringBuilder();
    for (final Object element : (List<?>) value) {
        if (joined.length() > 0) {
            joined.append(',');
        }
        joined.append(element.toString());
    }
    return joined.toString();
}
Converts the list to a comma-separated representation.
1,074
/**
 * Merges these default options into the given request.
 * <p>
 * Each default is applied only when the request has not set the option itself.
 * Fix: viewBox and polygonFormat previously lacked the {@code null != field}
 * guard the other options have; the setters were invoked with {@code null}
 * defaults. Guards added for consistency (no observable change for plain
 * setters, since null was only ever written over an already-null value).
 *
 * @param request the request to fill with defaults
 */
void mergeTo(final NominatimSearchRequest request) {
    if (null == request.getBounded() && null != bounded) {
        request.setBounded(bounded);
    }
    if (null == request.getViewBox() && null != viewBox) {
        request.setViewBox(viewBox);
    }
    if (null == request.getPolygonFormat() && null != polygonFormat) {
        request.setPolygonFormat(polygonFormat);
    }
    if (null == request.getAcceptLanguage() && null != acceptLanguage) {
        request.setAcceptLanguage(acceptLanguage.toString());
    }
}
Merges these default options into the given request.
1,075
/**
 * Gets a field value regardless of reflection errors.
 *
 * @param o the object to read from
 * @param f the field to read
 * @return the field's value, or {@code null} when reflection fails
 */
private static Object getValue(final Object o, final Field f) {
    try {
        f.setAccessible(true);
        return f.get(o);
    } catch (final IllegalArgumentException | IllegalAccessException e) {
        // Multi-catch replaces two byte-identical catch blocks. SLF4J treats
        // the trailing throwable argument as the exception to log.
        LOGGER.error("failure accessing field value {}", f.getName(), e);
    }
    return null;
}
Gets a field value regardless of reflection errors .
1,076
/**
 * Adds an openstreetmap place_id to exclude from the search results.
 * <p>
 * Bug fix: the lazy-initialization guard previously tested
 * {@code this.countryCodes} (copy/paste error) instead of
 * {@code this.excludePlaceIds}, which could leave the list {@code null}
 * (NPE on add) or needlessly reset it.
 * (Method name typo "addExcludedlaceId" kept for caller compatibility.)
 *
 * @param placeId the place_id to skip
 */
public void addExcludedlaceId(final String placeId) {
    if (null == this.excludePlaceIds) {
        this.excludePlaceIds = new ArrayList<String>();
    }
    this.excludePlaceIds.add(placeId);
}
If you do not want certain OpenStreetMap objects to appear in the search results, give a list of the place_ids you want to skip.
1,077
/**
 * Configures the main arguments, hereby parsing and mapping them into
 * configuration properties.
 * <p>
 * Supported forms: {@code --key=value}, {@code --flag} (mapped to itself),
 * {@code -key value}, and bare values. Keys are prefixed with the
 * {@code main.args.prefix} system property when set.
 * Fix: an empty-string argument previously crashed on {@code arg.charAt(0)};
 * {@code startsWith("-")} handles it safely (it falls through to the bare
 * value branch).
 *
 * @param args the main arguments; may be {@code null}
 * @return a property source backed by the parsed arguments
 */
public static ConfigSource parseMainArgs(String... args) {
    Map<String, String> result;
    if (args == null) {
        result = Collections.emptyMap();
    } else {
        result = new HashMap<>();
        String prefix = System.getProperty("main.args.prefix", "");
        String key = null;
        for (String arg : args) {
            if (arg.startsWith("--")) {
                arg = arg.substring(2);
                int index = arg.indexOf('=');
                if (index > 0) {
                    key = arg.substring(0, index).trim();
                    result.put(prefix + key, arg.substring(index + 1).trim());
                    key = null;
                } else {
                    result.put(prefix + arg, arg);
                }
            } else if (arg.startsWith("-")) {
                // Short option: remember the key; next bare argument is its value.
                key = arg.substring(1);
            } else {
                if (key != null) {
                    result.put(prefix + key, arg);
                    key = null;
                } else {
                    result.put(prefix + arg, arg);
                }
            }
        }
    }
    return new CLIPropertySource(result);
}
Configures the main arguments, hereby parsing and mapping them into configuration properties.
1,078
/**
 * Modifies a String list of filenames to include an additional path.
 *
 * @param filenames      the filenames to prefix
 * @param additionalPath the path segment to prepend (joined with '/')
 * @return a new list with every filename prefixed
 */
private List<String> addPath(List<String> filenames, String additionalPath) {
    List<String> prefixed = new ArrayList<String>(filenames.size());
    for (int i = 0; i < filenames.size(); i++) {
        prefixed.add(additionalPath + '/' + filenames.get(i));
    }
    return prefixed;
}
Modifies a String list of filenames to include an additional path .
1,079
/**
 * Scans a fileset and returns the list of files which it contains.
 *
 * @param sourceDirectory the base directory to scan
 * @param fileSet         include/exclude patterns to apply
 * @return the included files, relative to the base directory
 */
private List<String> scanFileSet(File sourceDirectory, FileSet fileSet) {
    DirectoryScanner scanner = new DirectoryScanner();
    scanner.setBasedir(sourceDirectory);

    // Fall back to the default include patterns when none are configured.
    List<String> includes = fileSet.getIncludes();
    if (includes == null || includes.isEmpty()) {
        scanner.setIncludes(DEFAULT_INCLUDES);
    } else {
        scanner.setIncludes(includes.toArray(new String[0]));
    }

    List<String> excludes = fileSet.getExcludes();
    if (excludes != null && !excludes.isEmpty()) {
        scanner.setExcludes(excludes.toArray(new String[0]));
    }

    if (fileSet.isUseDefaultExcludes()) {
        scanner.addDefaultExcludes();
    }

    scanner.scan();
    return Arrays.asList(scanner.getIncludedFiles());
}
Scan a fileset and get a list of files which it contains .
1,080
/**
 * Opens the session manager.
 *
 * @return a future completed once the initial registration succeeds
 */
public CompletableFuture<Void> open() {
    // Registration runs on the manager's thread context, not the caller's thread.
    CompletableFuture<Void> opened = new CompletableFuture<>();
    context.executor().execute(() -> register(new RegisterAttempt(1, opened)));
    return opened;
}
Opens the session manager .
1,081
/**
 * Expires the manager: cancels the keep-alive and marks the session EXPIRED.
 *
 * @return a future completed once the state transition has run
 */
public CompletableFuture<Void> expire() {
    CompletableFuture<Void> expired = new CompletableFuture<>();
    context.executor().execute(() -> {
        if (keepAlive != null) {
            keepAlive.cancel();
        }
        state.setState(Session.State.EXPIRED);
        expired.complete(null);
    });
    return expired;
}
Expires the manager .
1,082
/**
 * Unregisters the session with the cluster.
 * <p>
 * Sends an UnregisterRequest to the leader and transitions the session state
 * according to the response: OK -> CLOSED, UNKNOWN_SESSION -> EXPIRED. On any
 * other failure, retries once against a refreshed server list when
 * {@code retryOnFailure} is set and a leader is known; otherwise marks the
 * session UNSTABLE and fails the future.
 *
 * @param retryOnFailure whether a single retry is allowed on failure
 * @param future         completed (or failed) when unregistration resolves
 */
private void unregister(boolean retryOnFailure, CompletableFuture<Void> future) {
    long sessionId = state.getSessionId();
    // Already closed: nothing to do.
    if (state.getState() == Session.State.CLOSED) {
        future.complete(null);
        return;
    }
    state.getLogger().debug("Unregistering session: {}", sessionId);
    // Stop keep-alives before tearing the session down.
    if (keepAlive != null) {
        keepAlive.cancel();
        keepAlive = null;
    }
    // An unstable session may hold a stale connection; reset before sending.
    if (state.getState() == Session.State.UNSTABLE) {
        connection.reset();
    }
    UnregisterRequest request = UnregisterRequest.builder().withSession(sessionId).build();
    state.getLogger().trace("{} - Sending {}", sessionId, request);
    connection.<UnregisterRequest, UnregisterResponse>sendAndReceive(request).whenComplete((response, error) -> {
        // A concurrent close may have already finished; only act if still open.
        if (state.getState() != Session.State.CLOSED) {
            if (error == null) {
                state.getLogger().trace("{} - Received {}", sessionId, response);
                if (response.status() == Response.Status.OK) {
                    state.setState(Session.State.CLOSED);
                    future.complete(null);
                } else if (response.error() == CopycatError.Type.UNKNOWN_SESSION_ERROR) {
                    // The cluster no longer knows the session: treat as expired.
                    state.setState(Session.State.EXPIRED);
                    future.complete(null);
                } else if (retryOnFailure && connection.leader() != null) {
                    // Retry exactly once (retryOnFailure=false on the recursion).
                    connection.reset(null, connection.servers());
                    unregister(false, future);
                } else {
                    state.setState(Session.State.UNSTABLE);
                    future.completeExceptionally(new ClosedSessionException("failed to unregister session"));
                }
            } else if (retryOnFailure && connection.leader() != null) {
                connection.reset(null, connection.servers());
                unregister(false, future);
            } else {
                state.setState(Session.State.UNSTABLE);
                future.completeExceptionally(new ClosedSessionException("failed to unregister session"));
            }
        }
    });
}
Unregisters the session .
1,083
/**
 * Kills the client session manager: cancels the keep-alive and closes the
 * session locally, without notifying the cluster.
 *
 * @return a future completed once the session is closed
 */
public CompletableFuture<Void> kill() {
    return CompletableFuture.runAsync(() -> {
        if (keepAlive != null) {
            keepAlive.cancel();
        }
        state.setState(Session.State.CLOSED);
    }, context.executor());
}
Kills the client session manager .
1,084
/**
 * Handles an append request.
 * <p>
 * Rejects (succeeded=false) when the request's term is stale; otherwise
 * delegates to {@link #checkGlobalIndex}.
 *
 * @param request the append request to handle
 * @return the append response
 */
protected AppendResponse handleAppend(AppendRequest request) {
    if (request.term() >= context.getTerm()) {
        return checkGlobalIndex(request);
    }
    // Stale term: reject, reporting our term and last log index to the sender.
    LOGGER.debug("{} - Rejected {}: request term is less than the current term ({})", context.getCluster().member().address(), request, context.getTerm());
    return AppendResponse.builder()
        .withStatus(Response.Status.OK)
        .withTerm(context.getTerm())
        .withSucceeded(false)
        .withLogIndex(context.getLog().lastIndex())
        .build();
}
Handles an append request .
1,085
/**
 * Checks whether local state needs to be reset based on the globalIndex,
 * then continues the append.
 *
 * @param request the append request being processed
 * @return the append response
 */
protected AppendResponse checkGlobalIndex(AppendRequest request) {
    long localGlobalIndex = context.getGlobalIndex();
    long requestGlobalIndex = request.globalIndex();
    // The leader's globalIndex has moved past everything we have locally.
    boolean globalIndexPassedLog = localGlobalIndex > 0
        && requestGlobalIndex > localGlobalIndex
        && requestGlobalIndex > context.getLog().lastIndex();
    if (globalIndexPassedLog) {
        context.setGlobalIndex(requestGlobalIndex);
        context.reset();
    }
    // A non-zero logIndex means a previous entry must be checked for consistency.
    return request.logIndex() != 0 ? checkPreviousEntry(request) : appendEntries(request);
}
Checks whether the log needs to be truncated based on the globalIndex .
1,086
/**
 * Checks the previous entry in the append request for consistency.
 * <p>
 * Rejects when the request references a previous index our log cannot
 * contain: the log is empty, or the previous index is beyond the log's last
 * index. Fix: the original had two branches with byte-identical bodies (same
 * log message and same response); they are merged into one condition with no
 * behavior change.
 *
 * @param request the append request to validate
 * @return a rejection response, or the result of appending the entries
 */
protected AppendResponse checkPreviousEntry(AppendRequest request) {
    if (request.logIndex() != 0
        && (context.getLog().isEmpty()
            || (context.getLog().lastIndex() != 0 && request.logIndex() > context.getLog().lastIndex()))) {
        LOGGER.debug("{} - Rejected {}: Previous index ({}) is greater than the local log's last index ({})", context.getCluster().member().address(), request, request.logIndex(), context.getLog().lastIndex());
        return AppendResponse.builder()
            .withStatus(Response.Status.OK)
            .withTerm(context.getTerm())
            .withSucceeded(false)
            .withLogIndex(context.getLog().lastIndex())
            .build();
    }
    return appendEntries(request);
}
Checks the previous entry in the append request for consistency .
1,087
/**
 * Appends entries to the local log and applies newly committed entries.
 * <p>
 * Only entries at or below the computed commit index are appended here;
 * gaps below an entry's index are skipped in the log. Afterwards the commit
 * and global indexes are advanced and the state machine applies everything
 * up to the commit index. Always responds succeeded=true.
 *
 * @param request the append request whose entries are appended
 * @return a successful append response carrying the local last log index
 */
protected AppendResponse appendEntries(AppendRequest request) {
    // The index of the last entry carried by the request (or its logIndex when empty).
    long lastEntryIndex = request.logIndex();
    if (!request.entries().isEmpty()) {
        lastEntryIndex = request.entries().get(request.entries().size() - 1).getIndex();
    }
    // Never move the commit index backwards, and never past the last received entry.
    long commitIndex = Math.max(context.getCommitIndex(), Math.min(request.commitIndex(), lastEntryIndex));
    for (Entry entry : request.entries()) {
        // Append only entries beyond our log that are already committed by the leader.
        if (context.getLog().lastIndex() < entry.getIndex() && entry.getIndex() <= commitIndex) {
            // Skip over any gap between our last index and the entry's index.
            context.getLog().skip(entry.getIndex() - context.getLog().lastIndex() - 1).append(entry);
            LOGGER.trace("{} - Appended {} to log at index {}", context.getCluster().member().address(), entry, entry.getIndex());
        }
    }
    long previousCommitIndex = context.getCommitIndex();
    context.setCommitIndex(commitIndex);
    context.setGlobalIndex(request.globalIndex());
    if (context.getCommitIndex() > previousCommitIndex) {
        LOGGER.trace("{} - Committed entries up to index {}", context.getCluster().member().address(), commitIndex);
    }
    // Apply all committed entries to the state machine.
    context.getStateMachine().applyAll(context.getCommitIndex());
    return AppendResponse.builder().withStatus(Response.Status.OK).withTerm(context.getTerm()).withSucceeded(true).withLogIndex(context.getLog().lastIndex()).build();
}
Appends entries to the local log .
1,088
/**
 * Performs a local query.
 *
 * @param entry the query entry to evaluate locally
 * @return a future completed with the query response
 */
protected CompletableFuture<QueryResponse> queryLocal(QueryEntry entry) {
    // sequenceQuery completes the future once the query has been sequenced/applied.
    CompletableFuture<QueryResponse> response = new CompletableFuture<>();
    sequenceQuery(entry, response);
    return response;
}
Performs a local query .
1,089
/**
 * Acquires a commit from the pool, allocating a new one when the pool is empty.
 *
 * @param entry     the operation entry backing the commit
 * @param session   the session that produced the operation
 * @param timestamp the commit timestamp
 * @return a reset commit ready for use
 */
public ServerCommit acquire(OperationEntry entry, ServerSessionContext session, long timestamp) {
    ServerCommit pooled = pool.poll();
    ServerCommit commit = (pooled != null) ? pooled : new ServerCommit(this, log);
    commit.reset(entry, session, timestamp);
    return commit;
}
Acquires a commit from the pool .
1,090
/**
 * Returns a list of reserve members sorted by the given comparator.
 *
 * @param comparator the ordering to apply
 * @return a new, sorted list of reserve member states
 */
List<MemberState> getReserveMemberStates(Comparator<MemberState> comparator) {
    // Copy first so sorting never mutates the underlying collection.
    List<MemberState> reserveMembers = new ArrayList<>(getReserveMemberStates());
    // List.sort (Java 8) instead of Collections.sort; the file already uses lambdas.
    reserveMembers.sort(comparator);
    return reserveMembers;
}
Returns a list of reserve members .
1,091
/**
 * Identifies this server to the leader.
 * <p>
 * If this server is itself a stable leader, the join future completes
 * immediately. Otherwise a ReconfigureRequest carrying this member is sent to
 * the leader; on success the join future completes, on a configuration
 * conflict or failure the identification is rescheduled after twice the
 * election timeout. When no leader is known (or no join is in progress) this
 * is a no-op.
 *
 * @return the join future (may be {@code null} when no join is in progress)
 */
CompletableFuture<Void> identify() {
    Member leader = context.getLeader();
    if (joinFuture != null && leader != null) {
        if (context.getLeader().equals(member())) {
            // We are the leader: complete only once leadership is stable and
            // no configuration change is in flight; otherwise retry later.
            if (context.getState() == CopycatServer.State.LEADER && !((LeaderState) context.getServerState()).configuring()) {
                if (joinFuture != null) joinFuture.complete(null);
            } else {
                cancelJoinTimer();
                joinTimeout = context.getThreadContext().schedule(context.getElectionTimeout().multipliedBy(2), this::identify);
            }
        } else {
            // Schedule a retry up front in case the request below never resolves.
            cancelJoinTimer();
            joinTimeout = context.getThreadContext().schedule(context.getElectionTimeout().multipliedBy(2), this::identify);
            LOGGER.debug("{} - Sending server identification to {}", member().address(), leader.address());
            context.getConnections().getConnection(leader.serverAddress()).thenCompose(connection -> {
                ReconfigureRequest request = ReconfigureRequest.builder()
                    .withIndex(configuration.index())
                    .withTerm(configuration.term())
                    .withMember(member())
                    .build();
                LOGGER.trace("{} - Sending {} to {}", member.address(), request, leader.address());
                return connection.<ConfigurationRequest, ConfigurationResponse>sendAndReceive(request);
            }).whenComplete((response, error) -> {
                cancelJoinTimer();
                if (error == null) {
                    LOGGER.trace("{} - Received {}", member.address(), response);
                    if (response.status() == Response.Status.OK) {
                        if (joinFuture != null) {
                            joinFuture.complete(null);
                        }
                    } else if (response.error() == null || response.error() == CopycatError.Type.CONFIGURATION_ERROR) {
                        // Another configuration change is in progress; retry later.
                        LOGGER.debug("{} - Failed to update configuration: configuration change already in progress", member.address());
                        joinTimeout = context.getThreadContext().schedule(context.getElectionTimeout().multipliedBy(2), this::identify);
                    }
                    // NOTE(review): other non-OK errors fall through without a
                    // reschedule here — presumably covered by a caller/timer; confirm.
                } else {
                    LOGGER.warn("{} - Failed to update configuration: {}", member.address(), error.getMessage());
                    joinTimeout = context.getThreadContext().schedule(context.getElectionTimeout().multipliedBy(2), this::identify);
                }
            });
        }
    }
    return joinFuture;
}
Identifies this server to the leader .
1,092
/**
 * Cancels the join timeout, if one is pending.
 */
private void cancelJoinTimer() {
    if (joinTimeout == null) {
        return;
    }
    LOGGER.trace("{} - Cancelling join timeout", member().address());
    joinTimeout.cancel();
    joinTimeout = null;
}
Cancels the join timeout .
1,093
/**
 * Cancels the leave timeout, if one is pending.
 */
private void cancelLeaveTimer() {
    if (leaveTimeout == null) {
        return;
    }
    LOGGER.trace("{} - Cancelling leave timeout", member().address());
    leaveTimeout.cancel();
    leaveTimeout = null;
}
Cancels the leave timeout .
1,094
/**
 * Rebuilds assigned member states.
 * <p>
 * Only a non-leader ACTIVE member receives assignments: its 1-based position
 * among the non-leader active members (ordered by id) selects which passive
 * members it is responsible for; everyone else gets an empty assignment.
 */
private void reassign() {
    if (member.type() == Member.Type.ACTIVE && !member.equals(context.getLeader())) {
        // Compute this member's 1-based position among non-leader active
        // members sorted by id.
        int index = 1;
        // NOTE(review): the loop variable shadows the 'member' field; the field
        // is reached via 'this.member' below. The comparators use int
        // subtraction, which overflows for extreme ids — confirm ids are small.
        for (MemberState member : getActiveMemberStates((m1, m2) -> m1.getMember().id() - m2.getMember().id())) {
            if (!member.getMember().equals(context.getLeader())) {
                if (this.member.id() < member.getMember().id()) {
                    index++;
                } else {
                    break;
                }
            }
        }
        List<MemberState> sortedPassiveMembers = getPassiveMemberStates((m1, m2) -> m1.getMember().id() - m2.getMember().id());
        assignedMembers = assignMembers(index, sortedPassiveMembers);
    } else {
        // Leaders and non-active members are assigned nothing.
        assignedMembers = new ArrayList<>(0);
    }
}
Rebuilds assigned member states .
1,095
/**
 * Assigns members by selecting every {@code index}-th entry (1-based) from
 * the sorted list.
 *
 * @param index         the selection stride; must be positive
 * @param sortedMembers the members to select from, already sorted
 * @return the selected members, in order
 */
private List<MemberState> assignMembers(int index, List<MemberState> sortedMembers) {
    List<MemberState> assigned = new ArrayList<>(sortedMembers.size());
    int position = 0;
    for (MemberState candidate : sortedMembers) {
        position++;
        if (position % index == 0) {
            assigned.add(candidate);
        }
    }
    return assigned;
}
Assigns members by selecting every n-th member from the sorted list.
1,096
/**
 * Creates a copy of offset predicates prior to compacting segments, to
 * prevent race conditions.
 */
private void copyPredicates() {
    predicates = new ArrayList<>(groups.size());
    for (List<Segment> segmentGroup : groups) {
        // One snapshot predicate per segment, in group order.
        List<OffsetPredicate> snapshots = new ArrayList<>(segmentGroup.size());
        for (Segment segment : segmentGroup) {
            snapshots.add(segment.offsetPredicate().copy());
        }
        predicates.add(snapshots);
    }
}
Creates a copy of offset predicates prior to compacting segments to prevent race conditions .
1,097
/**
 * Compacts a group of segments into a single replacement segment.
 * <p>
 * The compact segment reuses the first segment's id and index with a bumped
 * version; its size/entry limits are the larger of the group's maxima and the
 * storage configuration. After compaction the group is atomically replaced in
 * the segment manager.
 *
 * @param segments   the segments to compact (must be non-empty)
 * @param predicates the snapshotted offset predicates, parallel to segments
 * @return the new compact segment
 */
private Segment compactGroup(List<Segment> segments, List<OffsetPredicate> predicates) {
    Segment firstSegment = segments.iterator().next();
    Segment compactSegment = manager.createSegment(SegmentDescriptor.builder()
        .withId(firstSegment.descriptor().id())
        .withVersion(firstSegment.descriptor().version() + 1)
        .withIndex(firstSegment.descriptor().index())
        .withMaxSegmentSize(Math.max(segments.stream().mapToLong(s -> s.descriptor().maxSegmentSize()).max().getAsLong(), manager.storage().maxSegmentSize()))
        .withMaxEntries(Math.max(segments.stream().mapToInt(s -> s.descriptor().maxEntries()).max().getAsInt(), manager.storage().maxEntriesPerSegment()))
        .build());
    compactGroup(segments, predicates, compactSegment);
    manager.replaceSegments(segments, compactSegment);
    return compactSegment;
}
Compacts a group .
1,098
/**
 * Compacts segments in a group sequentially, pairing each segment with its
 * snapshotted predicate by position.
 *
 * @param segments       the segments to compact
 * @param predicates     the predicates, parallel to {@code segments}
 * @param compactSegment the destination segment
 */
private void compactGroup(List<Segment> segments, List<OffsetPredicate> predicates, Segment compactSegment) {
    int count = segments.size();
    for (int position = 0; position < count; position++) {
        compactSegment(segments.get(position), predicates.get(position), compactSegment);
    }
}
Compacts segments in a group sequentially .
1,099
/**
 * Compacts the given segment by checking every index in its range against
 * the destination segment.
 *
 * @param segment        the segment being compacted
 * @param predicate      the snapshotted offset predicate for the segment
 * @param compactSegment the destination segment
 */
private void compactSegment(Segment segment, OffsetPredicate predicate, Segment compactSegment) {
    for (long index = segment.firstIndex(); index <= segment.lastIndex(); index++) {
        checkEntry(index, segment, predicate, compactSegment);
    }
}
Compacts the given segment .