idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
8,500
/**
 * Creates a new stream {@link Consumer} for the given group and consumer name.
 *
 * @param group consumer group, must not be {@code null}
 * @param name  consumer name, must not be {@code null}
 * @return a new {@code Consumer} instance
 */
public static <K> Consumer<K> from(K group, K name) {
    LettuceAssert.notNull(group, "Group must not be null");
    LettuceAssert.notNull(name, "Name must not be null");
    final Consumer<K> consumer = new Consumer<>(group, name);
    return consumer;
}
Create a new consumer .
8,501
/**
 * Inserts a Size value into this Bundle, replacing any existing value for the key.
 * Either key or value may be null. Requires API level 21.
 *
 * @return this Bundler, for chaining
 */
@TargetApi(21)
public Bundler putSize(String key, Size value) {
    // Delegate straight to the wrapped Bundle.
    bundle.putSize(key, value);
    return this;
}
Inserts a Size value into the mapping of this Bundle replacing any existing value for the given key . Either key or value may be null .
8,502
/**
 * Inserts a SizeF value into this Bundle, replacing any existing value for the key.
 * Either key or value may be null. Requires API level 21.
 *
 * @return this Bundler, for chaining
 */
@TargetApi(21)
public Bundler putSizeF(String key, SizeF value) {
    // Delegate straight to the wrapped Bundle.
    bundle.putSizeF(key, value);
    return this;
}
Inserts a SizeF value into the mapping of this Bundle replacing any existing value for the given key . Either key or value may be null .
8,503
/**
 * Sets the accumulated bundle as the arguments of the given Fragment.
 *
 * @param fragment the fragment to receive the arguments
 * @return the same fragment, for chaining
 */
public <T extends Fragment> T into(T fragment) {
    fragment.setArguments(get());
    return fragment;
}
Set the argument of Fragment .
8,504
/**
 * Normalizes a route to have a leading slash and no trailing slash.
 * Note: the input "/" normalizes to the empty string.
 */
private String cleanRoute(String route) {
    // Ensure a leading slash.
    final String prefixed = route.startsWith("/") ? route : "/" + route;
    // Strip a single trailing slash, if present.
    return prefixed.endsWith("/")
            ? prefixed.substring(0, prefixed.length() - 1)
            : prefixed;
}
Ensures the route has a leading slash but no trailing slash.
8,505
/**
 * Decodes the 16-byte message id into a UUID (first 8 bytes = most significant bits,
 * next 8 bytes = least significant bits). Performance is irrelevant here; this is only
 * called during tracing.
 */
public UUID getMessageId() {
    final ByteBuffer wrapped = ByteBuffer.wrap(messageIdBytes);
    // Absolute reads: bytes 0-7 and 8-15, big-endian (ByteBuffer default).
    return new UUID(wrapped.getLong(0), wrapped.getLong(8));
}
Performance doesn't matter; it's only being called during tracing.
8,506
/**
 * Parses the given rule source and optionally compiles it to a generated Java class when a
 * classloader is supplied and code generation is enabled.
 *
 * @param id              the id to assign to the parsed rule
 * @param rule            the rule source text
 * @param silent          whether the parse context suppresses error reporting
 * @param ruleClassLoader classloader for generated rule classes; may be null to skip codegen
 * @throws ParseException when the rule source has syntax or type errors
 */
public Rule parseRule(String id, String rule, boolean silent, PipelineClassloader ruleClassLoader) throws ParseException {
    final ParseContext parseContext = new ParseContext(silent);
    final SyntaxErrorListener errorListener = new SyntaxErrorListener(parseContext);
    final RuleLangLexer lexer = new RuleLangLexer(new ANTLRInputStream(rule));
    // Replace ANTLR's default console listeners so errors are collected in the parse context.
    lexer.removeErrorListeners();
    lexer.addErrorListener(errorListener);
    final RuleLangParser parser = new RuleLangParser(new CommonTokenStream(lexer));
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.removeErrorListeners();
    parser.addErrorListener(errorListener);
    final RuleLangParser.RuleDeclarationContext ruleDeclaration = parser.ruleDeclaration();
    // Three passes over the parse tree: build the AST, annotate types, then type-check.
    WALKER.walk(new RuleAstBuilder(parseContext), ruleDeclaration);
    WALKER.walk(new RuleTypeAnnotator(parseContext), ruleDeclaration);
    WALKER.walk(new RuleTypeChecker(parseContext), ruleDeclaration);
    if (parseContext.getErrors().isEmpty()) {
        Rule parsedRule = parseContext.getRules().get(0).withId(id);
        if (ruleClassLoader != null && ConfigurationStateUpdater.isAllowCodeGeneration()) {
            try {
                final Class<? extends GeneratedRule> generatedClass = codeGenerator.generateCompiledRule(parsedRule, ruleClassLoader);
                if (generatedClass != null) {
                    parsedRule = parsedRule.toBuilder().generatedRuleClass(generatedClass).build();
                }
            } catch (Exception e) {
                // Code generation is best-effort: fall back to interpreting the rule on any failure.
                log.warn("Unable to compile rule {} to native code, falling back to interpreting it: {}", parsedRule.name(), e.getMessage());
            }
        }
        return parsedRule;
    }
    throw new ParseException(parseContext.getErrors());
}
Parses the given rule source and optionally generates a Java class for it if the classloader is not null .
8,507
/**
 * Evaluates all pipelines that apply to the given messages based on their current stream
 * routing. Messages that gained new streams during processing are re-processed for those
 * streams until no new streams are added or the message is dropped.
 */
public Messages process(Messages messages, InterpreterListener interpreterListener, State state) {
    interpreterListener.startProcessing();
    // (message id, stream id) pairs already handled, so re-runs only cover newly added streams.
    final Set<Tuple2<String, String>> processingBlacklist = Sets.newHashSet();
    final List<Message> toProcess = Lists.newArrayList(messages);
    final List<Message> fullyProcessed = Lists.newArrayListWithExpectedSize(toProcess.size());
    while (!toProcess.isEmpty()) {
        final MessageCollection currentSet = new MessageCollection(toProcess);
        // Collect messages needing another pass while iterating a stable snapshot.
        toProcess.clear();
        for (Message message : currentSet) {
            final String msgId = message.getId();
            final Set<String> initialStreamIds = message.getStreams().stream().map(Stream::getId).collect(Collectors.toSet());
            final ImmutableSet<Pipeline> pipelinesToRun = selectPipelines(interpreterListener, processingBlacklist, message, initialStreamIds, state.getStreamPipelineConnections());
            // Pipelines may emit additional messages which also need processing.
            toProcess.addAll(processForResolvedPipelines(message, msgId, pipelinesToRun, interpreterListener, state));
            boolean addedStreams = updateStreamBlacklist(processingBlacklist, message, initialStreamIds);
            potentiallyDropFilteredMessage(message);
            if (!addedStreams || message.getFilterOut()) {
        log.debug("[{}] no new streams matches or dropped message, not running again", msgId);
                fullyProcessed.add(message);
            } else {
                log.debug("[{}] new streams assigned, running again for those streams", msgId);
                toProcess.add(message);
            }
        }
    }
    interpreterListener.finishProcessing();
    return new MessageCollection(fullyProcessed);
}
Evaluates all pipelines that apply to the given messages based on the current stream routing of the messages .
8,508
/**
 * Processes the given message with the pipelines identified by the given ids, resolved
 * against the passed state. Unknown pipeline ids are silently skipped.
 */
public List<Message> processForPipelines(Message message, Set<String> pipelineIds, InterpreterListener interpreterListener, State state) {
    final Map<String, Pipeline> currentPipelines = state.getCurrentPipelines();
    final ImmutableSet.Builder<Pipeline> resolved = ImmutableSet.builder();
    for (String pipelineId : pipelineIds) {
        final Pipeline pipeline = currentPipelines.get(pipelineId);
        // Ids without a matching pipeline in the current state are dropped.
        if (pipeline != null) {
            resolved.add(pipeline);
        }
    }
    return processForResolvedPipelines(message, message.getId(), resolved.build(), interpreterListener, state);
}
Given a set of pipeline ids process the given message according to the passed state .
8,509
/**
 * Returns the list of streams matching the given message by evaluating all stream rules.
 * For AND-matching streams one failed rule disqualifies the stream; for OR-matching streams
 * one successful rule is enough. Matching streams configured to remove the default stream do
 * so, and incoming meters are marked for every matched stream (or the default stream).
 */
public List<Stream> match(Message message) {
    final Set<Stream> result = Sets.newHashSet();
    // Streams that need no further evaluation (AND match failed, or OR match already succeeded).
    final Set<Stream> blackList = Sets.newHashSet();
    for (final Rule rule : rulesList) {
        if (blackList.contains(rule.getStream())) {
            continue;
        }
        final StreamRule streamRule = rule.getStreamRule();
        final StreamRuleType streamRuleType = streamRule.getType();
        final Stream.MatchingType matchingType = rule.getMatchingType();
        if (!ruleTypesNotNeedingFieldPresence.contains(streamRuleType) && !message.hasField(streamRule.getField())) {
            // A missing field fails the whole stream under AND semantics.
            if (matchingType == Stream.MatchingType.AND) {
                result.remove(rule.getStream());
                blackList.add(rule.getStream());
            }
            continue;
        }
        final Stream stream;
        if (streamRuleType != StreamRuleType.REGEX) {
            stream = rule.match(message);
        } else {
            // Regex rules run with a timeout to guard against catastrophic backtracking.
            stream = rule.matchWithTimeOut(message, streamProcessingTimeout, TimeUnit.MILLISECONDS);
        }
        if (stream == null) {
            if (matchingType == Stream.MatchingType.AND) {
                result.remove(rule.getStream());
                blackList.add(rule.getStream());
            }
        } else {
            result.add(stream);
            // OR semantics: one matching rule suffices, skip remaining rules for this stream.
            if (matchingType == Stream.MatchingType.OR) {
                blackList.add(rule.getStream());
            }
        }
    }
    final Stream defaultStream = defaultStreamProvider.get();
    boolean alreadyRemovedDefaultStream = false;
    for (Stream stream : result) {
        streamMetrics.markIncomingMeter(stream.getId());
        if (stream.getRemoveMatchesFromDefaultStream()) {
            // Only the first removal mutates the message; later iterations just keep the flag.
            if (alreadyRemovedDefaultStream || message.removeStream(defaultStream)) {
                alreadyRemovedDefaultStream = true;
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Successfully removed default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
                }
            } else {
                if (LOG.isWarnEnabled()) {
                    LOG.warn("Couldn't remove default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
                }
            }
        }
    }
    // Messages still on the default stream count towards its incoming meter.
    if (!alreadyRemovedDefaultStream) {
        streamMetrics.markIncomingMeter(defaultStream.getId());
    }
    return ImmutableList.copyOf(result);
}
Returns a list of matching streams for the given message .
8,510
/**
 * Registers the "executed" meter for this stage with the given registry.
 * The meter name encodes the pipeline id and stage number.
 */
public void registerMetrics(MetricRegistry metricRegistry, String pipelineId) {
    final String stageNumber = String.valueOf(stage());
    meterName = name(Pipeline.class, pipelineId, "stage", stageNumber, "executed");
    executed = metricRegistry.meter(meterName);
}
Register the metrics attached to this stage .
8,511
/**
 * Checks whether this version is strictly higher than the other version,
 * considering only the major and minor components.
 */
public boolean greaterMinor(Version other) {
    // A strictly greater major version wins outright.
    if (this.major != other.major) {
        return this.major > other.major;
    }
    // Equal major versions fall back to comparing minor versions.
    return this.minor > other.minor;
}
Check if this version is higher than the passed other version . Only taking major and minor version number in account .
8,512
/**
 * Reads {@code size} bytes starting at absolute position {@code offset} from the given buffer.
 * <p>
 * Bug fix: the previous implementation passed {@code offset} as the <em>destination-array</em>
 * offset of {@link ByteBuffer#get(byte[], int, int)}, which read from the buffer's current
 * position instead and threw {@link IndexOutOfBoundsException} for any non-zero offset.
 * Reading through a duplicate leaves the source buffer's position and limit untouched.
 *
 * @param buffer the source buffer
 * @param offset absolute offset in the buffer to start reading from
 * @param size   number of bytes to read
 * @return a newly allocated array containing the requested bytes
 */
public static byte[] readBytes(ByteBuffer buffer, int offset, int size) {
    final byte[] dest = new byte[size];
    final ByteBuffer view = buffer.duplicate();
    view.position(offset);
    view.get(dest, 0, size);
    return dest;
}
Read a byte array from the given offset and size in the buffer
8,513
/**
 * Converts the values in the map to the types requested by the configuration request.
 * Copied from the Graylog web interface; should be removed once better configuration
 * objects exist.
 *
 * @param data                 raw field values keyed by field name
 * @param configurationRequest describes the expected type of every field
 * @return a new map with values coerced to their declared types
 * @throws ValidationException for unknown fields, unknown types, or unparseable required numbers
 */
public static Map<String, Object> convertValues(final Map<String, Object> data, final ConfigurationRequest configurationRequest) throws ValidationException {
    final Map<String, Object> configuration = Maps.newHashMapWithExpectedSize(data.size());
    final Map<String, Map<String, Object>> configurationFields = configurationRequest.asList();
    for (final Map.Entry<String, Object> entry : data.entrySet()) {
        final String field = entry.getKey();
        final Map<String, Object> fieldDescription = configurationFields.get(field);
        if (fieldDescription == null || fieldDescription.isEmpty()) {
            throw new ValidationException(field, "Unknown configuration field description for field \"" + field + "\"");
        }
        final String type = (String) fieldDescription.get("type");
        Object value;
        switch (type) {
            case "text":
            case "dropdown":
                value = entry.getValue() == null ? "" : String.valueOf(entry.getValue());
                break;
            case "number":
                try {
                    value = Integer.parseInt(String.valueOf(entry.getValue()));
                } catch (NumberFormatException e) {
                    // Optional number fields may legitimately be unset.
                    if ("true".equals(String.valueOf(fieldDescription.get("is_optional")))) {
                        value = null;
                    } else {
                        throw new ValidationException(field, e.getMessage());
                    }
                }
                break;
            case "boolean":
                value = "true".equalsIgnoreCase(String.valueOf(entry.getValue()));
                break;
            case "list":
                final List<?> valueList = entry.getValue() == null ? Collections.emptyList() : (List<?>) entry.getValue();
                // Fix: dropped the redundant `o != null &&` — instanceof is already null-safe.
                value = valueList.stream()
                        .filter(o -> o instanceof String)
                        .map(String::valueOf)
                        .collect(Collectors.toList());
                break;
            default:
                throw new ValidationException(field, "Unknown configuration field type \"" + type + "\"");
        }
        configuration.put(field, value);
    }
    return configuration;
}
Converts the values in the map to the requested types . This has been copied from the Graylog web interface and should be removed once we have better configuration objects .
8,514
/**
 * Extracts the last stream id from the filter string passed as part of the Elasticsearch
 * query, for use by stream-specific message decorators.
 *
 * @param filter the filter string; may be null or empty
 * @return the extracted stream id, or empty when absent
 */
public static Optional<String> extractStreamId(String filter) {
    if (isNullOrEmpty(filter)) {
        return Optional.empty();
    }
    final Matcher matcher = filterStreamIdPattern.matcher(filter);
    // Capture group 2 holds the stream id when the pattern matches.
    return matcher.find() ? Optional.of(matcher.group(2)) : Optional.empty();
}
Extracts the last stream id from the filter string passed as part of the elasticsearch query . This is used later to pass to possibly existing message decorators for stream - specific configurations .
8,515
/**
 * Creates a copy of this rule with a fresh instance of its generated rule class, if one is
 * present. When instantiation fails, the copy is returned without a generated instance and
 * the rule will be interpreted instead.
 */
public Rule invokableCopy(FunctionRegistry functionRegistry) {
    final Builder builder = toBuilder();
    final Class<? extends GeneratedRule> ruleClass = generatedRuleClass();
    if (ruleClass != null) {
        try {
            final Set<Constructor> constructors = ReflectionUtils.getConstructors(ruleClass);
            // Generated rule classes are expected to have exactly one constructor.
            final Constructor onlyElement = Iterables.getOnlyElement(constructors);
            final GeneratedRule instance = (GeneratedRule) onlyElement.newInstance(functionRegistry);
            builder.generatedRule(instance);
        } catch (IllegalAccessException | InstantiationException | InvocationTargetException e) {
            // Fix: pass the exception as the final argument so SLF4J logs the stack trace
            // (previously it filled a "{}" placeholder, losing the trace).
            LOG.warn("Unable to generate code for rule {}", id(), e);
        }
    }
    return builder.build();
}
Creates a copy of this Rule with a new instance of the generated rule class if present .
8,516
/**
 * Polls Elasticsearch for the field mapping of the given index and converts it into an
 * {@code IndexFieldTypesDTO} for the given index set.
 *
 * @return the field types, or empty when the mapping cannot be fetched or has an
 *         unexpected shape
 */
public Optional<IndexFieldTypesDTO> pollIndex(final String indexName, final String indexSetId) {
    final GetMapping getMapping = new GetMapping.Builder().addIndex(indexName).build();
    final JestResult result;
    // Time every mapping request with the poll timer.
    try (final Timer.Context ignored = pollTimer.time()) {
        result = JestUtils.execute(jestClient, getMapping, () -> "Unable to get index mapping for index: " + indexName);
    } catch (Exception e) {
        // Full stack trace only at debug level to keep regular logs terse.
        if (LOG.isDebugEnabled()) {
            LOG.error("Couldn't get mapping for index <{}>", indexName, e);
        } else {
            LOG.error("Couldn't get mapping for index <{}>: {}", indexName, ExceptionUtils.getRootCauseMessage(e));
        }
        return Optional.empty();
    }
    // Navigate to <index>.mappings.message.properties in the mapping response.
    final JsonNode properties = result.getJsonObject().path(indexName).path("mappings").path("message").path("properties");
    if (properties.isMissingNode()) {
        LOG.error("Invalid mapping response: {}", result.getJsonString());
        return Optional.empty();
    }
    final Spliterator<Map.Entry<String, JsonNode>> fieldSpliterator = Spliterators.spliteratorUnknownSize(properties.fields(), Spliterator.IMMUTABLE);
    // Collect (field name, type) pairs, skipping fields without a "type" attribute
    // (asText() yields "" for missing nodes, which the filter drops).
    final Set<FieldTypeDTO> fieldsMap = StreamSupport.stream(fieldSpliterator, false)
            .map(field -> Maps.immutableEntry(field.getKey(), field.getValue().path("type").asText()))
            .filter(field -> !field.getValue().isEmpty())
            .map(field -> FieldTypeDTO.create(field.getKey(), field.getValue()))
            .collect(Collectors.toSet());
    return Optional.of(IndexFieldTypesDTO.create(indexSetId, indexName, fieldsMap));
}
Returns the index field types for the given index .
8,517
/**
 * Parses a NetFlow v9 packet header from the buffer.
 *
 * @throws InvalidFlowVersionException when the version field is not 9
 */
public static NetFlowV9Header parseHeader(ByteBuf bb) {
    final int version = bb.readUnsignedShort();
    if (version != 9) {
        throw new InvalidFlowVersionException(version);
    }
    final int recordCount = bb.readUnsignedShort();
    final long systemUptime = bb.readUnsignedInt();
    final long unixSeconds = bb.readUnsignedInt();
    final long sequenceNumber = bb.readUnsignedInt();
    final long sourceId = bb.readUnsignedInt();
    return NetFlowV9Header.create(version, recordCount, systemUptime, unixSeconds, sequenceNumber, sourceId);
}
Flow Header Format
8,518
/**
 * Parses all templates contained in a NetFlow v9 template flowset.
 * The flowset id is assumed to have been consumed already; this reads the flowset length
 * and then parses templates until the flowset is exhausted.
 */
public static List<NetFlowV9Template> parseTemplates(ByteBuf bb, NetFlowV9FieldTypeRegistry typeRegistry) {
    final ImmutableList.Builder<NetFlowV9Template> result = ImmutableList.builder();
    final int flowSetLength = bb.readUnsignedShort();
    // 4 bytes of flowset header (id + length) count towards the flowset length.
    int consumed = 4;
    while (consumed < flowSetLength) {
        final NetFlowV9Template template = parseTemplate(bb, typeRegistry);
        result.add(template);
        // Each template: 4 header bytes plus 4 bytes per field definition.
        consumed += 4 + template.fieldCount() * 4;
    }
    return result.build();
}
Template FlowSet Format
8,519
/**
 * Like {@code parseTemplates}, but only captures each template's id and raw bytes without
 * interpreting the field definitions.
 * <p>
 * Cleanup: removed the dead {@code NetFlowV9FieldDef} builder and the unused per-field
 * locals; the field type/length pairs are now skipped in one step.
 */
public static List<Map.Entry<Integer, byte[]>> parseTemplatesShallow(ByteBuf bb) {
    final ImmutableList.Builder<Map.Entry<Integer, byte[]>> templates = ImmutableList.builder();
    final int len = bb.readUnsignedShort();
    int p = 4;
    while (p < len) {
        final int start = bb.readerIndex();
        final int templateId = bb.readUnsignedShort();
        final int fieldCount = bb.readUnsignedShort();
        // Skip the field definitions: 2 bytes type + 2 bytes length per field.
        bb.skipBytes(fieldCount * 4);
        // Keep the template's raw bytes (header + field definitions) for later parsing.
        final byte[] bytes = ByteBufUtil.getBytes(bb, start, bb.readerIndex() - start);
        templates.add(Maps.immutableEntry(templateId, bytes));
        p += 4 + fieldCount * 4;
    }
    return templates.build();
}
Like above but only retrieves the bytes and template ids
8,520
/**
 * Parses a NetFlow v9 options template flowset: scope field definitions followed by option
 * field definitions. Leaves the reader index at the end of the template, skipping padding.
 */
public static NetFlowV9OptionTemplate parseOptionTemplate(ByteBuf bb, NetFlowV9FieldTypeRegistry typeRegistry) {
    int length = bb.readUnsignedShort();
    final int templateId = bb.readUnsignedShort();
    int optionScopeLength = bb.readUnsignedShort();
    int optionLength = bb.readUnsignedShort();
    int p = bb.readerIndex();
    int endOfScope = p + optionScopeLength;
    int endOfOption = endOfScope + optionLength;
    // 10 bytes (flowset id/length + template id + scope/option lengths) precede `p`,
    // and `length` counts from the start of the flowset.
    int endOfTemplate = p - 10 + length;
    final ImmutableList.Builder<NetFlowV9ScopeDef> scopeDefs = ImmutableList.builder();
    while (bb.readerIndex() < endOfScope) {
        int scopeType = bb.readUnsignedShort();
        int scopeLength = bb.readUnsignedShort();
        scopeDefs.add(NetFlowV9ScopeDef.create(scopeType, scopeLength));
    }
    // Realign in case the scope definitions did not fill the declared scope length exactly.
    bb.readerIndex(endOfScope);
    final ImmutableList.Builder<NetFlowV9FieldDef> optionDefs = ImmutableList.builder();
    while (bb.readerIndex() < endOfOption) {
        int optType = bb.readUnsignedShort();
        int optLength = bb.readUnsignedShort();
        NetFlowV9FieldType t = typeRegistry.get(optType);
        if (t == null) {
            // Unknown option type: synthesize a field type from the value length.
            t = NetFlowV9FieldType.create(optType, NetFlowV9FieldType.ValueType.byLength(optLength), "option_" + optType);
        }
        optionDefs.add(NetFlowV9FieldDef.create(t, optLength));
    }
    // Skip any trailing padding up to the declared template end.
    bb.readerIndex(endOfTemplate);
    return NetFlowV9OptionTemplate.create(templateId, scopeDefs.build(), optionDefs.build());
}
Options Template Format
8,521
/**
 * Parses a NetFlow v9 data flowset into records, using the template cache or the option
 * template (when the flowset id matches it). Returns an empty list when no template is known
 * for the flowset id, since the records cannot be decoded yet.
 */
public static List<NetFlowV9BaseRecord> parseRecords(ByteBuf bb, Map<Integer, NetFlowV9Template> cache, NetFlowV9OptionTemplate optionTemplate) {
    List<NetFlowV9BaseRecord> records = new ArrayList<>();
    int flowSetId = bb.readUnsignedShort();
    int length = bb.readUnsignedShort();
    // `length` includes the 4 flowset header bytes that were just consumed.
    int end = bb.readerIndex() - 4 + length;
    List<NetFlowV9FieldDef> defs;
    boolean isOptionTemplate = optionTemplate != null && optionTemplate.templateId() == flowSetId;
    if (isOptionTemplate) {
        defs = optionTemplate.optionDefs();
    } else {
        NetFlowV9Template t = cache.get(flowSetId);
        if (t == null) {
            return Collections.emptyList();
        }
        defs = t.definitions();
    }
    // Byte size of a single record according to the template definitions.
    int unitSize = 0;
    for (NetFlowV9FieldDef def : defs) {
        unitSize += def.length();
    }
    while (bb.readerIndex() < end && bb.readableBytes() >= unitSize) {
        final ImmutableMap.Builder<String, Object> fields = ImmutableMap.builder();
        for (NetFlowV9FieldDef def : defs) {
            final String key = def.type().name().toLowerCase(Locale.ROOT);
            final Optional<Object> optValue = def.parse(bb);
            optValue.ifPresent(value -> fields.put(key, value));
        }
        if (isOptionTemplate) {
            // Option records additionally carry scope values, assembled big-endian byte by byte.
            final ImmutableMap.Builder<Integer, Object> scopes = ImmutableMap.builder();
            for (NetFlowV9ScopeDef def : optionTemplate.scopeDefs()) {
                int t = def.type();
                int len = def.length();
                long l = 0;
                for (int i = 0; i < len; i++) {
                    l <<= 8;
                    l |= bb.readUnsignedByte();
                }
                scopes.put(t, l);
            }
            records.add(NetFlowV9OptionRecord.create(fields.build(), scopes.build()));
        } else {
            records.add(NetFlowV9Record.create(fields.build()));
        }
        // Remaining bytes smaller than one record are padding; stop early.
        if (end - bb.readerIndex() < unitSize) {
            break;
        }
    }
    // Always realign the reader to the declared flowset end.
    bb.readerIndex(end);
    return records;
}
Data FlowSet Format
8,522
/**
 * Reads a data flowset header and skips its body, returning only the template id the
 * flowset refers to.
 * <p>
 * Cleanup: removed the unused {@code start} local.
 *
 * @return the template (flowset) id of the skipped record set
 */
public static Integer parseRecordShallow(ByteBuf bb) {
    final int usedTemplateId = bb.readUnsignedShort();
    final int length = bb.readUnsignedShort();
    // `length` includes the 4 header bytes just consumed; jump past the record body.
    final int end = bb.readerIndex() - 4 + length;
    bb.readerIndex(end);
    return usedTemplateId;
}
Like above, but only reads the template id and skips over the raw record bytes.
8,523
/**
 * Creates aliases for legacy reopened indices: every reopened index of every index set is
 * marked as reopened via an alias.
 */
public void upgrade() {
    this.indexSetService.findAll().stream()
            .map(mongoIndexSetFactory::create)
            .flatMap(indexSet -> getReopenedIndices(indexSet).stream())
            .forEach(indexName -> {
                LOG.debug("Marking index {} to be reopened using alias.", indexName);
                indices.markIndexReopened(indexName);
            });
}
Create aliases for legacy reopened indices .
8,524
/**
 * Combines the given stream id and query string into a single filter expression of the
 * form {@code streams:<id> AND (<query>)}. A null, empty, or "*" query contributes nothing.
 *
 * @throws IllegalArgumentException when streamId is null or blank
 */
protected String buildQueryFilter(String streamId, String query) {
    checkArgument(streamId != null, "streamId parameter cannot be null");
    final String trimmedStreamId = streamId.trim();
    checkArgument(!trimmedStreamId.isEmpty(), "streamId parameter cannot be empty");
    String filter = "streams:" + trimmedStreamId;
    if (query != null) {
        final String trimmedQuery = query.trim();
        // "*" matches everything, so appending it would be redundant.
        if (!trimmedQuery.isEmpty() && !"*".equals(trimmedQuery)) {
            filter = filter + " AND (" + trimmedQuery + ")";
        }
    }
    return filter;
}
Combines the given stream ID and query string into a single filter string .
8,525
/**
 * Finds the byte length of the frame-length value at the buffer's current reader index.
 *
 * @return the length in bytes, or -1 when the terminator byte is not found
 */
private int findFrameSizeValueLength(final ByteBuf buffer) {
    final int readerIndex = buffer.readerIndex();
    final int terminatorIndex = buffer.forEachByte(BYTE_PROCESSOR);
    // forEachByte returns -1 when the processor never stopped (terminator absent).
    return terminatorIndex >= 0 ? terminatorIndex - readerIndex : -1;
}
Find the byte length of the frame length value .
8,526
/**
 * Lazily connects to MongoDB (singleton client), verifies connectivity and credentials with
 * a ping, and warns when the server version is below the supported minimum.
 *
 * @throws RuntimeException when the database name is missing from the URI
 * @throws MongoException   when the ping fails (authentication or connectivity)
 */
public synchronized Mongo connect() {
    if (m == null) {
        final String dbName = mongoClientURI.getDatabase();
        if (isNullOrEmpty(dbName)) {
            LOG.error("The MongoDB database name must not be null or empty (mongodb_uri was: {})", mongoClientURI);
            throw new RuntimeException("MongoDB database name is missing.");
        }
        m = new MongoClient(mongoClientURI);
        db = m.getDB(dbName);
        db.setWriteConcern(WriteConcern.ACKNOWLEDGED);
        mongoDatabase = m.getDatabase(dbName).withWriteConcern(WriteConcern.ACKNOWLEDGED);
    }
    try {
        // Ping on every call, not just first connect, to surface broken connections early.
        db.command("{ ping: 1 }");
    } catch (MongoCommandException e) {
        // Error code 18 indicates an authentication failure.
        if (e.getCode() == 18) {
            throw new MongoException("Couldn't connect to MongoDB. Please check the authentication credentials.", e);
        } else {
            throw new MongoException("Couldn't connect to MongoDB: " + e.getMessage(), e);
        }
    }
    final Version mongoVersion = getMongoVersion(m.getDB("admin"));
    if (mongoVersion != null && mongoVersion.lessThan(MINIMUM_MONGODB_VERSION)) {
        LOG.warn("You're running MongoDB {} but Graylog requires at least MongoDB {}. Please upgrade.", mongoVersion, MINIMUM_MONGODB_VERSION);
    }
    return m;
}
Connect the instance .
8,527
/**
 * Returns a sort builder for the given order ("desc" for descending, anything else
 * ascending) and field name.
 */
protected DBSort.SortBuilder getSortBuilder(String order, String field) {
    return "desc".equalsIgnoreCase(order) ? DBSort.desc(field) : DBSort.asc(field);
}
Returns a sort builder for the given order and field name .
8,528
/**
 * Assigns the minimum TTL found among the supplied DNS answers (in milliseconds), unless a
 * configured override TTL takes precedence. The minimum is used because it is the longest
 * time for which at least one of the records is guaranteed valid.
 * <p>
 * Fix: replaced the unchecked {@code Optional.get()} — which threw a bare
 * {@code NoSuchElementException} for an empty answer list — with a descriptive exception,
 * and used {@code Comparator.naturalOrder()} instead of the boxing
 * {@code Comparator.comparing(Long::valueOf)}.
 *
 * @throws IllegalStateException when no DNS answers are supplied and no override is set
 */
private void assignMinimumTTL(List<? extends DnsAnswer> dnsAnswers, LookupResult.Builder builder) {
    if (config.hasOverrideTTL()) {
        builder.cacheTTL(config.getCacheTTLOverrideMillis());
    } else {
        final long minTtlSeconds = dnsAnswers.stream()
                .map(DnsAnswer::dnsTTL)
                .min(Comparator.naturalOrder())
                .orElseThrow(() -> new IllegalStateException("Cannot determine minimum TTL: no DNS answers supplied"));
        // DNS TTLs are in seconds; the cache expects milliseconds.
        builder.cacheTTL(minTtlSeconds * 1000);
    }
}
Assigns the minimum TTL found in the supplied DnsAnswers . The minimum makes sense because this is the least amount of time that at least one of the records is valid for .
8,529
/**
 * Returns the number of content pack installations that contain the given entity id.
 */
public long countInstallationOfEntityById(ModelId entityId) {
    // Query path: "<entities-field>.<meta-id-field>".
    final String entityIdField = ContentPackInstallation.FIELD_ENTITIES + "." + NativeEntityDescriptor.FIELD_META_ID;
    return dbCollection.getCount(DBQuery.is(entityIdField, entityId));
}
Returns the number of installations the given content pack entity ID is used in .
8,530
/**
 * Assigns the given stream to this message and tracks its index set.
 * The size counter only changes when the stream was not already assigned.
 */
public void addStream(Stream stream) {
    indexSets.add(stream.getIndexSet());
    final boolean newlyAdded = streams.add(stream);
    if (newlyAdded) {
        // A stream reference counts as 8 bytes in the message size estimate.
        sizeCounter.inc(8);
        if (LOG.isTraceEnabled()) {
            LOG.trace("[Message size update][{}] stream added: {}", getId(), sizeCounter.getCount());
        }
    }
}
Assign the given stream to this message .
8,531
/**
 * Removes the stream assignment from this message.
 *
 * @return true when the stream was assigned and has been removed
 */
public boolean removeStream(Stream stream) {
    if (!streams.remove(stream)) {
        return false;
    }
    // Rebuild the index-set cache from the streams that remain.
    indexSets.clear();
    for (Stream remaining : streams) {
        indexSets.add(remaining.getIndexSet());
    }
    sizeCounter.dec(8);
    if (LOG.isTraceEnabled()) {
        LOG.trace("[Message size update][{}] stream removed: {}", getId(), sizeCounter.getCount());
    }
    return true;
}
Remove the stream assignment from this message .
8,532
/**
 * Calculates the number of seconds spanned by the given time range.
 *
 * @return the span in seconds, or 0 when either bound is missing or the range is invalid
 */
public static int toSeconds(TimeRange timeRange) {
    if (timeRange.getFrom() == null || timeRange.getTo() == null) {
        return 0;
    }
    try {
        return Seconds.secondsBetween(timeRange.getFrom(), timeRange.getTo()).getSeconds();
    } catch (IllegalArgumentException e) {
        // Invalid ranges (e.g. "from" after "to") count as zero seconds.
        return 0;
    }
}
Calculate the number of seconds in the given time range .
8,533
/**
 * Creates a new field type polling job for the newly created index set.
 */
public void handleIndexSetCreation(final IndexSetCreatedEvent event) {
    final String indexSetId = event.indexSet().id();
    final Optional<IndexSetConfig> indexSetConfig = indexSetService.get(indexSetId);
    // The event may race with deletion; handle a missing index set gracefully.
    if (!indexSetConfig.isPresent()) {
        LOG.warn("Couldn't find newly created index set <{}>", indexSetId);
        return;
    }
    schedule(mongoIndexSetFactory.create(indexSetConfig.get()));
}
Creates a new field type polling job for the newly created index set .
8,534
/**
 * Cancels and removes the field type polling job for the now-deleted index set.
 */
public void handleIndexSetDeletion(final IndexSetDeletedEvent event) {
    final String indexSetId = event.id();
    LOG.debug("Disable field type updating for index set <{}>", indexSetId);
    // remove() returns null for unknown ids; cancel() is expected to tolerate that.
    cancel(futures.remove(indexSetId));
}
Removes the field type polling job for the now deleted index set .
8,535
/**
 * Removes the stored index field type data for every deleted index.
 */
public void handleIndexDeletion(final IndicesDeletedEvent event) {
    for (String indexName : event.indices()) {
        LOG.debug("Removing field type information for deleted index <{}>", indexName);
        dbService.delete(indexName);
    }
}
Removes the index field type data for the deleted index .
8,536
/**
 * Schedules a recurring job that keeps the field types of the index set's active write index
 * up to date. Index sets with a ZERO refresh interval or that are not writable are skipped;
 * an existing job for the same index set is cancelled first.
 */
private void schedule(final IndexSet indexSet) {
    final String indexSetId = indexSet.getConfig().id();
    final String indexSetTitle = indexSet.getConfig().title();
    final Duration refreshInterval = indexSet.getConfig().fieldTypeRefreshInterval();
    if (Duration.ZERO.equals(refreshInterval)) {
        LOG.debug("Skipping index set with ZERO refresh interval <{}/{}>", indexSetTitle, indexSetId);
        return;
    }
    if (!indexSet.getConfig().isWritable()) {
        LOG.debug("Skipping non-writable index set <{}/{}>", indexSetTitle, indexSetId);
        return;
    }
    // Replace any previously scheduled job for this index set.
    cancel(futures.get(indexSetId));
    LOG.debug("Schedule index field type updating for index set <{}/{}> every {} ms", indexSetId, indexSetTitle, refreshInterval.getMillis());
    final ScheduledFuture<?> future = scheduler.scheduleAtFixedRate(() -> {
        try {
            final String activeWriteIndex = indexSet.getActiveWriteIndex();
            if (activeWriteIndex != null) {
                LOG.debug("Updating index field types for active write index <{}> in index set <{}/{}>", activeWriteIndex, indexSetTitle, indexSetId);
                poller.pollIndex(activeWriteIndex, indexSetId).ifPresent(dbService::upsert);
            } else {
                // Can happen before the first rotation created a write index.
                LOG.warn("Active write index for index set \"{}\" ({}) doesn't exist yet", indexSetTitle, indexSetId);
            }
        } catch (TooManyAliasesException e) {
            LOG.error("Couldn't get active write index", e);
        } catch (Exception e) {
            // Catch everything else so the scheduled task keeps running on future ticks.
            LOG.error("Couldn't update field types for index set <{}/{}>", indexSetTitle, indexSetId, e);
        }
    }, 0, refreshInterval.getMillis(), TimeUnit.MILLISECONDS);
    futures.put(indexSetId, future);
}
Creates a new polling job for the given index set to keep the active write index information up to date .
8,537
/**
 * Loads the Graylog plugin properties file from the given JAR file. Failures are logged at
 * debug level and result in empty properties, never an exception.
 * <p>
 * Fix: the {@link JarFile} is now opened in try-with-resources — the previous implementation
 * never closed it, leaking the file handle.
 *
 * @param filename path to the plugin JAR, must not be null
 * @return the plugin properties (possibly empty)
 */
public static PluginProperties fromJarFile(final String filename) {
    final Properties properties = new Properties();
    try (JarFile jarFile = new JarFile(requireNonNull(filename))) {
        final Optional<String> propertiesPath = getPropertiesPath(jarFile);
        if (propertiesPath.isPresent()) {
            LOG.debug("Loading <{}> from <{}>", propertiesPath.get(), filename);
            final ZipEntry entry = jarFile.getEntry(propertiesPath.get());
            if (entry != null) {
                properties.load(jarFile.getInputStream(entry));
            } else {
                LOG.debug("Plugin properties <{}> are missing in <{}>", propertiesPath.get(), filename);
            }
        }
    } catch (Exception e) {
        // Best-effort: plugins without (readable) properties still load with empty metadata.
        LOG.debug("Unable to load properties from plugin <{}>", filename, e);
    }
    return new PluginProperties(properties);
}
Loads the Graylog plugin properties file from the given JAR file .
8,538
/**
 * Checks whether Elasticsearch is reachable and the cluster has at least one data node.
 *
 * @return true when connected and data nodes exist, false otherwise
 */
public boolean isConnected() {
    final Health request = new Health.Builder()
            .local()
            .timeout(Ints.saturatedCast(requestTimeout.toSeconds()))
            .build();
    try {
        final JestResult result = JestUtils.execute(jestClient, request, () -> "Couldn't check connection status of Elasticsearch");
        // A cluster without data nodes cannot store anything, so treat it as unavailable.
        return result.getJsonObject().path("number_of_data_nodes").asInt() > 0;
    } catch (ElasticsearchException e) {
        if (LOG.isDebugEnabled()) {
            LOG.error(e.getMessage(), e);
        }
        return false;
    }
}
Check if Elasticsearch is available and that there are data nodes in the cluster .
8,539
/**
 * Blocks until the Elasticsearch cluster is reachable and the write-active index is healthy,
 * polling once per second, or throws when the given timeout elapses first.
 *
 * @throws InterruptedException when the waiting thread is interrupted
 * @throws TimeoutException     when the index did not become healthy within the timeout
 */
public void waitForConnectedAndDeflectorHealthy(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
    LOG.debug("Waiting until the write-active index is healthy again, checking once per second.");
    final CountDownLatch latch = new CountDownLatch(1);
    final ScheduledFuture<?> scheduledFuture = scheduler.scheduleAtFixedRate(() -> {
        try {
            if (isConnected() && isDeflectorHealthy()) {
                LOG.debug("Write-active index is healthy again, unblocking waiting threads.");
                latch.countDown();
            }
        } catch (Exception ignore) {
            // Intentionally best-effort: transient errors are simply retried on the next tick.
        }
    }, 0, 1, TimeUnit.SECONDS);
    final boolean waitSuccess = latch.await(timeout, unit);
    // Stop the polling task regardless of outcome.
    scheduledFuture.cancel(true);
    if (!waitSuccess) {
        throw new TimeoutException("Write-active index didn't get healthy within timeout");
    }
}
Blocks until the Elasticsearch cluster and current write index is healthy again or the given timeout fires .
8,540
/**
 * Registers a shutdown hook with this service.
 *
 * @throws IllegalStateException when shutdown is already in progress
 * @throws NullPointerException  when the hook is null
 */
public void register(GracefulShutdownHook shutdownHook) {
    if (isShuttingDown.get()) {
        throw new IllegalStateException("Couldn't register shutdown hook because shutdown is already in progress");
    }
    final GracefulShutdownHook hook = requireNonNull(shutdownHook, "shutdownHook cannot be null");
    shutdownHooks.add(hook);
}
Register a shutdown hook with the service .
8,541
/**
 * Sorts the JS files into the intended load order so templates don't need to care:
 * vendor bundles first, then the polyfill file, then the builtins file, then the rest.
 */
List<String> sortedJsFiles() {
    return jsFiles().stream().sorted((first, second) -> {
        // Vendor bundles always load first.
        if (vendorJsFiles.contains(first)) {
            return -1;
        }
        if (vendorJsFiles.contains(second)) {
            return 1;
        }
        // Then the polyfill file.
        if (first.equals(polyfillJsFile)) {
            return -1;
        }
        if (second.equals(polyfillJsFile)) {
            return 1;
        }
        // Then the builtins file.
        if (first.equals(builtinsJsFile)) {
            return -1;
        }
        if (second.equals(builtinsJsFile)) {
            return 1;
        }
        // Remaining files in reverse lexicographic order.
        return second.compareTo(first);
    }).collect(Collectors.toList());
}
Sort JS files in the intended load order so templates don't need to care about it.
8,542
/**
 * Returns an immutable directed graph containing only the specified node.
 */
public static <N> ImmutableGraph<N> singletonDirectedGraph(N node) {
    final MutableGraph<N> singleton = GraphBuilder.directed().build();
    singleton.addNode(node);
    return ImmutableGraph.copyOf(singleton);
}
Returns an immutable directed graph containing only the specified node .
8,543
/**
 * Returns an immutable undirected graph containing only the specified node.
 */
public static <N> ImmutableGraph<N> singletonUndirectedGraph(N node) {
    final MutableGraph<N> singleton = GraphBuilder.undirected().build();
    singleton.addNode(node);
    return ImmutableGraph.copyOf(singleton);
}
Returns an immutable undirected graph containing only the specified node .
8,544
/**
 * Returns an immutable graph containing only the given node, inheriting the
 * properties (directedness etc.) of the template graph.
 */
public static <N> ImmutableGraph<N> singletonGraph(Graph<N> graph, N node) {
    // Copy the template graph's configuration, add the single node, freeze.
    final MutableGraph<N> singleton = GraphBuilder.from(graph).build();
    singleton.addNode(node);
    return ImmutableGraph.copyOf(singleton);
}
Returns an immutable graph containing only the specified node .
8,545
/** Merges all nodes and edges of {@code graph2} into {@code graph1}. */
public static <N> void merge(MutableGraph<N> graph1, Graph<N> graph2) {
    // Copy nodes first so isolated (edge-less) nodes survive the merge.
    graph2.nodes().forEach(graph1::addNode);
    for (EndpointPair<N> edge : graph2.edges()) {
        graph1.putEdge(edge.nodeU(), edge.nodeV());
    }
}
Merge all nodes and edges of two graphs .
8,546
/**
 * Normalizes an LDAP DN so it can be used in plain string comparisons.
 * Null, empty and unparseable values are returned unchanged.
 */
private String normalizedDn(String dn) {
    if (isNullOrEmpty(dn)) {
        return dn;
    }
    try {
        return new Dn(dn).getNormName();
    } catch (LdapInvalidDnException e) {
        // Not a valid DN after all; fall back to the raw string.
        LOG.debug("Invalid DN", e);
        return dn;
    }
}
When the given string is a DN the method ensures that the DN gets normalized so it can be used in string comparison .
8,547
/**
 * Attempts to convert the given string to a numeric type, trying Integer,
 * Long and Double in that order; returns the original string when none match.
 */
public Object convert(String value) {
    if (value == null || value.isEmpty()) {
        return value;
    }
    // Narrowest numeric type wins: Integer, then Long, then Double.
    final Integer asInt = Ints.tryParse(value);
    if (asInt != null) {
        return asInt;
    }
    final Long asLong = Longs.tryParse(value);
    if (asLong != null) {
        return asLong;
    }
    final Double asDouble = Doubles.tryParse(value);
    return asDouble != null ? asDouble : value;
}
Attempts to convert the provided string value to a numeric type trying Integer Long and Double in order until successful .
8,548
// Returns the JSON payload of the GELF message, decompressing it according to the
// detected GELF type; maxBytes caps the decompressed size. CHUNKED and UNSUPPORTED
// types cannot be rendered here and raise IllegalStateException; decompression I/O
// failures are wrapped in IllegalStateException as well. Returns null only if the
// switch falls through (should not happen for the known enum values).
public String getJSON ( long maxBytes ) { try { switch ( getGELFType ( ) ) { case ZLIB : return Tools . decompressZlib ( payload , maxBytes ) ; case GZIP : return Tools . decompressGzip ( payload , maxBytes ) ; case UNCOMPRESSED : return new String ( payload , StandardCharsets . UTF_8 ) ; case CHUNKED : case UNSUPPORTED : throw new IllegalStateException ( "Unknown GELF type. Not supported." ) ; } } catch ( final IOException e ) { throw new IllegalStateException ( "Failed to decompress the GELF message payload" , e ) ; } return null ; }
Return the JSON payload of the GELF message .
8,549
/**
 * Returns a map of field names to the corresponding field types.
 *
 * @param fieldNames field names to look up; an empty collection yields an empty map
 * @param indexNames indices to restrict the lookup to; an empty collection means "all indices"
 */
public Map<String, FieldTypes> get(final Collection<String> fieldNames, Collection<String> indexNames) {
    if (fieldNames.isEmpty()) {
        return Collections.emptyMap();
    }
    // field name -> (physical Elasticsearch type -> index names that use it)
    final Map<String, SetMultimap<String, String>> fields = new HashMap<>();
    getTypesStream(fieldNames, indexNames).forEach(types -> {
        final String indexName = types.indexName();
        types.fields().forEach(fieldType -> {
            final String fieldName = fieldType.fieldName();
            if (fieldNames.contains(fieldName) && (indexNames.isEmpty() || indexNames.contains(indexName))) {
                // computeIfAbsent replaces the original containsKey/put dance.
                fields.computeIfAbsent(fieldName, ignored -> HashMultimap.create())
                        .put(fieldType.physicalType(), indexName);
            }
        });
    });
    final ImmutableMap.Builder<String, FieldTypes> result = ImmutableMap.builder();
    for (Map.Entry<String, SetMultimap<String, String>> fieldNameEntry : fields.entrySet()) {
        final String fieldName = fieldNameEntry.getKey();
        // Map each physical type to a Graylog type, keeping the indices it came from;
        // unknown physical types are silently dropped (mapType returns empty).
        final Set<FieldTypes.Type> types = fieldNameEntry.getValue().asMap().entrySet().stream()
                .map(entry -> typeMapper.mapType(entry.getKey())
                        .map(t -> t.withIndexNames(ImmutableSet.copyOf(entry.getValue()))))
                .filter(Optional::isPresent)
                .map(Optional::get)
                .collect(Collectors.toSet());
        result.put(fieldName, FieldTypes.create(fieldName, types));
    }
    return result.build();
}
Returns a map of field names to the corresponding field types .
8,550
/** Maps the given Elasticsearch field type name to a Graylog type, if known. */
public Optional<FieldTypes.Type> mapType(String typeName) {
    // Unknown physical types map to an empty Optional.
    final FieldTypes.Type mapped = TYPE_MAP.get(typeName);
    return Optional.ofNullable(mapped);
}
Map the given Elasticsearch field type to a Graylog type .
8,551
// Checks whether this GELF chunk completes its message. If all chunks have arrived,
// removes the bookkeeping entry and returns the reassembled payload; otherwise records
// the chunk and returns null. Duplicate chunks are counted and dropped; entries that
// exceed MAX_CHUNKS are discarded with an IllegalStateException; entries older than
// VALIDITY_PERIOD are expired.
// Concurrency notes (order matters — do not reorder without review):
// - putIfAbsent ensures exactly one thread registers the entry and bumps the metrics/eviction set.
// - payloadArray.compareAndSet(slot, null, chunk) makes duplicate detection atomic per slot.
// - chunkSlotsWritten.incrementAndGet() == sequenceCount detects completion exactly once.
// NOTE(review): a missing slot at assembly time leaves a null element in allChunks —
// presumably Unpooled.wrappedBuffer tolerates that only if it cannot happen after the
// watermark check; confirm.
private ByteBuf checkForCompletion ( GELFMessage gelfMessage ) { if ( ! chunks . isEmpty ( ) && log . isDebugEnabled ( ) ) { log . debug ( "Dumping GELF chunk map [chunks for {} messages]:\n{}" , chunks . size ( ) , humanReadableChunkMap ( ) ) ; } final GELFMessageChunk chunk = new GELFMessageChunk ( gelfMessage , null ) ; final int sequenceCount = chunk . getSequenceCount ( ) ; final String messageId = chunk . getId ( ) ; ChunkEntry entry = new ChunkEntry ( sequenceCount , chunk . getArrival ( ) , messageId ) ; final ChunkEntry existing = chunks . putIfAbsent ( messageId , entry ) ; if ( existing == null ) { waitingMessages . inc ( ) ; sortedEvictionSet . add ( entry ) ; } else { entry = existing ; } final int sequenceNumber = chunk . getSequenceNumber ( ) ; if ( ! entry . payloadArray . compareAndSet ( sequenceNumber , null , chunk ) ) { log . error ( "Received duplicate chunk {} for message {} from {}" , sequenceNumber , messageId , gelfMessage . getSourceAddress ( ) ) ; duplicateChunks . inc ( ) ; return null ; } final int chunkWatermark = entry . chunkSlotsWritten . incrementAndGet ( ) ; if ( chunkWatermark > MAX_CHUNKS ) { getAndCleanupEntry ( messageId ) ; throw new IllegalStateException ( "Maximum number of chunks reached, discarding message" ) ; } if ( chunkWatermark == sequenceCount ) { entry = getAndCleanupEntry ( messageId ) ; final byte [ ] allChunks [ ] = new byte [ sequenceCount ] [ ] ; for ( int i = 0 ; i < entry . payloadArray . length ( ) ; i ++ ) { final GELFMessageChunk messageChunk = entry . payloadArray . get ( i ) ; if ( messageChunk == null ) { log . debug ( "Couldn't read chunk {} of message {}, skipping this chunk." , i , messageId ) ; } else { allChunks [ i ] = messageChunk . getData ( ) ; } } completeMessages . inc ( ) ; return Unpooled . wrappedBuffer ( allChunks ) ; } if ( isOutdated ( entry ) ) { log . debug ( "Not all chunks of <{}> arrived within {}ms. Dropping chunks." 
, messageId , VALIDITY_PERIOD ) ; expireEntry ( messageId ) ; } return null ; }
Checks whether the presented gelf message chunk completes the incoming raw message and returns it if it does . If the message isn t complete it adds the chunk to the internal buffer and waits for more incoming messages . Outdated chunks are being purged regularly .
8,552
// Forces the null-byte delimiter setting to true on the given configuration.
// NOTE: mutates the passed-in Configuration in place and returns the same instance.
private static Configuration overrideDelimiter ( Configuration configuration ) { configuration . setBoolean ( TcpTransport . CK_USE_NULL_DELIMITER , true ) ; return configuration ; }
has been created with the wrong value .
8,553
// Extracts the IP address and TTL from a single DNS answer record. Returns null for a
// null record or when the raw bytes cannot be turned into an InetAddress. The Netty
// ByteBuf backing the record is released in the finally block to avoid a buffer leak.
// When includeIpVersion is set, the answer is tagged IPv4/IPv6 based on the decoded
// address class.
private static ADnsAnswer decodeDnsRecord ( DnsRecord dnsRecord , boolean includeIpVersion ) { if ( dnsRecord == null ) { return null ; } LOG . trace ( "Attempting to decode DNS record [{}]" , dnsRecord ) ; byte [ ] ipAddressBytes ; final DefaultDnsRawRecord dnsRawRecord = ( DefaultDnsRawRecord ) dnsRecord ; try { final ByteBuf byteBuf = dnsRawRecord . content ( ) ; ipAddressBytes = new byte [ byteBuf . readableBytes ( ) ] ; int readerIndex = byteBuf . readerIndex ( ) ; byteBuf . getBytes ( readerIndex , ipAddressBytes ) ; } finally { dnsRawRecord . release ( ) ; } LOG . trace ( "The IP address has [{}] bytes" , ipAddressBytes . length ) ; InetAddress ipAddress ; try { ipAddress = InetAddress . getByAddress ( ipAddressBytes ) ; } catch ( UnknownHostException e ) { LOG . error ( "Could not extract IP address from DNS entry [{}]. Cause [{}]" , dnsRecord . toString ( ) , ExceptionUtils . getRootCauseMessage ( e ) ) ; return null ; } LOG . trace ( "The resulting IP address is [{}]" , ipAddress . getHostAddress ( ) ) ; final ADnsAnswer . Builder builder = ADnsAnswer . builder ( ) . ipAddress ( ipAddress . getHostAddress ( ) ) . dnsTTL ( dnsRecord . timeToLive ( ) ) ; if ( includeIpVersion ) { builder . ipVersion ( ipAddress instanceof Inet4Address ? IP_4_VERSION : IP_6_VERSION ) ; } return builder . build ( ) ; }
Picks out the IP address and TTL from the answer response for each record .
8,554
/**
 * Creates a FileInfo (key, size, modification time) for the given path.
 * On any failure a placeholder based on EMPTY_FILE_INFO is returned so callers
 * don't have to handle the error themselves.
 */
public static FileInfo forPath(Path path) {
    try {
        final BasicFileAttributes attrs = Files.readAttributes(path, BasicFileAttributes.class);
        return FileInfo.builder()
                .path(path)
                .key(attrs.fileKey())
                .size(attrs.size())
                .modificationTime(attrs.lastModifiedTime())
                .build();
    } catch (Exception e) {
        LOG.error("Couldn't get file info for path: {}", path, e);
        return EMPTY_FILE_INFO.toBuilder().path(path).build();
    }
}
Create a file info for the given path .
8,555
// Builds the configuration form for the e-mail alarm callback: sender, subject and body
// templates plus two receiver lists. The userNames map feeds the "User Receivers"
// dropdown — i.e. this is where the list of Graylog usernames gets exposed to the form
// (the "leaking the user list" the author apologized for).
private ConfigurationRequest getConfigurationRequest ( Map < String , String > userNames ) { ConfigurationRequest configurationRequest = new ConfigurationRequest ( ) ; configurationRequest . addField ( new TextField ( "sender" , "Sender" , "graylog@example.org" , "The sender of sent out mail alerts" , ConfigurationField . Optional . OPTIONAL ) ) ; configurationRequest . addField ( new TextField ( "subject" , "E-Mail Subject" , "Graylog alert for stream: ${stream.title}: ${check_result.resultDescription}" , "The subject of sent out mail alerts" , ConfigurationField . Optional . NOT_OPTIONAL ) ) ; configurationRequest . addField ( new TextField ( "body" , "E-Mail Body" , FormattedEmailAlertSender . bodyTemplate , "The template to generate the body from" , ConfigurationField . Optional . OPTIONAL , TextField . Attribute . TEXTAREA ) ) ; configurationRequest . addField ( new ListField ( CK_USER_RECEIVERS , "User Receivers" , Collections . emptyList ( ) , userNames , "Graylog usernames that should receive this alert" , ConfigurationField . Optional . OPTIONAL ) ) ; configurationRequest . addField ( new ListField ( CK_EMAIL_RECEIVERS , "E-Mail Receivers" , Collections . emptyList ( ) , Collections . emptyMap ( ) , "E-Mail addresses that should receive this alert" , ConfigurationField . Optional . OPTIONAL , ListField . Attribute . ALLOW_CREATE ) ) ; return configurationRequest ; }
I am truly sorry about this but leaking the user list is not okay ...
8,556
// Transitions the transport between throttled and unthrottled states. No-op unless the
// input's "allow throttling" option is enabled. On unthrottle the existing latch is
// counted down to release threads blocked in blockUntilUnthrottled(); on throttle a
// fresh one-shot latch is installed. The missing-latch branch is defensive logging for
// a state that should be impossible.
// NOTE(review): currentlyThrottled.get()/set() and the blockLatch field are updated
// non-atomically — presumably this method is only called from a single thread; confirm.
public void updateThrottleState ( ThrottleState throttleState ) { if ( ! throttlingAllowed ) { return ; } final boolean throttled = determineIfThrottled ( throttleState ) ; if ( currentlyThrottled . get ( ) ) { if ( throttled ) { return ; } if ( blockLatch == null ) { log . error ( "Expected to see a transport throttle latch, but it is missing. This is a bug, continuing anyway." ) ; return ; } currentlyThrottled . set ( false ) ; handleChangedThrottledState ( false ) ; blockLatch . countDown ( ) ; } else if ( throttled ) { currentlyThrottled . set ( true ) ; handleChangedThrottledState ( true ) ; blockLatch = new CountDownLatch ( 1 ) ; } }
Only executed if the Allow Throttling checkbox is set in the input s configuration .
8,557
/**
 * Blocks until the throttle latch is released or the timeout elapses.
 *
 * @return true if the latch was released before the timeout; false on timeout,
 *         when no latch exists (never throttled), or on interruption
 */
public boolean blockUntilUnthrottled(long timeout, TimeUnit unit) {
    // No latch means we were never throttled; nothing to wait for.
    if (blockLatch == null) {
        return false;
    }
    try {
        return blockLatch.await(timeout, unit);
    } catch (InterruptedException e) {
        // Restore the interrupt flag instead of silently swallowing it,
        // so callers and thread pools can observe the interruption.
        Thread.currentThread().interrupt();
        return false;
    }
}
Blocks until the blockLatch is released or until the timeout is exceeded .
8,558
/**
 * Returns a human-readable string with the JRE vendor/version and OS name/version,
 * e.g. "Eclipse Adoptium 17.0.8 on Linux 6.1.0".
 */
public static String getSystemInformation() {
    // Single format call instead of repeated string concatenation via +=.
    return String.format("%s %s on %s %s",
            System.getProperty("java.vendor"),
            System.getProperty("java.version"),
            System.getProperty("os.name"),
            System.getProperty("os.version"));
}
Get a String containing version information about the JRE and the operating system .
8,559
/**
 * Returns a formatter that accepts the Elasticsearch time format with an
 * optional fractional-seconds part (".S" to ".SSS"), always in UTC.
 */
public static DateTimeFormatter timeFormatterWithOptionalMilliseconds() {
    // Optional fraction: a literal dot followed by 1-3 digits.
    final DateTimeParser fractionParser = new DateTimeFormatterBuilder()
            .appendLiteral(".")
            .appendFractionOfSecond(1, 3)
            .toParser();
    return new DateTimeFormatterBuilder()
            .append(DateTimeFormat.forPattern(ES_DATE_FORMAT_NO_MS).withZoneUTC())
            .appendOptional(fractionParser)
            .toFormatter();
}
Accepts our ElasticSearch time formats without milliseconds .
8,560
/**
 * Parses a timestamp in ES_DATE_FORMAT (assumed UTC) and converts it to ISO8601.
 * If parsing fails the original string is returned unchanged.
 */
public static String elasticSearchTimeFormatToISO8601(String time) {
    try {
        return getISO8601String(DateTime.parse(time, ES_DATE_FORMAT_FORMATTER));
    } catch (IllegalArgumentException e) {
        // Not in ES_DATE_FORMAT; hand the input back as-is.
        return time;
    }
}
Try to parse a date in ES_DATE_FORMAT format considering it is in UTC and convert it to an ISO8601 date . If an error is encountered in the process it will return the original string .
8,561
/**
 * Installs a no-op uncaught-exception handler on the current thread so uncaught
 * exceptions are not written to STDERR for this thread.
 */
public static void silenceUncaughtExceptionsInThisThread() {
    // Lambda replaces the original anonymous inner class (file already uses lambdas);
    // both thread and throwable are intentionally ignored.
    Thread.currentThread().setUncaughtExceptionHandler((thread, throwable) -> { });
}
The default uncaught exception handler will print to STDERR which we don t always want for threads . Using this utility method you can avoid writing to STDERR on a per - thread basis
8,562
/**
 * Checks whether an index with the given name exists.
 * The settings response is keyed by concrete index names, so the name must
 * appear among the JSON field names.
 */
public boolean exists(String indexName) {
    final JestResult result;
    try {
        result = jestClient.execute(new GetSettings.Builder().addIndex(indexName).build());
    } catch (IOException e) {
        throw new ElasticsearchException("Couldn't check existence of index " + indexName, e);
    }
    return result.isSucceeded() && Iterators.contains(result.getJsonObject().fieldNames(), indexName);
}
Check if a given name is an existing index .
8,563
// Checks whether the given name refers to an existing alias (as opposed to a concrete
// index). NOTE(review): the negated Iterators.contains is the opposite of exists() —
// presumably because GetSettings resolves an alias to its backing indices, so for a real
// alias the alias name itself does NOT appear among the response's field names; confirm
// against the Elasticsearch settings API before changing.
public boolean aliasExists ( String alias ) { try { final JestResult result = jestClient . execute ( new GetSettings . Builder ( ) . addIndex ( alias ) . build ( ) ) ; return result . isSucceeded ( ) && ! Iterators . contains ( result . getJsonObject ( ) . fieldNames ( ) , alias ) ; } catch ( IOException e ) { throw new ElasticsearchException ( "Couldn't check existence of alias " + alias , e ) ; } }
Check if a given name is an existing alias .
8,564
/**
 * Returns index names and their aliases for the given index pattern.
 * Only open indices are expanded; indices without alias metadata are skipped.
 */
public Map<String, Set<String>> getIndexNamesAndAliases(String indexPattern) {
    final GetAliases request = new GetAliases.Builder()
            .addIndex(indexPattern)
            .setParameter("expand_wildcards", "open")
            .build();
    final JestResult response = JestUtils.execute(jestClient, request,
            () -> "Couldn't collect aliases for index pattern " + indexPattern);
    final ImmutableMap.Builder<String, Set<String>> result = ImmutableMap.builder();
    final Iterator<Map.Entry<String, JsonNode>> entries = response.getJsonObject().fields();
    while (entries.hasNext()) {
        final Map.Entry<String, JsonNode> entry = entries.next();
        final JsonNode aliasMetaData = entry.getValue().path("aliases");
        // Only indices that actually carry alias metadata make it into the result.
        if (aliasMetaData.isObject()) {
            result.put(entry.getKey(), ImmutableSet.copyOf(aliasMetaData.fieldNames()));
        }
    }
    return result.build();
}
Returns index names and their aliases . This only returns indices which actually have an alias .
8,565
/** Returns the generated Elasticsearch index template for the given index set. */
public Map<String, Object> getIndexTemplate(IndexSet indexSet) {
    // The template is derived from the set's index wildcard and configured analyzer.
    final String wildcard = indexSet.getIndexWildcard();
    return indexMappingFactory.createIndexMapping()
            .messageTemplate(wildcard, indexSet.getConfig().indexAnalyzer());
}
Returns the generated Elasticsearch index template for the given index set .
8,566
// Computes the min/max message timestamps and the set of stream IDs present in the
// given index, using a single filtered aggregation (filter: timestamp exists; subs:
// min, max, terms-on-streams). Throws IndexNotFoundException when the aggregation is
// missing (index doesn't exist) and returns IndexRangeStats.EMPTY when no document has
// a timestamp. The request body is pretty-printed only when debug logging is enabled.
public IndexRangeStats indexRangeStatsOfIndex ( String index ) { final FilterAggregationBuilder builder = AggregationBuilders . filter ( "agg" , QueryBuilders . existsQuery ( Message . FIELD_TIMESTAMP ) ) . subAggregation ( AggregationBuilders . min ( "ts_min" ) . field ( Message . FIELD_TIMESTAMP ) ) . subAggregation ( AggregationBuilders . max ( "ts_max" ) . field ( Message . FIELD_TIMESTAMP ) ) . subAggregation ( AggregationBuilders . terms ( "streams" ) . field ( Message . FIELD_STREAMS ) ) ; final String query = searchSource ( ) . aggregation ( builder ) . size ( 0 ) . toString ( ) ; final Search request = new Search . Builder ( query ) . addIndex ( index ) . setSearchType ( SearchType . DFS_QUERY_THEN_FETCH ) . ignoreUnavailable ( true ) . build ( ) ; if ( LOG . isDebugEnabled ( ) ) { String data = "{}" ; try { data = request . getData ( objectMapper . copy ( ) . enable ( SerializationFeature . INDENT_OUTPUT ) ) ; } catch ( IOException e ) { LOG . debug ( "Couldn't pretty print request payload" , e ) ; } LOG . debug ( "Index range query: _search/{}: {}" , index , data ) ; } final SearchResult result = JestUtils . execute ( jestClient , request , ( ) -> "Couldn't build index range of index " + index ) ; final FilterAggregation f = result . getAggregations ( ) . getFilterAggregation ( "agg" ) ; if ( f == null ) { throw new IndexNotFoundException ( "Couldn't build index range of index " + index + " because it doesn't exist." ) ; } else if ( f . getCount ( ) == 0L ) { LOG . debug ( "No documents with attribute \"timestamp\" found in index <{}>" , index ) ; return IndexRangeStats . EMPTY ; } final MinAggregation minAgg = f . getMinAggregation ( "ts_min" ) ; final DateTime min = new DateTime ( minAgg . getMin ( ) . longValue ( ) , DateTimeZone . UTC ) ; final MaxAggregation maxAgg = f . getMaxAggregation ( "ts_max" ) ; final DateTime max = new DateTime ( maxAgg . getMax ( ) . longValue ( ) , DateTimeZone . UTC ) ; final TermsAggregation streams = f . 
getTermsAggregation ( "streams" ) ; final List < String > streamIds = streams . getBuckets ( ) . stream ( ) . map ( TermsAggregation . Entry :: getKeyAsString ) . collect ( toList ( ) ) ; return IndexRangeStats . create ( min , max , streamIds ) ; }
Calculate min and max message timestamps in the given index .
8,567
/**
 * Marks the node as alive and refreshes settings that may have changed since
 * the last server boot (master flag, transport address, last-seen timestamp).
 *
 * @throws RuntimeException when persisting the node fails validation
 */
public void markAsAlive(Node node, boolean isMaster, String restTransportAddress) {
    // Refresh liveness metadata on every ping.
    node.getFields().put("transport_address", restTransportAddress);
    node.getFields().put("is_master", isMaster);
    node.getFields().put("last_seen", Tools.getUTCTimestamp());
    try {
        save(node);
    } catch (ValidationException e) {
        // A validation failure here is unexpected; surface it as unchecked.
        throw new RuntimeException("Validation failed.", e);
    }
}
Mark this node as alive and probably update some settings that may have changed since last server boot .
8,568
/**
 * Reads eight big-endian bytes from the buffer, starting at the given offset,
 * and returns them as a long.
 */
public static long buff2long(byte[] bs, int offset) {
    // (b & 0xFF) yields the unsigned byte value — equivalent to the original's
    // "b >= 0 ? b : 256 + b" per-byte conversion.
    long result = 0L;
    for (int i = 0; i < 8; i++) {
        result = (result << 8) | (bs[offset + i] & 0xFFL);
    }
    return result;
}
Converts eight big - endian bytes from the buffer , starting at the given offset , into a long .
8,569
/**
 * Reads four big-endian bytes from the buffer, starting at the given offset,
 * and returns them as an int.
 */
public static int buff2int(byte[] bs, int offset) {
    // Masking with 0xFF converts each byte to its unsigned value.
    int result = 0;
    for (int i = 0; i < 4; i++) {
        result = (result << 8) | (bs[offset + i] & 0xFF);
    }
    return result;
}
Converts four big - endian bytes from the buffer , starting at the given offset , into an int .
8,570
// Parses a HOCON period string like "10d", "3 weeks" or "2mo" into a java.time.Period.
// A missing unit defaults to days. Unit handling: anything longer than two characters is
// pluralized ("day" -> "days") before matching; "m"/"mo"/"months" are months (minutes are
// not a Period unit). Bad numbers and unknown units raise ConfigException.BadValue with
// the origin/path supplied for error reporting.
public static Period parsePeriod ( String input , ConfigOrigin originForException , String pathForException ) { String s = ConfigImplUtil . unicodeTrim ( input ) ; String originalUnitString = getUnits ( s ) ; String unitString = originalUnitString ; String numberString = ConfigImplUtil . unicodeTrim ( s . substring ( 0 , s . length ( ) - unitString . length ( ) ) ) ; ChronoUnit units ; if ( numberString . length ( ) == 0 ) throw new ConfigException . BadValue ( originForException , pathForException , "No number in period value '" + input + "'" ) ; if ( unitString . length ( ) > 2 && ! unitString . endsWith ( "s" ) ) unitString = unitString + "s" ; if ( unitString . equals ( "" ) || unitString . equals ( "d" ) || unitString . equals ( "days" ) ) { units = ChronoUnit . DAYS ; } else if ( unitString . equals ( "w" ) || unitString . equals ( "weeks" ) ) { units = ChronoUnit . WEEKS ; } else if ( unitString . equals ( "m" ) || unitString . equals ( "mo" ) || unitString . equals ( "months" ) ) { units = ChronoUnit . MONTHS ; } else if ( unitString . equals ( "y" ) || unitString . equals ( "years" ) ) { units = ChronoUnit . YEARS ; } else { throw new ConfigException . BadValue ( originForException , pathForException , "Could not parse time unit '" + originalUnitString + "' (try d, w, mo, y)" ) ; } try { return periodOf ( Integer . parseInt ( numberString ) , units ) ; } catch ( NumberFormatException e ) { throw new ConfigException . BadValue ( originForException , pathForException , "Could not parse duration number '" + numberString + "'" ) ; } }
Parses a period string . If no units are specified in the string it is assumed to be in days . The returned period is in days . The purpose of this function is to implement the period - related methods in the ConfigObject interface .
8,571
// Records a "missing setting" validation problem for a reference value described only by
// its type (used by the JavaBean-based validation path); delegates to the overload that
// takes a human-readable description.
static void addMissing ( List < ConfigException . ValidationProblem > accumulator , ConfigValueType refType , Path path , ConfigOrigin origin ) { addMissing ( accumulator , getDesc ( refType ) , path , origin ) ; }
JavaBean stuff uses this
8,572
/**
 * Validates that every key present in the reference object also exists in the
 * value object with a compatible type, accumulating problems as it recurses.
 */
private static void checkValidObject(Path path, AbstractConfigObject reference, AbstractConfigObject value,
        List<ConfigException.ValidationProblem> accumulator) {
    for (Map.Entry<String, ConfigValue> entry : reference.entrySet()) {
        final String key = entry.getKey();
        // path is null when we're at the root of the config.
        final Path childPath = (path == null) ? Path.newKey(key) : Path.newKey(key).prepend(path);
        final AbstractConfigValue v = value.get(key);
        if (v == null) {
            addMissing(accumulator, entry.getValue(), childPath, value.origin());
        } else {
            checkValid(childPath, entry.getValue(), v, accumulator);
        }
    }
}
path is null if we re at the root
8,573
/**
 * Checks a value against an expected reference type (JavaBean-based validator),
 * recording a wrong-type problem when they are incompatible.
 */
static void checkValid(Path path, ConfigValueType referenceType, AbstractConfigValue value,
        List<ConfigException.ValidationProblem> accumulator) {
    if (!haveCompatibleTypes(referenceType, value)) {
        addWrongType(accumulator, referenceType, value, path);
        return;
    }
    // An object can sometimes stand in for a list; verify the conversion actually works.
    if (referenceType == ConfigValueType.LIST && value instanceof SimpleConfigObject) {
        final AbstractConfigValue converted = DefaultTransformer.transform(value, ConfigValueType.LIST);
        if (!(converted instanceof SimpleConfigList)) {
            addWrongType(accumulator, referenceType, value, path);
        }
    }
}
Used by the JavaBean - based validator
8,574
/**
 * Serializes a single origin field to the given output (the field's own buffer,
 * not the outer stream). Unknown fields are an I/O error.
 */
private static void writeOriginField(DataOutput out, SerializedField code, Object v) throws IOException {
    switch (code) {
        // String-valued fields share one encoding.
        case ORIGIN_DESCRIPTION:
        case ORIGIN_URL:
        case ORIGIN_RESOURCE:
            out.writeUTF((String) v);
            break;
        // Integer-valued fields.
        case ORIGIN_LINE_NUMBER:
        case ORIGIN_END_LINE_NUMBER:
            out.writeInt((Integer) v);
            break;
        case ORIGIN_TYPE:
            out.writeByte((Integer) v);
            break;
        case ORIGIN_COMMENTS:
            @SuppressWarnings("unchecked")
            List<String> comments = (List<String>) v;
            out.writeInt(comments.size());
            for (String comment : comments) {
                out.writeUTF(comment);
            }
            break;
        // Null markers carry no payload.
        case ORIGIN_NULL_URL:
        case ORIGIN_NULL_RESOURCE:
        case ORIGIN_NULL_COMMENTS:
            break;
        default:
            throw new IOException("Unhandled field from origin: " + code);
    }
}
outer stream instead of field . data
8,575
/**
 * Serializes an origin as a delta against a base origin, field by field,
 * terminated by an end marker. Package-private because ConfigException
 * serialization reuses it. A null origin writes only the end marker.
 */
static void writeOrigin(DataOutput out, SimpleConfigOrigin origin, SimpleConfigOrigin baseOrigin)
        throws IOException {
    final Map<SerializedField, Object> delta = (origin == null)
            ? Collections.<SerializedField, Object>emptyMap()
            : origin.toFieldsDelta(baseOrigin);
    for (Map.Entry<SerializedField, Object> e : delta.entrySet()) {
        // Each field is buffered separately, then framed onto the outer stream.
        final FieldOut field = new FieldOut(e.getKey());
        writeOriginField(field.data, field.code, e.getValue());
        writeField(out, field);
    }
    writeEndMarker(out);
}
not private because we use it to serialize ConfigException
8,576
// Deserializes an origin delta written by writeOrigin and reconstitutes it against the
// base origin. Package-private because ConfigException deserialization reuses it.
// Each case first consumes the field's length prefix (in.readInt()) before its payload;
// null markers store an empty-string placeholder; unknown fields are skipped wholesale;
// value-level fields appearing here indicate stream corruption and raise IOException.
static SimpleConfigOrigin readOrigin ( DataInput in , SimpleConfigOrigin baseOrigin ) throws IOException { Map < SerializedField , Object > m = new EnumMap < SerializedField , Object > ( SerializedField . class ) ; while ( true ) { Object v = null ; SerializedField field = readCode ( in ) ; switch ( field ) { case END_MARKER : return SimpleConfigOrigin . fromBase ( baseOrigin , m ) ; case ORIGIN_DESCRIPTION : in . readInt ( ) ; v = in . readUTF ( ) ; break ; case ORIGIN_LINE_NUMBER : in . readInt ( ) ; v = in . readInt ( ) ; break ; case ORIGIN_END_LINE_NUMBER : in . readInt ( ) ; v = in . readInt ( ) ; break ; case ORIGIN_TYPE : in . readInt ( ) ; v = in . readUnsignedByte ( ) ; break ; case ORIGIN_URL : in . readInt ( ) ; v = in . readUTF ( ) ; break ; case ORIGIN_RESOURCE : in . readInt ( ) ; v = in . readUTF ( ) ; break ; case ORIGIN_COMMENTS : in . readInt ( ) ; int size = in . readInt ( ) ; List < String > list = new ArrayList < String > ( size ) ; for ( int i = 0 ; i < size ; ++ i ) { list . add ( in . readUTF ( ) ) ; } v = list ; break ; case ORIGIN_NULL_URL : case ORIGIN_NULL_RESOURCE : case ORIGIN_NULL_COMMENTS : in . readInt ( ) ; v = "" ; break ; case ROOT_VALUE : case ROOT_WAS_CONFIG : case VALUE_DATA : case VALUE_ORIGIN : throw new IOException ( "Not expecting this field here: " + field ) ; case UNKNOWN : skipField ( in ) ; break ; } if ( v != null ) m . put ( field , v ) ; } }
not private because we use it to deserialize ConfigException
8,577
// Creates a token with no origin — used for singleton tokens like COMMA or OPEN_CURLY
// that are shared rather than tied to a source location. Note the constructor takes
// (type, origin, tokenText, debugString), hence the argument order swap.
static Token newWithoutOrigin ( TokenType tokenType , String debugString , String tokenText ) { return new Token ( tokenType , null , tokenText , debugString ) ; }
this is used for singleton tokens like COMMA or OPEN_CURLY
8,578
/**
 * Returns a context restricted to the given child path; pass null to unrestrict.
 * Reuses this context when the restriction is already in effect.
 */
ResolveContext restrict(Path restrictTo) {
    if (restrictTo == restrictToChild) {
        return this;
    }
    // Contexts are immutable; restriction produces a sibling sharing all other state.
    return new ResolveContext(memos, options, restrictTo, resolveStack, cycleMarkers);
}
restrictTo may be null to unrestrict
8,579
/**
 * Returns true when the string contains any character that would require
 * quoting when rendered as a path element (anything other than letters,
 * digits, '-' and '_'). Empty strings report false.
 */
static boolean hasFunkyChars(String s) {
    for (int i = 0, n = s.length(); i < n; i++) {
        final char c = s.charAt(i);
        if (!(Character.isLetterOrDigit(c) || c == '-' || c == '_')) {
            return true;
        }
    }
    return false;
}
noise from quotes in the rendered path for average cases
8,580
// Creates a Parseable from a Reader. The reader is wrapped so that parsing does not
// close it — the caller keeps ownership until parsing is complete.
public static Parseable newReader ( Reader reader , ConfigParseOptions options ) { return new ParseableReader ( doNotClose ( reader ) , options ) ; }
is complete .
8,581
/**
 * Converts a Class.getResource-style name to a ClassLoader-style name:
 * absolute names lose the leading slash, relative names are resolved against
 * the class's package directory.
 */
private static String convertResourceName(Class<?> klass, String resource) {
    if (resource.startsWith("/")) {
        // Absolute: just strip the leading slash.
        return resource.substring(1);
    }
    final String className = klass.getName();
    final int lastDot = className.lastIndexOf('.');
    if (lastDot < 0) {
        // Default package: nothing to prepend.
        return resource;
    }
    final String packagePath = className.substring(0, lastDot).replace('.', '/');
    return packagePath + "/" + resource;
}
use ClassLoader directly .
8,582
// Returns a copy of this object containing only the given path, or null when the
// path does not resolve to a value (missing key, or the path descends into a
// non-object value).
protected SimpleConfigObject withOnlyPathOrNull(Path path) {
    final String key = path.first();
    final Path next = path.remainder();
    AbstractConfigValue v = value.get(key);
    if (next != null) {
        // instanceof is already null-safe, so the original's explicit null check was redundant.
        if (v instanceof AbstractConfigObject) {
            v = ((AbstractConfigObject) v).withOnlyPathOrNull(next);
        } else {
            v = null;
        }
    }
    if (v == null) {
        return null;
    }
    return new SimpleConfigObject(origin(), Collections.singletonMap(key, v), v.resolveStatus(), ignoresFallbacks);
}
an object , or null when the path is absent .
8,583
/**
 * Returns true when the string contains constructs the fast path parser cannot
 * handle (empty string, leading/trailing/double dots, a dash right after a dot,
 * or any character other than ASCII letters, '_', '.', '-'), so the full parser
 * must be used instead.
 */
private static boolean looksUnsafeForFastParser(String s) {
    final int len = s.length();
    if (len == 0 || s.charAt(0) == '.' || s.charAt(len - 1) == '.') {
        return true;
    }
    boolean lastWasDot = true;
    for (int i = 0; i < len; i++) {
        final char c = s.charAt(i);
        if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_') {
            lastWasDot = false;
        } else if (c == '.') {
            if (lastWasDot) {
                return true; // ".." is unsafe
            }
            lastWasDot = true;
        } else if (c == '-') {
            if (lastWasDot) {
                return true; // dash immediately following a dot
            }
        } else {
            return true; // any other character needs the full parser
        }
    }
    // Only reachable as false: a trailing dot was already rejected above.
    return lastWasDot;
}
that might require the full parser to deal with .
8,584
// Strips options that must not leak into included files: syntax and origin description
// are re-detected per file, and includes are allowed to be missing. ConfigIncludeContext
// applies this for us on its options.
static ConfigParseOptions clearForInclude ( ConfigParseOptions options ) { return options . setSyntax ( null ) . setOriginDescription ( null ) . setAllowMissing ( true ) ; }
ConfigIncludeContext does this for us on its options
8,585
/**
 * The heuristic includer: resolves the name via includeWithoutFallback, then
 * lets the fallback includer (if any) contribute values the primary didn't provide.
 */
public ConfigObject include(final ConfigIncludeContext context, String name) {
    final ConfigObject included = includeWithoutFallback(context, name);
    if (fallback == null) {
        return included;
    }
    return included.withFallback(fallback.include(context, name));
}
this is the heuristic includer
8,586
/**
 * The heuristic includer in static form: if the name parses as a URL, include
 * from that URL; otherwise treat it as a basename resolved relative to the
 * including file.
 */
static ConfigObject includeWithoutFallback(final ConfigIncludeContext context, String name) {
    URL url = null;
    try {
        url = new URL(name);
    } catch (MalformedURLException e) {
        // Not a URL; fall through to basename handling.
    }
    if (url == null) {
        return fromBasename(new RelativeNameSource(context), name, context.parseOptions());
    }
    return includeURLWithoutFallback(context, url);
}
the heuristic includer in static form
8,587
// Custom Java serialization hook: after the default fields, the origin is written with
// the shared helper (ConfigOrigin itself is not Serializable, so we serialize it manually).
private void writeObject ( java . io . ObjectOutputStream out ) throws IOException { out . defaultWriteObject ( ) ; ConfigImplUtil . writeOrigin ( out , origin ) ; }
support it )
8,588
/**
 * For deserialization: uses reflection to set the final "origin" field on the
 * freshly created object, since final fields can't be assigned normally after
 * construction. All reflection failures are rethrown as IOException.
 */
private static <T> void setOriginField(T hasOriginField, Class<T> clazz, ConfigOrigin origin) throws IOException {
    final Field f;
    try {
        f = clazz.getDeclaredField("origin");
    } catch (NoSuchFieldException e) {
        throw new IOException(clazz.getSimpleName() + " has no origin field?", e);
    } catch (SecurityException e) {
        throw new IOException("unable to fill out origin field in " + clazz.getSimpleName(), e);
    }
    f.setAccessible(true);
    try {
        f.set(hasOriginField, origin);
    } catch (IllegalArgumentException | IllegalAccessException e) {
        // Multi-catch collapses the two identical handlers from the original.
        throw new IOException("unable to set origin field", e);
    }
}
For deserialization - uses reflection to set the final origin field on the object
8,589
// Looks up the key with no transformation or type conversion, returning null when the
// key is absent. The object must already be resolved along the nodes needed to reach the
// key; a NotResolved from the peek is rewrapped with the full original path so the error
// message points at the user-visible location.
protected final AbstractConfigValue peekAssumingResolved ( String key , Path originalPath ) { try { return attemptPeekWithPartialResolve ( key ) ; } catch ( ConfigException . NotResolved e ) { throw ConfigImpl . improveNotResolved ( originalPath , e ) ; } }
This looks up the key with no transformation or type conversion of any kind and returns null if the key is not present . The object must be resolved along the nodes needed to get the key or ConfigException . NotResolved will be thrown .
8,590
// Parses a Reader into a ConfigDocument instance using the given parse options.
public static ConfigDocument parseReader ( Reader reader , ConfigParseOptions options ) { return Parseable . newReader ( reader , options ) . parseConfigDocument ( ) ; }
Parses a Reader into a ConfigDocument instance .
8,591
// Parses a file into a ConfigDocument instance using the given parse options.
public static ConfigDocument parseFile ( File file , ConfigParseOptions options ) { return Parseable . newFile ( file , options ) . parseConfigDocument ( ) ; }
Parses a file into a ConfigDocument instance .
8,592
// Parses a string (which should be valid HOCON or JSON) into a ConfigDocument instance.
public static ConfigDocument parseString ( String s , ConfigParseOptions options ) { return Parseable . newString ( s , options ) . parseConfigDocument ( ) ; }
Parses a string which should be valid HOCON or JSON .
8,593
// Overridden in subclasses only to narrow the return type; dispatches the
// merge based on what kind of value the fallback turns out to be.
public AbstractConfigValue withFallback(ConfigMergeable mergeable) {
    // a value that ignores fallbacks is already complete; return it as-is
    if (ignoresFallbacks())
        return this;

    ConfigValue fallback = ((MergeableValue) mergeable).toFallbackValue();

    if (fallback instanceof Unmergeable)
        return mergedWithTheUnmergeable((Unmergeable) fallback);
    if (fallback instanceof AbstractConfigObject)
        return mergedWithObject((AbstractConfigObject) fallback);
    return mergedWithNonObject((AbstractConfigValue) fallback);
}
this is only overridden to change the return type
8,594
/**
 * Prints the value of the given setting path to stdout.
 * This is the functionality provided by simple-lib.
 *
 * @param path the configuration path to look up
 */
public void printSetting(String path) {
    String value = config.getString(path);
    System.out.println("The setting '" + path + "' is: " + value);
}
this is the amazing functionality provided by simple-lib
8,595
/**
 * Returns options where the given resolver is used as a fallback when a
 * reference cannot be otherwise resolved. It is only consulted after
 * resolution has failed against the config itself and against any
 * resolvers appended before this one.
 *
 * @param value the resolver to append; must not be null
 * @return options with the resolver appended
 */
public ConfigResolveOptions appendResolver(ConfigResolver value) {
    if (value == null)
        throw new ConfigException.BugOrBroken("null resolver passed to appendResolver");
    // appending the resolver we already have is a no-op
    if (value == this.resolver)
        return this;
    ConfigResolver combined = this.resolver.withFallback(value);
    return new ConfigResolveOptions(useSystemEnvironment, allowUnresolved, combined);
}
Returns options where the given resolver is used as a fallback if a reference cannot be otherwise resolved . This resolver will only be called after resolution has failed to substitute with a value from within the config itself and with any other resolvers that have been appended before this one . Multiple resolvers can be added by calling this method repeatedly .
8,596
// Narrows the result's type parameter to AbstractConfigObject. Java
// generics have no variance, so we launder the cast through Object;
// the runtime check above guarantees it is safe.
@SuppressWarnings("unchecked")
ResolveResult<AbstractConfigObject> asObjectResult() {
    if (value instanceof AbstractConfigObject) {
        Object erased = this;
        return (ResolveResult<AbstractConfigObject>) erased;
    }
    throw new ConfigException.BugOrBroken(
            "Expecting a resolve result to be an object, but it was " + value);
}
Is there a better option? We don't have variance.
8,597
/**
 * Guesses the configuration syntax from a filename's extension.
 *
 * @param filename the filename, may be null
 * @return the matching syntax, or null if unknown or filename is null
 */
public static ConfigSyntax syntaxFromExtension(String filename) {
    if (filename == null)
        return null;
    if (filename.endsWith(".json"))
        return ConfigSyntax.JSON;
    if (filename.endsWith(".conf"))
        return ConfigSyntax.CONF;
    if (filename.endsWith(".properties"))
        return ConfigSyntax.PROPERTIES;
    // unrecognized extension: let the caller pick a default
    return null;
}
Guess configuration syntax from given filename .
8,598
// Static method also used by ConfigDelayedMergeObject; builds the value
// that replaces a delayed merge once the first `skipping` entries of the
// stack are dropped. Returns null when nothing remains.
static AbstractConfigValue makeReplacement(ResolveContext context,
        List<AbstractConfigValue> stack, int skipping) {
    List<AbstractConfigValue> remaining = stack.subList(skipping, stack.size());

    if (remaining.isEmpty()) {
        if (ConfigImpl.traceSubstitutionsEnabled())
            ConfigImpl.trace(context.depth(),
                    "Nothing else in the merge stack, replacing with null");
        return null;
    }

    // fold the remaining stack left-to-right: earlier entries win
    AbstractConfigValue merged = remaining.get(0);
    for (AbstractConfigValue fallback : remaining.subList(1, remaining.size()))
        merged = merged.withFallback(fallback);
    return merged;
}
Static method also used by ConfigDelayedMergeObject; the result may be null.
8,599
// Static utility shared with ConfigDelayedMergeObject: a merge stack as a
// whole ignores fallbacks exactly when its last (lowest-priority) entry does.
static boolean stackIgnoresFallbacks(List<AbstractConfigValue> stack) {
    int lastIndex = stack.size() - 1;
    return stack.get(lastIndex).ignoresFallbacks();
}
static utility shared with ConfigDelayedMergeObject