idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
17,400 | public Object navigation ( Context mContext , Postcard postcard , int requestCode , NavigationCallback callback ) { return _ARouter . getInstance ( ) . navigation ( mContext , postcard , requestCode , callback ) ; } | Launch the navigation . |
17,401 | private void categories ( Set < ? extends Element > elements ) throws IllegalAccessException { if ( CollectionUtils . isNotEmpty ( elements ) ) { for ( Element element : elements ) { TypeElement enclosingElement = ( TypeElement ) element . getEnclosingElement ( ) ; if ( element . getModifiers ( ) . contains ( Modifier . PRIVATE ) ) { throw new IllegalAccessException ( "The inject fields CAN NOT BE 'private'!!! please check field [" + element . getSimpleName ( ) + "] in class [" + enclosingElement . getQualifiedName ( ) + "]" ) ; } if ( parentAndChild . containsKey ( enclosingElement ) ) { parentAndChild . get ( enclosingElement ) . add ( element ) ; } else { List < Element > childs = new ArrayList < > ( ) ; childs . add ( element ) ; parentAndChild . put ( enclosingElement , childs ) ; } } logger . info ( "categories finished." ) ; } } | Categories field find his papa . |
17,402 | private static void register ( String className ) { if ( ! TextUtils . isEmpty ( className ) ) { try { Class < ? > clazz = Class . forName ( className ) ; Object obj = clazz . getConstructor ( ) . newInstance ( ) ; if ( obj instanceof IRouteRoot ) { registerRouteRoot ( ( IRouteRoot ) obj ) ; } else if ( obj instanceof IProviderGroup ) { registerProvider ( ( IProviderGroup ) obj ) ; } else if ( obj instanceof IInterceptorGroup ) { registerInterceptor ( ( IInterceptorGroup ) obj ) ; } else { logger . info ( TAG , "register failed, class name: " + className + " should implements one of IRouteRoot/IProviderGroup/IInterceptorGroup." ) ; } } catch ( Exception e ) { logger . error ( TAG , "register class error:" + className ) ; } } } | register by class name Sacrificing a bit of efficiency to solve the problem that the main dex file size is too large |
17,403 | private static void registerRouteRoot ( IRouteRoot routeRoot ) { markRegisteredByPlugin ( ) ; if ( routeRoot != null ) { routeRoot . loadInto ( Warehouse . groupsIndex ) ; } } | method for arouter - auto - register plugin to register Routers |
17,404 | private static void registerInterceptor ( IInterceptorGroup interceptorGroup ) { markRegisteredByPlugin ( ) ; if ( interceptorGroup != null ) { interceptorGroup . loadInto ( Warehouse . interceptorsIndex ) ; } } | method for arouter - auto - register plugin to register Interceptors |
17,405 | private static void registerProvider ( IProviderGroup providerGroup ) { markRegisteredByPlugin ( ) ; if ( providerGroup != null ) { providerGroup . loadInto ( Warehouse . providersIndex ) ; } } | method for arouter - auto - register plugin to register Providers |
17,406 | public static Postcard buildProvider ( String serviceName ) { RouteMeta meta = Warehouse . providersIndex . get ( serviceName ) ; if ( null == meta ) { return null ; } else { return new Postcard ( meta . getPath ( ) , meta . getGroup ( ) ) ; } } | Build postcard by serviceName |
17,407 | private static void setValue ( Postcard postcard , Integer typeDef , String key , String value ) { if ( TextUtils . isEmpty ( key ) || TextUtils . isEmpty ( value ) ) { return ; } try { if ( null != typeDef ) { if ( typeDef == TypeKind . BOOLEAN . ordinal ( ) ) { postcard . withBoolean ( key , Boolean . parseBoolean ( value ) ) ; } else if ( typeDef == TypeKind . BYTE . ordinal ( ) ) { postcard . withByte ( key , Byte . valueOf ( value ) ) ; } else if ( typeDef == TypeKind . SHORT . ordinal ( ) ) { postcard . withShort ( key , Short . valueOf ( value ) ) ; } else if ( typeDef == TypeKind . INT . ordinal ( ) ) { postcard . withInt ( key , Integer . valueOf ( value ) ) ; } else if ( typeDef == TypeKind . LONG . ordinal ( ) ) { postcard . withLong ( key , Long . valueOf ( value ) ) ; } else if ( typeDef == TypeKind . FLOAT . ordinal ( ) ) { postcard . withFloat ( key , Float . valueOf ( value ) ) ; } else if ( typeDef == TypeKind . DOUBLE . ordinal ( ) ) { postcard . withDouble ( key , Double . valueOf ( value ) ) ; } else if ( typeDef == TypeKind . STRING . ordinal ( ) ) { postcard . withString ( key , value ) ; } else if ( typeDef == TypeKind . PARCELABLE . ordinal ( ) ) { } else if ( typeDef == TypeKind . OBJECT . ordinal ( ) ) { postcard . withString ( key , value ) ; } else { postcard . withString ( key , value ) ; } } else { postcard . withString ( key , value ) ; } } catch ( Throwable ex ) { logger . warning ( Consts . TAG , "LogisticsCenter setValue failed! " + ex . getMessage ( ) ) ; } } | Set value by known type |
17,408 | public int typeExchange ( Element element ) { TypeMirror typeMirror = element . asType ( ) ; if ( typeMirror . getKind ( ) . isPrimitive ( ) ) { return element . asType ( ) . getKind ( ) . ordinal ( ) ; } switch ( typeMirror . toString ( ) ) { case BYTE : return TypeKind . BYTE . ordinal ( ) ; case SHORT : return TypeKind . SHORT . ordinal ( ) ; case INTEGER : return TypeKind . INT . ordinal ( ) ; case LONG : return TypeKind . LONG . ordinal ( ) ; case FLOAT : return TypeKind . FLOAT . ordinal ( ) ; case DOUBEL : return TypeKind . DOUBLE . ordinal ( ) ; case BOOLEAN : return TypeKind . BOOLEAN . ordinal ( ) ; case CHAR : return TypeKind . CHAR . ordinal ( ) ; case STRING : return TypeKind . STRING . ordinal ( ) ; default : if ( types . isSubtype ( typeMirror , parcelableType ) ) { return TypeKind . PARCELABLE . ordinal ( ) ; } else if ( types . isSubtype ( typeMirror , serializableType ) ) { return TypeKind . SERIALIZABLE . ordinal ( ) ; } else { return TypeKind . OBJECT . ordinal ( ) ; } } } | Diagnostics out the true java type |
17,409 | public static RouteMeta build ( RouteType type , Class < ? > destination , String path , String group , int priority , int extra ) { return new RouteMeta ( type , null , destination , null , path , group , null , priority , extra ) ; } | For compiler versions less than or equal to 1 . 0 . 7 . |
17,410 | public static String formatStackTrace ( StackTraceElement [ ] stackTrace ) { StringBuilder sb = new StringBuilder ( ) ; for ( StackTraceElement element : stackTrace ) { sb . append ( " at " ) . append ( element . toString ( ) ) ; sb . append ( "\n" ) ; } return sb . toString ( ) ; } | Print thread stack |
17,411 | public static Map < String , String > splitQueryParameters ( Uri rawUri ) { String query = rawUri . getEncodedQuery ( ) ; if ( query == null ) { return Collections . emptyMap ( ) ; } Map < String , String > paramMap = new LinkedHashMap < > ( ) ; int start = 0 ; do { int next = query . indexOf ( '&' , start ) ; int end = ( next == - 1 ) ? query . length ( ) : next ; int separator = query . indexOf ( '=' , start ) ; if ( separator > end || separator == - 1 ) { separator = end ; } String name = query . substring ( start , separator ) ; if ( ! android . text . TextUtils . isEmpty ( name ) ) { String value = ( separator == end ? "" : query . substring ( separator + 1 , end ) ) ; paramMap . put ( Uri . decode ( name ) , Uri . decode ( value ) ) ; } start = end + 1 ; } while ( start < query . length ( ) ) ; return Collections . unmodifiableMap ( paramMap ) ; } | Split query parameters |
17,412 | public static long midnightUTC ( long epochMillis ) { Calendar day = Calendar . getInstance ( UTC ) ; day . setTimeInMillis ( epochMillis ) ; day . set ( Calendar . MILLISECOND , 0 ) ; day . set ( Calendar . SECOND , 0 ) ; day . set ( Calendar . MINUTE , 0 ) ; day . set ( Calendar . HOUR_OF_DAY , 0 ) ; return day . getTimeInMillis ( ) ; } | For bucketed data floored to the day . For example dependency links . |
17,413 | public ZipkinRule storeSpans ( List < Span > spans ) { try { storage . accept ( spans ) . execute ( ) ; } catch ( IOException e ) { throw Platform . get ( ) . uncheckedIOException ( e ) ; } return this ; } | Stores the given spans directly to setup preconditions for a test . |
17,414 | public List < Span > getTrace ( String traceId ) { try { return storage . spanStore ( ) . getTrace ( traceId ) . execute ( ) ; } catch ( IOException e ) { throw Platform . get ( ) . assertionError ( "I/O exception in in-memory storage" , e ) ; } } | Retrieves a trace by ID which zipkin server has received or null if not present . |
17,415 | @ Bean ( initMethod = "start" ) ScribeCollector scribe ( ZipkinScribeCollectorProperties scribe , CollectorSampler sampler , CollectorMetrics metrics , StorageComponent storage ) { return scribe . toBuilder ( ) . sampler ( sampler ) . metrics ( metrics ) . storage ( storage ) . build ( ) ; } | The init method will block until the scribe port is listening or crash on port conflict |
17,416 | public static Builder newBuilder ( Class < ? > loggingClass ) { if ( loggingClass == null ) throw new NullPointerException ( "loggingClass == null" ) ; return new Builder ( Logger . getLogger ( loggingClass . getName ( ) ) ) ; } | Needed to scope this to the correct logging category |
17,417 | RuntimeException errorStoringSpans ( List < Span > spans , Throwable e ) { metrics . incrementSpansDropped ( spans . size ( ) ) ; StringBuilder msg = appendSpanIds ( spans , new StringBuilder ( "Cannot store spans " ) ) ; return doError ( msg . toString ( ) , e ) ; } | When storing spans an exception can be raised before or after the fact . This adds context of span ids to give logs more relevance . |
17,418 | public boolean shouldInvoke ( C context ) { cleanupExpiredSuppressions ( ) ; if ( cache . containsKey ( context ) ) return false ; Suppression < C > suppression = new Suppression < > ( ticker , context , ticker . read ( ) + ttlNanos ) ; if ( cache . putIfAbsent ( context , suppression ) != null ) return false ; suppressions . offer ( suppression ) ; if ( suppressions . size ( ) > cardinality ) removeOneSuppression ( ) ; return true ; } | Returns true if a given context should be invoked . |
17,419 | public boolean isSampled ( String hexTraceId , boolean debug ) { if ( Boolean . TRUE . equals ( debug ) ) return true ; long traceId = HexCodec . lowerHexToUnsignedLong ( hexTraceId ) ; long t = traceId == Long . MIN_VALUE ? Long . MAX_VALUE : Math . abs ( traceId ) ; return t <= boundary ( ) ; } | Returns true if spans with this trace ID should be recorded to storage . |
17,420 | public static CharSequence jsonEscape ( CharSequence v ) { int length = v . length ( ) ; if ( length == 0 ) return v ; int afterReplacement = 0 ; StringBuilder builder = null ; for ( int i = 0 ; i < length ; i ++ ) { char c = v . charAt ( i ) ; String replacement ; if ( c < 0x80 ) { replacement = REPLACEMENT_CHARS [ c ] ; if ( replacement == null ) continue ; } else if ( c == '\u2028' ) { replacement = U2028 ; } else if ( c == '\u2029' ) { replacement = U2029 ; } else { continue ; } if ( afterReplacement < i ) { if ( builder == null ) builder = new StringBuilder ( length ) ; builder . append ( v , afterReplacement , i ) ; } if ( builder == null ) builder = new StringBuilder ( length ) ; builder . append ( replacement ) ; afterReplacement = i + 1 ; } if ( builder == null ) return v ; if ( afterReplacement < length ) { builder . append ( v , afterReplacement , length ) ; } return builder ; } | Exposed for ElasticSearch HttpBulkIndexer |
17,421 | public String annotationQueryString ( ) { StringBuilder result = new StringBuilder ( ) ; for ( Iterator < Map . Entry < String , String > > i = annotationQuery ( ) . entrySet ( ) . iterator ( ) ; i . hasNext ( ) ; ) { Map . Entry < String , String > next = i . next ( ) ; result . append ( next . getKey ( ) ) ; if ( ! next . getValue ( ) . isEmpty ( ) ) result . append ( '=' ) . append ( next . getValue ( ) ) ; if ( i . hasNext ( ) ) result . append ( " and " ) ; } return result . length ( ) > 0 ? result . toString ( ) : null ; } | Corresponds to query parameter annotationQuery . Ex . http . method = GET and error |
17,422 | public Session create ( CassandraStorage cassandra ) { Closer closer = Closer . create ( ) ; try { Cluster cluster = closer . register ( buildCluster ( cassandra ) ) ; cluster . register ( new QueryLogger . Builder ( ) . build ( ) ) ; Session session ; String keyspace = cassandra . keyspace ( ) ; if ( cassandra . ensureSchema ( ) ) { session = closer . register ( cluster . connect ( ) ) ; Schema . ensureExists ( keyspace , cassandra . searchEnabled ( ) , session ) ; session . execute ( "USE " + keyspace ) ; } else { LOG . debug ( "Skipping schema check on keyspace {} as ensureSchema was false" , keyspace ) ; session = cluster . connect ( keyspace ) ; } initializeUDTs ( session ) ; return session ; } catch ( RuntimeException e ) { try { closer . close ( ) ; } catch ( IOException ignored ) { } throw e ; } } | Creates a session and ensures schema if configured . Closes the cluster and session if any exception occurred . |
17,423 | static int findConnectPort ( List < InetSocketAddress > contactPoints ) { Set < Integer > ports = Sets . newLinkedHashSet ( ) ; for ( InetSocketAddress contactPoint : contactPoints ) { ports . add ( contactPoint . getPort ( ) ) ; } return ports . size ( ) == 1 ? ports . iterator ( ) . next ( ) : 9042 ; } | Returns the consistent port across all contact points or 9042 |
17,424 | public byte [ ] writeList ( List < Span > spans ) { int lengthOfSpans = spans . size ( ) ; if ( lengthOfSpans == 0 ) return EMPTY_ARRAY ; if ( lengthOfSpans == 1 ) return write ( spans . get ( 0 ) ) ; int sizeInBytes = 0 ; int [ ] sizeOfValues = new int [ lengthOfSpans ] ; for ( int i = 0 ; i < lengthOfSpans ; i ++ ) { int sizeOfValue = sizeOfValues [ i ] = SPAN . sizeOfValue ( spans . get ( i ) ) ; sizeInBytes += sizeOfLengthDelimitedField ( sizeOfValue ) ; } Buffer result = Buffer . allocate ( sizeInBytes ) ; for ( int i = 0 ; i < lengthOfSpans ; i ++ ) { writeSpan ( spans . get ( i ) , sizeOfValues [ i ] , result ) ; } return result . toByteArray ( ) ; } | Encodes per ListOfSpans data wireType where field one is repeated spans |
17,425 | void writeSpan ( Span span , int sizeOfSpan , Buffer result ) { result . writeByte ( SPAN . key ) ; result . writeVarint ( sizeOfSpan ) ; SPAN . writeValue ( result , span ) ; } | Writes a span using its precomputed size , which prevents resizing the buffer twice . |
17,426 | public static HttpFailure sendErrorResponse ( int code , String body ) { return new HttpFailure ( new MockResponse ( ) . setResponseCode ( code ) . setBody ( body ) ) ; } | Ex code 400 when the server cannot read the spans |
17,427 | HttpResponse validateAndStoreSpans ( SpanBytesDecoder decoder , byte [ ] serializedSpans ) { if ( serializedSpans . length == 0 ) return HttpResponse . of ( HttpStatus . ACCEPTED ) ; try { SpanBytesDecoderDetector . decoderForListMessage ( serializedSpans ) ; } catch ( IllegalArgumentException e ) { metrics . incrementMessagesDropped ( ) ; return HttpResponse . of ( BAD_REQUEST , MediaType . PLAIN_TEXT_UTF_8 , "Expected a " + decoder + " encoded list\n" ) ; } SpanBytesDecoder unexpectedDecoder = testForUnexpectedFormat ( decoder , serializedSpans ) ; if ( unexpectedDecoder != null ) { metrics . incrementMessagesDropped ( ) ; return HttpResponse . of ( BAD_REQUEST , MediaType . PLAIN_TEXT_UTF_8 , "Expected a " + decoder + " encoded list, but received: " + unexpectedDecoder + "\n" ) ; } CompletableCallback result = new CompletableCallback ( ) ; List < Span > spans = new ArrayList < > ( ) ; if ( ! decoder . decodeList ( serializedSpans , spans ) ) { throw new IllegalArgumentException ( "Empty " + decoder . name ( ) + " message" ) ; } collector . accept ( spans , result ) ; return HttpResponse . from ( result ) ; } | This synchronously decodes the message so that users can see data errors . |
17,428 | public static JsonReader enterPath ( JsonReader reader , String path1 , String path2 ) throws IOException { return enterPath ( reader , path1 ) != null ? enterPath ( reader , path2 ) : null ; } | This saves you from having to define nested types to read a single value |
17,429 | String spanIndexTemplate ( ) { String result = "{\n" + " \"TEMPLATE\": \"${__INDEX__}:" + SPAN + "-*\",\n" + " \"settings\": {\n" + " \"index.number_of_shards\": ${__NUMBER_OF_SHARDS__},\n" + " \"index.number_of_replicas\": ${__NUMBER_OF_REPLICAS__},\n" + " \"index.requests.cache.enable\": true,\n" + " \"index.mapper.dynamic\": false,\n" + " \"analysis\": {\n" + " \"analyzer\": {\n" + " \"traceId_analyzer\": {\n" + " \"type\": \"custom\",\n" + " \"tokenizer\": \"keyword\",\n" + " \"filter\": \"traceId_filter\"\n" + " }\n" + " },\n" + " \"filter\": {\n" + " \"traceId_filter\": {\n" + " \"type\": \"pattern_capture\",\n" + " \"patterns\": [\"([0-9a-f]{1,16})$\"],\n" + " \"preserve_original\": true\n" + " }\n" + " }\n" + " }\n" + " },\n" ; if ( searchEnabled ) { return result + ( " \"mappings\": {\n" + " \"_default_\": {\n" + " DISABLE_ALL" + " \"dynamic_templates\": [\n" + " {\n" + " \"strings\": {\n" + " \"mapping\": {\n" + " KEYWORD,\n" + " \"ignore_above\": 256\n" + " },\n" + " \"match_mapping_type\": \"string\",\n" + " \"match\": \"*\"\n" + " }\n" + " }\n" + " ]\n" + " },\n" + " \"" + SPAN + "\": {\n" + " \"_source\": {\"excludes\": [\"_q\"] },\n" + " \"properties\": {\n" + " \"traceId\": ${__TRACE_ID_MAPPING__},\n" + " \"name\": { KEYWORD },\n" + " \"localEndpoint\": {\n" + " \"type\": \"object\",\n" + " \"dynamic\": false,\n" + " \"properties\": { \"serviceName\": { KEYWORD } }\n" + " },\n" + " \"remoteEndpoint\": {\n" + " \"type\": \"object\",\n" + " \"dynamic\": false,\n" + " \"properties\": { \"serviceName\": { KEYWORD } }\n" + " },\n" + " \"timestamp_millis\": {\n" + " \"type\": \"date\",\n" + " \"format\": \"epoch_millis\"\n" + " },\n" + " \"duration\": { \"type\": \"long\" },\n" + " \"annotations\": { \"enabled\": false },\n" + " \"tags\": { \"enabled\": false },\n" + " \"_q\": { KEYWORD }\n" + " }\n" + " }\n" + " }\n" + "}" ) ; } return result + ( " \"mappings\": {\n" + " \"_default_\": { DISABLE_ALL },\n" + " \"" + SPAN + "\": {\n" + " 
\"properties\": {\n" + " \"traceId\": ${__TRACE_ID_MAPPING__},\n" + " \"annotations\": { \"enabled\": false },\n" + " \"tags\": { \"enabled\": false }\n" + " }\n" + " }\n" + " }\n" + "}" ) ; } | Templatized due to version differences . Only fields used in search are declared |
17,430 | @ Order ( 1 ) ArmeriaServerConfigurator notFoundMetricCollector ( ) { return sb -> sb . service ( PathMapping . ofGlob ( "/**" ) , ( ctx , req ) -> HttpResponse . of ( HttpStatus . NOT_FOUND ) ) ; } | Any request not matched by another service is handled by this , returning NOT_FOUND . |
17,431 | private static Tag uri ( RequestLog requestLog ) { int status = requestLog . statusCode ( ) ; if ( status > 299 && status < 400 ) return URI_REDIRECTION ; if ( status == 404 ) return URI_NOT_FOUND ; String uri = getPathInfo ( requestLog ) ; if ( uri . startsWith ( "/zipkin" ) ) { if ( uri . equals ( "/zipkin/" ) || uri . equals ( "/zipkin" ) || uri . startsWith ( "/zipkin/traces/" ) || uri . equals ( "/zipkin/dependency" ) || uri . equals ( "/zipkin/traceViewer" ) ) { return URI_CROSSROADS ; } if ( uri . startsWith ( "/zipkin/api" ) ) { uri = uri . replaceFirst ( "/zipkin" , "" ) ; } } if ( uri . startsWith ( "/api/v2/trace/" ) ) return URI_TRACE_V2 ; return Tag . of ( "uri" , uri ) ; } | Ensure metrics cardinality doesn t blow up on variables |
17,432 | static String getPathInfo ( RequestLog requestLog ) { String uri = requestLog . path ( ) ; if ( ! StringUtils . hasText ( uri ) ) return "/" ; return uri . replaceAll ( "//+" , "/" ) . replaceAll ( "/$" , "" ) ; } | from io . micrometer . spring . web . servlet . WebMvcTags |
17,433 | public static Dependencies fromThrift ( ByteBuffer bytes ) { long startTs = 0L ; long endTs = 0L ; List < DependencyLink > links = Collections . emptyList ( ) ; while ( true ) { ThriftField thriftField = ThriftField . read ( bytes ) ; if ( thriftField . type == TYPE_STOP ) break ; if ( thriftField . isEqualTo ( START_TS ) ) { startTs = bytes . getLong ( ) ; } else if ( thriftField . isEqualTo ( END_TS ) ) { endTs = bytes . getLong ( ) ; } else if ( thriftField . isEqualTo ( LINKS ) ) { int length = ThriftCodec . readListLength ( bytes ) ; if ( length == 0 ) continue ; links = new ArrayList < > ( length ) ; for ( int i = 0 ; i < length ; i ++ ) { links . add ( DependencyLinkAdapter . read ( bytes ) ) ; } } else { skip ( bytes , thriftField . type ) ; } } return Dependencies . create ( startTs , endTs , links ) ; } | Reads from bytes serialized in TBinaryProtocol |
17,434 | public ByteBuffer toThrift ( ) { Buffer buffer = Buffer . allocate ( sizeInBytes ( ) ) ; write ( buffer ) ; return ByteBuffer . wrap ( buffer . toByteArray ( ) ) ; } | Writes the current instance in TBinaryProtocol |
17,435 | public static Dependencies create ( long startTs , long endTs , List < DependencyLink > links ) { return new Dependencies ( startTs , endTs , links ) ; } | timestamps are in epoch milliseconds |
17,436 | public static void propagateIfFatal ( Throwable t ) { if ( t instanceof VirtualMachineError ) { throw ( VirtualMachineError ) t ; } else if ( t instanceof ThreadDeath ) { throw ( ThreadDeath ) t ; } else if ( t instanceof LinkageError ) { throw ( LinkageError ) t ; } } | Taken from RxJava throwIfFatal which was taken from scala |
17,437 | Set < String > removeServiceIfTraceId ( String lowTraceId ) { Set < String > result = new LinkedHashSet < > ( ) ; for ( Map . Entry < String , Collection < String > > entry : delegate . entrySet ( ) ) { Collection < String > lowTraceIds = entry . getValue ( ) ; if ( lowTraceIds . remove ( lowTraceId ) && lowTraceIds . isEmpty ( ) ) { result . add ( entry . getKey ( ) ) ; } } delegate . keySet ( ) . removeAll ( result ) ; return result ; } | Returns service names orphaned by removing the trace ID |
17,438 | public static int utf8SizeInBytes ( CharSequence string ) { int sizeInBytes = 0 ; for ( int i = 0 , len = string . length ( ) ; i < len ; i ++ ) { char ch = string . charAt ( i ) ; if ( ch < 0x80 ) { sizeInBytes ++ ; while ( i < len - 1 ) { ch = string . charAt ( i + 1 ) ; if ( ch >= 0x80 ) break ; i ++ ; sizeInBytes ++ ; } } else if ( ch < 0x800 ) { sizeInBytes += 2 ; } else if ( ch < 0xd800 || ch > 0xdfff ) { sizeInBytes += 3 ; } else { int low = i + 1 < len ? string . charAt ( i + 1 ) : 0 ; if ( ch > 0xdbff || low < 0xdc00 || low > 0xdfff ) { sizeInBytes ++ ; } else { sizeInBytes += 4 ; i ++ ; } } } return sizeInBytes ; } | This returns the bytes needed to transcode a UTF - 16 Java String to UTF - 8 bytes . |
17,439 | public static int asciiSizeInBytes ( long v ) { if ( v == 0 ) return 1 ; if ( v == Long . MIN_VALUE ) return 20 ; boolean negative = false ; if ( v < 0 ) { v = - v ; negative = true ; } int width = v < 100000000L ? v < 10000L ? v < 100L ? v < 10L ? 1 : 2 : v < 1000L ? 3 : 4 : v < 1000000L ? v < 100000L ? 5 : 6 : v < 10000000L ? 7 : 8 : v < 1000000000000L ? v < 10000000000L ? v < 1000000000L ? 9 : 10 : v < 100000000000L ? 11 : 12 : v < 1000000000000000L ? v < 10000000000000L ? 13 : v < 100000000000000L ? 14 : 15 : v < 100000000000000000L ? v < 10000000000000000L ? 16 : 17 : v < 1000000000000000000L ? 18 : 19 ; return negative ? width + 1 : width ; } | Binary search for character width which favors matching lower numbers . |
17,440 | public void writeUtf8 ( CharSequence string ) { for ( int i = 0 , len = string . length ( ) ; i < len ; i ++ ) { char ch = string . charAt ( i ) ; if ( ch < 0x80 ) { writeByte ( ch ) ; while ( i < len - 1 ) { ch = string . charAt ( i + 1 ) ; if ( ch >= 0x80 ) break ; i ++ ; writeByte ( ch ) ; } } else if ( ch < 0x800 ) { writeByte ( 0xc0 | ( ch >> 6 ) ) ; writeByte ( 0x80 | ( ch & 0x3f ) ) ; } else if ( ch < 0xd800 || ch > 0xdfff ) { writeByte ( 0xe0 | ( ch >> 12 ) ) ; writeByte ( 0x80 | ( ( ch >> 6 ) & 0x3f ) ) ; writeByte ( 0x80 | ( ch & 0x3f ) ) ; } else { if ( ! Character . isHighSurrogate ( ch ) ) { writeByte ( '?' ) ; continue ; } if ( i == len - 1 ) { writeByte ( '?' ) ; break ; } char low = string . charAt ( ++ i ) ; if ( ! Character . isLowSurrogate ( low ) ) { writeByte ( '?' ) ; writeByte ( Character . isHighSurrogate ( low ) ? '?' : low ) ; continue ; } int codePoint = Character . toCodePoint ( ch , low ) ; writeByte ( 0xf0 | ( codePoint >> 18 ) ) ; writeByte ( 0x80 | ( ( codePoint >> 12 ) & 0x3f ) ) ; writeByte ( 0x80 | ( ( codePoint >> 6 ) & 0x3f ) ) ; writeByte ( 0x80 | ( codePoint & 0x3f ) ) ; } } } | This transcodes a UTF - 16 Java String to UTF - 8 bytes . |
17,441 | public void writeAscii ( long v ) { if ( v == 0 ) { writeByte ( '0' ) ; return ; } if ( v == Long . MIN_VALUE ) { writeAscii ( "-9223372036854775808" ) ; return ; } if ( v < 0 ) { writeByte ( '-' ) ; v = - v ; } writeBackwards ( v ) ; } | Adapted from okio . Buffer . writeDecimalLong |
17,442 | int readVarint32 ( ) { byte b ; if ( ( b = readByte ( ) ) >= 0 ) { return b ; } int result = b & 0x7f ; if ( ( b = readByte ( ) ) >= 0 ) { return result | b << 7 ; } result |= ( b & 0x7f ) << 7 ; if ( ( b = readByte ( ) ) >= 0 ) { return result | b << 14 ; } result |= ( b & 0x7f ) << 14 ; if ( ( b = readByte ( ) ) >= 0 ) { return result | b << 21 ; } result |= ( b & 0x7f ) << 21 ; b = readByte ( ) ; if ( ( b & 0xf0 ) != 0 ) { throw new IllegalArgumentException ( "Greater than 32-bit varint at position " + ( pos ( ) - 1 ) ) ; } return result | b << 28 ; } | included in the main api as this is used commonly for example reading proto tags |
17,443 | public static Call . Mapper < List < List < Span > > , List < List < Span > > > filterTraces ( QueryRequest request ) { return new FilterTracesIfClashOnLowerTraceId ( request ) ; } | Filters the mutable input client - side when there s a clash on lower 64 - bits of a trace ID . |
17,444 | static boolean hasClashOnLowerTraceId ( List < List < Span > > input ) { int traceCount = input . size ( ) ; if ( traceCount <= 1 ) return false ; Set < String > traceIdLows = new LinkedHashSet < > ( ) ; boolean clash = false ; for ( int i = 0 ; i < traceCount ; i ++ ) { String traceId = lowerTraceId ( input . get ( i ) . get ( 0 ) . traceId ( ) ) ; if ( ! traceIdLows . add ( traceId ) ) { clash = true ; break ; } } return clash ; } | Returns true if any two traces in the input share the same lower 64 bits of trace ID . |
17,445 | public Builder clear ( ) { traceId = null ; parentId = null ; id = null ; kind = null ; name = null ; timestamp = 0L ; duration = 0L ; localEndpoint = null ; remoteEndpoint = null ; if ( annotations != null ) annotations . clear ( ) ; if ( tags != null ) tags . clear ( ) ; flags = 0 ; return this ; } | bit field for timestamp and duration |
17,446 | public Builder merge ( Span source ) { if ( traceId == null ) traceId = source . traceId ; if ( id == null ) id = source . id ; if ( parentId == null ) parentId = source . parentId ; if ( kind == null ) kind = source . kind ; if ( name == null ) name = source . name ; if ( timestamp == 0L ) timestamp = source . timestamp ; if ( duration == 0L ) duration = source . duration ; if ( localEndpoint == null ) { localEndpoint = source . localEndpoint ; } else if ( source . localEndpoint != null ) { localEndpoint = localEndpoint . toBuilder ( ) . merge ( source . localEndpoint ) . build ( ) ; } if ( remoteEndpoint == null ) { remoteEndpoint = source . remoteEndpoint ; } else if ( source . remoteEndpoint != null ) { remoteEndpoint = remoteEndpoint . toBuilder ( ) . merge ( source . remoteEndpoint ) . build ( ) ; } if ( ! source . annotations . isEmpty ( ) ) { if ( annotations == null ) { annotations = new ArrayList < > ( source . annotations . size ( ) ) ; } annotations . addAll ( source . annotations ) ; } if ( ! source . tags . isEmpty ( ) ) { if ( tags == null ) tags = new TreeMap < > ( ) ; tags . putAll ( source . tags ) ; } flags = flags | source . flags ; return this ; } | Used to merge multiple incomplete spans representing the same operation on the same host . Do not use this to merge spans that occur on different hosts . |
17,447 | public Builder traceId ( long high , long low ) { if ( high == 0L && low == 0L ) throw new IllegalArgumentException ( "empty trace ID" ) ; char [ ] result = new char [ high != 0L ? 32 : 16 ] ; int pos = 0 ; if ( high != 0L ) { writeHexLong ( result , pos , high ) ; pos += 16 ; } writeHexLong ( result , pos , low ) ; this . traceId = new String ( result ) ; return this ; } | Encodes 64 or 128 bits from the input into a hex trace ID . |
17,448 | public static Call . Mapper < List < List < Span > > , List < List < Span > > > create ( QueryRequest request ) { return new FilterTraces ( request ) ; } | Filters the mutable input based on the query |
17,449 | static SelectTraceIdsFromSpan . Factory initialiseSelectTraceIdsFromSpan ( Session session ) { try { return new SelectTraceIdsFromSpan . Factory ( session ) ; } catch ( DriverException ex ) { LOG . warn ( "failed to prepare annotation_query index statements: " + ex . getMessage ( ) ) ; return null ; } } | This makes it possible to safely drop the annotations_query SASI . |
17,450 | public Call < List < List < Span > > > getTraces ( QueryRequest request ) { if ( ! searchEnabled ) return Call . emptyList ( ) ; TimestampRange timestampRange = timestampRange ( request ) ; final int traceIndexFetchSize = request . limit ( ) * indexFetchMultiplier ; List < Call < Map < String , Long > > > callsToIntersect = new ArrayList < > ( ) ; List < String > annotationKeys = CassandraUtil . annotationKeys ( request ) ; for ( String annotationKey : annotationKeys ) { if ( spanTable == null ) { throw new IllegalArgumentException ( request . annotationQueryString ( ) + " query unsupported due to missing annotation_query index" ) ; } callsToIntersect . add ( spanTable . newCall ( request . serviceName ( ) , annotationKey , timestampRange , traceIndexFetchSize ) ) ; } if ( request . remoteServiceName ( ) != null || request . spanName ( ) != null || request . minDuration ( ) != null || callsToIntersect . isEmpty ( ) ) { callsToIntersect . add ( newBucketedTraceIdCall ( request , timestampRange , traceIndexFetchSize ) ) ; } if ( callsToIntersect . size ( ) == 1 ) { return callsToIntersect . get ( 0 ) . map ( traceIdsSortedByDescTimestamp ( ) ) . flatMap ( spans . newFlatMapper ( request ) ) ; } IntersectKeySets intersectedTraceIds = new IntersectKeySets ( callsToIntersect ) ; return intersectedTraceIds . flatMap ( spans . newFlatMapper ( request ) ) ; } | This fans out into a number of requests corresponding to query input . In simplest case there is less than a day of data queried and only one expression . This implies one call to fetch trace IDs and another to retrieve the span details . |
17,451 | Call < Map < String , Long > > newBucketedTraceIdCall ( QueryRequest request , TimestampRange timestampRange , int traceIndexFetchSize ) { String spanName = null != request . spanName ( ) ? request . spanName ( ) : "" ; Long minDuration = request . minDuration ( ) , maxDuration = request . maxDuration ( ) ; int startBucket = CassandraUtil . durationIndexBucket ( timestampRange . startMillis * 1000 ) ; int endBucket = CassandraUtil . durationIndexBucket ( timestampRange . endMillis * 1000 ) ; if ( startBucket > endBucket ) { throw new IllegalArgumentException ( "Start bucket (" + startBucket + ") > end bucket (" + endBucket + ")" ) ; } String serviceName = null != request . serviceName ( ) ? request . serviceName ( ) : "" ; List < SelectTraceIdsFromServiceSpan . Input > serviceSpans = new ArrayList < > ( ) ; List < SelectTraceIdsFromServiceRemoteService . Input > serviceRemoteServices = new ArrayList < > ( ) ; String remoteService = request . remoteServiceName ( ) ; for ( int bucket = endBucket ; bucket >= startBucket ; bucket -- ) { boolean addSpanQuery = true ; if ( remoteService != null ) { if ( traceIdsFromServiceRemoteService == null ) { throw new IllegalArgumentException ( "remoteService=" + remoteService + " unsupported due to missing table " + TABLE_SERVICE_REMOTE_SERVICES ) ; } serviceRemoteServices . add ( traceIdsFromServiceRemoteService . newInput ( serviceName , remoteService , bucket , timestampRange , traceIndexFetchSize ) ) ; addSpanQuery = ! spanName . isEmpty ( ) || minDuration != null ; } if ( ! addSpanQuery ) continue ; serviceSpans . add ( traceIdsFromServiceSpan . newInput ( serviceName , spanName , bucket , minDuration , maxDuration , timestampRange , traceIndexFetchSize ) ) ; } if ( "" . equals ( serviceName ) ) { Call < List < String > > serviceNames = getServiceNames ( ) ; if ( serviceRemoteServices . isEmpty ( ) ) { return serviceNames . flatMap ( traceIdsFromServiceSpan . 
newFlatMapper ( serviceSpans ) ) ; } else if ( serviceSpans . isEmpty ( ) ) { return serviceNames . flatMap ( traceIdsFromServiceRemoteService . newFlatMapper ( serviceRemoteServices ) ) ; } return serviceNames . flatMap ( new AggregateFlatMapper < > ( traceIdsFromServiceSpan . newFlatMapper ( serviceSpans ) , traceIdsFromServiceRemoteService . newFlatMapper ( serviceRemoteServices ) ) ) ; } if ( serviceRemoteServices . isEmpty ( ) ) { return traceIdsFromServiceSpan . newCall ( serviceSpans ) ; } else if ( serviceSpans . isEmpty ( ) ) { return traceIdsFromServiceRemoteService . newCall ( serviceRemoteServices ) ; } else { return new IntersectMaps < > ( asList ( traceIdsFromServiceSpan . newCall ( serviceSpans ) , traceIdsFromServiceRemoteService . newCall ( serviceRemoteServices ) ) ) ; } } | and speculatively query those first . |
17,452 | static < T > int listSizeInBytes ( Buffer . Writer < T > writer , List < T > values ) { int sizeInBytes = 5 ; for ( int i = 0 , length = values . size ( ) ; i < length ; i ++ ) { sizeInBytes += writer . sizeInBytes ( values . get ( i ) ) ; } return sizeInBytes ; } | Encoding overhead is thrift type plus 32 - bit length prefix |
17,453 | public static long lowerHexToUnsignedLong ( String lowerHex ) { int length = lowerHex . length ( ) ; if ( length < 1 || length > 32 ) throw isntLowerHexLong ( lowerHex ) ; int beginIndex = length > 16 ? length - 16 : 0 ; return lowerHexToUnsignedLong ( lowerHex , beginIndex ) ; } | Parses a 1 to 32 character lower - hex string with no prefix into an unsigned long tossing any bits higher than 64 . |
17,454 | public static long lowerHexToUnsignedLong ( String lowerHex , int index ) { long result = 0 ; for ( int endIndex = Math . min ( index + 16 , lowerHex . length ( ) ) ; index < endIndex ; index ++ ) { char c = lowerHex . charAt ( index ) ; result <<= 4 ; if ( c >= '0' && c <= '9' ) { result |= c - '0' ; } else if ( c >= 'a' && c <= 'f' ) { result |= c - 'a' + 10 ; } else { throw isntLowerHexLong ( lowerHex ) ; } } return result ; } | Parses a 16 character lower - hex string with no prefix into an unsigned long starting at the spe index . |
17,455 | static int compareEndpoint ( Endpoint left , Endpoint right ) { if ( left == null ) { return ( right == null ) ? 0 : - 1 ; } else if ( right == null ) { return 1 ; } int byService = nullSafeCompareTo ( left . serviceName ( ) , right . serviceName ( ) , false ) ; if ( byService != 0 ) return byService ; int byIpV4 = nullSafeCompareTo ( left . ipv4 ( ) , right . ipv4 ( ) , false ) ; if ( byIpV4 != 0 ) return byIpV4 ; return nullSafeCompareTo ( left . ipv6 ( ) , right . ipv6 ( ) , false ) ; } | Put spans with null endpoints first so that their data can be attached to the first span with the same ID and endpoint . It is possible that a server can get the same request on a different port . Not addressing this . |
17,456 | public static < T > byte [ ] write ( Buffer . Writer < T > writer , T value ) { Buffer b = Buffer . allocate ( writer . sizeInBytes ( value ) ) ; try { writer . write ( value , b ) ; } catch ( RuntimeException e ) { byte [ ] bytes = b . toByteArray ( ) ; int lengthWritten = bytes . length ; for ( int i = 0 ; i < bytes . length ; i ++ ) { if ( bytes [ i ] == 0 ) { lengthWritten = i ; break ; } } final byte [ ] bytesWritten ; if ( lengthWritten == bytes . length ) { bytesWritten = bytes ; } else { bytesWritten = new byte [ lengthWritten ] ; System . arraycopy ( bytes , 0 , bytesWritten , 0 , lengthWritten ) ; } String written = new String ( bytesWritten , UTF_8 ) ; String message = format ( "Bug found using %s to write %s as json. Wrote %s/%s bytes: %s" , writer . getClass ( ) . getSimpleName ( ) , value . getClass ( ) . getSimpleName ( ) , lengthWritten , bytes . length , written ) ; throw Platform . get ( ) . assertionError ( message , e ) ; } return b . toByteArray ( ) ; } | Inability to encode is a programming bug . |
17,457 | static byte [ ] writeTraces ( SpanBytesEncoder codec , List < List < zipkin2 . Span > > traces ) { int length = traces . size ( ) ; int sizeInBytes = 2 ; if ( length > 1 ) sizeInBytes += length - 1 ; for ( int i = 0 ; i < length ; i ++ ) { List < zipkin2 . Span > spans = traces . get ( i ) ; int jLength = spans . size ( ) ; sizeInBytes += 2 ; if ( jLength > 1 ) sizeInBytes += jLength - 1 ; for ( int j = 0 ; j < jLength ; j ++ ) { sizeInBytes += codec . sizeInBytes ( spans . get ( j ) ) ; } } byte [ ] out = new byte [ sizeInBytes ] ; int pos = 0 ; out [ pos ++ ] = '[' ; for ( int i = 0 ; i < length ; i ++ ) { pos += codec . encodeList ( traces . get ( i ) , out , pos ) ; if ( i + 1 < length ) out [ pos ++ ] = ',' ; } out [ pos ] = ']' ; return out ; } | This is inlined here as there isn t enough re - use to warrant it being in the zipkin2 library |
17,458 | @ Get ( "/metrics" ) public ObjectNode fetchMetricsFromMicrometer ( ) { ObjectNode metricsJson = factory . objectNode ( ) ; for ( Meter meter : meterRegistry . getMeters ( ) ) { String name = meter . getId ( ) . getName ( ) ; if ( ! name . startsWith ( "zipkin_collector" ) ) continue ; String transport = meter . getId ( ) . getTag ( "transport" ) ; if ( transport == null ) continue ; switch ( meter . getId ( ) . getType ( ) ) { case COUNTER : metricsJson . put ( "counter." + name + "." + transport , ( ( Counter ) meter ) . count ( ) ) ; continue ; case GAUGE : metricsJson . put ( "gauge." + name + "." + transport , ( ( Gauge ) meter ) . value ( ) ) ; } } return metricsJson ; } | Extracts Zipkin metrics to provide backward compatibility |
17,459 | @ Get ( "/health" ) public HttpResponse getHealth ( ) throws JsonProcessingException { Health health = healthEndpoint . health ( ) ; Map < String , Object > healthJson = new LinkedHashMap < > ( ) ; healthJson . put ( "status" , health . getStatus ( ) . getCode ( ) ) ; healthJson . put ( "zipkin" , health . getDetails ( ) . get ( "zipkin" ) ) ; byte [ ] body = mapper . writer ( ) . writeValueAsBytes ( healthJson ) ; HttpHeaders headers = HttpHeaders . of ( statusMapper . mapStatus ( health . getStatus ( ) ) ) . contentType ( MediaType . JSON ) . setInt ( HttpHeaderNames . CONTENT_LENGTH , body . length ) ; return HttpResponse . of ( headers , HttpData . of ( body ) ) ; } | in future in favour of Actuator endpoints |
17,460 | public static HostAndPort fromString ( String hostPort , int defaultPort ) { if ( hostPort == null ) throw new NullPointerException ( "hostPort == null" ) ; String host = hostPort ; int endHostIndex = hostPort . length ( ) ; if ( hostPort . startsWith ( "[" ) ) { endHostIndex = hostPort . lastIndexOf ( ']' ) + 1 ; host = hostPort . substring ( 1 , endHostIndex == 0 ? 1 : endHostIndex - 1 ) ; if ( ! Endpoint . newBuilder ( ) . parseIp ( host ) ) { throw new IllegalArgumentException ( hostPort + " contains an invalid IPv6 literal" ) ; } } else { int colonIndex = hostPort . indexOf ( ':' ) , nextColonIndex = hostPort . lastIndexOf ( ':' ) ; if ( colonIndex >= 0 ) { if ( colonIndex == nextColonIndex ) { host = hostPort . substring ( 0 , colonIndex ) ; endHostIndex = colonIndex ; } else if ( ! Endpoint . newBuilder ( ) . parseIp ( hostPort ) ) { throw new IllegalArgumentException ( hostPort + " is an invalid IPv6 literal" ) ; } } } if ( host . isEmpty ( ) ) throw new IllegalArgumentException ( hostPort + " has an empty host" ) ; if ( endHostIndex + 1 < hostPort . length ( ) && hostPort . charAt ( endHostIndex ) == ':' ) { return new HostAndPort ( host , validatePort ( hostPort . substring ( endHostIndex + 1 ) , hostPort ) ) ; } return new HostAndPort ( host , defaultPort ) ; } | Constructs a host - port pair from the given string defaulting to the indicated port if absent |
17,461 | static void apply ( HttpCall . Factory callFactory , String name , String indexTemplate ) throws IOException { HttpUrl templateUrl = callFactory . baseUrl . newBuilder ( "_template" ) . addPathSegment ( name ) . build ( ) ; Request getTemplate = new Request . Builder ( ) . url ( templateUrl ) . tag ( "get-template" ) . build ( ) ; try { callFactory . newCall ( getTemplate , BodyConverters . NULL ) . execute ( ) ; } catch ( IllegalStateException e ) { Request updateTemplate = new Request . Builder ( ) . url ( templateUrl ) . put ( RequestBody . create ( ElasticsearchStorage . APPLICATION_JSON , indexTemplate ) ) . tag ( "update-template" ) . build ( ) ; callFactory . newCall ( updateTemplate , BodyConverters . NULL ) . execute ( ) ; } } | This is a blocking call used inside a lazy . That s because no writes should occur until the template is available . |
17,462 | static Set < String > annotationKeys ( Span span ) { Set < String > annotationKeys = new LinkedHashSet < > ( ) ; String localServiceName = span . localServiceName ( ) ; if ( localServiceName == null ) return Collections . emptySet ( ) ; for ( Annotation a : span . annotations ( ) ) { if ( CORE_ANNOTATIONS . contains ( a . value ( ) ) ) continue ; annotationKeys . add ( localServiceName + ":" + a . value ( ) ) ; } for ( Map . Entry < String , String > e : span . tags ( ) . entrySet ( ) ) { if ( e . getValue ( ) . length ( ) <= LONGEST_VALUE_TO_INDEX ) { annotationKeys . add ( localServiceName + ":" + e . getKey ( ) ) ; annotationKeys . add ( localServiceName + ":" + e . getKey ( ) + ":" + e . getValue ( ) ) ; } } return annotationKeys ; } | Returns keys that concatenate the serviceName associated with an annotation or tag . |
17,463 | SpanNode addChild ( SpanNode child ) { if ( child == null ) throw new NullPointerException ( "child == null" ) ; if ( child == this ) throw new IllegalArgumentException ( "circular dependency on " + this ) ; if ( children . equals ( Collections . emptyList ( ) ) ) children = new ArrayList < > ( ) ; children . add ( child ) ; child . parent = this ; return this ; } | Adds the child IFF it isn t already a child . |
17,464 | public HttpCall < Void > newCall ( ) { HttpUrl url = pipeline != null ? http . baseUrl . newBuilder ( "_bulk" ) . addQueryParameter ( "pipeline" , pipeline ) . build ( ) : http . baseUrl . resolve ( "_bulk" ) ; Request request = new Request . Builder ( ) . url ( url ) . tag ( tag ) . post ( RequestBody . create ( APPLICATION_JSON , body . readByteString ( ) ) ) . build ( ) ; return http . newCall ( request , maybeFlush ) ; } | Creates a bulk request when there is more than one object to store |
17,465 | static < T > List < T > list ( T ... elements ) { return new ArrayList < > ( Arrays . asList ( elements ) ) ; } | Returns a mutable list |
17,466 | static < T > T maybeGet ( Record record , TableField < Record , T > field , T defaultValue ) { if ( record . fieldsRow ( ) . indexOf ( field ) < 0 ) { return defaultValue ; } else { T result = record . get ( field ) ; return result != null ? result : defaultValue ; } } | returns the default value if the column doesn t exist or the result was null |
17,467 | protected String __string ( int offset ) { offset += bb . getInt ( offset ) ; int length = bb . getInt ( offset ) ; return utf8 . decodeUtf8 ( bb , offset + SIZEOF_INT , length ) ; } | Create a Java String from UTF - 8 data stored inside the FlatBuffer . |
17,468 | protected int __vector_len ( int offset ) { offset += bb_pos ; offset += bb . getInt ( offset ) ; return bb . getInt ( offset ) ; } | Get the length of a vector . |
17,469 | protected ByteBuffer __vector_as_bytebuffer ( int vector_offset , int elem_size ) { int o = __offset ( vector_offset ) ; if ( o == 0 ) return null ; ByteBuffer bb = this . bb . duplicate ( ) . order ( ByteOrder . LITTLE_ENDIAN ) ; int vectorstart = __vector ( o ) ; bb . position ( vectorstart ) ; bb . limit ( vectorstart + __vector_len ( o ) * elem_size ) ; return bb ; } | Get a whole vector as a ByteBuffer . |
17,470 | protected ByteBuffer __vector_in_bytebuffer ( ByteBuffer bb , int vector_offset , int elem_size ) { int o = this . __offset ( vector_offset ) ; if ( o == 0 ) return null ; int vectorstart = __vector ( o ) ; bb . rewind ( ) ; bb . limit ( vectorstart + __vector_len ( o ) * elem_size ) ; bb . position ( vectorstart ) ; return bb ; } | Initialize vector as a ByteBuffer . |
17,471 | protected Table __union ( Table t , int offset ) { offset += bb_pos ; t . bb_pos = offset + bb . getInt ( offset ) ; t . bb = bb ; t . vtable_start = t . bb_pos - bb . getInt ( t . bb_pos ) ; t . vtable_size = bb . getShort ( t . vtable_start ) ; return t ; } | Initialize any Table - derived type to point to the union at the given offset . |
17,472 | protected void sortTables ( int [ ] offsets , final ByteBuffer bb ) { Integer [ ] off = new Integer [ offsets . length ] ; for ( int i = 0 ; i < offsets . length ; i ++ ) off [ i ] = offsets [ i ] ; java . util . Arrays . sort ( off , new java . util . Comparator < Integer > ( ) { public int compare ( Integer o1 , Integer o2 ) { return keysCompare ( o1 , o2 , bb ) ; } } ) ; for ( int i = 0 ; i < offsets . length ; i ++ ) offsets [ i ] = off [ i ] ; } | Sort tables by the key . |
17,473 | protected static int compareStrings ( int offset_1 , int offset_2 , ByteBuffer bb ) { offset_1 += bb . getInt ( offset_1 ) ; offset_2 += bb . getInt ( offset_2 ) ; int len_1 = bb . getInt ( offset_1 ) ; int len_2 = bb . getInt ( offset_2 ) ; int startPos_1 = offset_1 + SIZEOF_INT ; int startPos_2 = offset_2 + SIZEOF_INT ; int len = Math . min ( len_1 , len_2 ) ; for ( int i = 0 ; i < len ; i ++ ) { if ( bb . get ( i + startPos_1 ) != bb . get ( i + startPos_2 ) ) return bb . get ( i + startPos_1 ) - bb . get ( i + startPos_2 ) ; } return len_1 - len_2 ; } | Compare two strings in the buffer . |
17,474 | protected static int compareStrings ( int offset_1 , byte [ ] key , ByteBuffer bb ) { offset_1 += bb . getInt ( offset_1 ) ; int len_1 = bb . getInt ( offset_1 ) ; int len_2 = key . length ; int startPos_1 = offset_1 + Constants . SIZEOF_INT ; int len = Math . min ( len_1 , len_2 ) ; for ( int i = 0 ; i < len ; i ++ ) { if ( bb . get ( i + startPos_1 ) != key [ i ] ) return bb . get ( i + startPos_1 ) - key [ i ] ; } return len_1 - len_2 ; } | Compare string from the buffer with the String object . |
17,475 | public int encodedLength ( CharSequence in ) { final Cache cache = CACHE . get ( ) ; int estimated = ( int ) ( in . length ( ) * cache . encoder . maxBytesPerChar ( ) ) ; if ( cache . lastOutput == null || cache . lastOutput . capacity ( ) < estimated ) { cache . lastOutput = ByteBuffer . allocate ( Math . max ( 128 , estimated ) ) ; } cache . lastOutput . clear ( ) ; cache . lastInput = in ; CharBuffer wrap = ( in instanceof CharBuffer ) ? ( CharBuffer ) in : CharBuffer . wrap ( in ) ; CoderResult result = cache . encoder . encode ( wrap , cache . lastOutput , true ) ; if ( result . isError ( ) ) { try { result . throwException ( ) ; } catch ( CharacterCodingException e ) { throw new IllegalArgumentException ( "bad character encoding" , e ) ; } } return cache . lastOutput . remaining ( ) ; } | the length of the encoded string . |
17,476 | public static ByteBuffer removeSizePrefix ( ByteBuffer bb ) { ByteBuffer s = bb . duplicate ( ) ; s . position ( s . position ( ) + SIZE_PREFIX_LENGTH ) ; return s ; } | Create a duplicate of a size - prefixed ByteBuffer that has its position advanced just past the size prefix . |
17,477 | protected String readJsonFrom ( final InputStream json ) throws IOException { return isJsonFormat ( ) ? JsonValue . readHjson ( IOUtils . toString ( json , StandardCharsets . UTF_8 ) ) . toString ( ) : String . join ( "\n" , IOUtils . readLines ( json , StandardCharsets . UTF_8 ) ) ; } | Read json from stream . |
17,478 | protected void configureObjectMapper ( final ObjectMapper mapper ) { mapper . configure ( SerializationFeature . FAIL_ON_EMPTY_BEANS , false ) . configure ( DeserializationFeature . FAIL_ON_UNKNOWN_PROPERTIES , false ) ; mapper . setSerializationInclusion ( JsonInclude . Include . NON_EMPTY ) ; mapper . setVisibility ( PropertyAccessor . SETTER , JsonAutoDetect . Visibility . PROTECTED_AND_PUBLIC ) ; mapper . setVisibility ( PropertyAccessor . GETTER , JsonAutoDetect . Visibility . PROTECTED_AND_PUBLIC ) ; mapper . setVisibility ( PropertyAccessor . IS_GETTER , JsonAutoDetect . Visibility . PROTECTED_AND_PUBLIC ) ; if ( isDefaultTypingEnabled ( ) ) { mapper . enableDefaultTyping ( ObjectMapper . DefaultTyping . NON_FINAL , JsonTypeInfo . As . PROPERTY ) ; } mapper . findAndRegisterModules ( ) ; } | Configure mapper . |
17,479 | protected T readObjectFromString ( final String jsonString ) { try { LOGGER . trace ( "Attempting to consume [{}]" , jsonString ) ; return this . objectMapper . readValue ( jsonString , getTypeToSerialize ( ) ) ; } catch ( final Exception e ) { LOGGER . error ( "Cannot read/parse [{}] to deserialize into type [{}]. This may be caused " + "in the absence of a configuration/support module that knows how to interpret the fragment, " + "specially if the fragment describes a CAS registered service definition. " + "Internal parsing error is [{}]" , DigestUtils . abbreviate ( jsonString ) , getTypeToSerialize ( ) , e . getMessage ( ) ) ; LOGGER . debug ( e . getMessage ( ) , e ) ; } return null ; } | Read object from json . |
17,480 | protected URI determineUnauthorizedServiceRedirectUrl ( final RequestContext context ) { val redirectUrl = WebUtils . getUnauthorizedRedirectUrlFromFlowScope ( context ) ; val currentEvent = context . getCurrentEvent ( ) ; val eventAttributes = currentEvent . getAttributes ( ) ; LOGGER . debug ( "Finalizing the unauthorized redirect URL [{}] when processing event [{}] with attributes [{}]" , redirectUrl , currentEvent . getId ( ) , eventAttributes ) ; return redirectUrl ; } | Determine unauthorized service redirect url . |
17,481 | protected static List < String > getSupportedNameIdFormats ( final SamlRegisteredService service , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor ) { val supportedNameFormats = adaptor . getSupportedNameIdFormats ( ) ; LOGGER . debug ( "Metadata for [{}] declares the following NameIDs [{}]" , adaptor . getEntityId ( ) , supportedNameFormats ) ; if ( supportedNameFormats . isEmpty ( ) ) { supportedNameFormats . add ( NameIDType . TRANSIENT ) ; LOGGER . debug ( "No supported nameId formats could be determined from metadata. Added default [{}]" , NameIDType . TRANSIENT ) ; } if ( StringUtils . isNotBlank ( service . getRequiredNameIdFormat ( ) ) ) { val fmt = parseAndBuildRequiredNameIdFormat ( service ) ; supportedNameFormats . add ( 0 , fmt ) ; LOGGER . debug ( "Added required nameId format [{}] based on saml service configuration for [{}]" , fmt , service . getServiceId ( ) ) ; } return supportedNameFormats ; } | Gets supported name id formats . |
17,482 | protected NameID finalizeNameId ( final NameID nameid , final RequestAbstractType authnRequest , final Object assertion , final List < String > supportedNameFormats , final SamlRegisteredService service , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor ) { if ( StringUtils . isNotBlank ( service . getNameIdQualifier ( ) ) ) { nameid . setNameQualifier ( service . getNameIdQualifier ( ) ) ; } else { val issuer = SamlIdPUtils . getIssuerFromSamlObject ( authnRequest ) ; nameid . setNameQualifier ( issuer ) ; } if ( StringUtils . isNotBlank ( service . getServiceProviderNameIdQualifier ( ) ) ) { nameid . setSPNameQualifier ( service . getServiceProviderNameIdQualifier ( ) ) ; } else { nameid . setSPNameQualifier ( adaptor . getEntityId ( ) ) ; } return nameid ; } | Finalize name id name id . |
17,483 | protected void validateRequiredNameIdFormatIfAny ( final RequestAbstractType authnRequest , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor , final List < String > supportedNameFormats , final String requiredNameFormat ) { if ( StringUtils . isNotBlank ( requiredNameFormat ) && ! supportedNameFormats . contains ( requiredNameFormat ) ) { LOGGER . warn ( "Required NameID format [{}] in the AuthN request issued by [{}] is not supported based on the metadata for [{}]. " + "The requested NameID format may not be honored. You should consult the metadata for this service " + "and ensure the requested NameID format is present in the collection of supported " + "metadata formats in the metadata, which are the following: [{}]" , requiredNameFormat , SamlIdPUtils . getIssuerFromSamlObject ( authnRequest ) , adaptor . getEntityId ( ) , adaptor . getSupportedNameIdFormats ( ) ) ; } } | Validate required name id format if any . |
17,484 | protected static String getRequiredNameIdFormatIfAny ( final RequestAbstractType authnRequest ) { val nameIDPolicy = getNameIDPolicy ( authnRequest ) ; val requiredNameFormat = nameIDPolicy != null ? nameIDPolicy . getFormat ( ) : null ; LOGGER . debug ( "AuthN request indicates [{}] is the required NameID format" , requiredNameFormat ) ; return requiredNameFormat ; } | Gets required name id format if any . |
17,485 | protected NameID determineNameId ( final RequestAbstractType authnRequest , final Object assertion , final List < String > supportedNameFormats , final SamlRegisteredService service , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor ) { for ( val nameFormat : supportedNameFormats ) { LOGGER . debug ( "Evaluating NameID format [{}]" , nameFormat ) ; val nameid = encodeNameIdBasedOnNameFormat ( authnRequest , assertion , nameFormat , service , adaptor ) ; if ( nameid != null ) { LOGGER . debug ( "Determined NameID based on format [{}] to be [{}]" , nameFormat , nameid . getValue ( ) ) ; return nameid ; } } LOGGER . warn ( "No NameID could be determined based on the supported formats [{}]" , supportedNameFormats ) ; return null ; } | Determine name id name id . |
17,486 | protected NameID encodeNameIdBasedOnNameFormat ( final RequestAbstractType authnRequest , final Object assertion , final String nameFormat , final SamlRegisteredService service , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor ) { try { if ( authnRequest instanceof AttributeQuery ) { val query = AttributeQuery . class . cast ( authnRequest ) ; val nameID = query . getSubject ( ) . getNameID ( ) ; nameID . detach ( ) ; LOGGER . debug ( "Choosing NameID format [{}] with value [{}] for attribute query" , nameID . getFormat ( ) , nameID . getValue ( ) ) ; return nameID ; } val attribute = prepareNameIdAttribute ( assertion , nameFormat , adaptor ) ; val encoder = prepareNameIdEncoder ( authnRequest , nameFormat , attribute , service , adaptor ) ; LOGGER . debug ( "Encoding NameID based on [{}]" , nameFormat ) ; var nameId = encoder . encode ( attribute ) ; LOGGER . debug ( "Final NameID encoded with format [{}] has value [{}]" , nameId . getFormat ( ) , nameId . getValue ( ) ) ; return nameId ; } catch ( final Exception e ) { LOGGER . error ( e . getMessage ( ) , e ) ; } return null ; } | Encode name id based on name format name id . |
17,487 | protected IdPAttribute prepareNameIdAttribute ( final Object casAssertion , final String nameFormat , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor ) { val assertion = Assertion . class . cast ( casAssertion ) ; val attribute = new IdPAttribute ( AttributePrincipal . class . getName ( ) ) ; val principalName = assertion . getPrincipal ( ) . getName ( ) ; LOGGER . debug ( "Preparing NameID attribute for principal [{}]" , principalName ) ; val nameIdValue = getNameIdValueFromNameFormat ( nameFormat , adaptor , principalName ) ; val value = new StringAttributeValue ( nameIdValue ) ; LOGGER . debug ( "NameID attribute value is set to [{}]" , value ) ; attribute . setValues ( CollectionUtils . wrap ( value ) ) ; return attribute ; } | Prepare name id attribute id p attribute . |
17,488 | protected static SAML2StringNameIDEncoder prepareNameIdEncoder ( final RequestAbstractType authnRequest , final String nameFormat , final IdPAttribute attribute , final SamlRegisteredService service , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor ) { val encoder = new SAML2StringNameIDEncoder ( ) ; encoder . setNameFormat ( nameFormat ) ; if ( getNameIDPolicy ( authnRequest ) != null ) { val qualifier = getNameIDPolicy ( authnRequest ) . getSPNameQualifier ( ) ; LOGGER . debug ( "NameID qualifier is set to [{}]" , qualifier ) ; encoder . setNameQualifier ( qualifier ) ; } return encoder ; } | Prepare name id encoder saml 2 string name id encoder . |
17,489 | public Principal getAuthenticationPrincipal ( final String ticketGrantingTicketId ) { try { val ticketGrantingTicket = this . centralAuthenticationService . getTicket ( ticketGrantingTicketId , TicketGrantingTicket . class ) ; return ticketGrantingTicket . getAuthentication ( ) . getPrincipal ( ) ; } catch ( final InvalidTicketException e ) { LOGGER . warn ( "Ticket-granting ticket [{}] cannot be found in the ticket registry." , e . getMessage ( ) ) ; LOGGER . debug ( e . getMessage ( ) , e ) ; } LOGGER . warn ( "In the absence of valid ticket-granting ticket, the authentication principal cannot be determined. Returning [{}]" , NullPrincipal . class . getSimpleName ( ) ) ; return NullPrincipal . getInstance ( ) ; } | Gets authentication principal . |
17,490 | protected String generateToken ( final Service service , final Map < String , String > parameters ) { val ticketId = parameters . get ( CasProtocolConstants . PARAMETER_TICKET ) ; return this . tokenTicketBuilder . build ( ticketId , service ) ; } | Generate token string . |
17,491 | public static Optional < JsonWebKeySet > getJsonWebKeySet ( final OidcRegisteredService service ) { try { LOGGER . trace ( "Loading JSON web key from [{}]" , service . getJwks ( ) ) ; val resource = getJsonWebKeySetResource ( service ) ; val jsonWebKeySet = buildJsonWebKeySet ( resource ) ; if ( jsonWebKeySet == null || jsonWebKeySet . getJsonWebKeys ( ) . isEmpty ( ) ) { LOGGER . warn ( "No JSON web keys could be found for [{}]" , service ) ; return Optional . empty ( ) ; } val badKeysCount = jsonWebKeySet . getJsonWebKeys ( ) . stream ( ) . filter ( k -> StringUtils . isBlank ( k . getAlgorithm ( ) ) && StringUtils . isBlank ( k . getKeyId ( ) ) && StringUtils . isBlank ( k . getKeyType ( ) ) ) . count ( ) ; if ( badKeysCount == jsonWebKeySet . getJsonWebKeys ( ) . size ( ) ) { LOGGER . warn ( "No valid JSON web keys could be found for [{}]" , service ) ; return Optional . empty ( ) ; } val webKey = getJsonWebKeyFromJsonWebKeySet ( jsonWebKeySet ) ; if ( Objects . requireNonNull ( webKey ) . getPublicKey ( ) == null ) { LOGGER . warn ( "JSON web key retrieved [{}] has no associated public key" , webKey . getKeyId ( ) ) ; return Optional . empty ( ) ; } return Optional . of ( jsonWebKeySet ) ; } catch ( final Exception e ) { LOGGER . error ( e . getMessage ( ) , e ) ; } return Optional . empty ( ) ; } | Gets json web key set . |
17,492 | public static RsaJsonWebKey getJsonWebKeyFromJsonWebKeySet ( final JsonWebKeySet jwks ) { if ( jwks . getJsonWebKeys ( ) . isEmpty ( ) ) { LOGGER . warn ( "No JSON web keys are available in the keystore" ) ; return null ; } val key = ( RsaJsonWebKey ) jwks . getJsonWebKeys ( ) . get ( 0 ) ; if ( StringUtils . isBlank ( key . getAlgorithm ( ) ) ) { LOGGER . warn ( "Located JSON web key [{}] has no algorithm defined" , key ) ; } if ( StringUtils . isBlank ( key . getKeyId ( ) ) ) { LOGGER . warn ( "Located JSON web key [{}] has no key id defined" , key ) ; } if ( key . getPublicKey ( ) == null ) { LOGGER . warn ( "Located JSON web key [{}] has no public key" , key ) ; return null ; } return key ; } | Gets json web key from jwks . |
17,493 | public < T > T getProperty ( final String name , final Class < T > clazz ) { if ( containsProperty ( name ) ) { val item = this . properties . get ( name ) ; if ( item == null ) { return null ; } if ( ! clazz . isAssignableFrom ( item . getClass ( ) ) ) { throw new ClassCastException ( "Object [" + item + " is of type " + item . getClass ( ) + " when we were expecting " + clazz ) ; } return ( T ) item ; } return null ; } | Gets property . |
17,494 | protected T encodeFinalResponse ( final HttpServletRequest request , final HttpServletResponse response , final SamlRegisteredService service , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor , final T finalResponse , final String binding , final RequestAbstractType authnRequest , final Object assertion , final MessageContext messageContext ) { val scratch = messageContext . getSubcontext ( ScratchContext . class , true ) ; val encodeResponse = ( Boolean ) scratch . getMap ( ) . getOrDefault ( SamlProtocolConstants . PARAMETER_ENCODE_RESPONSE , Boolean . TRUE ) ; if ( encodeResponse ) { val relayState = request != null ? request . getParameter ( SamlProtocolConstants . PARAMETER_SAML_RELAY_STATE ) : StringUtils . EMPTY ; LOGGER . trace ( "RelayState is [{}]" , relayState ) ; return encode ( service , finalResponse , response , request , adaptor , relayState , binding , authnRequest , assertion ) ; } return finalResponse ; } | Encode final response . |
17,495 | protected Assertion buildSamlAssertion ( final RequestAbstractType authnRequest , final HttpServletRequest request , final HttpServletResponse response , final Object casAssertion , final SamlRegisteredService service , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor , final String binding , final MessageContext messageContext ) { return samlResponseBuilderConfigurationContext . getSamlProfileSamlAssertionBuilder ( ) . build ( authnRequest , request , response , casAssertion , service , adaptor , binding , messageContext ) ; } | Build saml assertion assertion . |
17,496 | protected Issuer buildSamlResponseIssuer ( final String entityId ) { val issuer = newIssuer ( entityId ) ; issuer . setFormat ( Issuer . ENTITY ) ; return issuer ; } | Build entity issuer issuer . |
17,497 | protected SAMLObject encryptAssertion ( final Assertion assertion , final HttpServletRequest request , final HttpServletResponse response , final SamlRegisteredService service , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor ) throws SamlException { if ( service . isEncryptAssertions ( ) ) { LOGGER . debug ( "SAML service [{}] requires assertions to be encrypted" , adaptor . getEntityId ( ) ) ; return samlResponseBuilderConfigurationContext . getSamlObjectEncrypter ( ) . encode ( assertion , service , adaptor ) ; } LOGGER . debug ( "SAML registered service [{}] does not require assertions to be encrypted" , adaptor . getEntityId ( ) ) ; return assertion ; } | Encrypt assertion . |
17,498 | public static boolean isValidRegex ( final String pattern ) { try { if ( pattern != null ) { Pattern . compile ( pattern ) ; return true ; } } catch ( final PatternSyntaxException exception ) { LOGGER . debug ( "Pattern [{}] is not a valid regex." , pattern ) ; } return false ; } | Check to see if the specified pattern is a valid regular expression . |
17,499 | public static Pattern concatenate ( final Collection < ? > requiredValues , final boolean caseInsensitive ) { val pattern = requiredValues . stream ( ) . map ( Object :: toString ) . collect ( Collectors . joining ( "|" , "(" , ")" ) ) ; return createPattern ( pattern , caseInsensitive ? Pattern . CASE_INSENSITIVE : 0 ) ; } | Concatenate all elements in the given collection to form a regex pattern . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.