idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
7,300 | final Deferred < Object > delete ( final byte [ ] key , final byte [ ] [ ] qualifiers ) { return client . delete ( new DeleteRequest ( table , key , FAMILY , qualifiers ) ) ; } | Deletes the given cells from the data table . |
7,301 | static String date ( final long timestamp ) { if ( ( timestamp & Const . SECOND_MASK ) != 0 ) { return new Date ( timestamp ) . toString ( ) ; } else { return new Date ( timestamp * 1000 ) . toString ( ) ; } } | Transforms a UNIX timestamp into a human readable date . |
7,302 | public static List < Scanner > getScanners ( final Query query ) { final List < Scanner > scanners = new ArrayList < Scanner > ( Const . SALT_WIDTH ( ) > 0 ? Const . SALT_BUCKETS ( ) : 1 ) ; if ( Const . SALT_WIDTH ( ) > 0 ) { for ( int i = 0 ; i < Const . SALT_BUCKETS ( ) ; i ++ ) { scanners . add ( ( ( TsdbQuery ) query ) . getScanner ( i ) ) ; } } else { scanners . add ( ( ( TsdbQuery ) query ) . getScanner ( ) ) ; } return scanners ; } | Returns a set of scanners one for each bucket if salted or one scanner if salting is disabled . |
7,303 | public static long baseTime ( final TSDB tsdb , final byte [ ] row ) { return Bytes . getUnsignedInt ( row , Const . SALT_WIDTH ( ) + TSDB . metrics_width ( ) ) ; } | Extracts the timestamp from a row key . |
7,304 | public static void setBaseTime ( final byte [ ] row , int base_time ) { Bytes . setInt ( row , base_time , Const . SALT_WIDTH ( ) + TSDB . metrics_width ( ) ) ; } | Sets the time in a raw data table row key |
7,305 | public static Cell parseSingleValue ( final KeyValue column ) { if ( column . qualifier ( ) . length == 2 || ( column . qualifier ( ) . length == 4 && inMilliseconds ( column . qualifier ( ) ) ) ) { final ArrayList < KeyValue > row = new ArrayList < KeyValue > ( 1 ) ; row . add ( column ) ; final ArrayList < Cell > cells = extractDataPoints ( row , 1 ) ; if ( cells . isEmpty ( ) ) { return null ; } return cells . get ( 0 ) ; } throw new IllegalDataException ( "Qualifier does not appear to be a single data point: " + column ) ; } | Extracts a Cell from a single data point fixing potential errors with the qualifier flags |
7,306 | public static byte getValueLengthFromQualifier ( final byte [ ] qualifier , final int offset ) { validateQualifier ( qualifier , offset ) ; short length ; if ( ( qualifier [ offset ] & Const . MS_BYTE_FLAG ) == Const . MS_BYTE_FLAG ) { length = ( short ) ( qualifier [ offset + 3 ] & Internal . LENGTH_MASK ) ; } else { length = ( short ) ( qualifier [ offset + 1 ] & Internal . LENGTH_MASK ) ; } return ( byte ) ( length + 1 ) ; } | Returns the length of the value in bytes parsed from the qualifier |
7,307 | public static short getFlagsFromQualifier ( final byte [ ] qualifier , final int offset ) { validateQualifier ( qualifier , offset ) ; if ( ( qualifier [ offset ] & Const . MS_BYTE_FLAG ) == Const . MS_BYTE_FLAG ) { return ( short ) ( qualifier [ offset + 3 ] & Internal . FLAGS_MASK ) ; } else { return ( short ) ( qualifier [ offset + 1 ] & Internal . FLAGS_MASK ) ; } } | Parses the flag bits from the qualifier |
7,308 | public static boolean isFloat ( final byte [ ] qualifier , final int offset ) { validateQualifier ( qualifier , offset ) ; if ( ( qualifier [ offset ] & Const . MS_BYTE_FLAG ) == Const . MS_BYTE_FLAG ) { return ( qualifier [ offset + 3 ] & Const . FLAG_FLOAT ) == Const . FLAG_FLOAT ; } else { return ( qualifier [ offset + 1 ] & Const . FLAG_FLOAT ) == Const . FLAG_FLOAT ; } } | Parses the qualifier to determine if the data is a floating point value . 4 bytes == Float 8 bytes == Double |
7,309 | public static byte [ ] extractQualifier ( final byte [ ] qualifier , final int offset ) { validateQualifier ( qualifier , offset ) ; if ( ( qualifier [ offset ] & Const . MS_BYTE_FLAG ) == Const . MS_BYTE_FLAG ) { return new byte [ ] { qualifier [ offset ] , qualifier [ offset + 1 ] , qualifier [ offset + 2 ] , qualifier [ offset + 3 ] } ; } else { return new byte [ ] { qualifier [ offset ] , qualifier [ offset + 1 ] } ; } } | Extracts the 2 or 4 byte qualifier from a compacted byte array |
7,310 | public static byte [ ] buildQualifier ( final long timestamp , final short flags ) { final long base_time ; if ( ( timestamp & Const . SECOND_MASK ) != 0 ) { base_time = ( ( timestamp / 1000 ) - ( ( timestamp / 1000 ) % Const . MAX_TIMESPAN ) ) ; final int qual = ( int ) ( ( ( timestamp - ( base_time * 1000 ) << ( Const . MS_FLAG_BITS ) ) | flags ) | Const . MS_FLAG ) ; return Bytes . fromInt ( qual ) ; } else { base_time = ( timestamp - ( timestamp % Const . MAX_TIMESPAN ) ) ; final short qual = ( short ) ( ( timestamp - base_time ) << Const . FLAG_BITS | flags ) ; return Bytes . fromShort ( qual ) ; } } | Returns a 2 or 4 byte qualifier based on the timestamp and the flags . If the timestamp is in seconds this returns a 2 byte qualifier . If it's in milliseconds this returns a 4 byte qualifier |
7,311 | private static void validateQualifier ( final byte [ ] qualifier , final int offset ) { if ( offset < 0 || offset >= qualifier . length - 1 ) { throw new IllegalDataException ( "Offset of [" + offset + "] is out of bounds for the qualifier length of [" + qualifier . length + "]" ) ; } } | Checks the qualifier to verify that it has data and that the offset is within bounds |
7,312 | public static long getMaxUnsignedValueOnBytes ( final int width ) { if ( width < 0 || width > 8 ) { throw new IllegalArgumentException ( "Width must be from 1 to 8 bytes: " + width ) ; } if ( width < 8 ) { return ( ( long ) 1 << width * Byte . SIZE ) - 1 ; } else { return Long . MAX_VALUE ; } } | Simple helper to calculate the max value for any width of long from 0 to 8 bytes . |
7,313 | public static byte [ ] vleEncodeLong ( final long value ) { if ( Byte . MIN_VALUE <= value && value <= Byte . MAX_VALUE ) { return new byte [ ] { ( byte ) value } ; } else if ( Short . MIN_VALUE <= value && value <= Short . MAX_VALUE ) { return Bytes . fromShort ( ( short ) value ) ; } else if ( Integer . MIN_VALUE <= value && value <= Integer . MAX_VALUE ) { return Bytes . fromInt ( ( int ) value ) ; } else { return Bytes . fromLong ( value ) ; } } | Encodes a long on 1 2 4 or 8 bytes |
7,314 | public static long getTimeStampFromNonDP ( final long base_time , byte [ ] quantifier ) { long ret = base_time ; if ( quantifier . length == 3 ) { ret += quantifier [ 1 ] << 8 | ( quantifier [ 2 ] & 0xFF ) ; ret *= 1000 ; } else if ( quantifier . length == 5 ) { ret *= 1000 ; ret += ( quantifier [ 1 ] & 0xFF ) << 24 | ( quantifier [ 2 ] & 0xFF ) << 16 | ( quantifier [ 3 ] & 0xFF ) << 8 | quantifier [ 4 ] & 0xFF ; } else { throw new IllegalArgumentException ( "Quantifier is not valid: " + Bytes . pretty ( quantifier ) ) ; } return ret ; } | Get timestamp from base time and quantifier for non datapoints . The returned time will always be in ms . |
7,315 | public static HistogramDataPoint decodeHistogramDataPoint ( final TSDB tsdb , final KeyValue kv ) { long timestamp = Internal . baseTime ( kv . key ( ) ) ; return decodeHistogramDataPoint ( tsdb , timestamp , kv . qualifier ( ) , kv . value ( ) ) ; } | Decode the histogram point from the given key value |
7,316 | public static HistogramDataPoint decodeHistogramDataPoint ( final TSDB tsdb , final long base_time , final byte [ ] qualifier , final byte [ ] value ) { final HistogramDataPointCodec decoder = tsdb . histogramManager ( ) . getCodec ( ( int ) value [ 0 ] ) ; long timestamp = getTimeStampFromNonDP ( base_time , qualifier ) ; final Histogram histogram = decoder . decode ( value , true ) ; return new SimpleHistogramDataPointAdapter ( histogram , timestamp ) ; } | Decode the histogram point from the given key and values |
7,317 | public static boolean isValidQuery ( final RollupQuery rollup_query ) { return ( rollup_query != null && rollup_query . rollup_interval != null && ! rollup_query . rollup_interval . isDefaultInterval ( ) ) ; } | Returns whether the query contains a valid rollup interval , meaning it is not the default rollup . The default rollup has the same resolution as raw data . So if true , the raw cell column qualifier is encoded with the aggregate function and the cell is not appended or compacted |
7,318 | public void setQuery ( final String metric , final Map < String , String > tags ) { this . metric = metric ; this . tags = tags ; metric_uid = tsdb . getUID ( UniqueIdType . METRIC , metric ) ; tag_uids = Tags . resolveAll ( tsdb , tags ) ; } | Sets the query to perform |
7,319 | public static Deferred < byte [ ] > tsuidFromMetric ( final TSDB tsdb , final String metric , final Map < String , String > tags ) { if ( metric == null || metric . isEmpty ( ) ) { throw new IllegalArgumentException ( "The metric cannot be empty" ) ; } if ( tags == null || tags . isEmpty ( ) ) { throw new IllegalArgumentException ( "Tags cannot be null or empty " + "when getting a TSUID" ) ; } final byte [ ] metric_uid = new byte [ TSDB . metrics_width ( ) ] ; class TagsCB implements Callback < byte [ ] , ArrayList < byte [ ] > > { public byte [ ] call ( final ArrayList < byte [ ] > tag_list ) throws Exception { final byte [ ] tsuid = new byte [ metric_uid . length + ( ( TSDB . tagk_width ( ) + TSDB . tagv_width ( ) ) * tag_list . size ( ) ) ] ; int idx = 0 ; System . arraycopy ( metric_uid , 0 , tsuid , 0 , metric_uid . length ) ; idx += metric_uid . length ; for ( final byte [ ] t : tag_list ) { System . arraycopy ( t , 0 , tsuid , idx , t . length ) ; idx += t . length ; } return tsuid ; } public String toString ( ) { return "Tag resolution callback" ; } } class MetricCB implements Callback < Deferred < byte [ ] > , byte [ ] > { public Deferred < byte [ ] > call ( final byte [ ] uid ) throws Exception { System . arraycopy ( uid , 0 , metric_uid , 0 , uid . length ) ; return Tags . resolveAllAsync ( tsdb , tags ) . addCallback ( new TagsCB ( ) ) ; } public String toString ( ) { return "Metric resolution callback" ; } } return tsdb . getUIDAsync ( UniqueIdType . METRIC , metric ) . addCallbackDeferring ( new MetricCB ( ) ) ; } | Converts the given metric and tags to a TSUID by resolving the strings to their UIDs . Note that the resulting TSUID may not exist if the combination was not written to TSDB |
7,320 | private Deferred < IncomingDataPoint > resolveNames ( final IncomingDataPoint dp ) { if ( metric != null ) { dp . setMetric ( metric ) ; dp . setTags ( ( HashMap < String , String > ) tags ) ; return Deferred . fromResult ( dp ) ; } class TagsCB implements Callback < IncomingDataPoint , HashMap < String , String > > { public IncomingDataPoint call ( final HashMap < String , String > tags ) throws Exception { dp . setTags ( tags ) ; return dp ; } public String toString ( ) { return "Tags resolution CB" ; } } class MetricCB implements Callback < Deferred < IncomingDataPoint > , String > { public Deferred < IncomingDataPoint > call ( final String name ) throws Exception { dp . setMetric ( name ) ; final List < byte [ ] > tags = UniqueId . getTagPairsFromTSUID ( tsuid ) ; return Tags . resolveIdsAsync ( tsdb , tags ) . addCallback ( new TagsCB ( ) ) ; } public String toString ( ) { return "Metric resolution CB" ; } } final byte [ ] metric_uid = Arrays . copyOfRange ( tsuid , 0 , TSDB . metrics_width ( ) ) ; return tsdb . getUidName ( UniqueIdType . METRIC , metric_uid ) . addCallbackDeferring ( new MetricCB ( ) ) ; } | Resolve the UIDs to names . If the query was for a metric and tags then we can just use those . |
7,321 | private Scanner getScanner ( ) { final Scanner scanner = tsdb . getClient ( ) . newScanner ( tsdb . metaTable ( ) ) ; scanner . setStartKey ( metric_uid ) ; final long stop = UniqueId . uidToLong ( metric_uid , TSDB . metrics_width ( ) ) + 1 ; scanner . setStopKey ( UniqueId . longToUID ( stop , TSDB . metrics_width ( ) ) ) ; scanner . setFamily ( TSMeta . FAMILY ( ) ) ; if ( ! tags . isEmpty ( ) ) { final short name_width = TSDB . tagk_width ( ) ; final short value_width = TSDB . tagv_width ( ) ; final short tagsize = ( short ) ( name_width + value_width ) ; final StringBuilder buf = new StringBuilder ( 15 + ( ( 13 + tagsize ) * ( tags . size ( ) ) ) ) ; buf . append ( "(?s)" + "^.{" ) . append ( TSDB . metrics_width ( ) ) . append ( "}" ) ; final Iterator < byte [ ] > tags = this . tag_uids . iterator ( ) ; byte [ ] tag = tags . hasNext ( ) ? tags . next ( ) : null ; do { buf . append ( "(?:.{" ) . append ( tagsize ) . append ( "})*\\Q" ) ; UniqueId . addIdToRegexp ( buf , tag ) ; tag = tags . hasNext ( ) ? tags . next ( ) : null ; } while ( tag != null ) ; buf . append ( "(?:.{" ) . append ( tagsize ) . append ( "})*$" ) ; scanner . setKeyRegexp ( buf . toString ( ) , CHARSET ) ; } return scanner ; } | Configures the scanner for a specific metric and optional tags |
7,322 | public static int getRollupBasetime ( final long timestamp , final RollupInterval interval ) { if ( timestamp < 0 ) { throw new IllegalArgumentException ( "Not supporting negative " + "timestamps at this time: " + timestamp ) ; } if ( interval . getUnits ( ) == 'h' ) { int modulo = Const . MAX_TIMESPAN ; if ( interval . getUnitMultiplier ( ) > 1 ) { modulo = interval . getUnitMultiplier ( ) * 60 * 60 ; } if ( ( timestamp & Const . SECOND_MASK ) != 0 ) { return ( int ) ( ( timestamp / 1000 ) - ( ( timestamp / 1000 ) % modulo ) ) ; } else { return ( int ) ( timestamp - ( timestamp % modulo ) ) ; } } else { final long time_milliseconds = ( timestamp & Const . SECOND_MASK ) != 0 ? timestamp : timestamp * 1000 ; final Calendar calendar = Calendar . getInstance ( Const . UTC_TZ ) ; calendar . setTimeInMillis ( time_milliseconds ) ; calendar . set ( Calendar . HOUR_OF_DAY , 0 ) ; calendar . set ( Calendar . MINUTE , 0 ) ; calendar . set ( Calendar . SECOND , 0 ) ; switch ( interval . getUnits ( ) ) { case 'd' : break ; case 'n' : calendar . set ( Calendar . DAY_OF_MONTH , 1 ) ; break ; case 'y' : calendar . set ( Calendar . DAY_OF_MONTH , 1 ) ; calendar . set ( Calendar . MONTH , 0 ) ; break ; default : throw new IllegalArgumentException ( "Unrecogznied span: " + interval ) ; } return ( int ) ( calendar . getTimeInMillis ( ) / 1000 ) ; } } | Calculates the base time for a rollup interval the time that can be stored in the row key . |
7,323 | public static long getTimestampFromRollupQualifier ( final byte [ ] qualifier , final long base_time , final RollupInterval interval , final int offset ) { return ( base_time * 1000 ) + getOffsetFromRollupQualifier ( qualifier , offset , interval ) ; } | Returns the absolute timestamp of a data point qualifier in milliseconds |
7,324 | private void loadFromFile ( ) { if ( file_location != null && ! file_location . isEmpty ( ) ) { final File file = new File ( file_location ) ; if ( ! file . exists ( ) ) { LOG . warn ( "Query override file " + file_location + " does not exist" ) ; return ; } try { final String raw_json = Files . toString ( file , Const . UTF8_CHARSET ) ; if ( raw_json != null && ! raw_json . isEmpty ( ) ) { final Set < QueryLimitOverrideItem > cached_items = JSON . parseToObject ( raw_json , TR_OVERRIDES ) ; for ( final QueryLimitOverrideItem override : cached_items ) { QueryLimitOverrideItem existing = overrides . get ( override . getRegex ( ) ) ; if ( existing == null || ! existing . equals ( override ) ) { overrides . put ( override . getRegex ( ) , override ) ; } } final Iterator < Entry < String , QueryLimitOverrideItem > > iterator = overrides . entrySet ( ) . iterator ( ) ; while ( iterator . hasNext ( ) ) { final Entry < String , QueryLimitOverrideItem > entry = iterator . next ( ) ; boolean matched = false ; for ( final QueryLimitOverrideItem override : cached_items ) { if ( override . getRegex ( ) . equals ( entry . getKey ( ) ) ) { matched = true ; break ; } } if ( ! matched ) { iterator . remove ( ) ; } } } LOG . info ( "Successfully loaded query overrides: " + this ) ; } catch ( Exception e ) { LOG . error ( "Failed to read cache file for query limit override: " + this , e ) ; } } } | Attempts to load the file from disk |
7,325 | public void skipWhitespaces ( ) { for ( int i = mark ; i < chars . length ; i ++ ) { if ( Character . isWhitespace ( chars [ i ] ) ) { mark ++ ; } else { break ; } } } | Increments the mark over white spaces |
7,326 | public static final void setGlobalTags ( final Config config ) { if ( config == null ) { throw new IllegalArgumentException ( "Configuration cannot be null." ) ; } if ( config . getBoolean ( "tsd.core.stats_with_port" ) ) { global_tags = new HashMap < String , String > ( 1 ) ; global_tags . put ( "port" , config . getString ( "tsd.network.port" ) ) ; } } | Parses the configuration to determine if any extra tags should be included with every stat emitted . |
7,327 | public void setStartTime ( final long timestamp ) { if ( timestamp < 0 || ( ( timestamp & Const . SECOND_MASK ) != 0 && timestamp > 9999999999999L ) ) { throw new IllegalArgumentException ( "Invalid timestamp: " + timestamp ) ; } else if ( end_time != UNSET && timestamp >= getEndTime ( ) ) { throw new IllegalArgumentException ( "new start time (" + timestamp + ") is greater than or equal to end time: " + getEndTime ( ) ) ; } start_time = timestamp ; } | Sets the start time for the query |
7,328 | public void downsample ( final long interval , final Aggregator downsampler ) { if ( downsampler == Aggregators . NONE ) { throw new IllegalArgumentException ( "cannot use the NONE " + "aggregator for downsampling" ) ; } downsample ( interval , downsampler , FillPolicy . NONE ) ; } | Sets an optional downsampling function with interpolation on this query . |
7,329 | private byte [ ] tableToBeScanned ( ) { final byte [ ] tableName ; if ( RollupQuery . isValidQuery ( rollup_query ) ) { if ( pre_aggregate ) { tableName = rollup_query . getRollupInterval ( ) . getGroupbyTable ( ) ; } else { tableName = rollup_query . getRollupInterval ( ) . getTemporalTable ( ) ; } } else if ( pre_aggregate ) { tableName = tsdb . getDefaultInterval ( ) . getGroupbyTable ( ) ; } else { tableName = tsdb . dataTable ( ) ; } return tableName ; } | Identify the table to be scanned based on the roll up and pre - aggregate query parameters |
7,330 | private long getScanStartTimeSeconds ( ) { long start = getStartTime ( ) ; if ( ( start & Const . SECOND_MASK ) != 0L ) { start /= 1000L ; } if ( rollup_query != null ) { long base_time = RollupUtils . getRollupBasetime ( start , rollup_query . getRollupInterval ( ) ) ; if ( rate ) { base_time = RollupUtils . getRollupBasetime ( base_time - 1 , rollup_query . getRollupInterval ( ) ) ; } return base_time ; } long interval_aligned_ts = start ; if ( downsampler != null && downsampler . getInterval ( ) > 0 ) { final long interval_offset = ( 1000L * start ) % downsampler . getInterval ( ) ; interval_aligned_ts -= interval_offset / 1000L ; } final long timespan_offset = interval_aligned_ts % Const . MAX_TIMESPAN ; final long timespan_aligned_ts = interval_aligned_ts - timespan_offset ; return timespan_aligned_ts > 0L ? timespan_aligned_ts : 0L ; } | Returns the UNIX timestamp from which we must start scanning . |
7,331 | private long getScanEndTimeSeconds ( ) { long end = getEndTime ( ) ; if ( ( end & Const . SECOND_MASK ) != 0L ) { end /= 1000L ; if ( end - ( end * 1000 ) < 1 ) { end ++ ; } } if ( rollup_query != null ) { return RollupUtils . getRollupBasetime ( end + ( rollup_query . getRollupInterval ( ) . getIntervalSeconds ( ) * rollup_query . getRollupInterval ( ) . getIntervals ( ) ) , rollup_query . getRollupInterval ( ) ) ; } if ( downsampler != null && downsampler . getInterval ( ) > 0 ) { final long interval_offset = ( 1000L * end ) % downsampler . getInterval ( ) ; final long interval_aligned_ts = end + ( downsampler . getInterval ( ) - interval_offset ) / 1000L ; final long timespan_offset = interval_aligned_ts % Const . MAX_TIMESPAN ; return ( 0L == timespan_offset ) ? interval_aligned_ts : interval_aligned_ts + ( Const . MAX_TIMESPAN - timespan_offset ) ; } else { final long timespan_offset = end % Const . MAX_TIMESPAN ; return end + ( Const . MAX_TIMESPAN - timespan_offset ) ; } } | Returns the UNIX timestamp at which we must stop scanning . |
7,332 | private void createAndSetFilter ( final Scanner scanner ) { QueryUtil . setDataTableScanFilter ( scanner , group_bys , row_key_literals , explicit_tags , enable_fuzzy_filter , ( end_time == UNSET ? - 1 : ( int ) getScanEndTimeSeconds ( ) ) ) ; } | Sets the server - side regexp filter on the scanner . In order to find the rows with the relevant tags we use a server - side filter that matches a regular expression on the row key . |
7,333 | public void transformDownSamplerToRollupQuery ( final Aggregator group_by , final String str_interval ) { if ( downsampler != null && downsampler . getInterval ( ) > 0 ) { if ( tsdb . getRollupConfig ( ) != null ) { try { best_match_rollups = tsdb . getRollupConfig ( ) . getRollupInterval ( downsampler . getInterval ( ) / 1000 , str_interval ) ; rollup_query = new RollupQuery ( best_match_rollups . remove ( 0 ) , downsampler . getFunction ( ) , downsampler . getInterval ( ) , group_by ) ; if ( group_by == Aggregators . COUNT ) { aggregator = Aggregators . SUM ; } } catch ( NoSuchRollupForIntervalException nre ) { LOG . error ( "There is no such rollup for the downsample interval " + str_interval + ". So fall back to the default tsdb down" + " sampling approach and it requires raw data scan." ) ; rollup_query = null ; return ; } if ( rollup_query . getRollupInterval ( ) . isDefaultInterval ( ) ) { rollup_query = null ; } } } } | Transform downsampler properties to rollup properties if the rollup is enabled at configuration level and down sampler is set . It falls back to raw data and down sampling if there is no RollupInterval is configured against this down sample interval |
7,334 | private void transformRollupQueryToDownSampler ( ) { if ( rollup_query != null ) { downsampler = new DownsamplingSpecification ( rollup_query . getRollupInterval ( ) . getIntervalSeconds ( ) * 1000 , rollup_query . getRollupAgg ( ) , ( downsampler != null ? downsampler . getFillPolicy ( ) : FillPolicy . ZERO ) ) ; rollup_query = null ; } } | Transforms the rollup query to a downsampler . It is mainly useful when it scans raw data on fallback . |
7,335 | public boolean copyChanges ( final Tree tree , final boolean overwrite ) { if ( tree == null ) { throw new IllegalArgumentException ( "Cannot copy a null tree" ) ; } if ( tree_id != tree . tree_id ) { throw new IllegalArgumentException ( "Tree IDs do not match" ) ; } if ( overwrite || tree . changed . get ( "name" ) ) { name = tree . name ; changed . put ( "name" , true ) ; } if ( overwrite || tree . changed . get ( "description" ) ) { description = tree . description ; changed . put ( "description" , true ) ; } if ( overwrite || tree . changed . get ( "notes" ) ) { notes = tree . notes ; changed . put ( "notes" , true ) ; } if ( overwrite || tree . changed . get ( "strict_match" ) ) { strict_match = tree . strict_match ; changed . put ( "strict_match" , true ) ; } if ( overwrite || tree . changed . get ( "enabled" ) ) { enabled = tree . enabled ; changed . put ( "enabled" , true ) ; } if ( overwrite || tree . changed . get ( "store_failures" ) ) { store_failures = tree . store_failures ; changed . put ( "store_failures" , true ) ; } for ( boolean has_changes : changed . values ( ) ) { if ( has_changes ) { return true ; } } return false ; } | Copies changes from the incoming tree into the local tree overriding if called to . Only parses user mutable fields excluding rules . |
7,336 | public void addRule ( final TreeRule rule ) { if ( rule == null ) { throw new IllegalArgumentException ( "Null rules are not accepted" ) ; } if ( rules == null ) { rules = new TreeMap < Integer , TreeMap < Integer , TreeRule > > ( ) ; } TreeMap < Integer , TreeRule > level = rules . get ( rule . getLevel ( ) ) ; if ( level == null ) { level = new TreeMap < Integer , TreeRule > ( ) ; level . put ( rule . getOrder ( ) , rule ) ; rules . put ( rule . getLevel ( ) , level ) ; } else { level . put ( rule . getOrder ( ) , rule ) ; } changed . put ( "rules" , true ) ; } | Adds the given rule to the tree replacing anything in the designated spot |
7,337 | public void addCollision ( final String tsuid , final String existing_tsuid ) { if ( tsuid == null || tsuid . isEmpty ( ) ) { throw new IllegalArgumentException ( "Empty or null collisions not allowed" ) ; } if ( collisions == null ) { collisions = new HashMap < String , String > ( ) ; } if ( ! collisions . containsKey ( tsuid ) ) { collisions . put ( tsuid , existing_tsuid ) ; changed . put ( "collisions" , true ) ; } } | Adds a TSUID to the collision local list must then be synced with storage |
7,338 | public void addNotMatched ( final String tsuid , final String message ) { if ( tsuid == null || tsuid . isEmpty ( ) ) { throw new IllegalArgumentException ( "Empty or null non matches not allowed" ) ; } if ( not_matched == null ) { not_matched = new HashMap < String , String > ( ) ; } if ( ! not_matched . containsKey ( tsuid ) ) { not_matched . put ( tsuid , message ) ; changed . put ( "not_matched" , true ) ; } } | Adds a TSUID to the not - matched local list when strict_matching is enabled . Must be synced with storage . |
7,339 | public Deferred < Boolean > storeTree ( final TSDB tsdb , final boolean overwrite ) { if ( tree_id < 1 || tree_id > 65535 ) { throw new IllegalArgumentException ( "Invalid Tree ID" ) ; } boolean has_changes = false ; for ( Map . Entry < String , Boolean > entry : changed . entrySet ( ) ) { if ( entry . getValue ( ) ) { has_changes = true ; break ; } } if ( ! has_changes ) { LOG . debug ( this + " does not have changes, skipping sync to storage" ) ; throw new IllegalStateException ( "No changes detected in the tree" ) ; } final class StoreTreeCB implements Callback < Deferred < Boolean > , Tree > { final private Tree local_tree ; public StoreTreeCB ( final Tree local_tree ) { this . local_tree = local_tree ; } public Deferred < Boolean > call ( final Tree fetched_tree ) throws Exception { Tree stored_tree = fetched_tree ; final byte [ ] original_tree = stored_tree == null ? new byte [ 0 ] : stored_tree . toStorageJson ( ) ; if ( stored_tree == null ) { stored_tree = local_tree ; } else { stored_tree . copyChanges ( local_tree , overwrite ) ; } initializeChangedMap ( ) ; final PutRequest put = new PutRequest ( tsdb . treeTable ( ) , Tree . idToBytes ( tree_id ) , TREE_FAMILY , TREE_QUALIFIER , stored_tree . toStorageJson ( ) ) ; return tsdb . getClient ( ) . compareAndSet ( put , original_tree ) ; } } return fetchTree ( tsdb , tree_id ) . addCallbackDeferring ( new StoreTreeCB ( this ) ) ; } | Attempts to store the tree definition via a CompareAndSet call . |
7,340 | public TreeRule getRule ( final int level , final int order ) { if ( rules == null || rules . isEmpty ( ) ) { return null ; } TreeMap < Integer , TreeRule > rule_level = rules . get ( level ) ; if ( rule_level == null || rule_level . isEmpty ( ) ) { return null ; } return rule_level . get ( order ) ; } | Retrieves a single rule from the rule set given a level and order |
7,341 | public static Deferred < Tree > fetchTree ( final TSDB tsdb , final int tree_id ) { if ( tree_id < 1 || tree_id > 65535 ) { throw new IllegalArgumentException ( "Invalid Tree ID" ) ; } final GetRequest get = new GetRequest ( tsdb . treeTable ( ) , idToBytes ( tree_id ) ) ; get . family ( TREE_FAMILY ) ; final class FetchTreeCB implements Callback < Deferred < Tree > , ArrayList < KeyValue > > { public Deferred < Tree > call ( ArrayList < KeyValue > row ) throws Exception { if ( row == null || row . isEmpty ( ) ) { return Deferred . fromResult ( null ) ; } final Tree tree = new Tree ( ) ; tree . setTreeId ( bytesToId ( row . get ( 0 ) . key ( ) ) ) ; for ( KeyValue column : row ) { if ( Bytes . memcmp ( TREE_QUALIFIER , column . qualifier ( ) ) == 0 ) { final Tree local_tree = JSON . parseToObject ( column . value ( ) , Tree . class ) ; tree . created = local_tree . created ; tree . description = local_tree . description ; tree . name = local_tree . name ; tree . notes = local_tree . notes ; tree . strict_match = local_tree . strict_match ; tree . enabled = local_tree . enabled ; tree . store_failures = local_tree . store_failures ; } else if ( Bytes . memcmp ( TreeRule . RULE_PREFIX ( ) , column . qualifier ( ) , 0 , TreeRule . RULE_PREFIX ( ) . length ) == 0 ) { final TreeRule rule = TreeRule . parseFromStorage ( column ) ; tree . addRule ( rule ) ; } } return Deferred . fromResult ( tree ) ; } } return tsdb . getClient ( ) . get ( get ) . addCallbackDeferring ( new FetchTreeCB ( ) ) ; } | Attempts to fetch the given tree from storage loading the rule set at the same time . |
7,342 | public static Deferred < List < Tree > > fetchAllTrees ( final TSDB tsdb ) { final Deferred < List < Tree > > result = new Deferred < List < Tree > > ( ) ; final class AllTreeScanner implements Callback < Object , ArrayList < ArrayList < KeyValue > > > { private final List < Tree > trees = new ArrayList < Tree > ( ) ; private final Scanner scanner ; public AllTreeScanner ( ) { scanner = setupAllTreeScanner ( tsdb ) ; } public Object fetchTrees ( ) { return scanner . nextRows ( ) . addCallback ( this ) ; } public Object call ( ArrayList < ArrayList < KeyValue > > rows ) throws Exception { if ( rows == null ) { result . callback ( trees ) ; return null ; } for ( ArrayList < KeyValue > row : rows ) { final Tree tree = new Tree ( ) ; for ( KeyValue column : row ) { if ( column . qualifier ( ) . length >= TREE_QUALIFIER . length && Bytes . memcmp ( TREE_QUALIFIER , column . qualifier ( ) ) == 0 ) { final Tree local_tree = JSON . parseToObject ( column . value ( ) , Tree . class ) ; tree . created = local_tree . created ; tree . description = local_tree . description ; tree . name = local_tree . name ; tree . notes = local_tree . notes ; tree . strict_match = local_tree . strict_match ; tree . enabled = local_tree . enabled ; tree . store_failures = local_tree . store_failures ; tree . setTreeId ( bytesToId ( row . get ( 0 ) . key ( ) ) ) ; } else if ( column . qualifier ( ) . length > TreeRule . RULE_PREFIX ( ) . length && Bytes . memcmp ( TreeRule . RULE_PREFIX ( ) , column . qualifier ( ) , 0 , TreeRule . RULE_PREFIX ( ) . length ) == 0 ) { final TreeRule rule = TreeRule . parseFromStorage ( column ) ; tree . addRule ( rule ) ; } } if ( tree . tree_id > 0 ) { trees . add ( tree ) ; } } return fetchTrees ( ) ; } } new AllTreeScanner ( ) . fetchTrees ( ) ; return result ; } | Attempts to retrieve all trees from the UID table including their rules . If no trees were found the result will be an empty list |
7,343 | private byte [ ] toStorageJson ( ) { final ByteArrayOutputStream output = new ByteArrayOutputStream ( ) ; try { final JsonGenerator json = JSON . getFactory ( ) . createGenerator ( output ) ; json . writeStartObject ( ) ; json . writeStringField ( "name" , name ) ; json . writeStringField ( "description" , description ) ; json . writeStringField ( "notes" , notes ) ; json . writeBooleanField ( "strictMatch" , strict_match ) ; json . writeNumberField ( "created" , created ) ; json . writeBooleanField ( "enabled" , enabled ) ; json . writeBooleanField ( "storeFailures" , store_failures ) ; json . writeEndObject ( ) ; json . close ( ) ; return output . toByteArray ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Converts the object to a JSON byte array necessary for CAS calls and to keep redundant data down |
7,344 | private static boolean printResult ( final ArrayList < KeyValue > row , final byte [ ] family , final boolean formard ) { if ( null == row || row . isEmpty ( ) ) { return false ; } final byte [ ] key = row . get ( 0 ) . key ( ) ; String name = formard ? CliUtils . fromBytes ( key ) : null ; String id = formard ? null : Arrays . toString ( key ) ; boolean printed = false ; for ( final KeyValue kv : row ) { if ( ! Bytes . equals ( kv . family ( ) , family ) ) { continue ; } printed = true ; if ( formard ) { id = Arrays . toString ( kv . value ( ) ) ; } else { name = CliUtils . fromBytes ( kv . value ( ) ) ; } System . out . println ( CliUtils . fromBytes ( kv . qualifier ( ) ) + ' ' + name + ": " + id ) ; } return printed ; } | Helper to print the cells in a given family for a given row if any . |
7,345 | private static int findAndPrintRow ( final HBaseClient client , final byte [ ] table , final byte [ ] key , final byte [ ] family , boolean formard ) { final GetRequest get = new GetRequest ( table , key ) ; get . family ( family ) ; ArrayList < KeyValue > row ; try { row = client . get ( get ) . joinUninterruptibly ( ) ; } catch ( HBaseException e ) { LOG . error ( "Get failed: " + get , e ) ; return 1 ; } catch ( Exception e ) { LOG . error ( "WTF? Unexpected exception type, get=" + get , e ) ; return 42 ; } return printResult ( row , family , formard ) ? 0 : 1 ; } | Gets a given row in HBase and prints it on standard output . |
7,346 | private static int extactLookupId ( final HBaseClient client , final byte [ ] table , final short idwidth , final String kind , final byte [ ] id ) { final UniqueId uid = new UniqueId ( client , table , kind , ( int ) idwidth ) ; try { final String name = uid . getName ( id ) ; System . out . println ( kind + ' ' + name + ": " + Arrays . toString ( id ) ) ; return 0 ; } catch ( NoSuchUniqueId e ) { LOG . error ( e . getMessage ( ) ) ; return 1 ; } } | Looks up an ID for a given kind and prints it if found . |
7,347 | private static byte [ ] idInBytes ( final short idwidth , final long lid ) { if ( idwidth <= 0 ) { throw new AssertionError ( "negative idwidth: " + idwidth ) ; } final byte [ ] id = Bytes . fromLong ( lid ) ; for ( int i = 0 ; i < id . length - idwidth ; i ++ ) { if ( id [ i ] != 0 ) { System . err . println ( lid + " is too large to fit on " + idwidth + " bytes. Maybe you forgot to adjust --idwidth?" ) ; return null ; } } return Arrays . copyOfRange ( id , id . length - idwidth , id . length ) ; } | Transforms an ID into the corresponding byte array . |
7,348 | private static int extactLookupName ( final HBaseClient client , final byte [ ] table , final short idwidth , final String kind , final String name ) { final UniqueId uid = new UniqueId ( client , table , kind , ( int ) idwidth ) ; try { final byte [ ] id = uid . getId ( name ) ; System . out . println ( kind + ' ' + name + ": " + Arrays . toString ( id ) ) ; return 0 ; } catch ( NoSuchUniqueName e ) { LOG . error ( e . getMessage ( ) ) ; return 1 ; } } | Looks up a name for a given kind and prints it if found . |
7,349 | private static int purgeTree ( final TSDB tsdb , final int tree_id , final boolean delete_definition ) throws Exception { final TreeSync sync = new TreeSync ( tsdb , 0 , 1 , 0 ) ; return sync . purgeTree ( tree_id , delete_definition ) ; } | Attempts to delete the branches leaves collisions and not - matched entries for a given tree . Optionally removes the tree definition itself |
7,350 | public Deferred < TreeMap < byte [ ] , Span > > fetch ( ) { if ( tags . isEmpty ( ) ) { return Deferred . fromResult ( null ) ; } startFetch ( ) ; return results ; } | Initiate the get requests and return the tree map of results . |
7,351 | private void startFetch ( ) { prepareConcurrentMultiGetTasks ( ) ; fetch_start_time = System . currentTimeMillis ( ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Start to fetch data using multiget, there will be " + multi_get_wait_cnt + " multigets to call" ) ; } for ( int con_idx = 0 ; con_idx < concurrency_multi_get ; ++ con_idx ) { final List < MultiGetTask > con_mul_get_tasks = multi_get_tasks . get ( con_idx ) ; final int task_index = multi_get_indexs . get ( con_idx ) . incrementAndGet ( ) ; if ( task_index < con_mul_get_tasks . size ( ) ) { final MultiGetTask task = con_mul_get_tasks . get ( task_index ) ; final MulGetCB mgcb = new MulGetCB ( con_idx , task . getTSUIDs ( ) , task . getGets ( ) ) ; mgcb . fetch ( ) ; } } } | Start the work of firing up X concurrent get requests . |
7,352 | public static void setDataTableScanFilter ( final Scanner scanner , final List < byte [ ] > group_bys , final ByteMap < byte [ ] [ ] > row_key_literals , final boolean explicit_tags , final boolean enable_fuzzy_filter , final int end_time ) { if ( ( group_bys == null || group_bys . isEmpty ( ) ) && ( row_key_literals == null || row_key_literals . isEmpty ( ) ) ) { return ; } final int prefix_width = Const . SALT_WIDTH ( ) + TSDB . metrics_width ( ) + Const . TIMESTAMP_BYTES ; final short name_width = TSDB . tagk_width ( ) ; final short value_width = TSDB . tagv_width ( ) ; final byte [ ] fuzzy_key ; final byte [ ] fuzzy_mask ; if ( explicit_tags && enable_fuzzy_filter ) { fuzzy_key = new byte [ prefix_width + ( row_key_literals . size ( ) * ( name_width + value_width ) ) ] ; fuzzy_mask = new byte [ prefix_width + ( row_key_literals . size ( ) * ( name_width + value_width ) ) ] ; System . arraycopy ( scanner . getCurrentKey ( ) , 0 , fuzzy_key , 0 , scanner . getCurrentKey ( ) . length ) ; } else { fuzzy_key = fuzzy_mask = null ; } final String regex = getRowKeyUIDRegex ( group_bys , row_key_literals , explicit_tags , fuzzy_key , fuzzy_mask ) ; final KeyRegexpFilter regex_filter = new KeyRegexpFilter ( regex . toString ( ) , Const . ASCII_CHARSET ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Regex for scanner: " + scanner + ": " + byteRegexToString ( regex ) ) ; } if ( ! ( explicit_tags && enable_fuzzy_filter ) ) { scanner . setFilter ( regex_filter ) ; return ; } scanner . setStartKey ( fuzzy_key ) ; final byte [ ] stop_key = Arrays . copyOf ( fuzzy_key , fuzzy_key . length ) ; Internal . setBaseTime ( stop_key , end_time ) ; int idx = Const . SALT_WIDTH ( ) + TSDB . metrics_width ( ) + Const . TIMESTAMP_BYTES + TSDB . tagk_width ( ) ; while ( idx < stop_key . length ) { for ( int i = 0 ; i < TSDB . tagv_width ( ) ; i ++ ) { stop_key [ idx ++ ] = ( byte ) 0xFF ; } idx += TSDB . tagk_width ( ) ; } scanner . 
setStopKey ( stop_key ) ; final List < ScanFilter > filters = new ArrayList < ScanFilter > ( 2 ) ; filters . add ( new FuzzyRowFilter ( new FuzzyRowFilter . FuzzyFilterPair ( fuzzy_key , fuzzy_mask ) ) ) ; filters . add ( regex_filter ) ; scanner . setFilter ( new FilterList ( filters ) ) ; } | Sets a filter or filter list on the scanner based on whether or not the query had tags it needed to match . |
7,353 | public static String getRowKeyTSUIDRegex ( final List < String > tsuids ) { Collections . sort ( tsuids ) ; final short metric_width = TSDB . metrics_width ( ) ; int tags_length = 0 ; final ArrayList < byte [ ] > uids = new ArrayList < byte [ ] > ( tsuids . size ( ) ) ; for ( final String tsuid : tsuids ) { final String tags = tsuid . substring ( metric_width * 2 ) ; final byte [ ] tag_bytes = UniqueId . stringToUid ( tags ) ; tags_length += tag_bytes . length ; uids . add ( tag_bytes ) ; } final StringBuilder buf = new StringBuilder ( 13 + ( tsuids . size ( ) * 11 ) + tags_length ) ; buf . append ( "(?s)" + "^.{" ) . append ( Const . SALT_WIDTH ( ) + metric_width + Const . TIMESTAMP_BYTES ) . append ( "}(" ) ; for ( final byte [ ] tags : uids ) { buf . append ( "\\Q" ) ; addId ( buf , tags , true ) ; buf . append ( '|' ) ; } buf . setCharAt ( buf . length ( ) - 1 , ')' ) ; buf . append ( "$" ) ; return buf . toString ( ) ; } | Creates a regular expression with a list of or d TUIDs to compare against the rows in storage . |
7,354 | public static Scanner getMetricScanner ( final TSDB tsdb , final int salt_bucket , final byte [ ] metric , final int start , final int stop , final byte [ ] table , final byte [ ] family ) { final short metric_width = TSDB . metrics_width ( ) ; final int metric_salt_width = metric_width + Const . SALT_WIDTH ( ) ; final byte [ ] start_row = new byte [ metric_salt_width + Const . TIMESTAMP_BYTES ] ; final byte [ ] end_row = new byte [ metric_salt_width + Const . TIMESTAMP_BYTES ] ; if ( Const . SALT_WIDTH ( ) > 0 ) { final byte [ ] salt = RowKey . getSaltBytes ( salt_bucket ) ; System . arraycopy ( salt , 0 , start_row , 0 , Const . SALT_WIDTH ( ) ) ; System . arraycopy ( salt , 0 , end_row , 0 , Const . SALT_WIDTH ( ) ) ; } Bytes . setInt ( start_row , start , metric_salt_width ) ; Bytes . setInt ( end_row , stop , metric_salt_width ) ; System . arraycopy ( metric , 0 , start_row , Const . SALT_WIDTH ( ) , metric_width ) ; System . arraycopy ( metric , 0 , end_row , Const . SALT_WIDTH ( ) , metric_width ) ; final Scanner scanner = tsdb . getClient ( ) . newScanner ( table ) ; scanner . setMaxNumRows ( tsdb . getConfig ( ) . scanner_maxNumRows ( ) ) ; scanner . setStartKey ( start_row ) ; scanner . setStopKey ( end_row ) ; scanner . setFamily ( family ) ; return scanner ; } | Compiles an HBase scanner against the main data table |
7,355 | public static void addId ( final StringBuilder buf , final byte [ ] id , final boolean close ) { boolean backslash = false ; for ( final byte b : id ) { buf . append ( ( char ) ( b & 0xFF ) ) ; if ( b == 'E' && backslash ) { buf . append ( "\\\\E\\Q" ) ; } else { backslash = b == '\\' ; } } if ( close ) { buf . append ( "\\E" ) ; } } | Appends the given UID to the given regular expression buffer |
7,356 | public static String byteRegexToString ( final String regexp ) { final StringBuilder buf = new StringBuilder ( ) ; for ( int i = 0 ; i < regexp . length ( ) ; i ++ ) { if ( i > 0 && regexp . charAt ( i - 1 ) == 'Q' ) { if ( regexp . charAt ( i - 3 ) == '*' ) { byte [ ] tagk = new byte [ TSDB . tagk_width ( ) ] ; for ( int x = 0 ; x < TSDB . tagk_width ( ) ; x ++ ) { tagk [ x ] = ( byte ) regexp . charAt ( i + x ) ; } i += TSDB . tagk_width ( ) ; buf . append ( Arrays . toString ( tagk ) ) ; } else { byte [ ] tagv = new byte [ TSDB . tagv_width ( ) ] ; for ( int x = 0 ; x < TSDB . tagv_width ( ) ; x ++ ) { tagv [ x ] = ( byte ) regexp . charAt ( i + x ) ; } i += TSDB . tagv_width ( ) ; buf . append ( Arrays . toString ( tagv ) ) ; } } else { buf . append ( regexp . charAt ( i ) ) ; } } return buf . toString ( ) ; } | Little helper to print out the regular expression by converting the UID bytes to an array . |
7,357 | private void computeUnion ( ) { final ByteMap < ExpressionDataPoint [ ] > ordered_union = new ByteMap < ExpressionDataPoint [ ] > ( ) ; final Iterator < ITimeSyncedIterator > it = queries . values ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { final ITimeSyncedIterator sub = it . next ( ) ; final ExpressionDataPoint [ ] dps = sub . values ( ) ; final ByteMap < Integer > local_tags = new ByteMap < Integer > ( ) ; for ( int i = 0 ; i < sub . size ( ) ; i ++ ) { final byte [ ] key = flattenTags ( union_on_query_tagks , include_agg_tags , dps [ i ] , sub ) ; local_tags . put ( key , i ) ; ExpressionDataPoint [ ] udps = ordered_union . get ( key ) ; if ( udps == null ) { udps = new ExpressionDataPoint [ queries . size ( ) ] ; ordered_union . put ( key , udps ) ; } udps [ sub . getIndex ( ) ] = dps [ i ] ; } } if ( ordered_union . size ( ) < 1 ) { return ; } setCurrentAndMeta ( ordered_union ) ; } | Computes the union of all sets matching on tags and optionally the aggregated tags across each variable . |
7,358 | static byte [ ] flattenTags ( final boolean use_query_tags , final boolean include_agg_tags , final ExpressionDataPoint dp , final ITimeSyncedIterator sub ) { if ( dp . tags ( ) == null || dp . tags ( ) . isEmpty ( ) ) { return HBaseClient . EMPTY_ARRAY ; } final int tagk_width = TSDB . tagk_width ( ) ; final int tagv_width = TSDB . tagv_width ( ) ; final ByteSet query_tagks ; final int tag_size ; if ( use_query_tags ) { int i = 0 ; if ( sub . getQueryTagKs ( ) != null && ! sub . getQueryTagKs ( ) . isEmpty ( ) ) { query_tagks = sub . getQueryTagKs ( ) ; for ( final Map . Entry < byte [ ] , byte [ ] > pair : dp . tags ( ) . entrySet ( ) ) { if ( query_tagks . contains ( pair . getKey ( ) ) ) { i ++ ; } } } else { query_tagks = new ByteSet ( ) ; } tag_size = i ; } else { query_tagks = new ByteSet ( ) ; tag_size = dp . tags ( ) . size ( ) ; } final int length = ( tag_size * ( tagk_width + tagv_width ) ) + ( include_agg_tags ? ( dp . aggregatedTags ( ) . size ( ) * tagk_width ) : 0 ) ; final byte [ ] key = new byte [ length ] ; int idx = 0 ; for ( final Entry < byte [ ] , byte [ ] > pair : dp . tags ( ) . entrySet ( ) ) { if ( use_query_tags && ! query_tagks . contains ( pair . getKey ( ) ) ) { continue ; } System . arraycopy ( pair . getKey ( ) , 0 , key , idx , tagk_width ) ; idx += tagk_width ; System . arraycopy ( pair . getValue ( ) , 0 , key , idx , tagv_width ) ; idx += tagv_width ; } if ( include_agg_tags ) { for ( final byte [ ] tagk : dp . aggregatedTags ( ) ) { System . arraycopy ( tagk , 0 , key , idx , tagk_width ) ; idx += tagk_width ; } } return key ; } | Creates a key based on the concatenation of the tag pairs then the agg tag keys . |
7,359 | public void reset ( DataPoint dp ) { this . timestamp = dp . timestamp ( ) ; this . is_integer = dp . isInteger ( ) ; if ( is_integer ) { this . value = dp . longValue ( ) ; } else { this . value = Double . doubleToRawLongBits ( dp . doubleValue ( ) ) ; } } | Resets with a new data point . |
7,360 | public static MutableDataPoint ofLongValue ( final long timestamp , final long value ) { final MutableDataPoint dp = new MutableDataPoint ( ) ; dp . reset ( timestamp , value ) ; return dp ; } | Resets with a new pair of a timestamp and a long value . |
7,361 | public void setSeries ( final String metric , final Map < String , String > tags ) { IncomingDataPoints . checkMetricAndTags ( metric , tags ) ; try { row_key = IncomingDataPoints . rowKeyTemplate ( tsdb , metric , tags ) ; RowKey . prefixKeyWithSalt ( row_key ) ; reset ( ) ; } catch ( RuntimeException e ) { throw e ; } catch ( Exception e ) { throw new RuntimeException ( "Should never happen" , e ) ; } } | Sets the metric name and tags of this batch . This method only need be called if there is a desire to reuse the data structure after the data has been flushed . This will reset all cached information in this data structure . |
7,362 | private void reset ( ) { size = 0 ; qualifier_index = 0 ; value_index = 0 ; base_time = Long . MIN_VALUE ; last_timestamp = Long . MIN_VALUE ; } | Resets the indices without overwriting the buffers . So the same amount of space will remain allocated . |
7,363 | public Deferred < Object > persist ( ) { final byte [ ] q = Arrays . copyOfRange ( batched_qualifier , 0 , qualifier_index ) ; final byte [ ] v = Arrays . copyOfRange ( batched_value , 0 , value_index ) ; final byte [ ] r = Arrays . copyOfRange ( row_key , 0 , row_key . length ) ; final long base_time = this . base_time ; System . out . println ( Arrays . toString ( q ) + " " + Arrays . toString ( v ) + " " + Arrays . toString ( r ) ) ; reset ( ) ; return tsdb . put ( r , q , v , base_time ) ; } | A copy of the values is created and sent with a put request . A reset is initialized which makes this data structure ready to be reused for the same metric and tags but for a different hour of data . |
7,364 | private void ensureCapacity ( final byte [ ] next_qualifier , final byte [ ] next_value ) { if ( qualifier_index + next_qualifier . length >= batched_qualifier . length ) { batched_qualifier = Arrays . copyOf ( batched_qualifier , batched_qualifier . length * 2 ) ; } if ( value_index + next_value . length >= batched_value . length ) { batched_value = Arrays . copyOf ( batched_value , batched_value . length * 2 ) ; } } | Checks the size of the qualifier and value arrays to make sure we have space . If not then we double the size of the arrays . This way a row allocates space for a full hour of second data but if the user requires millisecond storage with more than 3600 points it will expand . |
7,365 | private void append ( final byte [ ] next_qualifier , final byte [ ] next_value ) { ensureCapacity ( next_qualifier , next_value ) ; System . arraycopy ( next_value , 0 , batched_value , value_index , next_value . length ) ; value_index += next_value . length ; System . arraycopy ( next_qualifier , 0 , batched_qualifier , qualifier_index , next_qualifier . length ) ; qualifier_index += next_qualifier . length ; } | Appends the value and qualifier to the appropriate arrays |
7,366 | private int qualifierOffset ( final int i ) { int offset = 0 ; for ( int j = 0 ; j < i ; j ++ ) { offset += Internal . getQualifierLength ( batched_qualifier , offset ) ; } return offset ; } | Computes the proper offset to reach qualifier |
7,367 | private boolean isInteger ( final int i , final int q_offset ) { final short flags = Internal . getFlagsFromQualifier ( batched_qualifier , q_offset ) ; return ( flags & Const . FLAG_FLOAT ) == 0x0 ; } | Tells whether or not the ith value is integer . Uses pre - computed qualifier offset . |
7,368 | private void handleExpressionQuery ( final TSDB tsdb , final HttpQuery query ) { final net . opentsdb . query . pojo . Query v2_query = JSON . parseToObject ( query . getContent ( ) , net . opentsdb . query . pojo . Query . class ) ; v2_query . validate ( ) ; checkAuthorization ( tsdb , query . channel ( ) , v2_query ) ; final QueryExecutor executor = new QueryExecutor ( tsdb , v2_query ) ; executor . execute ( query ) ; } | Handles an expression query |
7,369 | private static void parseMTypeSubQuery ( final String query_string , TSQuery data_query ) { if ( query_string == null || query_string . isEmpty ( ) ) { throw new BadRequestException ( "The query string was empty" ) ; } final String [ ] parts = Tags . splitString ( query_string , ':' ) ; int i = parts . length ; if ( i < 2 || i > 5 ) { throw new BadRequestException ( "Invalid parameter m=" + query_string + " (" + ( i < 2 ? "not enough" : "too many" ) + " :-separated parts)" ) ; } final TSSubQuery sub_query = new TSSubQuery ( ) ; sub_query . setAggregator ( parts [ 0 ] ) ; i -- ; List < TagVFilter > filters = new ArrayList < TagVFilter > ( ) ; sub_query . setMetric ( Tags . parseWithMetricAndFilters ( parts [ i ] , filters ) ) ; sub_query . setFilters ( filters ) ; for ( int x = 1 ; x < parts . length - 1 ; x ++ ) { if ( parts [ x ] . toLowerCase ( ) . startsWith ( "rate" ) ) { sub_query . setRate ( true ) ; if ( parts [ x ] . indexOf ( "{" ) >= 0 ) { sub_query . setRateOptions ( QueryRpc . parseRateOptions ( true , parts [ x ] ) ) ; } } else if ( Character . isDigit ( parts [ x ] . charAt ( 0 ) ) ) { sub_query . setDownsample ( parts [ x ] ) ; } else if ( parts [ x ] . equalsIgnoreCase ( "pre-agg" ) ) { sub_query . setPreAggregate ( true ) ; } else if ( parts [ x ] . toLowerCase ( ) . startsWith ( "rollup_" ) ) { sub_query . setRollupUsage ( parts [ x ] ) ; } else if ( parts [ x ] . toLowerCase ( ) . startsWith ( "percentiles" ) ) { sub_query . setPercentiles ( QueryRpc . parsePercentiles ( parts [ x ] ) ) ; } else if ( parts [ x ] . toLowerCase ( ) . startsWith ( "show-histogram-buckets" ) ) { sub_query . setShowHistogramBuckets ( true ) ; } else if ( parts [ x ] . toLowerCase ( ) . startsWith ( "explicit_tags" ) ) { sub_query . setExplicitTags ( true ) ; } } if ( data_query . getQueries ( ) == null ) { final ArrayList < TSSubQuery > subs = new ArrayList < TSSubQuery > ( 1 ) ; data_query . setQueries ( subs ) ; } data_query . getQueries ( ) . 
add ( sub_query ) ; } | Parses a query string m = ... type query and adds it to the TSQuery . This will generate a TSSubQuery and add it to the TSQuery if successful |
7,370 | private static void parseTsuidTypeSubQuery ( final String query_string , TSQuery data_query ) { if ( query_string == null || query_string . isEmpty ( ) ) { throw new BadRequestException ( "The tsuid query string was empty" ) ; } final String [ ] parts = Tags . splitString ( query_string , ':' ) ; int i = parts . length ; if ( i < 2 || i > 5 ) { throw new BadRequestException ( "Invalid parameter m=" + query_string + " (" + ( i < 2 ? "not enough" : "too many" ) + " :-separated parts)" ) ; } final TSSubQuery sub_query = new TSSubQuery ( ) ; sub_query . setAggregator ( parts [ 0 ] ) ; i -- ; final List < String > tsuid_array = Arrays . asList ( parts [ i ] . split ( "," ) ) ; sub_query . setTsuids ( tsuid_array ) ; for ( int x = 1 ; x < parts . length - 1 ; x ++ ) { if ( parts [ x ] . toLowerCase ( ) . startsWith ( "rate" ) ) { sub_query . setRate ( true ) ; if ( parts [ x ] . indexOf ( "{" ) >= 0 ) { sub_query . setRateOptions ( QueryRpc . parseRateOptions ( true , parts [ x ] ) ) ; } } else if ( Character . isDigit ( parts [ x ] . charAt ( 0 ) ) ) { sub_query . setDownsample ( parts [ x ] ) ; } else if ( parts [ x ] . toLowerCase ( ) . startsWith ( "percentiles" ) ) { sub_query . setPercentiles ( QueryRpc . parsePercentiles ( parts [ x ] ) ) ; } else if ( parts [ x ] . toLowerCase ( ) . startsWith ( "show-histogram-buckets" ) ) { sub_query . setShowHistogramBuckets ( true ) ; } } if ( data_query . getQueries ( ) == null ) { final ArrayList < TSSubQuery > subs = new ArrayList < TSSubQuery > ( 1 ) ; data_query . setQueries ( subs ) ; } data_query . getQueries ( ) . add ( sub_query ) ; } | Parses a tsuid = ... type query and adds it to the TSQuery . This will generate a TSSubQuery and add it to the TSQuery if successful |
7,371 | private LastPointQuery parseLastPointQuery ( final TSDB tsdb , final HttpQuery http_query ) { final LastPointQuery query = new LastPointQuery ( ) ; if ( http_query . hasQueryStringParam ( "resolve" ) ) { query . setResolveNames ( true ) ; } if ( http_query . hasQueryStringParam ( "back_scan" ) ) { try { query . setBackScan ( Integer . parseInt ( http_query . getQueryStringParam ( "back_scan" ) ) ) ; } catch ( NumberFormatException e ) { throw new BadRequestException ( "Unable to parse back_scan parameter" ) ; } } final List < String > ts_queries = http_query . getQueryStringParams ( "timeseries" ) ; final List < String > tsuid_queries = http_query . getQueryStringParams ( "tsuids" ) ; final int num_queries = ( ts_queries != null ? ts_queries . size ( ) : 0 ) + ( tsuid_queries != null ? tsuid_queries . size ( ) : 0 ) ; final List < LastPointSubQuery > sub_queries = new ArrayList < LastPointSubQuery > ( num_queries ) ; if ( ts_queries != null ) { for ( String ts_query : ts_queries ) { sub_queries . add ( LastPointSubQuery . parseTimeSeriesQuery ( ts_query ) ) ; } } if ( tsuid_queries != null ) { for ( String tsuid_query : tsuid_queries ) { sub_queries . add ( LastPointSubQuery . parseTSUIDQuery ( tsuid_query ) ) ; } } query . setQueries ( sub_queries ) ; return query ; } | Parses a last point query from the URI string |
7,372 | public Map < String , String > getPrintableHeaders ( ) { final Map < String , String > headers = new HashMap < String , String > ( request . headers ( ) . entries ( ) . size ( ) ) ; for ( final Entry < String , String > header : request . headers ( ) . entries ( ) ) { if ( header . getKey ( ) . toLowerCase ( ) . equals ( "cookie" ) ) { headers . put ( header . getKey ( ) , "*******" ) ; } else { if ( headers . containsKey ( header . getKey ( ) ) ) { headers . put ( header . getKey ( ) , headers . get ( header . getKey ( ) ) + "," + header . getValue ( ) ) ; } else { headers . put ( header . getKey ( ) , header . getValue ( ) ) ; } } } return headers ; } | Copies the header list and obfuscates the cookie header in case it contains auth tokens etc . Note that it flattens duplicate headers keys as comma separated lists per the RFC |
7,373 | public Map < String , String > getHeaders ( ) { final Map < String , String > headers = new HashMap < String , String > ( request . headers ( ) . entries ( ) . size ( ) ) ; for ( final Entry < String , String > header : request . headers ( ) . entries ( ) ) { if ( headers . containsKey ( header . getKey ( ) ) ) { headers . put ( header . getKey ( ) , headers . get ( header . getKey ( ) ) + "," + header . getValue ( ) ) ; } else { headers . put ( header . getKey ( ) , header . getValue ( ) ) ; } } return headers ; } | Copies the header list so modifications won t affect the original set . Note that it flattens duplicate headers keys as comma separated lists per the RFC |
7,374 | public String getHeaderValue ( final String headerName ) { if ( headerName == null ) { return null ; } return request . headers ( ) . get ( headerName ) ; } | Return the value of the given HTTP Header first match wins |
7,375 | public Map < String , List < String > > getQueryString ( ) { if ( querystring == null ) { try { querystring = new QueryStringDecoder ( request . getUri ( ) ) . getParameters ( ) ; } catch ( IllegalArgumentException e ) { throw new BadRequestException ( "Bad query string: " + e . getMessage ( ) ) ; } } return querystring ; } | Returns the query string parameters passed in the URI . |
7,376 | public Charset getCharset ( ) { for ( String type : this . request . headers ( ) . getAll ( "Content-Type" ) ) { int idx = type . toUpperCase ( ) . indexOf ( "CHARSET=" ) ; if ( idx > 1 ) { String charset = type . substring ( idx + 8 ) ; return Charset . forName ( charset ) ; } } return Charset . forName ( "UTF-8" ) ; } | Attempts to parse the character set from the request header . If not set defaults to UTF - 8 |
7,377 | public void done ( ) { final int processing_time = processingTimeMillis ( ) ; final String url = request . getUri ( ) ; final String msg = String . format ( "HTTP %s done in %d ms" , url , processing_time ) ; if ( url . startsWith ( "/api/put" ) && LOG . isDebugEnabled ( ) ) { LOG . debug ( msg ) ; } else { logInfo ( msg ) ; } logInfo ( "HTTP " + request . getUri ( ) + " done in " + processing_time + "ms" ) ; } | Method to call after writing the HTTP response to the wire . The default is to simply log the request info . Can be overridden by subclasses . |
7,378 | public void sendStatusOnly ( final HttpResponseStatus status ) { if ( ! chan . isConnected ( ) ) { if ( stats != null ) { stats . markSendFailed ( ) ; } done ( ) ; return ; } response . setStatus ( status ) ; final boolean keepalive = HttpHeaders . isKeepAlive ( request ) ; if ( keepalive ) { HttpHeaders . setContentLength ( response , 0 ) ; } final ChannelFuture future = chan . write ( response ) ; if ( stats != null ) { future . addListener ( new SendSuccess ( ) ) ; } if ( ! keepalive ) { future . addListener ( ChannelFutureListener . CLOSE ) ; } done ( ) ; } | Send just the status code without a body used for 204 or 304 |
7,379 | public Deferred < ArrayList < Object > > flush ( ) { final int size = size ( ) ; if ( size > 0 ) { LOG . info ( "Flushing all old outstanding rows out of " + size + " rows" ) ; } final long now = System . currentTimeMillis ( ) ; return flush ( now / 1000 - Const . MAX_TIMESPAN - 1 , Integer . MAX_VALUE ) ; } | Forces a flush of the all old entries in the compaction queue . |
7,380 | private Deferred < ArrayList < Object > > flush ( final long cut_off , int maxflushes ) { assert maxflushes > 0 : "maxflushes must be > 0, but I got " + maxflushes ; maxflushes = Math . min ( maxflushes , size ( ) ) ; if ( maxflushes == 0 ) { return Deferred . fromResult ( new ArrayList < Object > ( 0 ) ) ; } final ArrayList < Deferred < Object > > ds = new ArrayList < Deferred < Object > > ( Math . min ( maxflushes , max_concurrent_flushes ) ) ; int nflushes = 0 ; int seed = ( int ) ( System . nanoTime ( ) % 3 ) ; for ( final byte [ ] row : this . keySet ( ) ) { if ( maxflushes == 0 ) { break ; } if ( seed == row . hashCode ( ) % 3 ) { continue ; } final long base_time = Bytes . getUnsignedInt ( row , Const . SALT_WIDTH ( ) + metric_width ) ; if ( base_time > cut_off ) { break ; } else if ( nflushes == max_concurrent_flushes ) { break ; } if ( super . remove ( row ) == null ) { continue ; } nflushes ++ ; maxflushes -- ; size . decrementAndGet ( ) ; ds . add ( tsdb . get ( row ) . addCallbacks ( compactcb , handle_read_error ) ) ; } final Deferred < ArrayList < Object > > group = Deferred . group ( ds ) ; if ( nflushes == max_concurrent_flushes && maxflushes > 0 ) { tsdb . getClient ( ) . flush ( ) ; final int maxflushez = maxflushes ; final class FlushMoreCB implements Callback < Deferred < ArrayList < Object > > , ArrayList < Object > > { public Deferred < ArrayList < Object > > call ( final ArrayList < Object > arg ) { return flush ( cut_off , maxflushez ) ; } public String toString ( ) { return "Continue flushing with cut_off=" + cut_off + ", maxflushes=" + maxflushez ; } } group . addCallbackDeferring ( new FlushMoreCB ( ) ) ; } return group ; } | Flushes all the rows in the compaction queue older than the cutoff time . |
7,381 | void executeBulk ( final TSDB tsdb , final HttpMethod method , HttpQuery query ) { if ( method == HttpMethod . POST || method == HttpMethod . PUT ) { executeBulkUpdate ( tsdb , method , query ) ; } else if ( method == HttpMethod . DELETE ) { executeBulkDelete ( tsdb , query ) ; } else { throw new BadRequestException ( HttpResponseStatus . METHOD_NOT_ALLOWED , "Method not allowed" , "The HTTP method [" + query . method ( ) . getName ( ) + "] is not permitted for this endpoint" ) ; } } | Performs CRUD methods on a list of annotation objects to reduce calls to the API . |
7,382 | void executeBulkUpdate ( final TSDB tsdb , final HttpMethod method , HttpQuery query ) { final List < Annotation > notes ; try { notes = query . serializer ( ) . parseAnnotationsV1 ( ) ; } catch ( IllegalArgumentException e ) { throw new BadRequestException ( e ) ; } catch ( JSONException e ) { throw new BadRequestException ( e ) ; } final List < Deferred < Annotation > > callbacks = new ArrayList < Deferred < Annotation > > ( notes . size ( ) ) ; class SyncCB implements Callback < Deferred < Annotation > , Boolean > { final private Annotation note ; public SyncCB ( final Annotation note ) { this . note = note ; } public Deferred < Annotation > call ( Boolean success ) throws Exception { if ( ! success ) { throw new BadRequestException ( HttpResponseStatus . INTERNAL_SERVER_ERROR , "Failed to save an Annotation to storage" , "This may be caused by another process modifying storage data: " + note ) ; } return Annotation . getAnnotation ( tsdb , note . getTSUID ( ) , note . getStartTime ( ) ) ; } } class IndexCB implements Callback < Deferred < Annotation > , Annotation > { public Deferred < Annotation > call ( final Annotation note ) throws Exception { tsdb . indexAnnotation ( note ) ; return Deferred . fromResult ( note ) ; } } for ( Annotation note : notes ) { try { Deferred < Annotation > deferred = note . syncToStorage ( tsdb , method == HttpMethod . PUT ) . addCallbackDeferring ( new SyncCB ( note ) ) ; Deferred < Annotation > indexer = deferred . addCallbackDeferring ( new IndexCB ( ) ) ; callbacks . add ( indexer ) ; } catch ( IllegalStateException e ) { LOG . info ( "No changes for annotation: " + note ) ; } catch ( IllegalArgumentException e ) { throw new BadRequestException ( HttpResponseStatus . BAD_REQUEST , e . getMessage ( ) , "Annotation error: " + note , e ) ; } } try { Deferred . group ( callbacks ) . joinUninterruptibly ( ) ; notes . clear ( ) ; for ( Deferred < Annotation > note : callbacks ) { notes . add ( note . 
joinUninterruptibly ( ) ) ; } query . sendReply ( query . serializer ( ) . formatAnnotationsV1 ( notes ) ) ; } catch ( IllegalArgumentException e ) { throw new BadRequestException ( e ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | Performs CRU methods on a list of annotation objects to reduce calls to the API . Only supports body content and adding or updating annotation objects . Deletions are separate . |
7,383 | private AnnotationBulkDelete parseBulkDeleteQS ( final HttpQuery query ) { final AnnotationBulkDelete settings = new AnnotationBulkDelete ( ) ; settings . start_time = query . getRequiredQueryStringParam ( "start_time" ) ; settings . end_time = query . getQueryStringParam ( "end_time" ) ; if ( query . hasQueryStringParam ( "tsuids" ) ) { String [ ] tsuids = query . getQueryStringParam ( "tsuids" ) . split ( "," ) ; settings . tsuids = new ArrayList < String > ( tsuids . length ) ; for ( String tsuid : tsuids ) { settings . tsuids . add ( tsuid . trim ( ) ) ; } } if ( query . hasQueryStringParam ( "global" ) ) { settings . global = true ; } return settings ; } | Parses a query string for a bulk delet request |
7,384 | public static SearchType parseSearchType ( final String type ) { if ( type == null || type . isEmpty ( ) ) { throw new IllegalArgumentException ( "Type provided was null or empty" ) ; } if ( type . toLowerCase ( ) . equals ( "tsmeta" ) ) { return SearchType . TSMETA ; } else if ( type . toLowerCase ( ) . equals ( "tsmeta_summary" ) ) { return SearchType . TSMETA_SUMMARY ; } else if ( type . toLowerCase ( ) . equals ( "tsuids" ) ) { return SearchType . TSUIDS ; } else if ( type . toLowerCase ( ) . equals ( "uidmeta" ) ) { return SearchType . UIDMETA ; } else if ( type . toLowerCase ( ) . equals ( "annotation" ) ) { return SearchType . ANNOTATION ; } else if ( type . toLowerCase ( ) . equals ( "lookup" ) ) { return SearchType . LOOKUP ; } else { throw new IllegalArgumentException ( "Unknown type: " + type ) ; } } | Converts the human readable string to the proper enum |
/**
 * Computes the rate between the current and next data point pulled from
 * the source iterator and stores it in {@code next_rate}. When the source
 * is exhausted, {@code next_rate} is marked with INVALID_TIMESTAMP instead.
 * Handles counter semantics: rollover compensation, optional dropping of
 * resets, and an optional reset threshold that zeroes spurious spikes.
 * @throws IllegalStateException if timestamps are not strictly increasing
 */
private void populateNextRate() {
  final MutableDataPoint prev_data = new MutableDataPoint();
  if (source.hasNext()) {
    // Shift the window: the old "next" becomes "previous".
    prev_data.reset(next_data);
    next_data.reset(source.next());
    final long t0 = prev_data.timestamp();
    final long t1 = next_data.timestamp();
    if (t1 <= t0) {
      throw new IllegalStateException(
          "Next timestamp (" + t1 + ") is supposed to be "
          + " strictly greater than the previous one (" + t0 + "), but it's"
          + " not. this=" + this);
    }
    // Convert the millisecond delta to seconds for a per-second rate.
    final double time_delta_secs = ((double) (t1 - t0) / 1000.0);
    double difference;
    if (prev_data.isInteger() && next_data.isInteger()) {
      // Use integer arithmetic when both points are integral to avoid
      // floating point error in the delta.
      difference = next_data.longValue() - prev_data.longValue();
    } else {
      difference = next_data.toDouble() - prev_data.toDouble();
    }
    if (options.isCounter() && difference < 0) {
      // A negative delta on a counter means it rolled over or was reset.
      if (options.getDropResets()) {
        // Skip this interval entirely and advance to the next one.
        populateNextRate();
        return;
      }
      // Assume a rollover at the configured counter max and compute the
      // delta across the wrap.
      if (prev_data.isInteger() && next_data.isInteger()) {
        difference = options.getCounterMax() - prev_data.longValue()
            + next_data.longValue();
      } else {
        difference = options.getCounterMax() - prev_data.toDouble()
            + next_data.toDouble();
      }
      final double rate = difference / time_delta_secs;
      if (options.getResetValue() > RateOptions.DEFAULT_RESET_VALUE
          && rate > options.getResetValue()) {
        // Rate exceeds the configured reset threshold: treat as a counter
        // reset and emit zero instead of a huge spurious spike.
        next_rate.reset(next_data.timestamp(), 0.0D);
      } else {
        next_rate.reset(next_data.timestamp(), rate);
      }
    } else {
      next_rate.reset(next_data.timestamp(), (difference / time_delta_secs));
    }
  } else {
    // Source exhausted: flag the pending rate as invalid.
    next_rate.reset(INVALID_TIMESTAMP, 0);
  }
}
7,386 | public long cacheSize ( ) { if ( use_lru ) { return ( int ) ( lru_name_cache . size ( ) + lru_id_cache . size ( ) ) ; } return name_cache . size ( ) + id_cache . size ( ) ; } | Returns the number of elements stored in the internal cache . |
7,387 | public void dropCaches ( ) { if ( use_lru ) { lru_name_cache . invalidateAll ( ) ; lru_id_cache . invalidateAll ( ) ; } else { name_cache . clear ( ) ; id_cache . clear ( ) ; } } | Causes this instance to discard all its in - memory caches . |
/**
 * Finds the name associated with a given ID, checking the in-memory cache
 * first and falling back to HBase on a miss.
 * @param id The ID to resolve; must be exactly {@code id_width} bytes wide
 * @return A deferred resolving to the name mapped to the ID
 * @throws IllegalArgumentException if the ID has the wrong length
 * @throws NoSuchUniqueId (via the deferred) if no mapping exists in storage
 */
public Deferred<String> getNameAsync(final byte[] id) {
  if (id.length != id_width) {
    throw new IllegalArgumentException("Wrong id.length = " + id.length
        + " which is != " + id_width
        + " required for '" + kind() + '\'');
  }
  final String name = getNameFromCache(id);
  if (name != null) {
    incrementCacheHits();
    return Deferred.fromResult(name);
  }
  incrementCacheMiss();
  // Caches the freshly resolved mapping according to the configured mode.
  class GetNameCB implements Callback<String, String> {
    public String call(final String name) {
      if (name == null) {
        throw new NoSuchUniqueId(kind(), id);
      }
      if (use_mode) {
        // Honor the cache mode: read-only instances only need the
        // id->name direction, write-only instances skip caching entirely.
        switch (mode) {
        case READONLY:
          addNameToCache(id, name);
          break;
        case WRITEONLY:
          break;
        default:
          addNameToCache(id, name);
          addIdToCache(name, id);
        }
      } else {
        // No mode restriction: populate both directions.
        addNameToCache(id, name);
        addIdToCache(name, id);
      }
      return name;
    }
  }
  return getNameFromHBase(id).addCallback(new GetNameCB());
}
7,389 | private void cacheMapping ( final String name , final byte [ ] id ) { addIdToCache ( name , id ) ; addNameToCache ( id , name ) ; } | Adds the bidirectional mapping in the cache . |
/**
 * Creates a scanner over the UID table's ID family covering all names that
 * start with {@code search}.
 * @param client The HBase client to create the scanner with
 * @param tsd_uid_table The name of the UID table to scan
 * @param search The name prefix to match; an empty string scans everything
 * @param kind_or_null If non-null, restrict the scan to this UID kind
 *        qualifier (e.g. "metrics")
 * @param max_results Upper bound on rows fetched per batch, capped at 4096
 * @return A configured, not-yet-started scanner
 */
private static Scanner getSuggestScanner(final HBaseClient client,
    final byte[] tsd_uid_table, final String search,
    final byte[] kind_or_null, final int max_results) {
  final byte[] start_row;
  final byte[] end_row;
  if (search.isEmpty()) {
    start_row = START_ROW;
    end_row = END_ROW;
  } else {
    // Scan [search, search+1): copy the prefix and increment its last byte
    // to form the exclusive upper bound.
    // NOTE(review): if the last byte is 0xFF this increment wraps to 0x00,
    // making the stop key sort before the start key -- confirm callers
    // never pass such a prefix.
    start_row = toBytes(search);
    end_row = Arrays.copyOf(start_row, start_row.length);
    end_row[start_row.length - 1]++;
  }
  final Scanner scanner = client.newScanner(tsd_uid_table);
  scanner.setStartKey(start_row);
  scanner.setStopKey(end_row);
  scanner.setFamily(ID_FAMILY);
  if (kind_or_null != null) {
    scanner.setQualifier(kind_or_null);
  }
  // Cap the batch size so a single fetch stays bounded.
  scanner.setMaxNumRows(max_results <= 4096 ? max_results : 4096);
  return scanner;
}
/**
 * Runs the given put synchronously, retrying with exponential backoff on
 * HBase errors. The put is made non-bufferable so it goes out immediately.
 * @param put The put request to execute
 * @param attempts Maximum number of attempts before giving up
 * @param wait Initial wait in ms between retries; doubles on each retry
 * @throws HBaseException if the put still fails on the last attempt
 * @throws RuntimeException if the thread is interrupted while waiting
 */
private void hbasePutWithRetry(final PutRequest put, short attempts, short wait)
    throws HBaseException {
  put.setBufferable(false);
  while (attempts-- > 0) {
    try {
      client.put(put).joinUninterruptibly();
      return;
    } catch (HBaseException e) {
      if (attempts > 0) {
        LOG.error("Put failed, attempts left=" + attempts
            + " (retrying in " + wait + " ms), put=" + put, e);
        try {
          Thread.sleep(wait);
        } catch (InterruptedException ie) {
          throw new RuntimeException("interrupted", ie);
        }
        wait *= 2;  // exponential backoff
      } else {
        throw e;
      }
    } catch (Exception e) {
      // Deliberate best-effort: unexpected exception types are logged and
      // the loop retries immediately without sleeping; only HBaseException
      // triggers the backoff path.
      LOG.error("WTF? Unexpected exception type, put=" + put, e);
    }
  }
  // Unreachable: the loop either returns on success or throws on the
  // final HBaseException.
  throw new IllegalStateException("This code should never be reached!");
}
7,392 | public static long uidToLong ( final byte [ ] uid , final short uid_length ) { if ( uid . length != uid_length ) { throw new IllegalArgumentException ( "UID was " + uid . length + " bytes long but expected to be " + uid_length ) ; } final byte [ ] uid_raw = new byte [ 8 ] ; System . arraycopy ( uid , 0 , uid_raw , 8 - uid_length , uid_length ) ; return Bytes . getLong ( uid_raw ) ; } | Converts a UID to an integer value . The array must be the same length as uid_length or an exception will be thrown . |
7,393 | public static byte [ ] longToUID ( final long uid , final short width ) { final byte [ ] padded = Bytes . fromLong ( uid ) ; for ( int i = 0 ; i < padded . length - width ; i ++ ) { if ( padded [ i ] != 0 ) { final String message = "UID " + Long . toString ( uid ) + " was too large for " + width + " bytes" ; LOG . error ( "OMG " + message ) ; throw new IllegalStateException ( message ) ; } } return Arrays . copyOfRange ( padded , padded . length - width , padded . length ) ; } | Converts a Long to a byte array with the proper UID width |
7,394 | public static void addIdToRegexp ( final StringBuilder buf , final byte [ ] id ) { boolean backslash = false ; for ( final byte b : id ) { buf . append ( ( char ) ( b & 0xFF ) ) ; if ( b == 'E' && backslash ) { buf . append ( "\\\\E\\Q" ) ; } else { backslash = b == '\\' ; } } buf . append ( "\\E" ) ; } | Appends the given UID to the given string buffer followed by \\ E . |
7,395 | public static UniqueIdType stringToUniqueIdType ( final String type ) { if ( type . toLowerCase ( ) . equals ( "metric" ) || type . toLowerCase ( ) . equals ( "metrics" ) ) { return UniqueIdType . METRIC ; } else if ( type . toLowerCase ( ) . equals ( "tagk" ) ) { return UniqueIdType . TAGK ; } else if ( type . toLowerCase ( ) . equals ( "tagv" ) ) { return UniqueIdType . TAGV ; } else { throw new IllegalArgumentException ( "Invalid type requested: " + type ) ; } } | Attempts to convert the given string to a type enumerator |
7,396 | public static byte [ ] getTSUIDFromKey ( final byte [ ] row_key , final short metric_width , final short timestamp_width ) { int idx = 0 ; final int tag_pair_width = TSDB . tagk_width ( ) + TSDB . tagv_width ( ) ; final int tags_length = row_key . length - ( Const . SALT_WIDTH ( ) + metric_width + timestamp_width ) ; if ( tags_length < tag_pair_width || ( tags_length % tag_pair_width ) != 0 ) { throw new IllegalArgumentException ( "Row key is missing tags or it is corrupted " + Arrays . toString ( row_key ) ) ; } final byte [ ] tsuid = new byte [ row_key . length - timestamp_width - Const . SALT_WIDTH ( ) ] ; for ( int i = Const . SALT_WIDTH ( ) ; i < row_key . length ; i ++ ) { if ( i < Const . SALT_WIDTH ( ) + metric_width || i >= ( Const . SALT_WIDTH ( ) + metric_width + timestamp_width ) ) { tsuid [ idx ] = row_key [ i ] ; idx ++ ; } } return tsuid ; } | Extracts the TSUID from a storage row key that includes the timestamp . |
7,397 | public static List < byte [ ] > getTagsFromTSUID ( final String tsuid ) { if ( tsuid == null || tsuid . isEmpty ( ) ) { throw new IllegalArgumentException ( "Missing TSUID" ) ; } if ( tsuid . length ( ) <= TSDB . metrics_width ( ) * 2 ) { throw new IllegalArgumentException ( "TSUID is too short, may be missing tags" ) ; } final List < byte [ ] > tags = new ArrayList < byte [ ] > ( ) ; final int pair_width = ( TSDB . tagk_width ( ) * 2 ) + ( TSDB . tagv_width ( ) * 2 ) ; for ( int i = TSDB . metrics_width ( ) * 2 ; i < tsuid . length ( ) ; i += pair_width ) { if ( i + pair_width > tsuid . length ( ) ) { throw new IllegalArgumentException ( "The TSUID appears to be malformed, improper tag width" ) ; } String tag = tsuid . substring ( i , i + ( TSDB . tagk_width ( ) * 2 ) ) ; tags . add ( UniqueId . stringToUid ( tag ) ) ; tag = tsuid . substring ( i + ( TSDB . tagk_width ( ) * 2 ) , i + pair_width ) ; tags . add ( UniqueId . stringToUid ( tag ) ) ; } return tags ; } | Extracts a list of tagks and tagvs as individual values in a list |
/**
 * Fetches the current maximum assigned UID for each of the given kinds
 * from the MAXID row of the UID table.
 * @param tsdb The TSDB to use for storage access
 * @param kinds UID kind qualifiers to look up (e.g. "metrics", "tagk")
 * @return A deferred map of kind name to max UID used; kinds with no
 *         assignment yet (or a missing row) map to 0
 */
public static Deferred<Map<String, Long>> getUsedUIDs(final TSDB tsdb,
    final byte[][] kinds) {
  // Transforms the raw MAXID row into the kind-to-counter map.
  final class GetCB implements Callback<Map<String, Long>,
      ArrayList<KeyValue>> {
    public Map<String, Long> call(final ArrayList<KeyValue> row)
        throws Exception {
      final Map<String, Long> results = new HashMap<String, Long>(3);
      if (row == null || row.isEmpty()) {
        // No assignments at all yet: report 0 for every requested kind.
        LOG.info("Could not find the UID assignment row");
        for (final byte[] kind : kinds) {
          results.put(new String(kind, CHARSET), 0L);
        }
        return results;
      }
      for (final KeyValue column : row) {
        results.put(new String(column.qualifier(), CHARSET),
            Bytes.getLong(column.value()));
      }
      // Fill in 0 for any requested kind missing from the stored row.
      for (final byte[] kind : kinds) {
        if (results.get(new String(kind, CHARSET)) == null) {
          results.put(new String(kind, CHARSET), 0L);
        }
      }
      return results;
    }
  }
  final GetRequest get = new GetRequest(tsdb.uidTable(), MAXID_ROW);
  get.family(ID_FAMILY);
  get.qualifiers(kinds);
  return tsdb.getClient().get(get).addCallback(new GetCB());
}
/**
 * Pre-loads the given UID caches by scanning up to
 * "tsd.core.preload_uid_cache.max_entries" rows from the UID table.
 * A non-positive limit disables preloading entirely.
 * @param tsdb The TSDB providing config, client and UID table name
 * @param uid_cache_map Map of UID kind qualifier to the UniqueId cache
 *        instance to populate
 * @throws HBaseException if the scan fails
 */
public static void preloadUidCache(final TSDB tsdb,
    final ByteMap<UniqueId> uid_cache_map) throws HBaseException {
  int max_results = tsdb.getConfig().getInt(
      "tsd.core.preload_uid_cache.max_entries");
  LOG.info("Preloading uid cache with max_results=" + max_results);
  if (max_results <= 0) {
    // Preloading disabled via configuration.
    return;
  }
  Scanner scanner = null;
  try {
    int num_rows = 0;
    // Empty search prefix + null kind scans every row/qualifier.
    scanner = getSuggestScanner(tsdb.getClient(), tsdb.uidTable(), "",
        null, max_results);
    for (ArrayList<ArrayList<KeyValue>> rows = scanner.nextRows().join();
         rows != null;
         rows = scanner.nextRows().join()) {
      for (final ArrayList<KeyValue> row : rows) {
        for (KeyValue kv : row) {
          // Row key is the name, qualifier the UID kind, value the UID.
          final String name = fromBytes(kv.key());
          final byte[] kind = kv.qualifier();
          final byte[] id = kv.value();
          LOG.debug("id='{}', name='{}', kind='{}'", Arrays.toString(id),
              name, fromBytes(kind));
          UniqueId uid_cache = uid_cache_map.get(kind);
          if (uid_cache != null) {
            uid_cache.cacheMapping(name, id);
          }
        }
        num_rows += row.size();
        row.clear();  // release the processed cells
        if (num_rows >= max_results) {
          break;
        }
      }
    }
    for (UniqueId unique_id_table : uid_cache_map.values()) {
      LOG.info("After preloading, uid cache '{}' has {} ids and {} names.",
          unique_id_table.kind(),
          unique_id_table.use_lru
              ? unique_id_table.lru_id_cache.size()
              : unique_id_table.id_cache.size(),
          unique_id_table.use_lru
              ? unique_id_table.lru_name_cache.size()
              : unique_id_table.name_cache.size());
    }
  } catch (Exception e) {
    // Rethrow known unchecked types as-is, wrap anything else.
    if (e instanceof HBaseException) {
      throw (HBaseException) e;
    } else if (e instanceof RuntimeException) {
      throw (RuntimeException) e;
    } else {
      throw new RuntimeException("Error while preloading IDs", e);
    }
  } finally {
    if (scanner != null) {
      scanner.close();
    }
  }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.