idx int64 0 165k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
12,200 | public static void skipIndex ( DataInput in ) throws IOException { int columnIndexSize = in . readInt ( ) ; if ( in instanceof FileDataInput ) { FileUtils . skipBytesFully ( in , columnIndexSize ) ; } else { byte [ ] skip = new byte [ columnIndexSize ] ; in . readFully ( skip ) ; } } | Skip the index |
12,201 | public static List < IndexInfo > deserializeIndex ( FileDataInput in , CType type ) throws IOException { int columnIndexSize = in . readInt ( ) ; if ( columnIndexSize == 0 ) return Collections . < IndexInfo > emptyList ( ) ; ArrayList < IndexInfo > indexList = new ArrayList < IndexInfo > ( ) ; FileMark mark = in . mark ( ) ; ISerializer < IndexInfo > serializer = type . indexSerializer ( ) ; while ( in . bytesPastMark ( mark ) < columnIndexSize ) { indexList . add ( serializer . deserialize ( in ) ) ; } assert in . bytesPastMark ( mark ) == columnIndexSize ; return indexList ; } | Deserialize the index into a structure and return it |
12,202 | public Timer newTimer ( String opType , int sampleCount ) { final Timer timer = new Timer ( sampleCount ) ; if ( ! timers . containsKey ( opType ) ) timers . put ( opType , new ArrayList < Timer > ( ) ) ; timers . get ( opType ) . add ( timer ) ; return timer ; } | build a new timer and add it to the set of running timers . |
12,203 | public void close ( org . apache . hadoop . mapred . Reporter reporter ) throws IOException { close ( ) ; } | Fills the deprecated RecordWriter interface for streaming . |
12,204 | static SelectStatement forSelection ( CFMetaData cfm , Selection selection ) { return new SelectStatement ( cfm , 0 , defaultParameters , selection , null ) ; } | queried data through processColumnFamily . |
12,205 | private boolean selectACollection ( ) { if ( ! cfm . comparator . hasCollections ( ) ) return false ; for ( ColumnDefinition def : selection . getColumns ( ) ) { if ( def . type . isCollection ( ) && def . type . isMultiCell ( ) ) return true ; } return false ; } | Returns true if a non - frozen collection is selected false otherwise . |
12,206 | private static Composite addEOC ( Composite composite , Bound eocBound ) { return eocBound == Bound . END ? composite . end ( ) : composite . start ( ) ; } | Adds an EOC to the specified Composite . |
12,207 | private static void addValue ( CBuilder builder , ColumnDefinition def , ByteBuffer value ) throws InvalidRequestException { if ( value == null ) throw new InvalidRequestException ( String . format ( "Invalid null value in condition for column %s" , def . name ) ) ; builder . add ( value ) ; } | Adds the specified value to the specified builder |
12,208 | void processColumnFamily ( ByteBuffer key , ColumnFamily cf , QueryOptions options , long now , Selection . ResultSetBuilder result ) throws InvalidRequestException { CFMetaData cfm = cf . metadata ( ) ; ByteBuffer [ ] keyComponents = null ; if ( cfm . getKeyValidator ( ) instanceof CompositeType ) { keyComponents = ( ( CompositeType ) cfm . getKeyValidator ( ) ) . split ( key ) ; } else { keyComponents = new ByteBuffer [ ] { key } ; } Iterator < Cell > cells = cf . getSortedColumns ( ) . iterator ( ) ; if ( sliceRestriction != null ) cells = applySliceRestriction ( cells , options ) ; CQL3Row . RowIterator iter = cfm . comparator . CQL3RowBuilder ( cfm , now ) . group ( cells ) ; CQL3Row staticRow = iter . getStaticRow ( ) ; if ( staticRow != null && ! iter . hasNext ( ) && ! usesSecondaryIndexing && hasNoClusteringColumnsRestriction ( ) ) { result . newRow ( ) ; for ( ColumnDefinition def : selection . getColumns ( ) ) { switch ( def . kind ) { case PARTITION_KEY : result . add ( keyComponents [ def . position ( ) ] ) ; break ; case STATIC : addValue ( result , def , staticRow , options ) ; break ; default : result . add ( ( ByteBuffer ) null ) ; } } return ; } while ( iter . hasNext ( ) ) { CQL3Row cql3Row = iter . next ( ) ; result . newRow ( ) ; for ( ColumnDefinition def : selection . getColumns ( ) ) { switch ( def . kind ) { case PARTITION_KEY : result . add ( keyComponents [ def . position ( ) ] ) ; break ; case CLUSTERING_COLUMN : result . add ( cql3Row . getClusteringColumn ( def . position ( ) ) ) ; break ; case COMPACT_VALUE : result . add ( cql3Row . getColumn ( null ) ) ; break ; case REGULAR : addValue ( result , def , cql3Row , options ) ; break ; case STATIC : addValue ( result , def , staticRow , options ) ; break ; } } } } | Used by ModificationStatement for CAS operations |
12,209 | private boolean isRestrictedByMultipleContains ( ColumnDefinition columnDef ) { if ( ! columnDef . type . isCollection ( ) ) return false ; Restriction restriction = metadataRestrictions . get ( columnDef . name ) ; if ( ! ( restriction instanceof Contains ) ) return false ; Contains contains = ( Contains ) restriction ; return ( contains . numberOfValues ( ) + contains . numberOfKeys ( ) ) > 1 ; } | Checks if the specified column is restricted by multiple contains or contains key . |
12,210 | synchronized void requestReport ( CountDownLatch signal ) { if ( finalReport != null ) { report = finalReport ; finalReport = new TimingInterval ( 0 ) ; signal . countDown ( ) ; } else reportRequest = signal ; } | checks to see if the timer is dead ; if not requests a report and otherwise fulfills the request itself |
12,211 | public synchronized void close ( ) { if ( reportRequest == null ) finalReport = buildReport ( ) ; else { finalReport = new TimingInterval ( 0 ) ; report = buildReport ( ) ; reportRequest . countDown ( ) ; reportRequest = null ; } } | closes the timer ; if a request is outstanding it furnishes the request otherwise it populates finalReport |
12,212 | public void write ( WritableByteChannel channel ) throws IOException { long totalSize = totalSize ( ) ; RandomAccessReader file = sstable . openDataReader ( ) ; ChecksumValidator validator = new File ( sstable . descriptor . filenameFor ( Component . CRC ) ) . exists ( ) ? DataIntegrityMetadata . checksumValidator ( sstable . descriptor ) : null ; transferBuffer = validator == null ? new byte [ DEFAULT_CHUNK_SIZE ] : new byte [ validator . chunkSize ] ; compressedOutput = new LZFOutputStream ( Channels . newOutputStream ( channel ) ) ; long progress = 0L ; try { for ( Pair < Long , Long > section : sections ) { long start = validator == null ? section . left : validator . chunkStart ( section . left ) ; int readOffset = ( int ) ( section . left - start ) ; file . seek ( start ) ; if ( validator != null ) validator . seek ( start ) ; long length = section . right - start ; long bytesRead = 0 ; while ( bytesRead < length ) { long lastBytesRead = write ( file , validator , readOffset , length , bytesRead ) ; bytesRead += lastBytesRead ; progress += ( lastBytesRead - readOffset ) ; session . progress ( sstable . descriptor , ProgressInfo . Direction . OUT , progress , totalSize ) ; readOffset = 0 ; } compressedOutput . flush ( ) ; } } finally { FileUtils . closeQuietly ( file ) ; FileUtils . closeQuietly ( validator ) ; } } | Stream file of specified sections to given channel . |
12,213 | protected long write ( RandomAccessReader reader , ChecksumValidator validator , int start , long length , long bytesTransferred ) throws IOException { int toTransfer = ( int ) Math . min ( transferBuffer . length , length - bytesTransferred ) ; int minReadable = ( int ) Math . min ( transferBuffer . length , reader . length ( ) - reader . getFilePointer ( ) ) ; reader . readFully ( transferBuffer , 0 , minReadable ) ; if ( validator != null ) validator . validate ( transferBuffer , 0 , minReadable ) ; limiter . acquire ( toTransfer - start ) ; compressedOutput . write ( transferBuffer , start , ( toTransfer - start ) ) ; return toTransfer ; } | Sequentially read bytes from the file and write them to the output stream |
12,214 | public static long sizeOnHeapOf ( ByteBuffer [ ] array ) { long allElementsSize = 0 ; for ( int i = 0 ; i < array . length ; i ++ ) if ( array [ i ] != null ) allElementsSize += sizeOnHeapOf ( array [ i ] ) ; return allElementsSize + sizeOfArray ( array ) ; } | Memory a ByteBuffer array consumes . |
12,215 | public static long sizeOnHeapOf ( ByteBuffer buffer ) { if ( buffer . isDirect ( ) ) return BUFFER_EMPTY_SIZE ; if ( buffer . capacity ( ) > buffer . remaining ( ) ) return buffer . remaining ( ) ; return BUFFER_EMPTY_SIZE + sizeOfArray ( buffer . capacity ( ) , 1 ) ; } | Memory a byte buffer consumes |
12,216 | public static DebuggableThreadPoolExecutor createWithMaximumPoolSize ( String threadPoolName , int size , int keepAliveTime , TimeUnit unit ) { return new DebuggableThreadPoolExecutor ( size , Integer . MAX_VALUE , keepAliveTime , unit , new LinkedBlockingQueue < Runnable > ( ) , new NamedThreadFactory ( threadPoolName ) ) ; } | Returns a ThreadPoolExecutor with a fixed maximum number of threads but whose threads are terminated when idle for too long . When all threads are actively executing tasks new tasks are queued . |
12,217 | public void execute ( Runnable command ) { super . execute ( isTracing ( ) && ! ( command instanceof TraceSessionWrapper ) ? new TraceSessionWrapper < Object > ( Executors . callable ( command , null ) ) : command ) ; } | execute does not call newTaskFor |
12,218 | public void executeCLIStatement ( String statement ) throws CharacterCodingException , TException , TimedOutException , NotFoundException , NoSuchFieldException , InvalidRequestException , UnavailableException , InstantiationException , IllegalAccessException { Tree tree = CliCompiler . compileQuery ( statement ) ; try { switch ( tree . getType ( ) ) { case CliParser . NODE_EXIT : cleanupAndExit ( ) ; break ; case CliParser . NODE_THRIFT_GET : executeGet ( tree ) ; break ; case CliParser . NODE_THRIFT_GET_WITH_CONDITIONS : executeGetWithConditions ( tree ) ; break ; case CliParser . NODE_HELP : executeHelp ( tree ) ; break ; case CliParser . NODE_THRIFT_SET : executeSet ( tree ) ; break ; case CliParser . NODE_THRIFT_DEL : executeDelete ( tree ) ; break ; case CliParser . NODE_THRIFT_COUNT : executeCount ( tree ) ; break ; case CliParser . NODE_ADD_KEYSPACE : executeAddKeySpace ( tree . getChild ( 0 ) ) ; break ; case CliParser . NODE_ADD_COLUMN_FAMILY : executeAddColumnFamily ( tree . getChild ( 0 ) ) ; break ; case CliParser . NODE_UPDATE_KEYSPACE : executeUpdateKeySpace ( tree . getChild ( 0 ) ) ; break ; case CliParser . NODE_UPDATE_COLUMN_FAMILY : executeUpdateColumnFamily ( tree . getChild ( 0 ) ) ; break ; case CliParser . NODE_DEL_COLUMN_FAMILY : executeDelColumnFamily ( tree ) ; break ; case CliParser . NODE_DEL_KEYSPACE : executeDelKeySpace ( tree ) ; break ; case CliParser . NODE_SHOW_CLUSTER_NAME : executeShowClusterName ( ) ; break ; case CliParser . NODE_SHOW_VERSION : executeShowVersion ( ) ; break ; case CliParser . NODE_SHOW_KEYSPACES : executeShowKeySpaces ( ) ; break ; case CliParser . NODE_SHOW_SCHEMA : executeShowSchema ( tree ) ; break ; case CliParser . NODE_DESCRIBE : executeDescribe ( tree ) ; break ; case CliParser . NODE_DESCRIBE_CLUSTER : executeDescribeCluster ( ) ; break ; case CliParser . NODE_USE_TABLE : executeUseKeySpace ( tree ) ; break ; case CliParser . 
NODE_TRACE_NEXT_QUERY : executeTraceNextQuery ( ) ; break ; case CliParser . NODE_CONNECT : executeConnect ( tree ) ; break ; case CliParser . NODE_LIST : executeList ( tree ) ; break ; case CliParser . NODE_TRUNCATE : executeTruncate ( tree . getChild ( 0 ) . getText ( ) ) ; break ; case CliParser . NODE_ASSUME : executeAssumeStatement ( tree ) ; break ; case CliParser . NODE_CONSISTENCY_LEVEL : executeConsistencyLevelStatement ( tree ) ; break ; case CliParser . NODE_THRIFT_INCR : executeIncr ( tree , 1L ) ; break ; case CliParser . NODE_THRIFT_DECR : executeIncr ( tree , - 1L ) ; break ; case CliParser . NODE_DROP_INDEX : executeDropIndex ( tree ) ; break ; case CliParser . NODE_NO_OP : break ; default : sessionState . err . println ( "Invalid Statement (Type: " + tree . getType ( ) + ")" ) ; if ( sessionState . batch ) System . exit ( 2 ) ; break ; } } catch ( SchemaDisagreementException e ) { throw new RuntimeException ( "schema does not match across nodes, (try again later)." , e ) ; } } | Execute a CLI Statement |
12,219 | private void executeSet ( Tree statement ) throws TException , InvalidRequestException , UnavailableException , TimedOutException { if ( ! CliMain . isConnected ( ) || ! hasKeySpace ( ) ) return ; long startTime = System . nanoTime ( ) ; Tree columnFamilySpec = statement . getChild ( 0 ) ; Tree keyTree = columnFamilySpec . getChild ( 1 ) ; String columnFamily = CliCompiler . getColumnFamily ( columnFamilySpec , currentCfDefs ( ) ) ; CfDef cfDef = getCfDef ( columnFamily ) ; int columnSpecCnt = CliCompiler . numColumnSpecifiers ( columnFamilySpec ) ; String value = CliUtils . unescapeSQLString ( statement . getChild ( 1 ) . getText ( ) ) ; Tree valueTree = statement . getChild ( 1 ) ; byte [ ] superColumnName = null ; ByteBuffer columnName ; if ( columnSpecCnt == 0 ) { sessionState . err . println ( "No cell name specified, (type 'help;' or '?' for help on syntax)." ) ; return ; } else if ( columnSpecCnt == 1 ) { if ( cfDef . column_type . equals ( "Super" ) ) { sessionState . out . println ( "Column family " + columnFamily + " may only contain SuperColumns" ) ; return ; } columnName = getColumnName ( columnFamily , columnFamilySpec . getChild ( 2 ) ) ; } else { assert ( columnSpecCnt == 2 ) : "serious parsing error (this is a bug)." ; superColumnName = getColumnName ( columnFamily , columnFamilySpec . getChild ( 2 ) ) . array ( ) ; columnName = getSubColumnName ( columnFamily , columnFamilySpec . getChild ( 3 ) ) ; } ByteBuffer columnValueInBytes ; switch ( valueTree . getType ( ) ) { case CliParser . FUNCTION_CALL : columnValueInBytes = convertValueByFunction ( valueTree , cfDef , columnName , true ) ; break ; default : columnValueInBytes = columnValueAsBytes ( columnName , columnFamily , value ) ; } ColumnParent parent = new ColumnParent ( columnFamily ) ; if ( superColumnName != null ) parent . setSuper_column ( superColumnName ) ; Column columnToInsert = new Column ( columnName ) . setValue ( columnValueInBytes ) . setTimestamp ( FBUtilities . 
timestampMicros ( ) ) ; if ( statement . getChildCount ( ) == 3 ) { String ttl = statement . getChild ( 2 ) . getText ( ) ; try { columnToInsert . setTtl ( Integer . parseInt ( ttl ) ) ; } catch ( NumberFormatException e ) { sessionState . err . println ( String . format ( "TTL '%s' is invalid, should be a positive integer." , ttl ) ) ; return ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } thriftClient . insert ( getKeyAsBytes ( columnFamily , keyTree ) , parent , columnToInsert , consistencyLevel ) ; sessionState . out . println ( "Value inserted." ) ; elapsedTime ( startTime ) ; } | Execute SET statement |
12,220 | private void executeIncr ( Tree statement , long multiplier ) throws TException , NotFoundException , InvalidRequestException , UnavailableException , TimedOutException { if ( ! CliMain . isConnected ( ) || ! hasKeySpace ( ) ) return ; Tree columnFamilySpec = statement . getChild ( 0 ) ; String columnFamily = CliCompiler . getColumnFamily ( columnFamilySpec , currentCfDefs ( ) ) ; ByteBuffer key = getKeyAsBytes ( columnFamily , columnFamilySpec . getChild ( 1 ) ) ; int columnSpecCnt = CliCompiler . numColumnSpecifiers ( columnFamilySpec ) ; byte [ ] superColumnName = null ; ByteBuffer columnName ; if ( columnSpecCnt == 1 ) { columnName = getColumnName ( columnFamily , columnFamilySpec . getChild ( 2 ) ) ; } else if ( columnSpecCnt == 2 ) { superColumnName = getColumnName ( columnFamily , columnFamilySpec . getChild ( 2 ) ) . array ( ) ; columnName = getSubColumnName ( columnFamily , columnFamilySpec . getChild ( 3 ) ) ; } else { sessionState . out . println ( "Invalid row, super column, or column specification." ) ; return ; } ColumnParent parent = new ColumnParent ( columnFamily ) ; if ( superColumnName != null ) parent . setSuper_column ( superColumnName ) ; long value = 1L ; if ( statement . getChildCount ( ) == 2 ) { String byValue = statement . getChild ( 1 ) . getText ( ) ; try { value = Long . parseLong ( byValue ) ; } catch ( NumberFormatException e ) { sessionState . err . println ( String . format ( "'%s' is an invalid value, should be an integer." , byValue ) ) ; return ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } CounterColumn columnToInsert = new CounterColumn ( columnName , multiplier * value ) ; thriftClient . add ( key , parent , columnToInsert , consistencyLevel ) ; sessionState . out . printf ( "Value %s%n" , multiplier < 0 ? "decremented." : "incremented." ) ; } | Execute INCR statement |
12,221 | private void executeAddKeySpace ( Tree statement ) { if ( ! CliMain . isConnected ( ) ) return ; String keyspaceName = CliUtils . unescapeSQLString ( statement . getChild ( 0 ) . getText ( ) ) ; KsDef ksDef = new KsDef ( keyspaceName , DEFAULT_PLACEMENT_STRATEGY , new LinkedList < CfDef > ( ) ) ; try { String mySchemaVersion = thriftClient . system_add_keyspace ( updateKsDefAttributes ( statement , ksDef ) ) ; sessionState . out . println ( mySchemaVersion ) ; keyspacesMap . put ( keyspaceName , thriftClient . describe_keyspace ( keyspaceName ) ) ; } catch ( InvalidRequestException e ) { throw new RuntimeException ( e ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | Add a keyspace |
12,222 | private void executeAddColumnFamily ( Tree statement ) { if ( ! CliMain . isConnected ( ) || ! hasKeySpace ( ) ) return ; CfDef cfDef = new CfDef ( keySpace , CliUtils . unescapeSQLString ( statement . getChild ( 0 ) . getText ( ) ) ) ; try { String mySchemaVersion = thriftClient . system_add_column_family ( updateCfDefAttributes ( statement , cfDef ) ) ; sessionState . out . println ( mySchemaVersion ) ; keyspacesMap . put ( keySpace , thriftClient . describe_keyspace ( keySpace ) ) ; } catch ( InvalidRequestException e ) { throw new RuntimeException ( e ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | Add a column family |
12,223 | private void executeUpdateKeySpace ( Tree statement ) { if ( ! CliMain . isConnected ( ) ) return ; try { String keyspaceName = CliCompiler . getKeySpace ( statement , thriftClient . describe_keyspaces ( ) ) ; KsDef currentKsDef = getKSMetaData ( keyspaceName ) ; KsDef updatedKsDef = updateKsDefAttributes ( statement , currentKsDef ) ; String mySchemaVersion = thriftClient . system_update_keyspace ( updatedKsDef ) ; sessionState . out . println ( mySchemaVersion ) ; keyspacesMap . remove ( keyspaceName ) ; getKSMetaData ( keyspaceName ) ; } catch ( InvalidRequestException e ) { throw new RuntimeException ( e ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | Update existing keyspace identified by name |
12,224 | private void executeUpdateColumnFamily ( Tree statement ) { if ( ! CliMain . isConnected ( ) || ! hasKeySpace ( ) ) return ; String cfName = CliCompiler . getColumnFamily ( statement , currentCfDefs ( ) ) ; try { CfDef cfDef = getCfDef ( thriftClient . describe_keyspace ( this . keySpace ) , cfName , true ) ; if ( cfDef == null ) throw new RuntimeException ( "Column Family " + cfName + " was not found in the current keyspace." ) ; String mySchemaVersion = thriftClient . system_update_column_family ( updateCfDefAttributes ( statement , cfDef ) ) ; sessionState . out . println ( mySchemaVersion ) ; keyspacesMap . put ( keySpace , thriftClient . describe_keyspace ( keySpace ) ) ; } catch ( InvalidRequestException e ) { throw new RuntimeException ( e ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | Update existing column family identified by name |
12,225 | private KsDef updateKsDefAttributes ( Tree statement , KsDef ksDefToUpdate ) { KsDef ksDef = new KsDef ( ksDefToUpdate ) ; ksDef . setCf_defs ( new LinkedList < CfDef > ( ) ) ; for ( int i = 1 ; i < statement . getChildCount ( ) ; i += 2 ) { String currentStatement = statement . getChild ( i ) . getText ( ) . toUpperCase ( ) ; AddKeyspaceArgument mArgument = AddKeyspaceArgument . valueOf ( currentStatement ) ; String mValue = statement . getChild ( i + 1 ) . getText ( ) ; switch ( mArgument ) { case PLACEMENT_STRATEGY : ksDef . setStrategy_class ( CliUtils . unescapeSQLString ( mValue ) ) ; break ; case STRATEGY_OPTIONS : ksDef . setStrategy_options ( getStrategyOptionsFromTree ( statement . getChild ( i + 1 ) ) ) ; break ; case DURABLE_WRITES : ksDef . setDurable_writes ( Boolean . parseBoolean ( mValue ) ) ; break ; default : assert ( false ) ; } } if ( ksDef . getStrategy_class ( ) . contains ( ".NetworkTopologyStrategy" ) ) { Map < String , String > currentStrategyOptions = ksDef . getStrategy_options ( ) ; if ( currentStrategyOptions == null || currentStrategyOptions . isEmpty ( ) ) { SimpleSnitch snitch = new SimpleSnitch ( ) ; Map < String , String > options = new HashMap < String , String > ( ) ; try { options . put ( snitch . getDatacenter ( InetAddress . getLocalHost ( ) ) , "1" ) ; } catch ( UnknownHostException e ) { throw new RuntimeException ( e ) ; } ksDef . setStrategy_options ( options ) ; } } return ksDef ; } | Used to update keyspace definition attributes |
12,226 | private void executeDelKeySpace ( Tree statement ) throws TException , InvalidRequestException , NotFoundException , SchemaDisagreementException { if ( ! CliMain . isConnected ( ) ) return ; String keyspaceName = CliCompiler . getKeySpace ( statement , thriftClient . describe_keyspaces ( ) ) ; String version = thriftClient . system_drop_keyspace ( keyspaceName ) ; sessionState . out . println ( version ) ; if ( keyspaceName . equals ( keySpace ) ) keySpace = null ; } | Delete a keyspace |
12,227 | private void executeDelColumnFamily ( Tree statement ) throws TException , InvalidRequestException , NotFoundException , SchemaDisagreementException { if ( ! CliMain . isConnected ( ) || ! hasKeySpace ( ) ) return ; String cfName = CliCompiler . getColumnFamily ( statement , currentCfDefs ( ) ) ; String mySchemaVersion = thriftClient . system_drop_column_family ( cfName ) ; sessionState . out . println ( mySchemaVersion ) ; } | Delete a column family |
12,228 | private void showKeyspace ( PrintStream output , KsDef ksDef ) { output . append ( "create keyspace " ) . append ( CliUtils . maybeEscapeName ( ksDef . name ) ) ; writeAttr ( output , true , "placement_strategy" , normaliseType ( ksDef . strategy_class , "org.apache.cassandra.locator" ) ) ; if ( ksDef . strategy_options != null && ! ksDef . strategy_options . isEmpty ( ) ) { final StringBuilder opts = new StringBuilder ( ) ; opts . append ( "{" ) ; String prefix = "" ; for ( Map . Entry < String , String > opt : ksDef . strategy_options . entrySet ( ) ) { opts . append ( prefix ) . append ( CliUtils . escapeSQLString ( opt . getKey ( ) ) ) . append ( " : " ) . append ( CliUtils . escapeSQLString ( opt . getValue ( ) ) ) ; prefix = ", " ; } opts . append ( "}" ) ; writeAttrRaw ( output , false , "strategy_options" , opts . toString ( ) ) ; } writeAttr ( output , false , "durable_writes" , ksDef . durable_writes ) ; output . append ( ";" ) . append ( NEWLINE ) ; output . append ( NEWLINE ) ; output . append ( "use " ) . append ( CliUtils . maybeEscapeName ( ksDef . name ) ) . append ( ";" ) ; output . append ( NEWLINE ) ; output . append ( NEWLINE ) ; Collections . sort ( ksDef . cf_defs , new CfDefNamesComparator ( ) ) ; for ( CfDef cfDef : ksDef . cf_defs ) showColumnFamily ( output , cfDef ) ; output . append ( NEWLINE ) ; output . append ( NEWLINE ) ; } | Creates a CLI script to create the Keyspace and its Column Families |
12,229 | private void showColumnMeta ( PrintStream output , CfDef cfDef , ColumnDef colDef ) { output . append ( NEWLINE + TAB + TAB + "{" ) ; final AbstractType < ? > comparator = getFormatType ( cfDef . column_type . equals ( "Super" ) ? cfDef . subcomparator_type : cfDef . comparator_type ) ; output . append ( "column_name : '" + CliUtils . escapeSQLString ( comparator . getString ( colDef . name ) ) + "'," + NEWLINE ) ; String validationClass = normaliseType ( colDef . validation_class , "org.apache.cassandra.db.marshal" ) ; output . append ( TAB + TAB + "validation_class : " + CliUtils . escapeSQLString ( validationClass ) ) ; if ( colDef . isSetIndex_name ( ) ) { output . append ( "," ) . append ( NEWLINE ) . append ( TAB + TAB + "index_name : '" + CliUtils . escapeSQLString ( colDef . index_name ) + "'," + NEWLINE ) . append ( TAB + TAB + "index_type : " + CliUtils . escapeSQLString ( Integer . toString ( colDef . index_type . getValue ( ) ) ) ) ; if ( colDef . index_options != null && ! colDef . index_options . isEmpty ( ) ) { output . append ( "," ) . append ( NEWLINE ) ; output . append ( TAB + TAB + "index_options : {" + NEWLINE ) ; int numOpts = colDef . index_options . size ( ) ; for ( Map . Entry < String , String > entry : colDef . index_options . entrySet ( ) ) { String option = CliUtils . escapeSQLString ( entry . getKey ( ) ) ; String optionValue = CliUtils . escapeSQLString ( entry . getValue ( ) ) ; output . append ( TAB + TAB + TAB ) . append ( "'" + option + "' : '" ) . append ( optionValue ) . append ( "'" ) ; if ( -- numOpts > 0 ) output . append ( "," ) . append ( NEWLINE ) ; } output . append ( "}" ) ; } } output . append ( "}" ) ; } | Writes the supplied ColumnDef to the StringBuilder as a cli script . |
12,230 | private boolean hasKeySpace ( boolean printError ) { boolean hasKeyspace = keySpace != null ; if ( ! hasKeyspace && printError ) sessionState . err . println ( "Not authorized to a working keyspace." ) ; return hasKeyspace ; } | Returns true if this . keySpace is set false otherwise |
12,231 | private IndexType getIndexTypeFromString ( String indexTypeAsString ) { IndexType indexType ; try { indexType = IndexType . findByValue ( new Integer ( indexTypeAsString ) ) ; } catch ( NumberFormatException e ) { try { indexType = IndexType . valueOf ( indexTypeAsString ) ; } catch ( IllegalArgumentException ie ) { throw new RuntimeException ( "IndexType '" + indexTypeAsString + "' is unsupported." , ie ) ; } } if ( indexType == null ) { throw new RuntimeException ( "IndexType '" + indexTypeAsString + "' is unsupported." ) ; } return indexType ; } | Getting IndexType object from indexType string |
12,232 | private ByteBuffer subColumnNameAsBytes ( String superColumn , String columnFamily ) { CfDef columnFamilyDef = getCfDef ( columnFamily ) ; return subColumnNameAsBytes ( superColumn , columnFamilyDef ) ; } | Converts sub - column name into ByteBuffer according to comparator type |
12,233 | private ByteBuffer subColumnNameAsBytes ( String superColumn , CfDef columnFamilyDef ) { String comparatorClass = columnFamilyDef . subcomparator_type ; if ( comparatorClass == null ) { sessionState . out . println ( String . format ( "Notice: defaulting to BytesType subcomparator for '%s'" , columnFamilyDef . getName ( ) ) ) ; comparatorClass = "BytesType" ; } return getBytesAccordingToType ( superColumn , getFormatType ( comparatorClass ) ) ; } | Converts sub - column name into ByteBuffer according to subcomparator type |
12,234 | private AbstractType < ? > getValidatorForValue ( CfDef cfDef , byte [ ] columnNameInBytes ) { String defaultValidator = cfDef . default_validation_class ; for ( ColumnDef columnDefinition : cfDef . getColumn_metadata ( ) ) { byte [ ] nameInBytes = columnDefinition . getName ( ) ; if ( Arrays . equals ( nameInBytes , columnNameInBytes ) ) { return getFormatType ( columnDefinition . getValidation_class ( ) ) ; } } if ( defaultValidator != null && ! defaultValidator . isEmpty ( ) ) { return getFormatType ( defaultValidator ) ; } return null ; } | Get validator for specific column value |
12,235 | public static AbstractType < ? > getTypeByFunction ( String functionName ) { Function function ; try { function = Function . valueOf ( functionName . toUpperCase ( ) ) ; } catch ( IllegalArgumentException e ) { String message = String . format ( "Function '%s' not found. Available functions: %s" , functionName , Function . getFunctionNames ( ) ) ; throw new RuntimeException ( message , e ) ; } return function . getValidator ( ) ; } | Get AbstractType by function name |
12,236 | private void updateColumnMetaData ( CfDef columnFamily , ByteBuffer columnName , String validationClass ) { ColumnDef column = getColumnDefByName ( columnFamily , columnName ) ; if ( column != null ) { if ( column . getValidation_class ( ) . equals ( validationClass ) ) return ; column . setValidation_class ( validationClass ) ; } else { List < ColumnDef > columnMetaData = new ArrayList < ColumnDef > ( columnFamily . getColumn_metadata ( ) ) ; columnMetaData . add ( new ColumnDef ( columnName , validationClass ) ) ; columnFamily . setColumn_metadata ( columnMetaData ) ; } } | Used to locally update column family definition with new column metadata |
12,237 | private ColumnDef getColumnDefByName ( CfDef columnFamily , ByteBuffer columnName ) { for ( ColumnDef columnDef : columnFamily . getColumn_metadata ( ) ) { byte [ ] currName = columnDef . getName ( ) ; if ( ByteBufferUtil . compare ( currName , columnName ) == 0 ) { return columnDef ; } } return null ; } | Get specific ColumnDef in column family meta data by column name |
12,238 | private String formatSubcolumnName ( String keyspace , String columnFamily , ByteBuffer name ) { return getFormatType ( getCfDef ( keyspace , columnFamily ) . subcomparator_type ) . getString ( name ) ; } | returns sub - column name in human - readable format |
12,239 | private String formatColumnName ( String keyspace , String columnFamily , ByteBuffer name ) { return getFormatType ( getCfDef ( keyspace , columnFamily ) . comparator_type ) . getString ( name ) ; } | returns column name in human - readable format |
12,240 | private void elapsedTime ( long startTime ) { long eta = System . nanoTime ( ) - startTime ; sessionState . out . print ( "Elapsed time: " ) ; if ( eta < 10000000 ) { sessionState . out . print ( Math . round ( eta / 10000.0 ) / 100.0 ) ; } else { sessionState . out . print ( Math . round ( eta / 1000000.0 ) ) ; } sessionState . out . println ( " msec(s)." ) ; } | Print elapsed time . Print 2 fraction digits if eta is under 10 ms . |
12,241 | public void seek ( long pos ) throws IOException { long inSegmentPos = pos - segmentOffset ; if ( inSegmentPos < 0 || inSegmentPos > buffer . capacity ( ) ) throw new IOException ( String . format ( "Seek position %d is not within mmap segment (seg offs: %d, length: %d)" , pos , segmentOffset , buffer . capacity ( ) ) ) ; seekInternal ( ( int ) inSegmentPos ) ; } | IOException otherwise . |
12,242 | public void index ( ByteBuffer key , ColumnFamily columnFamily ) { Log . debug ( "Indexing row %s in index %s " , key , logName ) ; lock . readLock ( ) . lock ( ) ; try { if ( rowService != null ) { long timestamp = System . currentTimeMillis ( ) ; rowService . index ( key , columnFamily , timestamp ) ; } } catch ( RuntimeException e ) { Log . error ( "Error while indexing row %s" , key ) ; throw e ; } finally { lock . readLock ( ) . unlock ( ) ; } } | Index the given row . |
12,243 | public void delete ( DecoratedKey key , OpOrder . Group opGroup ) { Log . debug ( "Removing row %s from index %s" , key , logName ) ; lock . writeLock ( ) . lock ( ) ; try { rowService . delete ( key ) ; rowService = null ; } catch ( RuntimeException e ) { Log . error ( e , "Error deleting row %s" , key ) ; throw e ; } finally { lock . writeLock ( ) . unlock ( ) ; } } | cleans up deleted columns from cassandra cleanup compaction |
12,244 | public void sendTreeRequests ( Collection < InetAddress > endpoints ) { List < InetAddress > allEndpoints = new ArrayList < > ( endpoints ) ; allEndpoints . add ( FBUtilities . getBroadcastAddress ( ) ) ; if ( parallelismDegree != RepairParallelism . PARALLEL ) { List < ListenableFuture < InetAddress > > snapshotTasks = new ArrayList < > ( allEndpoints . size ( ) ) ; for ( InetAddress endpoint : allEndpoints ) { SnapshotTask snapshotTask = new SnapshotTask ( desc , endpoint ) ; snapshotTasks . add ( snapshotTask ) ; taskExecutor . execute ( snapshotTask ) ; } ListenableFuture < List < InetAddress > > allSnapshotTasks = Futures . allAsList ( snapshotTasks ) ; Futures . addCallback ( allSnapshotTasks , new FutureCallback < List < InetAddress > > ( ) { public void onSuccess ( List < InetAddress > endpoints ) { sendTreeRequestsInternal ( endpoints ) ; } public void onFailure ( Throwable throwable ) { logger . error ( "Error occurred during snapshot phase" , throwable ) ; listener . failedSnapshot ( ) ; failed = true ; } } , taskExecutor ) ; } else { sendTreeRequestsInternal ( allEndpoints ) ; } } | Send merkle tree request to every involved neighbor . |
12,245 | public synchronized int addTree ( InetAddress endpoint , MerkleTree tree ) { try { requestsSent . await ( ) ; } catch ( InterruptedException e ) { throw new AssertionError ( "Interrupted while waiting for requests to be sent" ) ; } if ( tree == null ) failed = true ; else trees . add ( new TreeResponse ( endpoint , tree ) ) ; return treeRequests . completed ( endpoint ) ; } | Add a new received tree and return the number of remaining tree to be received for the job to be complete . |
12,246 | public String getString ( ByteBuffer bytes ) { TypeSerializer < T > serializer = getSerializer ( ) ; serializer . validate ( bytes ) ; return serializer . toString ( serializer . deserialize ( bytes ) ) ; } | get a string representation of the bytes suitable for log messages |
12,247 | public boolean isValueCompatibleWith ( AbstractType < ? > otherType ) { return isValueCompatibleWithInternal ( ( otherType instanceof ReversedType ) ? ( ( ReversedType ) otherType ) . baseType : otherType ) ; } | Returns true if values of the other AbstractType can be read and reasonably interpreted by the this AbstractType . Note that this is a weaker version of isCompatibleWith as it does not require that both type compare values the same way . |
12,248 | public int compareCollectionMembers ( ByteBuffer v1 , ByteBuffer v2 , ByteBuffer collectionName ) { return compare ( v1 , v2 ) ; } | An alternative comparison function used by CollectionsType in conjunction with CompositeType . |
12,249 | private static void writeKey ( PrintStream out , String value ) { writeJSON ( out , value ) ; out . print ( ": " ) ; } | JSON Hash Key serializer |
12,250 | private static List < Object > serializeColumn ( Cell cell , CFMetaData cfMetaData ) { CellNameType comparator = cfMetaData . comparator ; ArrayList < Object > serializedColumn = new ArrayList < Object > ( ) ; serializedColumn . add ( comparator . getString ( cell . name ( ) ) ) ; if ( cell instanceof DeletedCell ) { serializedColumn . add ( cell . getLocalDeletionTime ( ) ) ; } else { AbstractType < ? > validator = cfMetaData . getValueValidator ( cell . name ( ) ) ; serializedColumn . add ( validator . getString ( cell . value ( ) ) ) ; } serializedColumn . add ( cell . timestamp ( ) ) ; if ( cell instanceof DeletedCell ) { serializedColumn . add ( "d" ) ; } else if ( cell instanceof ExpiringCell ) { serializedColumn . add ( "e" ) ; serializedColumn . add ( ( ( ExpiringCell ) cell ) . getTimeToLive ( ) ) ; serializedColumn . add ( cell . getLocalDeletionTime ( ) ) ; } else if ( cell instanceof CounterCell ) { serializedColumn . add ( "c" ) ; serializedColumn . add ( ( ( CounterCell ) cell ) . timestampOfLastDelete ( ) ) ; } return serializedColumn ; } | Serialize a given cell to a List of Objects that jsonMapper knows how to turn into strings . Format is |
12,251 | private static void serializeRow ( SSTableIdentityIterator row , DecoratedKey key , PrintStream out ) { serializeRow ( row . getColumnFamily ( ) . deletionInfo ( ) , row , row . getColumnFamily ( ) . metadata ( ) , key , out ) ; } | Get portion of the columns and serialize in loop while not more columns left in the row |
12,252 | public static void enumeratekeys ( Descriptor desc , PrintStream outs , CFMetaData metadata ) throws IOException { KeyIterator iter = new KeyIterator ( desc ) ; try { DecoratedKey lastKey = null ; while ( iter . hasNext ( ) ) { DecoratedKey key = iter . next ( ) ; if ( lastKey != null && lastKey . compareTo ( key ) > 0 ) throw new IOException ( "Key out of order! " + lastKey + " > " + key ) ; lastKey = key ; outs . println ( metadata . getKeyValidator ( ) . getString ( key . getKey ( ) ) ) ; checkStream ( outs ) ; } } finally { iter . close ( ) ; } } | Enumerate row keys from an SSTableReader and write the result to a PrintStream . |
12,253 | public static void export ( Descriptor desc , PrintStream outs , Collection < String > toExport , String [ ] excludes , CFMetaData metadata ) throws IOException { SSTableReader sstable = SSTableReader . open ( desc ) ; RandomAccessReader dfile = sstable . openDataReader ( ) ; try { IPartitioner partitioner = sstable . partitioner ; if ( excludes != null ) toExport . removeAll ( Arrays . asList ( excludes ) ) ; outs . println ( "[" ) ; int i = 0 ; DecoratedKey lastKey = null ; for ( String key : toExport ) { DecoratedKey decoratedKey = partitioner . decorateKey ( metadata . getKeyValidator ( ) . fromString ( key ) ) ; if ( lastKey != null && lastKey . compareTo ( decoratedKey ) > 0 ) throw new IOException ( "Key out of order! " + lastKey + " > " + decoratedKey ) ; lastKey = decoratedKey ; RowIndexEntry entry = sstable . getPosition ( decoratedKey , SSTableReader . Operator . EQ ) ; if ( entry == null ) continue ; dfile . seek ( entry . position ) ; ByteBufferUtil . readWithShortLength ( dfile ) ; DeletionInfo deletionInfo = new DeletionInfo ( DeletionTime . serializer . deserialize ( dfile ) ) ; Iterator < OnDiskAtom > atomIterator = sstable . metadata . getOnDiskIterator ( dfile , sstable . descriptor . version ) ; checkStream ( outs ) ; if ( i != 0 ) outs . println ( "," ) ; i ++ ; serializeRow ( deletionInfo , atomIterator , sstable . metadata , decoratedKey , outs ) ; } outs . println ( "\n]" ) ; outs . flush ( ) ; } finally { dfile . close ( ) ; } } | Export specific rows from an SSTable and write the resulting JSON to a PrintStream . |
12,254 | static void export ( SSTableReader reader , PrintStream outs , String [ ] excludes , CFMetaData metadata ) throws IOException { Set < String > excludeSet = new HashSet < String > ( ) ; if ( excludes != null ) excludeSet = new HashSet < String > ( Arrays . asList ( excludes ) ) ; SSTableIdentityIterator row ; ISSTableScanner scanner = reader . getScanner ( ) ; try { outs . println ( "[" ) ; int i = 0 ; while ( scanner . hasNext ( ) ) { row = ( SSTableIdentityIterator ) scanner . next ( ) ; String currentKey = row . getColumnFamily ( ) . metadata ( ) . getKeyValidator ( ) . getString ( row . getKey ( ) . getKey ( ) ) ; if ( excludeSet . contains ( currentKey ) ) continue ; else if ( i != 0 ) outs . println ( "," ) ; serializeRow ( row , row . getKey ( ) , outs ) ; checkStream ( outs ) ; i ++ ; } outs . println ( "\n]" ) ; outs . flush ( ) ; } finally { scanner . close ( ) ; } } | than once from within the same process . |
12,255 | public static void export ( Descriptor desc , PrintStream outs , String [ ] excludes , CFMetaData metadata ) throws IOException { export ( SSTableReader . open ( desc ) , outs , excludes , metadata ) ; } | Export an SSTable and write the resulting JSON to a PrintStream . |
12,256 | public static void export ( Descriptor desc , String [ ] excludes , CFMetaData metadata ) throws IOException { export ( desc , System . out , excludes , metadata ) ; } | Export an SSTable and write the resulting JSON to standard out . |
12,257 | public static void main ( String [ ] args ) throws ConfigurationException { String usage = String . format ( "Usage: %s <sstable> [-k key [-k key [...]] -x key [-x key [...]]]%n" , SSTableExport . class . getName ( ) ) ; CommandLineParser parser = new PosixParser ( ) ; try { cmd = parser . parse ( options , args ) ; } catch ( ParseException e1 ) { System . err . println ( e1 . getMessage ( ) ) ; System . err . println ( usage ) ; System . exit ( 1 ) ; } if ( cmd . getArgs ( ) . length != 1 ) { System . err . println ( "You must supply exactly one sstable" ) ; System . err . println ( usage ) ; System . exit ( 1 ) ; } String [ ] keys = cmd . getOptionValues ( KEY_OPTION ) ; String [ ] excludes = cmd . getOptionValues ( EXCLUDEKEY_OPTION ) ; String ssTableFileName = new File ( cmd . getArgs ( ) [ 0 ] ) . getAbsolutePath ( ) ; DatabaseDescriptor . loadSchemas ( false ) ; Descriptor descriptor = Descriptor . fromFilename ( ssTableFileName ) ; if ( Schema . instance . getKSMetaData ( descriptor . ksname ) == null ) { System . err . println ( String . format ( "Filename %s references to nonexistent keyspace: %s!" , ssTableFileName , descriptor . ksname ) ) ; System . exit ( 1 ) ; } Keyspace keyspace = Keyspace . open ( descriptor . ksname ) ; String baseName = descriptor . cfname ; if ( descriptor . cfname . contains ( "." ) ) { String [ ] parts = descriptor . cfname . split ( "\\." , 2 ) ; baseName = parts [ 0 ] ; } ColumnFamilyStore cfStore = null ; try { cfStore = keyspace . getColumnFamilyStore ( baseName ) ; } catch ( IllegalArgumentException e ) { System . err . println ( String . format ( "The provided column family is not part of this cassandra keyspace: keyspace = %s, column family = %s" , descriptor . ksname , descriptor . cfname ) ) ; System . exit ( 1 ) ; } try { if ( cmd . hasOption ( ENUMERATEKEYS_OPTION ) ) { enumeratekeys ( descriptor , System . out , cfStore . metadata ) ; } else { if ( ( keys != null ) && ( keys . 
length > 0 ) ) export ( descriptor , System . out , Arrays . asList ( keys ) , excludes , cfStore . metadata ) ; else export ( descriptor , excludes , cfStore . metadata ) ; } } catch ( IOException e ) { e . printStackTrace ( System . err ) ; } System . exit ( 0 ) ; } | Given arguments specifying an SSTable and optionally an output file export the contents of the SSTable to JSON . |
12,258 | public SemanticVersion findSupportingVersion ( SemanticVersion ... versions ) { for ( SemanticVersion version : versions ) { if ( isSupportedBy ( version ) ) return version ; } return null ; } | Returns a version that is backward compatible with this version amongst a list of provided version or null if none can be found . |
12,259 | private Pair < List < SSTableReader > , Multimap < DataTracker , SSTableReader > > getCompactingAndNonCompactingSSTables ( ) { List < SSTableReader > allCompacting = new ArrayList < > ( ) ; Multimap < DataTracker , SSTableReader > allNonCompacting = HashMultimap . create ( ) ; for ( Keyspace ks : Keyspace . all ( ) ) { for ( ColumnFamilyStore cfStore : ks . getColumnFamilyStores ( ) ) { Set < SSTableReader > nonCompacting , allSSTables ; do { allSSTables = cfStore . getDataTracker ( ) . getSSTables ( ) ; nonCompacting = Sets . newHashSet ( cfStore . getDataTracker ( ) . getUncompactingSSTables ( allSSTables ) ) ; } while ( ! ( nonCompacting . isEmpty ( ) || cfStore . getDataTracker ( ) . markCompacting ( nonCompacting ) ) ) ; allNonCompacting . putAll ( cfStore . getDataTracker ( ) , nonCompacting ) ; allCompacting . addAll ( Sets . difference ( allSSTables , nonCompacting ) ) ; } } return Pair . create ( allCompacting , allNonCompacting ) ; } | Returns a Pair of all compacting and non - compacting sstables . Non - compacting sstables will be marked as compacting . |
12,260 | public static List < SSTableReader > redistributeSummaries ( List < SSTableReader > compacting , List < SSTableReader > nonCompacting , long memoryPoolBytes ) throws IOException { long total = 0 ; for ( SSTableReader sstable : Iterables . concat ( compacting , nonCompacting ) ) total += sstable . getIndexSummaryOffHeapSize ( ) ; List < SSTableReader > oldFormatSSTables = new ArrayList < > ( ) ; for ( SSTableReader sstable : nonCompacting ) { logger . trace ( "SSTable {} cannot be re-sampled due to old sstable format" , sstable ) ; if ( ! sstable . descriptor . version . hasSamplingLevel ) oldFormatSSTables . add ( sstable ) ; } nonCompacting . removeAll ( oldFormatSSTables ) ; logger . debug ( "Beginning redistribution of index summaries for {} sstables with memory pool size {} MB; current spaced used is {} MB" , nonCompacting . size ( ) , memoryPoolBytes / 1024L / 1024L , total / 1024.0 / 1024.0 ) ; final Map < SSTableReader , Double > readRates = new HashMap < > ( nonCompacting . size ( ) ) ; double totalReadsPerSec = 0.0 ; for ( SSTableReader sstable : nonCompacting ) { if ( sstable . getReadMeter ( ) != null ) { Double readRate = sstable . getReadMeter ( ) . fifteenMinuteRate ( ) ; totalReadsPerSec += readRate ; readRates . put ( sstable , readRate ) ; } } logger . trace ( "Total reads/sec across all sstables in index summary resize process: {}" , totalReadsPerSec ) ; List < SSTableReader > sstablesByHotness = new ArrayList < > ( nonCompacting ) ; Collections . sort ( sstablesByHotness , new ReadRateComparator ( readRates ) ) ; long remainingBytes = memoryPoolBytes ; for ( SSTableReader sstable : Iterables . concat ( compacting , oldFormatSSTables ) ) remainingBytes -= sstable . getIndexSummaryOffHeapSize ( ) ; logger . 
trace ( "Index summaries for compacting SSTables are using {} MB of space" , ( memoryPoolBytes - remainingBytes ) / 1024.0 / 1024.0 ) ; List < SSTableReader > newSSTables = adjustSamplingLevels ( sstablesByHotness , totalReadsPerSec , remainingBytes ) ; total = 0 ; for ( SSTableReader sstable : Iterables . concat ( compacting , oldFormatSSTables , newSSTables ) ) total += sstable . getIndexSummaryOffHeapSize ( ) ; logger . debug ( "Completed resizing of index summaries; current approximate memory used: {} MB" , total / 1024.0 / 1024.0 ) ; return newSSTables ; } | Attempts to fairly distribute a fixed pool of memory for index summaries across a set of SSTables based on their recent read rates . |
12,261 | public ByteBuffer getByteBuffer ( ) throws InvalidRequestException { switch ( type ) { case STRING : return AsciiType . instance . fromString ( text ) ; case INTEGER : return IntegerType . instance . fromString ( text ) ; case UUID : return LexicalUUIDType . instance . fromString ( text ) ; case FLOAT : return FloatType . instance . fromString ( text ) ; } return null ; } | Returns the typed value serialized to a ByteBuffer . |
12,262 | private boolean selfAssign ( ) { if ( ! get ( ) . canAssign ( true ) ) return false ; for ( SEPExecutor exec : pool . executors ) { if ( exec . takeWorkPermit ( true ) ) { Work work = new Work ( exec ) ; if ( assign ( work , true ) ) return true ; pool . schedule ( work ) ; assert get ( ) . assigned != null ; return true ; } } return false ; } | try to assign ourselves an executor with work available |
12,263 | private void startSpinning ( ) { assert get ( ) == Work . WORKING ; pool . spinningCount . incrementAndGet ( ) ; set ( Work . SPINNING ) ; } | Transition this worker from WORKING to SPINNING , incrementing the pool 's spinning count .
12,264 | private void doWaitSpin ( ) { long sleep = 10000L * pool . spinningCount . get ( ) ; sleep = Math . min ( 1000000 , sleep ) ; sleep *= Math . random ( ) ; sleep = Math . max ( 10000 , sleep ) ; long start = System . nanoTime ( ) ; Long target = start + sleep ; if ( pool . spinning . putIfAbsent ( target , this ) != null ) return ; LockSupport . parkNanos ( sleep ) ; pool . spinning . remove ( target , this ) ; long end = System . nanoTime ( ) ; long spin = end - start ; long stopCheck = pool . stopCheck . addAndGet ( spin ) ; maybeStop ( stopCheck , end ) ; if ( prevStopCheck + spin == stopCheck ) soleSpinnerSpinTime += spin ; else soleSpinnerSpinTime = 0 ; prevStopCheck = stopCheck ; } | perform a sleep - spin incrementing pool . stopCheck accordingly |
12,265 | private void maybeStop ( long stopCheck , long now ) { long delta = now - stopCheck ; if ( delta <= 0 ) { if ( pool . stopCheck . compareAndSet ( stopCheck , now - stopCheckInterval ) ) { if ( ! assign ( Work . STOP_SIGNALLED , true ) ) pool . schedule ( Work . STOP_SIGNALLED ) ; } } else if ( soleSpinnerSpinTime > stopCheckInterval && pool . spinningCount . get ( ) == 1 ) { assign ( Work . STOP_SIGNALLED , true ) ; } else { while ( delta > stopCheckInterval * 2 && ! pool . stopCheck . compareAndSet ( stopCheck , now - stopCheckInterval ) ) { stopCheck = pool . stopCheck . get ( ) ; delta = now - stopCheck ; } } } | If we get too far ahead of realtime we have spun too much and deschedule ; if we get too far behind realtime we reset to our initial offset .
12,266 | public List < Row > postReconciliationProcessing ( List < IndexExpression > clause , List < Row > rows ) { return rows ; } | Combines index query results from multiple nodes . This is done by the coordinator node after it has reconciled the replica responses . |
12,267 | public boolean isIndexBuilt ( ByteBuffer columnName ) { return SystemKeyspace . isIndexBuilt ( baseCfs . keyspace . getName ( ) , getNameForSystemKeyspace ( columnName ) ) ; } | Checks if the index for specified column is fully built |
12,268 | protected void buildIndexBlocking ( ) { logger . info ( String . format ( "Submitting index build of %s for data in %s" , getIndexName ( ) , StringUtils . join ( baseCfs . getSSTables ( ) , ", " ) ) ) ; try ( Refs < SSTableReader > sstables = baseCfs . selectAndReference ( ColumnFamilyStore . CANONICAL_SSTABLES ) . refs ) { SecondaryIndexBuilder builder = new SecondaryIndexBuilder ( baseCfs , Collections . singleton ( getIndexName ( ) ) , new ReducingKeyIterator ( sstables ) ) ; Future < ? > future = CompactionManager . instance . submitIndexBuild ( builder ) ; FBUtilities . waitOnFuture ( future ) ; forceBlockingFlush ( ) ; setIndexBuilt ( ) ; } logger . info ( "Index build of {} complete" , getIndexName ( ) ) ; } | Builds the index using the data in the underlying CFS Blocks till it s complete |
12,269 | public Future < ? > buildIndexAsync ( ) { boolean allAreBuilt = true ; for ( ColumnDefinition cdef : columnDefs ) { if ( ! SystemKeyspace . isIndexBuilt ( baseCfs . keyspace . getName ( ) , getNameForSystemKeyspace ( cdef . name . bytes ) ) ) { allAreBuilt = false ; break ; } } if ( allAreBuilt ) return null ; Runnable runnable = new Runnable ( ) { public void run ( ) { baseCfs . forceBlockingFlush ( ) ; buildIndexBlocking ( ) ; } } ; FutureTask < ? > f = new FutureTask < Object > ( runnable , null ) ; new Thread ( f , "Creating index: " + getIndexName ( ) ) . start ( ) ; return f ; } | Builds the index using the data in the underlying CF non blocking |
12,270 | public DecoratedKey getIndexKeyFor ( ByteBuffer value ) { ByteBuffer name = columnDefs . iterator ( ) . next ( ) . name . bytes ; return new BufferDecoratedKey ( new LocalToken ( baseCfs . metadata . getColumnDefinition ( name ) . type , value ) , value ) ; } | Returns the decoratedKey for a column value |
12,271 | public static SecondaryIndex createInstance ( ColumnFamilyStore baseCfs , ColumnDefinition cdef ) throws ConfigurationException { SecondaryIndex index ; switch ( cdef . getIndexType ( ) ) { case KEYS : index = new KeysIndex ( ) ; break ; case COMPOSITES : index = CompositesIndex . create ( cdef ) ; break ; case CUSTOM : assert cdef . getIndexOptions ( ) != null ; String class_name = cdef . getIndexOptions ( ) . get ( CUSTOM_INDEX_OPTION_NAME ) ; assert class_name != null ; try { index = ( SecondaryIndex ) Class . forName ( class_name ) . newInstance ( ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } break ; default : throw new RuntimeException ( "Unknown index type: " + cdef . getIndexName ( ) ) ; } index . addColumnDef ( cdef ) ; index . validateOptions ( ) ; index . setBaseCfs ( baseCfs ) ; return index ; } | This is the primary way to create a secondary index instance for a CF column . It will validate the index_options before initializing . |
12,272 | public static CellNameType getIndexComparator ( CFMetaData baseMetadata , ColumnDefinition cdef ) { switch ( cdef . getIndexType ( ) ) { case KEYS : return new SimpleDenseCellNameType ( keyComparator ) ; case COMPOSITES : return CompositesIndex . getIndexComparator ( baseMetadata , cdef ) ; case CUSTOM : return null ; } throw new AssertionError ( ) ; } | Returns the index comparator for index backed by CFS or null . |
12,273 | public RepairFuture submitRepairSession ( UUID parentRepairSession , Range < Token > range , String keyspace , RepairParallelism parallelismDegree , Set < InetAddress > endpoints , String ... cfnames ) { if ( cfnames . length == 0 ) return null ; RepairSession session = new RepairSession ( parentRepairSession , range , keyspace , parallelismDegree , endpoints , cfnames ) ; if ( session . endpoints . isEmpty ( ) ) return null ; RepairFuture futureTask = new RepairFuture ( session ) ; executor . execute ( futureTask ) ; return futureTask ; } | Requests repairs for the given keyspace and column families . |
12,274 | public static Set < InetAddress > getNeighbors ( String keyspaceName , Range < Token > toRepair , Collection < String > dataCenters , Collection < String > hosts ) { StorageService ss = StorageService . instance ; Map < Range < Token > , List < InetAddress > > replicaSets = ss . getRangeToAddressMap ( keyspaceName ) ; Range < Token > rangeSuperSet = null ; for ( Range < Token > range : ss . getLocalRanges ( keyspaceName ) ) { if ( range . contains ( toRepair ) ) { rangeSuperSet = range ; break ; } else if ( range . intersects ( toRepair ) ) { throw new IllegalArgumentException ( "Requested range intersects a local range but is not fully contained in one; this would lead to imprecise repair" ) ; } } if ( rangeSuperSet == null || ! replicaSets . containsKey ( rangeSuperSet ) ) return Collections . emptySet ( ) ; Set < InetAddress > neighbors = new HashSet < > ( replicaSets . get ( rangeSuperSet ) ) ; neighbors . remove ( FBUtilities . getBroadcastAddress ( ) ) ; if ( dataCenters != null ) { TokenMetadata . Topology topology = ss . getTokenMetadata ( ) . cloneOnlyTokenMap ( ) . getTopology ( ) ; Set < InetAddress > dcEndpoints = Sets . newHashSet ( ) ; Multimap < String , InetAddress > dcEndpointsMap = topology . getDatacenterEndpoints ( ) ; for ( String dc : dataCenters ) { Collection < InetAddress > c = dcEndpointsMap . get ( dc ) ; if ( c != null ) dcEndpoints . addAll ( c ) ; } return Sets . intersection ( neighbors , dcEndpoints ) ; } else if ( hosts != null ) { Set < InetAddress > specifiedHost = new HashSet < > ( ) ; for ( final String host : hosts ) { try { final InetAddress endpoint = InetAddress . getByName ( host . trim ( ) ) ; if ( endpoint . equals ( FBUtilities . getBroadcastAddress ( ) ) || neighbors . contains ( endpoint ) ) specifiedHost . add ( endpoint ) ; } catch ( UnknownHostException e ) { throw new IllegalArgumentException ( "Unknown host specified " + host , e ) ; } } if ( ! specifiedHost . contains ( FBUtilities . 
getBroadcastAddress ( ) ) ) throw new IllegalArgumentException ( "The current host must be part of the repair" ) ; if ( specifiedHost . size ( ) <= 1 ) { String msg = "Repair requires at least two endpoints that are neighbours before it can continue, the endpoint used for this repair is %s, " + "other available neighbours are %s but these neighbours were not part of the supplied list of hosts to use during the repair (%s)." ; throw new IllegalArgumentException ( String . format ( msg , specifiedHost , neighbors , hosts ) ) ; } specifiedHost . remove ( FBUtilities . getBroadcastAddress ( ) ) ; return specifiedHost ; } return neighbors ; } | Return all of the neighbors with whom we share the provided range . |
12,275 | private static String decodeString ( ByteBuffer src ) throws CharacterCodingException { CharsetDecoder theDecoder = decoder . get ( ) ; theDecoder . reset ( ) ; final CharBuffer dst = CharBuffer . allocate ( ( int ) ( ( double ) src . remaining ( ) * theDecoder . maxCharsPerByte ( ) ) ) ; CoderResult cr = theDecoder . decode ( src , dst , true ) ; if ( ! cr . isUnderflow ( ) ) cr . throwException ( ) ; cr = theDecoder . flush ( dst ) ; if ( ! cr . isUnderflow ( ) ) cr . throwException ( ) ; return dst . flip ( ) . toString ( ) ; } | Decode the given ByteBuffer into a String using a thread - local CharsetDecoder .
12,276 | public void activate ( ) { String pidFile = System . getProperty ( "cassandra-pidfile" ) ; try { try { MBeanServer mbs = ManagementFactory . getPlatformMBeanServer ( ) ; mbs . registerMBean ( new StandardMBean ( new NativeAccess ( ) , NativeAccessMBean . class ) , new ObjectName ( MBEAN_NAME ) ) ; } catch ( Exception e ) { logger . error ( "error registering MBean {}" , MBEAN_NAME , e ) ; } setup ( ) ; if ( pidFile != null ) { new File ( pidFile ) . deleteOnExit ( ) ; } if ( System . getProperty ( "cassandra-foreground" ) == null ) { System . out . close ( ) ; System . err . close ( ) ; } start ( ) ; } catch ( Throwable e ) { logger . error ( "Exception encountered during startup" , e ) ; e . printStackTrace ( ) ; System . out . println ( "Exception encountered during startup: " + e . getMessage ( ) ) ; System . exit ( 3 ) ; } } | A convenience method to initialize and start the daemon in one shot . |
12,277 | protected static String toInternalName ( String name , boolean keepCase ) { return keepCase ? name : name . toLowerCase ( Locale . US ) ; } | Converts the specified name into the name used internally . |
12,278 | public Node < E > append ( E value , int maxSize ) { Node < E > newTail = new Node < > ( randomLevel ( ) , value ) ; lock . writeLock ( ) . lock ( ) ; try { if ( size >= maxSize ) return null ; size ++ ; Node < E > tail = head ; for ( int i = maxHeight - 1 ; i >= newTail . height ( ) ; i -- ) { Node < E > next ; while ( ( next = tail . next ( i ) ) != null ) tail = next ; tail . size [ i ] ++ ; } for ( int i = newTail . height ( ) - 1 ; i >= 0 ; i -- ) { Node < E > next ; while ( ( next = tail . next ( i ) ) != null ) tail = next ; tail . setNext ( i , newTail ) ; newTail . setPrev ( i , tail ) ; } return newTail ; } finally { lock . writeLock ( ) . unlock ( ) ; } } | regardless of its future position in the list from other modifications |
12,279 | public void remove ( Node < E > node ) { lock . writeLock ( ) . lock ( ) ; assert node . value != null ; node . value = null ; try { size -- ; for ( int i = 0 ; i < node . height ( ) ; i ++ ) { Node < E > prev = node . prev ( i ) ; Node < E > next = node . next ( i ) ; assert prev != null ; prev . setNext ( i , next ) ; if ( next != null ) next . setPrev ( i , prev ) ; prev . size [ i ] += node . size [ i ] - 1 ; } for ( int i = node . height ( ) ; i < maxHeight ; i ++ ) { while ( i == node . height ( ) ) node = node . prev ( i - 1 ) ; node . size [ i ] -- ; } } finally { lock . writeLock ( ) . unlock ( ) ; } } | remove the provided node and its associated value from the list |
12,280 | public E get ( int index ) { lock . readLock ( ) . lock ( ) ; try { if ( index >= size ) return null ; index ++ ; int c = 0 ; Node < E > finger = head ; for ( int i = maxHeight - 1 ; i >= 0 ; i -- ) { while ( c + finger . size [ i ] <= index ) { c += finger . size [ i ] ; finger = finger . next ( i ) ; } } assert c == index ; return finger . value ; } finally { lock . readLock ( ) . unlock ( ) ; } } | retrieve the item at the provided index or return null if the index is past the end of the list |
12,281 | private boolean isWellFormed ( ) { for ( int i = 0 ; i < maxHeight ; i ++ ) { int c = 0 ; for ( Node node = head ; node != null ; node = node . next ( i ) ) { if ( node . prev ( i ) != null && node . prev ( i ) . next ( i ) != node ) return false ; if ( node . next ( i ) != null && node . next ( i ) . prev ( i ) != node ) return false ; c += node . size [ i ] ; if ( i + 1 < maxHeight && node . parent ( i + 1 ) . next ( i + 1 ) == node . next ( i ) ) { if ( node . parent ( i + 1 ) . size [ i + 1 ] != c ) return false ; c = 0 ; } } if ( i == maxHeight - 1 && c != size + 1 ) return false ; } return true ; } | don t create a separate unit test - tools tree doesn t currently warrant them |
12,282 | public int binarySearch ( RowPosition key ) { int low = 0 , mid = offsetCount , high = mid - 1 , result = - 1 ; while ( low <= high ) { mid = ( low + high ) >> 1 ; result = - DecoratedKey . compareTo ( partitioner , ByteBuffer . wrap ( getKey ( mid ) ) , key ) ; if ( result > 0 ) { low = mid + 1 ; } else if ( result == 0 ) { return mid ; } else { high = mid - 1 ; } } return - mid - ( result < 0 ? 1 : 2 ) ; } | Binary search for the given key ; based on Harmony 's Collections implementation .
12,283 | public void reloadClasses ( ) { File triggerDirectory = FBUtilities . cassandraTriggerDir ( ) ; if ( triggerDirectory == null ) return ; customClassLoader = new CustomClassLoader ( parent , triggerDirectory ) ; cachedTriggers . clear ( ) ; } | Reload the triggers which is already loaded Invoking this will update the class loader so new jars can be loaded . |
12,284 | private List < Mutation > executeInternal ( ByteBuffer key , ColumnFamily columnFamily ) { Map < String , TriggerDefinition > triggers = columnFamily . metadata ( ) . getTriggers ( ) ; if ( triggers . isEmpty ( ) ) return null ; List < Mutation > tmutations = Lists . newLinkedList ( ) ; Thread . currentThread ( ) . setContextClassLoader ( customClassLoader ) ; try { for ( TriggerDefinition td : triggers . values ( ) ) { ITrigger trigger = cachedTriggers . get ( td . classOption ) ; if ( trigger == null ) { trigger = loadTriggerInstance ( td . classOption ) ; cachedTriggers . put ( td . classOption , trigger ) ; } Collection < Mutation > temp = trigger . augment ( key , columnFamily ) ; if ( temp != null ) tmutations . addAll ( temp ) ; } return tmutations ; } catch ( Exception ex ) { throw new RuntimeException ( String . format ( "Exception while creating trigger on CF with ID: %s" , columnFamily . id ( ) ) , ex ) ; } finally { Thread . currentThread ( ) . setContextClassLoader ( parent ) ; } } | Switch class loader before using the triggers for the column family if not loaded them with the custom class loader . |
12,285 | private static CharArraySet getDefaultStopwords ( String language ) { switch ( language ) { case "English" : return EnglishAnalyzer . getDefaultStopSet ( ) ; case "French" : return FrenchAnalyzer . getDefaultStopSet ( ) ; case "Spanish" : return SpanishAnalyzer . getDefaultStopSet ( ) ; case "Portuguese" : return PortugueseAnalyzer . getDefaultStopSet ( ) ; case "Italian" : return ItalianAnalyzer . getDefaultStopSet ( ) ; case "Romanian" : return RomanianAnalyzer . getDefaultStopSet ( ) ; case "German" : return GermanAnalyzer . getDefaultStopSet ( ) ; case "Dutch" : return DutchAnalyzer . getDefaultStopSet ( ) ; case "Swedish" : return SwedishAnalyzer . getDefaultStopSet ( ) ; case "Norwegian" : return NorwegianAnalyzer . getDefaultStopSet ( ) ; case "Danish" : return DanishAnalyzer . getDefaultStopSet ( ) ; case "Russian" : return RussianAnalyzer . getDefaultStopSet ( ) ; case "Finnish" : return FinnishAnalyzer . getDefaultStopSet ( ) ; case "Irish" : return IrishAnalyzer . getDefaultStopSet ( ) ; case "Hungarian" : return HungarianAnalyzer . getDefaultStopSet ( ) ; case "Turkish" : return SpanishAnalyzer . getDefaultStopSet ( ) ; case "Armenian" : return SpanishAnalyzer . getDefaultStopSet ( ) ; case "Basque" : return BasqueAnalyzer . getDefaultStopSet ( ) ; case "Catalan" : return CatalanAnalyzer . getDefaultStopSet ( ) ; default : return CharArraySet . EMPTY_SET ; } } | Returns the default stopwords set used by Lucene language analyzer for the specified language . |
12,286 | public static void setInputColumns ( Configuration conf , String columns ) { if ( columns == null || columns . isEmpty ( ) ) return ; conf . set ( INPUT_CQL_COLUMNS_CONFIG , columns ) ; } | Set the CQL columns for the input of this job . |
12,287 | public static void setInputCQLPageRowSize ( Configuration conf , String cqlPageRowSize ) { if ( cqlPageRowSize == null ) { throw new UnsupportedOperationException ( "cql page row size may not be null" ) ; } conf . set ( INPUT_CQL_PAGE_ROW_SIZE_CONFIG , cqlPageRowSize ) ; } | Set the CQL query Limit for the input of this job . |
12,288 | public static void setInputWhereClauses ( Configuration conf , String clauses ) { if ( clauses == null || clauses . isEmpty ( ) ) return ; conf . set ( INPUT_CQL_WHERE_CLAUSE_CONFIG , clauses ) ; } | Set the CQL user defined where clauses for the input of this job . |
12,289 | public static void setOutputCql ( Configuration conf , String cql ) { if ( cql == null || cql . isEmpty ( ) ) return ; conf . set ( OUTPUT_CQL , cql ) ; } | Set the CQL prepared statement for the output of this job . |
12,290 | public long getTimestamp ( ) { while ( true ) { long current = System . currentTimeMillis ( ) * 1000 ; long last = lastTimestampMicros . get ( ) ; long tstamp = last >= current ? last + 1 : current ; if ( lastTimestampMicros . compareAndSet ( last , tstamp ) ) return tstamp ; } } | This clock guarantees that updates for the same ClientState will be ordered in the sequence seen even if multiple updates happen in the same millisecond . |
12,291 | public void login ( AuthenticatedUser user ) throws AuthenticationException { if ( ! user . isAnonymous ( ) && ! Auth . isExistingUser ( user . getName ( ) ) ) throw new AuthenticationException ( String . format ( "User %s doesn't exist - create it with CREATE USER query first" , user . getName ( ) ) ) ; this . user = user ; } | Attempts to login the given user . |
12,292 | protected static boolean notAllowedStrategy ( DockerSlaveTemplate template ) { if ( isNull ( template ) ) { LOG . debug ( "Skipping DockerProvisioningStrategy because: template is null" ) ; return true ; } final RetentionStrategy retentionStrategy = template . getRetentionStrategy ( ) ; if ( isNull ( retentionStrategy ) ) { LOG . debug ( "Skipping DockerProvisioningStrategy because: strategy is null for {}" , template ) ; } if ( retentionStrategy instanceof DockerOnceRetentionStrategy ) { if ( template . getNumExecutors ( ) == 1 ) { LOG . debug ( "Applying faster provisioning for single executor template {}" , template ) ; return false ; } else { LOG . debug ( "Skipping DockerProvisioningStrategy because: numExecutors is {} for {}" , template . getNumExecutors ( ) , template ) ; return true ; } } if ( retentionStrategy instanceof RetentionStrategy . Demand ) { LOG . debug ( "Applying faster provisioning for Demand strategy for template {}" , template ) ; return false ; } LOG . trace ( "Skipping YAD provisioning for unknown mix of configuration for {}" , template ) ; return true ; } | Exclude unknown mix of configuration . |
12,293 | public void pullImage ( DockerImagePullStrategy pullStrategy , String imageName ) throws InterruptedException { LOG . info ( "Pulling image {} with {} strategy..." , imageName , pullStrategy ) ; final List < Image > images = getDockerCli ( ) . listImagesCmd ( ) . withShowAll ( true ) . exec ( ) ; NameParser . ReposTag repostag = NameParser . parseRepositoryTag ( imageName ) ; final String fullImageName = repostag . repos + ":" + ( repostag . tag . isEmpty ( ) ? "latest" : repostag . tag ) ; boolean hasImage = Iterables . any ( images , image -> nonNull ( image . getRepoTags ( ) ) && Arrays . asList ( image . getRepoTags ( ) ) . contains ( fullImageName ) ) ; boolean pull = hasImage ? pullStrategy . pullIfExists ( imageName ) : pullStrategy . pullIfNotExists ( imageName ) ; if ( pull ) { LOG . info ( "Pulling image '{}' {}. This may take awhile..." , imageName , hasImage ? "again" : "since one was not found" ) ; long startTime = System . currentTimeMillis ( ) ; getDockerCli ( ) . pullImageCmd ( imageName ) . exec ( new PullImageResultCallback ( ) ) . awaitSuccess ( ) ; long pullTime = System . currentTimeMillis ( ) - startTime ; LOG . info ( "Finished pulling image '{}', took {} ms" , imageName , pullTime ) ; } } | Pull docker image on this docker host . |
12,294 | public String buildImage ( Map < String , File > plugins ) throws IOException , InterruptedException { LOG . debug ( "Building image for {}" , plugins ) ; final File buildDir = new File ( targetDir ( ) . getAbsolutePath ( ) + "/docker-it/build-image" ) ; if ( buildDir . exists ( ) ) { deleteDirectory ( buildDir ) ; } if ( ! buildDir . mkdirs ( ) ) { throw new IllegalStateException ( "Can't create temp directory " + buildDir . getAbsolutePath ( ) ) ; } final String dockerfile = generateDockerfileFor ( plugins ) ; final File dockerfileFile = new File ( buildDir , "Dockerfile" ) ; writeStringToFile ( dockerfileFile , dockerfile ) ; final File buildHomePath = new File ( buildDir , JenkinsDockerImage . JENKINS_DEFAULT . homePath ) ; final File jenkinsConfig = new File ( buildHomePath , "config.xml" ) ; DockerHPIContainerUtil . copyResourceFromClass ( DockerHPIContainerUtil . class , "config.xml" , jenkinsConfig ) ; writeStringToFile ( new File ( buildHomePath , "jenkins.install.UpgradeWizard.state" ) , "2.19.4" ) ; writeStringToFile ( new File ( buildHomePath , "jenkins.install.InstallUtil.lastExecVersion" ) , "2.19.4" ) ; final File pluginDir = new File ( buildHomePath , "/plugins/" ) ; if ( ! pluginDir . mkdirs ( ) ) { throw new IllegalStateException ( "Can't create dirs " + pluginDir . getAbsolutePath ( ) ) ; } for ( Map . Entry < String , File > entry : plugins . entrySet ( ) ) { final File dst = new File ( pluginDir + "/" + entry . getKey ( ) ) ; copyFile ( entry . getValue ( ) , dst ) ; } try { LOG . info ( "Building data-image..." ) ; return getDockerCli ( ) . buildImageCmd ( buildDir ) . withTag ( DATA_IMAGE ) . withForcerm ( true ) . exec ( new BuildImageResultCallback ( ) { public void onNext ( BuildResponseItem item ) { String text = item . getStream ( ) ; if ( nonNull ( text ) ) { LOG . debug ( StringUtils . removeEnd ( text , NL ) ) ; } super . onNext ( item ) ; } } ) . awaitImageId ( ) ; } finally { buildDir . 
delete ( ) ; } } | Build docker image containing specified plugins . |
/**
 * Run (record and remove after test) a fresh container with Jenkins.
 * <p>
 * Pulls the base Jenkins image, removes any previous container carrying the
 * same test label, (re)uses the data container, then creates and starts the
 * Jenkins container with the data container's volumes attached.
 *
 * @param pullStrategy how to pull the Jenkins base image
 * @param forceRefresh when {@code true}, force recreation of the data container
 * @return the id of the started Jenkins container
 */
public String runFreshJenkinsContainer(DockerImagePullStrategy pullStrategy, boolean forceRefresh)
        throws IOException, SettingsBuildingException, InterruptedException
{
    LOG.debug("Entering run fresh jenkins container.");
    pullImage(pullStrategy, JENKINS_DEFAULT.getDockerImageName());

    // Label the container with the test display name so stale containers
    // from a previous run of the same test can be found and removed.
    final Map<String, String> labels = new HashMap<>();
    labels.put("test.displayName", description.getDisplayName());

    LOG.debug("Removing existed container before");
    try
    {
        final List<Container> containers = getDockerCli().listContainersCmd().withShowAll(true).exec();
        for (Container c : containers)
        {
            // NOTE(review): exact-map equality — a container with extra labels
            // would not match; presumably intentional for test isolation.
            if (c.getLabels().equals(labels))
            {
                LOG.debug("Removing {}, for labels: '{}'", c, labels);
                getDockerCli().removeContainerCmd(c.getId()).withForce(true).exec();
                break;
            }
        }
    }
    catch (NotFoundException ex)
    {
        // Racing against docker is fine; nothing to remove.
        LOG.debug("Container wasn't found, that's ok");
    }

    LOG.debug("Recreating data container without data-image doesn't make sense, so reuse boolean.");
    String dataContainerId = getDataContainerId(forceRefresh);

    // Create the Jenkins container: expose the Jenkins TCP port, bind the
    // fixed 48000 port, and mount the data container's volumes.
    final String id = getDockerCli().createContainerCmd(JENKINS_DEFAULT.getDockerImageName())
            .withEnv(CONTAINER_JAVA_OPTS)
            .withExposedPorts(new ExposedPort(JENKINS_DEFAULT.tcpPort))
            .withPortSpecs(String.format("%d/tcp", JENKINS_DEFAULT.tcpPort))
            .withHostConfig(HostConfig.newHostConfig()
                    .withPortBindings(PortBinding.parse("0.0.0.0:48000:48000"))
                    .withVolumesFrom(new VolumesFrom(dataContainerId))
                    .withPublishAllPorts(true))
            .withLabels(labels)
            .exec()
            .getId();

    // Record for cleanup after the test.
    provisioned.add(id);

    LOG.debug("Starting container");
    getDockerCli().startContainerCmd(id).exec();
    return id;
}
12,296 | public String generateDockerfileFor ( Map < String , File > plugins ) throws IOException { StringBuilder builder = new StringBuilder ( ) ; builder . append ( "FROM scratch" ) . append ( NL ) . append ( "MAINTAINER Kanstantsin Shautsou <kanstantsin.sha@gmail.com>" ) . append ( NL ) . append ( "COPY ./ /" ) . append ( NL ) . append ( "VOLUME /usr/share/jenkins/ref" ) . append ( NL ) ; for ( Map . Entry < String , String > entry : generateLabels ( plugins ) . entrySet ( ) ) { builder . append ( "LABEL " ) . append ( entry . getKey ( ) ) . append ( "=" ) . append ( entry . getValue ( ) ) . append ( NL ) ; } return builder . toString ( ) ; } | Dockerfile as String based on sratch for placing plugins . |
12,297 | private DockerCLI createCliWithWait ( URL url , int port ) throws InterruptedException , IOException { DockerCLI tempCli = null ; boolean connected = false ; int i = 0 ; while ( i <= 10 && ! connected ) { i ++ ; try { final CLIConnectionFactory factory = new CLIConnectionFactory ( ) . url ( url ) ; tempCli = new DockerCLI ( factory , port ) ; final String channelName = tempCli . getChannel ( ) . getName ( ) ; if ( channelName . contains ( "CLI connection to" ) ) { tempCli . upgrade ( ) ; connected = true ; LOG . debug ( channelName ) ; } else { LOG . debug ( "Cli connection is not via CliPort '{}'. Sleeping for 5s..." , channelName ) ; tempCli . close ( ) ; Thread . sleep ( 5 * 1000 ) ; } } catch ( IOException e ) { LOG . debug ( "Jenkins is not available. Sleeping for 5s..." , e . getMessage ( ) ) ; Thread . sleep ( 5 * 1000 ) ; } } if ( ! connected ) { throw new IOException ( "Can't connect to {}" + url . toString ( ) ) ; } LOG . info ( "Jenkins future {}" , url ) ; LOG . info ( "Jenkins future {}/configure" , url ) ; LOG . info ( "Jenkins future {}/log/all" , url ) ; return tempCli ; } | Create DockerCLI connection against specified jnlpSlaveAgent port |
12,298 | @ GuardedBy ( "hudson.model.Queue.lock" ) public long check ( final AbstractCloudComputer c ) { final AbstractCloudSlave computerNode = c . getNode ( ) ; if ( c . isIdle ( ) && computerNode != null ) { final long idleMilliseconds = System . currentTimeMillis ( ) - c . getIdleStartMilliseconds ( ) ; if ( idleMilliseconds > MINUTES . toMillis ( idleMinutes ) ) { LOG . info ( "Disconnecting {}, after {} min timeout." , c . getName ( ) , idleMinutes ) ; try { computerNode . terminate ( ) ; } catch ( InterruptedException | IOException e ) { LOG . warn ( "Failed to terminate {}" , c . getName ( ) , e ) ; } } } return 1 ; } | While x - stream serialisation buggy copy implementation . |
12,299 | public static boolean isSomethingHappening ( Jenkins jenkins ) { if ( ! jenkins . getQueue ( ) . isEmpty ( ) ) return true ; for ( Computer n : jenkins . getComputers ( ) ) if ( ! n . isIdle ( ) ) return true ; return false ; } | Returns true if Hudson is building something or going to build something . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.