idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
8,700
// Fetches the next scroll batch for the given scroll id and hands the raw response
// stream to the reader. In the finally block, any transport statistics the stream
// collected (StatsAware) are folded into this object's 'stats' aggregate.
// NOTE(review): the InputStream is not closed here - presumably reader.read() (or the
// scroll consumption) takes ownership of closing it; confirm there is no leak.
Scroll scroll ( String scrollId , ScrollReader reader ) throws IOException { InputStream scroll = client . scroll ( scrollId ) ; try { return reader . read ( scroll ) ; } finally { if ( scroll instanceof StatsAware ) { stats . aggregate ( ( ( StatsAware ) scroll ) . stats ( ) ) ; } } }
consume the scroll
8,701
// URL-encodes 'path' as UTF-8 while leaving characters allowed in an absolute path
// intact, wrapping the checked URIException in the library's runtime exception type.
public static String encodePath ( String path ) { try { return URIUtil . encodePath ( path , "UTF-8" ) ; } catch ( URIException ex ) { throw new EsHadoopIllegalArgumentException ( "Cannot encode path segment [" + path + "]" , ex ) ; } }
Encodes characters in the string except for those allowed in an absolute path .
8,702
/**
 * URI-encodes each element's string form and joins the encoded values with the
 * given delimiter. A null input collection yields the concatenation of nothing.
 */
public static String concatenateAndUriEncode ( Collection < ? > list , String delimiter ) {
    List<String> encoded = new ArrayList<String>();
    if (list != null) {
        for (Object item : list) {
            encoded.add(encode(item.toString()));
        }
    }
    return StringUtils.concatenate(encoded, delimiter);
}
Encodes each string value of the list and concatenates the results using the supplied delimiter .
8,703
/**
 * Returns whether 'candidate' is explicitly named (or pattern-matched) by the given
 * index expressions. A bare "*" or "_all" means nothing is *explicitly* requested,
 * and a matching exclusion ("-name") vetoes the candidate outright.
 */
public static boolean isExplicitlyRequested ( String candidate , String ... indices ) {
    boolean matched = false;
    for (String expression : indices) {
        // A leading '+'/'-' marks the expression as an inclusion/exclusion.
        boolean inclusion = true;
        char first = expression.charAt(0);
        if (first == '+' || first == '-') {
            inclusion = (first == '+');
            expression = expression.substring(1);
        }
        if ("*".equals(expression) || "_all".equals(expression)) {
            return false;
        }
        boolean hit;
        if (Regex.isSimpleMatchPattern(expression)) {
            hit = Regex.simpleMatch(expression, candidate);
        } else {
            hit = candidate.equals(expression);
        }
        if (hit) {
            if (!inclusion) {
                return false;
            }
            matched = true;
        }
    }
    return matched;
}
Checks if the provided candidate is explicitly contained in the provided indices .
8,704
// Executes one bulk request against 'resource' using 'data' as the request body,
// updating the stats counters (requests, documents sent, time spent) along the way.
// NOTE(review): the elapsed-time computation assumes netTotalTime is a cumulative
// counter on the transport stats - confirm.
public BulkActionResponse bulk ( Resource resource , TrackingBytesArray data ) { long start = network . transportStats ( ) . netTotalTime ; Response response = execute ( PUT , resource . bulk ( ) , data ) ; long spent = network . transportStats ( ) . netTotalTime - start ; stats . bulkTotal ++ ; stats . docsSent += data . entries ( ) ; stats . bulkTotalTime += spent ; return new BulkActionResponse ( parseBulkActionResponse ( response ) , response . status ( ) , spent ) ; }
Executes a single bulk operation against the provided resource using the passed data as the request body . This method will retry bulk requests if the entire bulk request fails but will not retry singular document failures .
8,705
/**
 * Builds a QueryBuilder from a raw query string: a leading '?' denotes a URI-style
 * query, a leading '{' raw JSON; anything else is rejected.
 *
 * @throws IllegalArgumentException if the string is neither form
 */
public static QueryBuilder parse ( String raw , boolean isQuery ) throws IOException {
    if (raw.startsWith("?")) {
        return parseURI(raw.substring(1));
    }
    if (raw.startsWith("{")) {
        return new RawQueryBuilder(raw, isQuery);
    }
    throw new IllegalArgumentException("Failed to parse query: " + raw);
}
Builds a QueryBuilder from the given string
8,706
// SerDe initialization: derives the Hive object inspector and type info from the table
// properties, layers the table properties over the Hadoop configuration into a
// CompositeSettings (table properties take precedence by being added first), resolves
// the field alias mapping, applies the Hive 1.3 comment workaround, and caches the
// JSON-output flag plus the JSON field name when output-as-JSON is enabled.
public void initialize ( Configuration conf , Properties tbl , Properties partitionProperties ) throws SerDeException { inspector = HiveUtils . structObjectInspector ( tbl ) ; structTypeInfo = HiveUtils . typeInfo ( inspector ) ; cfg = conf ; List < Settings > settingSources = new ArrayList < > ( ) ; settingSources . add ( HadoopSettingsManager . loadFrom ( tbl ) ) ; if ( cfg != null ) { settingSources . add ( HadoopSettingsManager . loadFrom ( cfg ) ) ; } settings = new CompositeSettings ( settingSources ) ; alias = HiveUtils . alias ( settings ) ; HiveUtils . fixHive13InvalidComments ( settings , tbl ) ; trace = log . isTraceEnabled ( ) ; outputJSON = settings . getOutputAsJson ( ) ; if ( outputJSON ) { jsonFieldName = new Text ( HiveUtils . discoverJsonFieldName ( settings , alias ) ) ; } }
implemented to actually get access to the raw properties
8,707
/**
 * Returns an UnsupportedMetadataFieldExtractor for metadata tags (TTL, TIMESTAMP)
 * that Elasticsearch 6.x and later no longer support; otherwise delegates creation
 * to createExtractorFor.
 */
private FieldExtractor _createExtractorFor ( Metadata metadata ) {
    boolean removedIn6x = (metadata == Metadata.TTL || metadata == Metadata.TIMESTAMP);
    if (removedIn6x && version.onOrAfter(EsMajorVersion.V_6_X)) {
        return new UnsupportedMetadataFieldExtractor(metadata, version);
    }
    return createExtractorFor(metadata);
}
If a metadata tag is unsupported for this version of Elasticsearch then an UnsupportedMetadataFieldExtractor is returned for it ; otherwise extractor creation is delegated to createExtractorFor .
8,708
// Emits the legacy (ES 1.x/2.x) update-body template fragments into 'list':
// optional "params" object first, then either the script fields (with optional lang
// and a trailing "upsert" opener) or the doc/doc_as_upsert form. The exact emission
// order is part of the wire format - do not reorder.
private void writeLegacyFormatting ( List < Object > list , Object paramExtractor ) { if ( paramExtractor != null ) { list . add ( "{\"params\":" ) ; list . add ( paramExtractor ) ; list . add ( "," ) ; } else { list . add ( "{" ) ; } if ( HAS_SCRIPT ) { if ( HAS_LANG ) { list . add ( SCRIPT_LANG_1X ) ; } list . add ( SCRIPT_1X ) ; if ( UPSERT ) { list . add ( ",\"upsert\":" ) ; } } else { if ( UPSERT ) { list . add ( "\"doc_as_upsert\":true," ) ; } list . add ( "\"doc\":" ) ; } }
Script format meant for versions 1 . x to 2 . x . Required format for 1 . x and below .
8,709
// Emits the strict (ES 2.x/5.x+) update-body template fragments into 'list':
// for scripts, the script body, optional lang, optional "params", a closing brace,
// and an optional "upsert" opener; otherwise the doc/doc_as_upsert form. The exact
// emission order is part of the wire format - do not reorder.
private void writeStrictFormatting ( List < Object > list , Object paramExtractor , String scriptToUse ) { if ( HAS_SCRIPT ) { list . add ( scriptToUse ) ; if ( HAS_LANG ) { list . add ( SCRIPT_LANG_5X ) ; } if ( paramExtractor != null ) { list . add ( ",\"params\":" ) ; list . add ( paramExtractor ) ; } list . add ( "}" ) ; if ( UPSERT ) { list . add ( ",\"upsert\":" ) ; } } else { list . add ( "{" ) ; if ( UPSERT ) { list . add ( "\"doc_as_upsert\":true," ) ; } list . add ( "\"doc\":" ) ; } }
Script format meant for versions 2 . x to 5 . x . Required format for 5 . x and above .
8,710
public void add ( BytesRef payload ) { if ( payload . length ( ) > ba . available ( ) ) { if ( autoFlush ) { flush ( ) ; } else { throw new EsHadoopIllegalStateException ( String . format ( "Auto-flush disabled and bulk buffer full; disable manual flush or increase " + "capacity [current size %s]; bailing out" , ba . capacity ( ) ) ) ; } } data . copyFrom ( payload ) ; dataEntries ++ ; if ( bufferEntriesThreshold > 0 && dataEntries >= bufferEntriesThreshold ) { if ( autoFlush ) { flush ( ) ; } else { if ( dataEntries > bufferEntriesThreshold ) { throw new EsHadoopIllegalStateException ( String . format ( "Auto-flush disabled and maximum number of entries surpassed; disable manual " + "flush or increase capacity [current size %s]; bailing out" , bufferEntriesThreshold ) ) ; } } } }
Adds an entry to the bulk request potentially flushing if the request reaches capacity .
8,711
// Validates a user-edited bulk entry before retry by counting newline characters:
// an entry already ending in '\n' must contain exactly two newlines (action line +
// document line), while one ending in '}' must contain exactly one newline and gets
// a trailing '\n' appended here. Returns the (possibly newline-terminated) bytes.
// NOTE(review): an entry ending in any other byte passes through unvalidated.
private BytesRef validateEditedEntry ( byte [ ] retryDataBuffer ) { BytesRef result = new BytesRef ( ) ; byte closeBrace = '}' ; byte newline = '\n' ; int newlines = 0 ; for ( byte b : retryDataBuffer ) { if ( b == newline ) { newlines ++ ; } } result . add ( retryDataBuffer ) ; byte lastByte = retryDataBuffer [ retryDataBuffer . length - 1 ] ; if ( lastByte == newline ) { if ( newlines != 2 ) { throw new EsHadoopIllegalArgumentException ( "Encountered malformed data entry for bulk write retry. " + "Data contains [" + newlines + "] newline characters (\\n) but expected to have [2]." ) ; } } else if ( lastByte == closeBrace ) { if ( newlines != 1 ) { throw new EsHadoopIllegalArgumentException ( "Encountered malformed data entry for bulk write retry. " + "Data contains [" + newlines + "] newline characters (\\n) but expected to have [1]." ) ; } byte [ ] trailingNewline = new byte [ ] { newline } ; result . add ( trailingNewline ) ; } return result ; }
Validate the byte contents of a bulk entry that has been edited before being submitted for retry .
8,712
/**
 * Logs what this flush is about to do and, for retries with a positive backoff,
 * sleeps for the wait time before proceeding.
 *
 * @param bulkLoggingID semi-unique id used to correlate log lines of one transaction
 * @param retryOperation whether this flush retries previously failed entries
 * @param retriedDocs number of entries being retried (logging only)
 * @param waitTime backoff in milliseconds before retrying; ignored unless retrying
 */
private void initFlushOperation ( String bulkLoggingID , boolean retryOperation , long retriedDocs , long waitTime ) {
    if (retryOperation) {
        if (waitTime > 0L) {
            debugLog(bulkLoggingID, "Retrying [%d] entries after backing off for [%s] ms", retriedDocs, TimeValue.timeValueMillis(waitTime));
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e) {
                // Fix: restore the thread's interrupted status before bailing out so
                // callers up the stack can still observe the interruption.
                Thread.currentThread().interrupt();
                debugLog(bulkLoggingID, "Thread interrupted - giving up on retrying...");
                throw new EsHadoopException("Thread interrupted - giving up on retrying...", e);
            }
        } else {
            debugLog(bulkLoggingID, "Retrying [%d] entries immediately (without backoff)", retriedDocs);
        }
    } else {
        debugLog(bulkLoggingID, "Sending batch of [%d] bytes/[%s] entries", data.length(), dataEntries);
    }
}
Logs flushing messages and performs backoff waiting if there is a wait time for retry .
8,713
/**
 * Builds a semi-unique id (identity hash + current millis) for correlating the log
 * lines of one bulk transaction; returns null when debug logging is off so callers
 * pay nothing in the common case.
 */
private String createDebugTxnID ( ) {
    if (!LOG.isDebugEnabled()) {
        return null;
    }
    return hashCode() + Long.toString(System.currentTimeMillis());
}
Creates a semi - unique string to reasonably identify a bulk transaction .
8,714
// Closes the writer: flushes the last batch unless a prior write error dirtied the
// state (then the pending batch is dropped), optionally refreshes the index when a
// refresh-after-bulk is configured and a bulk write actually ran, and finally closes
// every registered bulk-error handler even if flushing/refreshing threw.
public void close ( ) { try { if ( ! hadWriteErrors ) { flush ( ) ; } else { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Dirty close; ignoring last existing write batch..." ) ; } } if ( requiresRefreshAfterBulk && executedBulkWrite ) { restClient . refresh ( resource ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( String . format ( "Refreshing index [%s]" , resource ) ) ; } } } finally { for ( IBulkWriteErrorHandler handler : documentBulkErrorHandlers ) { handler . close ( ) ; } } }
Flushes and closes the bulk processor to further writes .
8,715
/**
 * Skips ahead until the block this parser was created in is closed. Does nothing at
 * the creation level (open == 0); below the creation level (open < 0) the parser
 * can no longer track state, so it fails fast. The underlying parser is left on the
 * closing token of the block (or exhausted input).
 */
public void exitBlock ( ) {
    if (open == 0) {
        return;
    }
    if (open < 0) {
        throw new IllegalStateException("Parser is no longer nested in any blocks at the level in which it was " + "created. You must create a new block aware parser to track the levels above this one.");
    }
    // Drain tokens, tracking nesting, until the original block is closed or input ends.
    for (Token token = delegate.nextToken(); token != null; token = delegate.nextToken()) {
        updateLevelBasedOn(token);
        if (open <= 0) {
            return;
        }
    }
}
If this parser is reading tokens from an object or an array that is nested below its original nesting level it will consume and skip all tokens until it reaches the end of the block that it was created on . The underlying parser will be left on the END_X token for the block .
8,716
/**
 * Checks that an idle pooled transport is still usable by issuing the validation
 * request over it; any IOException simply marks the connection as bad (false).
 */
private boolean validate ( PooledTransport transport ) {
    try {
        return transport.execute(validationRequest).hasSucceeded();
    } catch (IOException ioe) {
        log.warn("Could not validate pooled connection on lease. Releasing pooled connection and trying again...", ioe);
        return false;
    }
}
Used to validate an idle pooled transport is still good for consumption .
8,717
// Leases a transport: scans the idle map for the first connection that still passes
// validation, releasing (and later removing) any that fail along the way. If no idle
// connection validates, a fresh transport is created. The chosen transport is moved
// to the leased map (stamped with the lease time) and handed back wrapped in a
// LeasedTransport so returning it routes through this pool.
synchronized Transport borrowTransport ( ) { long now = System . currentTimeMillis ( ) ; List < PooledTransport > garbageTransports = new ArrayList < PooledTransport > ( ) ; PooledTransport candidate = null ; for ( Map . Entry < PooledTransport , Long > entry : idle . entrySet ( ) ) { PooledTransport transport = entry . getKey ( ) ; if ( validate ( transport ) ) { candidate = transport ; break ; } else { garbageTransports . add ( transport ) ; } } for ( PooledTransport transport : garbageTransports ) { idle . remove ( transport ) ; release ( transport ) ; } if ( candidate == null ) { candidate = create ( ) ; } else { idle . remove ( candidate ) ; } leased . put ( candidate , now ) ; return new LeasedTransport ( candidate , this ) ; }
Borrows a Transport from this pool . If there are no pooled Transports available a new one is created .
8,718
/**
 * Returns a transport to the pool: unwraps LeasedTransport decorators down to the
 * underlying PooledTransport and moves it from the leased map back to the idle map,
 * stamped with the current time for idle-expiry accounting.
 *
 * @throws EsHadoopIllegalStateException if the transport is not poolable or was
 *         never leased from this pool
 */
private synchronized void returnTransport ( Transport returning ) {
    long now = System.currentTimeMillis();
    PooledTransport unwrapped;
    if (returning instanceof LeasedTransport) {
        unwrapped = ((LeasedTransport) returning).delegate;
    } else if (returning instanceof PooledTransport) {
        unwrapped = (PooledTransport) returning;
    } else {
        throw new EsHadoopIllegalStateException("Cannot return a non-poolable Transport to the pool");
    }
    // Single remove() instead of containsKey()+remove(): one map lookup, same effect
    // (lease timestamps are never null, so a null return means "not leased").
    if (leased.remove(unwrapped) != null) {
        idle.put(unwrapped, now);
    } else {
        throw new EsHadoopIllegalStateException("Cannot return a Transport object to a pool that was not sourced from the pool");
    }
}
Returns a transport to the pool .
8,719
// Evicts idle transports whose last-use timestamp predates (now - idle timeout):
// each expired transport is released and then removed from the idle map (removal is
// deferred to a second loop to avoid mutating the map while iterating it). Returns
// the total number of transports (idle + leased) still tracked by the pool.
// NOTE(review): the trace message prints the configured timeout after "Expired [...]
// ago", which reads oddly - it is the timeout, not an elapsed duration.
synchronized int removeOldConnections ( ) { long now = System . currentTimeMillis ( ) ; long expirationTime = now - idleTransportTimeout . millis ( ) ; List < PooledTransport > removeFromIdle = new ArrayList < PooledTransport > ( ) ; for ( Map . Entry < PooledTransport , Long > idleEntry : idle . entrySet ( ) ) { long lastUsed = idleEntry . getValue ( ) ; if ( lastUsed < expirationTime ) { PooledTransport removed = idleEntry . getKey ( ) ; if ( log . isTraceEnabled ( ) ) { log . trace ( "Expiring idle transport for job [" + jobPoolingKey + "], transport: [" + removed . toString ( ) + "]. Last used [" + new TimeValue ( now - lastUsed ) + "] ago. Expired [" + idleTransportTimeout + "] ago." ) ; } release ( removed ) ; removeFromIdle . add ( removed ) ; } } for ( PooledTransport toRemove : removeFromIdle ) { idle . remove ( toRemove ) ; } return idle . size ( ) + leased . size ( ) ; }
Cleans the pool by removing any resources that have been idle for longer than the configured transport pool idle time .
8,720
/**
 * True when the given tuple field is a schema-less (mixed-value) Pig MAP that
 * actually contains entries; non-MAP fields, schema-backed maps, and null/empty
 * maps all return false.
 */
private boolean isPopulatedMixedValueMap ( ResourceFieldSchema schema , int field , Tuple object ) {
    if (schema.getType() != DataType.MAP) {
        return false;
    }
    try {
        Map<?, ?> map = (Map<?, ?>) object.get(field);
        boolean hasValues = map != null && !map.isEmpty();
        return schema.getSchema() == null && hasValues;
    } catch (ExecException e) {
        throw new EsHadoopIllegalStateException(e);
    }
}
Checks to see if the given field is a schema - less Map that has values .
8,721
/**
 * Converts a deserialized get-mappings response body into a MappingSet: each
 * top-level entry is an index whose mappings are parsed and accumulated.
 */
public static MappingSet parseMappings ( Map < String , Object > content , boolean includeTypeName ) {
    List<Mapping> indexMappings = new ArrayList<Mapping>();
    for (Map.Entry<String, Object> index : content.entrySet()) {
        parseIndexMappings(index, indexMappings, includeTypeName);
    }
    return new MappingSet(indexMappings);
}
Convert the deserialized mapping request body into an object
8,722
// Writes the bulk action & metadata header ({"<op>":{...}}\n) into the template list.
// Fields are appended in a fixed order (index, type, id, parent, routing, ttl,
// timestamp, version [+ version type], then operation-specific extras), with
// 'commaMightBeNeeded' threading through each append to decide whether a separating
// comma must precede the next field. Routing goes through the FieldWriter variant so
// its header is only emitted when the document actually produces a routing value.
// Version type is only emitted when a version field is present.
protected void writeObjectHeader ( List < Object > list ) { list . add ( "{\"" + getOperation ( ) + "\":{" ) ; boolean commaMightBeNeeded = false ; commaMightBeNeeded = addExtractorOrDynamicValue ( list , getMetadataExtractorOrFallback ( Metadata . INDEX , indexExtractor ) , "" , commaMightBeNeeded ) ; commaMightBeNeeded = addExtractorOrDynamicValue ( list , getMetadataExtractorOrFallback ( Metadata . TYPE , typeExtractor ) , "\"_type\":" , commaMightBeNeeded ) ; commaMightBeNeeded = id ( list , commaMightBeNeeded ) ; commaMightBeNeeded = addExtractorOrDynamicValue ( list , getMetadataExtractorOrFallback ( Metadata . PARENT , parentExtractor ) , requestParameterNames . parent , commaMightBeNeeded ) ; commaMightBeNeeded = addExtractorOrDynamicValueAsFieldWriter ( list , getMetadataExtractorOrFallback ( Metadata . ROUTING , routingExtractor ) , requestParameterNames . routing , commaMightBeNeeded ) ; commaMightBeNeeded = addExtractorOrDynamicValue ( list , getMetadataExtractorOrFallback ( Metadata . TTL , ttlExtractor ) , "\"_ttl\":" , commaMightBeNeeded ) ; commaMightBeNeeded = addExtractorOrDynamicValue ( list , getMetadataExtractorOrFallback ( Metadata . TIMESTAMP , timestampExtractor ) , "\"_timestamp\":" , commaMightBeNeeded ) ; Object versionField = getMetadataExtractorOrFallback ( Metadata . VERSION , versionExtractor ) ; if ( versionField != null ) { if ( commaMightBeNeeded ) { list . add ( "," ) ; commaMightBeNeeded = false ; } commaMightBeNeeded = true ; list . add ( requestParameterNames . version ) ; list . add ( versionField ) ; Object versionTypeField = getMetadataExtractorOrFallback ( Metadata . VERSION_TYPE , versionTypeExtractor ) ; if ( versionTypeField != null ) { if ( commaMightBeNeeded ) { list . add ( "," ) ; commaMightBeNeeded = false ; } commaMightBeNeeded = true ; list . add ( requestParameterNames . versionType ) ; list . add ( versionTypeField ) ; } } otherHeader ( list , commaMightBeNeeded ) ; list . add ( "}}\n" ) ; }
write action & metadata header
8,723
/**
 * Appends header + extractor to the template when the extractor is present,
 * inserting a separating comma first when one is pending. Returns whether the next
 * field will need a comma (true once anything has been written).
 */
private boolean addExtractorOrDynamicValue ( List < Object > list , Object extractor , String header , boolean commaMightBeNeeded ) {
    if (extractor == null) {
        // Nothing emitted; the comma requirement carries over unchanged.
        return commaMightBeNeeded;
    }
    if (commaMightBeNeeded) {
        list.add(",");
    }
    list.add(header);
    list.add(extractor);
    return true;
}
If extractor is present this will add the header to the template followed by the extractor . If a comma is needed the comma will be inserted before the header .
8,724
/**
 * Like addExtractorOrDynamicValue, but folds the (possibly comma-prefixed) header
 * and the extractor into a single FieldWriter so the header is only written when the
 * extractor actually yields a value for the current document.
 */
private boolean addExtractorOrDynamicValueAsFieldWriter ( List < Object > list , FieldExtractor extractor , String header , boolean commaMightBeNeeded ) {
    if (extractor == null) {
        return commaMightBeNeeded;
    }
    String prefixedHeader = commaMightBeNeeded ? "," + header : header;
    list.add(new FieldWriter(prefixedHeader, extractor));
    return true;
}
If extractor is present this will combine the header and extractor into a FieldWriter allowing the FieldWriter to determine when and if to write the header value based on the given document's data . If a comma is needed it is appended to the header string before being passed to the FieldWriter .
8,725
/**
 * Resolves the extractor for a metadata field: a per-metadata extractor registered
 * on the MetadataExtractor wins; otherwise the supplied static fallback is used
 * (which may itself be null).
 */
protected FieldExtractor getMetadataExtractorOrFallback ( Metadata meta , FieldExtractor fallbackExtractor ) {
    FieldExtractor fromMeta = (metaExtractor == null) ? null : metaExtractor.get(meta);
    return (fromMeta != null) ? fromMeta : fallbackExtractor;
}
Get the extractor for a given field trying first one from a MetadataExtractor and failing that falling back to the provided static one
8,726
// Applies include/exclude field filters to this mapping. With no filters the mapping
// is returned as-is. Otherwise each top-level field is filtered recursively into
// 'filtered'; 'intact' stays true only if every field survived unchanged, in which
// case the original object is returned instead of building a new Mapping.
public Mapping filter ( Collection < String > includes , Collection < String > excludes ) { if ( includes . isEmpty ( ) && excludes . isEmpty ( ) ) { return this ; } List < Field > filtered = new ArrayList < Field > ( ) ; List < FieldFilter . NumberedInclude > convertedIncludes = FieldFilter . toNumberedFilter ( includes ) ; boolean intact = true ; for ( Field fl : this . getFields ( ) ) { intact &= filterField ( fl , null , filtered , convertedIncludes , excludes ) ; } return ( intact ? this : new Mapping ( this . getIndex ( ) , this . getType ( ) , filtered ) ) ; }
Filters out fields based on the provided include and exclude information and returns a Mapping object
8,727
/**
 * Flattens the mapping tree into a map of full field path -> field type. A
 * LinkedHashMap preserves the declaration order of the fields; an empty or absent
 * field array yields an immutable empty map.
 */
public Map < String , FieldType > flatten ( ) {
    if (fields == null || fields.length == 0) {
        return Collections.<String, FieldType> emptyMap();
    }
    Map<String, FieldType> flattened = new LinkedHashMap<String, FieldType>();
    for (Field field : fields) {
        addSubFieldToMap(flattened, field, null);
    }
    return flattened;
}
Takes a mapping tree and returns a map of all of its fields flattened and paired with their field types .
8,728
/**
 * Splits 'partitions' across 'totalTasks' workers and returns the contiguous slice
 * belonging to 'currentTask' (0-based). When tasks outnumber partitions each task
 * gets at most one partition; otherwise the first 'remainder' tasks each absorb one
 * extra partition.
 */
public static List < PartitionDefinition > assignPartitions ( List < PartitionDefinition > partitions , int currentTask , int totalTasks ) {
    int esPartitions = partitions.size();
    if (totalTasks >= esPartitions) {
        // One (or zero) partitions per task.
        return (currentTask >= esPartitions
                ? Collections.<PartitionDefinition> emptyList()
                : Collections.singletonList(partitions.get(currentTask)));
    }
    int partitionsPerTask = esPartitions / totalTasks;
    int remainder = esPartitions % totalTasks;
    int partitionsPerCurrentTask = partitionsPerTask;
    if (currentTask < remainder) {
        partitionsPerCurrentTask++;
    }
    // Start offset: every earlier task took 'partitionsPerTask' partitions, plus one
    // extra for each earlier task that absorbed part of the remainder.
    // Fix: the previous code added only 1 whenever remainder > currentTask, which for
    // remainder >= 3 produced overlapping slices and left some partitions unassigned
    // (e.g. 13 partitions / 5 tasks: task 1 -> [3,5], task 2 -> [5,7], 8 orphaned).
    int offset = partitionsPerTask * currentTask + Math.min(currentTask, remainder);
    if (partitionsPerCurrentTask == 1) {
        return Collections.singletonList(partitions.get(offset));
    }
    List<PartitionDefinition> assigned = new ArrayList<PartitionDefinition>(partitionsPerCurrentTask);
    for (int index = offset; index < offset + partitionsPerCurrentTask; index++) {
        assigned.add(partitions.get(index));
    }
    return assigned;
}
expects currentTask to start from 0
8,729
// Sets up a RestRepository for writing to a single concrete index: touches (creates)
// the index and waits for yellow health, short-circuits for WAN-only and client-node
// modes (keeping the existing pinned node), otherwise discovers the index's primary
// shards, picks one deterministically from currentInstance (random when <= 0), pins
// the client to the node hosting it, and returns a fresh repository bound to it.
// NOTE(review): the waitForYellow() branch logs a timeout warning when the call
// returns true - confirm whether true means "timed out" for that API.
private static RestRepository initSingleIndex ( Settings settings , long currentInstance , Resource resource , Log log ) { if ( log . isDebugEnabled ( ) ) { log . debug ( String . format ( "Resource [%s] resolves as a single index" , resource ) ) ; } RestRepository repository = new RestRepository ( settings ) ; if ( repository . touch ( ) ) { if ( repository . waitForYellow ( ) ) { log . warn ( String . format ( "Timed out waiting for index [%s] to reach yellow health" , resource ) ) ; } } if ( settings . getNodesWANOnly ( ) ) { String node = SettingsUtils . getPinnedNode ( settings ) ; if ( log . isDebugEnabled ( ) ) { log . debug ( String . format ( "Partition writer instance [%s] assigned to [%s]" , currentInstance , node ) ) ; } return repository ; } if ( settings . getNodesClientOnly ( ) ) { String clientNode = repository . getRestClient ( ) . getCurrentNode ( ) ; if ( log . isDebugEnabled ( ) ) { log . debug ( String . format ( "Client-node routing detected; partition writer instance [%s] assigned to [%s]" , currentInstance , clientNode ) ) ; } return repository ; } Map < ShardInfo , NodeInfo > targetShards = repository . getWriteTargetPrimaryShards ( settings . getNodesClientOnly ( ) ) ; repository . close ( ) ; Assert . isTrue ( ! targetShards . isEmpty ( ) , String . format ( "Cannot determine write shards for [%s]; likely its format is incorrect (maybe it contains illegal characters? or all shards failed?)" , resource ) ) ; List < ShardInfo > orderedShards = new ArrayList < ShardInfo > ( targetShards . keySet ( ) ) ; Collections . sort ( orderedShards ) ; if ( log . isTraceEnabled ( ) ) { log . trace ( String . format ( "Partition writer instance [%s] discovered [%s] primary shards %s" , currentInstance , orderedShards . size ( ) , orderedShards ) ) ; } if ( currentInstance <= 0 ) { currentInstance = new Random ( ) . nextInt ( targetShards . size ( ) ) + 1 ; } int bucket = ( int ) ( currentInstance % targetShards . 
size ( ) ) ; ShardInfo chosenShard = orderedShards . get ( bucket ) ; NodeInfo targetNode = targetShards . get ( chosenShard ) ; SettingsUtils . pinNode ( settings , targetNode . getPublishAddress ( ) ) ; String node = SettingsUtils . getPinnedNode ( settings ) ; repository = new RestRepository ( settings ) ; if ( log . isDebugEnabled ( ) ) { log . debug ( String . format ( "Partition writer instance [%s] assigned to primary shard [%s] at address [%s]" , currentInstance , chosenShard . getName ( ) , node ) ) ; } return repository ; }
Validate and configure a rest repository for writing to an index . The index is potentially created if it does not exist and the client is pinned to a node that hosts one of the index's primary shards based on its currentInstance number .
8,730
/**
 * Builds a RestRepository for a multi-index (pattern) resource. Shard locations
 * cannot be resolved for a pattern, so the client keeps whatever node was already
 * pinned; only debug logging happens here.
 */
private static RestRepository initMultiIndices ( Settings settings , long currentInstance , Resource resource , Log log ) {
    String pinnedNode = SettingsUtils.getPinnedNode(settings);
    if (log.isDebugEnabled()) {
        log.debug(String.format("Resource [%s] resolves as an index pattern", resource));
        log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, pinnedNode));
    }
    return new RestRepository(settings);
}
Creates a RestRepository for use with a multi - index resource pattern . The client is left pinned to the original node that it was pinned to since the shard locations cannot be determined at all .
8,731
// Validates that an alias resource is writable at job start and builds a repository
// for it. Zero backing indices is an error; with multiple backing indices exactly one
// must be flagged as the write index (otherwise bail out); a single backing index is
// accepted as-is. The pinned node is left unchanged. This is a point-in-time check -
// the alias may still change while the job runs.
private static RestRepository initAliasWrite ( GetAliasesRequestBuilder . Response response , Settings settings , long currentInstance , Resource resource , Log log ) { if ( log . isDebugEnabled ( ) ) { log . debug ( String . format ( "Resource [%s] resolves as an index alias" , resource ) ) ; } Map < String , Map < String , IndicesAliases . Alias > > indexAliasTable = response . getIndices ( ) . getAll ( ) ; if ( indexAliasTable . size ( ) < 1 ) { throw new EsHadoopIllegalArgumentException ( "Cannot initialize alias write resource [" + resource . index ( ) + "] if it does not have any alias entries." ) ; } else if ( indexAliasTable . size ( ) > 1 ) { String currentWriteIndex = null ; for ( Map . Entry < String , Map < String , IndicesAliases . Alias > > indexRow : indexAliasTable . entrySet ( ) ) { String indexName = indexRow . getKey ( ) ; Map < String , IndicesAliases . Alias > aliases = indexRow . getValue ( ) ; IndicesAliases . Alias aliasInfo = aliases . get ( resource . index ( ) ) ; if ( aliasInfo . isWriteIndex ( ) ) { currentWriteIndex = indexName ; break ; } } if ( currentWriteIndex == null ) { throw new EsHadoopIllegalArgumentException ( "Attempting to write to alias [" + resource . index ( ) + "], " + "but detected multiple indices [" + indexAliasTable . size ( ) + "] with no write index selected. " + "Bailing out..." ) ; } else { if ( log . isDebugEnabled ( ) ) { log . debug ( String . format ( "Writing to currently configured write-index [%s]" , currentWriteIndex ) ) ; } } } else { if ( log . isDebugEnabled ( ) ) { log . debug ( String . format ( "Writing to the alias's single configured index [%s]" , indexAliasTable . keySet ( ) . iterator ( ) . next ( ) ) ) ; } } String node = SettingsUtils . getPinnedNode ( settings ) ; if ( log . isDebugEnabled ( ) ) { log . debug ( String . format ( "Partition writer instance [%s] assigned to [%s]" , currentInstance , node ) ) ; } return new RestRepository ( settings ) ; }
Validate and configure a rest repository for writing to an alias backed by a valid write - index . This validation only checks that an alias is valid at time of job start and makes no guarantees about the alias changing during the execution .
8,732
/**
 * Maps a "file" or "jar" URL to the canonical file URI string of the underlying
 * local file; any other scheme (or a null URL) yields null. A jar URL is first
 * unwrapped (jar:file:...!/entry) down to the enclosing jar file's URL.
 */
public static String toCanonicalFilePath ( URL fileURL ) throws URISyntaxException , IOException {
    if (fileURL == null) {
        return null;
    }
    String protocol = fileURL.getProtocol();
    if (!"jar".equals(protocol) && !"file".equals(protocol)) {
        return null;
    }
    URL resolved = fileURL;
    if ("jar".equals(protocol)) {
        resolved = ((JarURLConnection) resolved.openConnection()).getJarFileURL();
    }
    // Canonicalize to collapse symlinks and relative segments.
    return new File(resolved.toURI()).getCanonicalFile().toURI().toString();
}
Convert either a file or jar url into a local canonical file or null if the file is a different scheme .
8,733
// When Kerberos auth is enabled in the job's settings, obtains an Elasticsearch
// authentication token (resolving the cluster info via a bootstrap RestClient if it
// is not already in the settings) and adds it to the Job's credential set; otherwise
// just logs that credentials are skipped. The bootstrap client is always closed.
public static void initCredentials ( Job job ) { Configuration configuration = job . getConfiguration ( ) ; Settings settings = HadoopSettingsManager . loadFrom ( configuration ) ; InitializationUtils . setUserProviderIfNotSet ( settings , HadoopUserProvider . class , LOG ) ; UserProvider userProvider = UserProvider . create ( settings ) ; if ( userProvider . isEsKerberosEnabled ( ) ) { User user = userProvider . getUser ( ) ; ClusterInfo clusterInfo = settings . getClusterInfoOrNull ( ) ; RestClient bootstrap = new RestClient ( settings ) ; try { if ( clusterInfo == null ) { clusterInfo = bootstrap . mainInfo ( ) ; } TokenUtil . addTokenForJob ( bootstrap , clusterInfo . getClusterName ( ) , user , job ) ; } catch ( EsHadoopException ex ) { throw new EsHadoopIllegalArgumentException ( String . format ( "Cannot detect ES version - " + "typically this happens if the network/Elasticsearch cluster is not accessible or when targeting " + "a WAN/Cloud instance without the proper setting '%s'" , ConfigurationOptions . ES_NODES_WAN_ONLY ) , ex ) ; } finally { bootstrap . close ( ) ; } } else { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Ignoring Elasticsearch credentials since Kerberos Auth is not enabled." ) ; } } }
Given the settings contained within a job object retrieve an authentication token from either the currently logged in user or from the Elasticsearch cluster and add it to the job's credential set .
8,734
// JobConf twin of initCredentials(Job): when Kerberos auth is enabled, obtains an
// Elasticsearch authentication token (resolving cluster info via a bootstrap
// RestClient if needed) and adds it to the JobConf's credential set; otherwise logs
// that credentials are skipped. The bootstrap client is always closed.
public static void initCredentials ( JobConf jobConf ) { Settings settings = HadoopSettingsManager . loadFrom ( jobConf ) ; InitializationUtils . setUserProviderIfNotSet ( settings , HadoopUserProvider . class , LOG ) ; UserProvider userProvider = UserProvider . create ( settings ) ; if ( userProvider . isEsKerberosEnabled ( ) ) { User user = userProvider . getUser ( ) ; ClusterInfo clusterInfo = settings . getClusterInfoOrNull ( ) ; RestClient bootstrap = new RestClient ( settings ) ; try { if ( clusterInfo == null ) { clusterInfo = bootstrap . mainInfo ( ) ; } TokenUtil . addTokenForJobConf ( bootstrap , clusterInfo . getClusterName ( ) , user , jobConf ) ; } catch ( EsHadoopException ex ) { throw new EsHadoopIllegalArgumentException ( String . format ( "Cannot detect ES version - " + "typically this happens if the network/Elasticsearch cluster is not accessible or when targeting " + "a WAN/Cloud instance without the proper setting '%s'" , ConfigurationOptions . ES_NODES_WAN_ONLY ) , ex ) ; } finally { bootstrap . close ( ) ; } } else { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Ignoring Elasticsearch credentials since Kerberos Auth is not enabled." ) ; } } }
Given the settings contained within the job conf retrieve an authentication token from either the currently logged in user or from the Elasticsearch cluster and add it to the job's credential set .
8,735
/**
 * Threshold-limited Levenshtein distance: returns the edit distance between the two
 * sequences when it is at most 'threshold', or -1 otherwise. Uses two-row dynamic
 * programming restricted to a diagonal band of width 2*threshold+1; cells outside
 * the band are poisoned with Integer.MAX_VALUE so they never win a min().
 */
public static int levenshteinDistance ( CharSequence one , CharSequence another , int threshold ) {
    int n = one.length();
    int m = another.length();
    // Degenerate cases: the distance is simply the other sequence's length.
    if (n == 0) {
        return m <= threshold ? m : -1;
    }
    if (m == 0) {
        return n <= threshold ? n : -1;
    }
    // Keep the shorter sequence in 'one' so the DP rows stay as small as possible.
    if (n > m) {
        CharSequence tmp = one;
        one = another;
        another = tmp;
        n = m;
        m = another.length();
    }
    int[] previousRow = new int[n + 1];
    int[] currentRow = new int[n + 1];
    int[] rowSwap;
    final int boundary = Math.min(n, threshold) + 1;
    for (int i = 0; i < boundary; i++) {
        previousRow[i] = i;
    }
    Arrays.fill(previousRow, boundary, previousRow.length, Integer.MAX_VALUE);
    Arrays.fill(currentRow, Integer.MAX_VALUE);
    for (int j = 1; j <= m; j++) {
        final char rightChar = another.charAt(j - 1);
        currentRow[0] = j;
        // Only cells within 'threshold' of the diagonal can stay under the limit;
        // the max bound also guards against integer overflow of j + threshold.
        final int min = Math.max(1, j - threshold);
        final int max = (j > Integer.MAX_VALUE - threshold) ? n : Math.min(n, j + threshold);
        if (min > max) {
            // The band has emptied: the distance necessarily exceeds the threshold.
            return -1;
        }
        if (min > 1) {
            currentRow[min - 1] = Integer.MAX_VALUE;
        }
        for (int i = min; i <= max; i++) {
            if (one.charAt(i - 1) == rightChar) {
                currentRow[i] = previousRow[i - 1];
            } else {
                currentRow[i] = 1 + Math.min(Math.min(currentRow[i - 1], previousRow[i]), previousRow[i - 1]);
            }
        }
        rowSwap = previousRow;
        previousRow = currentRow;
        currentRow = rowSwap;
    }
    if (previousRow[n] <= threshold) {
        return previousRow[n];
    }
    return -1;
}
Returns the Levenshtein distance between the two strings if it is within the given threshold , or - 1 otherwise .
8,736
/**
 * Renders a value in a JSON-friendly way: null becomes the literal "null", a String
 * is quoted and JSON-escaped, and anything else relies on its toString().
 */
public static String toJsonString ( Object value ) {
    if (value == null) {
        return "null";
    }
    // Exact class check (not instanceof), matching only java.lang.String itself.
    if (String.class.equals(value.getClass())) {
        return "\"" + StringUtils.jsonEncoding(value.toString()) + "\"";
    }
    return value.toString();
}
return the value in a JSON friendly way
8,737
/**
 * Collects all globally-scoped addresses of interfaces that are up, skipping
 * loopback, site-local and link-local addresses.
 */
static InetAddress [ ] getGlobalInterfaces ( ) throws SocketException {
    List<InetAddress> globals = new ArrayList<InetAddress>();
    for (NetworkInterface nic : getInterfaces()) {
        if (!nic.isUp()) {
            continue;
        }
        for (InetAddress address : Collections.list(nic.getInetAddresses())) {
            boolean nonGlobal = address.isLoopbackAddress() || address.isSiteLocalAddress() || address.isLinkLocalAddress();
            if (!nonGlobal) {
                globals.add(address);
            }
        }
    }
    return globals.toArray(new InetAddress[globals.size()]);
}
Returns all global scope addresses for interfaces that are up .
8,738
/**
 * Resolves the request URI's host to an address and reverse-resolves it to its
 * fully-qualified canonical host name (may trigger DNS lookups).
 */
protected String getFQDN ( URI requestURI ) throws UnknownHostException {
    InetAddress resolved = InetAddress.getByName(requestURI.getHost());
    return resolved.getCanonicalHostName();
}
Get the FQDN of the request uri s address reverse resolving if needed .
8,739
private void initializeNegotiator ( URI requestURI , SpnegoCredentials spnegoCredentials ) throws UnknownHostException , AuthenticationException , GSSException { if ( spnegoNegotiator == null ) { String servicePrincipal = spnegoCredentials . getServicePrincipalName ( ) ; if ( spnegoCredentials . getServicePrincipalName ( ) . contains ( HOSTNAME_PATTERN ) ) { String fqdn = getFQDN ( requestURI ) ; String [ ] components = spnegoCredentials . getServicePrincipalName ( ) . split ( "[/@]" ) ; if ( components . length != 3 || ! components [ 1 ] . equals ( HOSTNAME_PATTERN ) ) { throw new AuthenticationException ( "Malformed service principal name [" + spnegoCredentials . getServicePrincipalName ( ) + "]. To use host substitution, the principal must be of the format [serviceName/_HOST@REALM.NAME]." ) ; } servicePrincipal = components [ 0 ] + "/" + fqdn . toLowerCase ( ) + "@" + components [ 2 ] ; } User userInfo = spnegoCredentials . getUserProvider ( ) . getUser ( ) ; KerberosPrincipal principal = userInfo . getKerberosPrincipal ( ) ; if ( principal == null ) { throw new EsHadoopIllegalArgumentException ( "Could not locate Kerberos Principal on currently logged in user." ) ; } spnegoNegotiator = new SpnegoNegotiator ( principal . getName ( ) , servicePrincipal ) ; } }
Creates the negotiator if it is not yet created or does nothing if the negotiator is already initialized .
8,740
private String getNegotiateToken ( ) throws GSSException { if ( spnegoNegotiator == null ) { throw new IllegalStateException ( "Negotiator not yet initialized." ) ; } String authString ; if ( StringUtils . hasText ( challenge ) ) { authString = spnegoNegotiator . send ( challenge ) ; } else { authString = spnegoNegotiator . send ( ) ; } this . challenge = null ; if ( authString != null ) { authString = EsHadoopAuthPolicies . NEGOTIATE + " " + authString ; } return authString ; }
Attempts to retrieve the next negotiation token to send consuming any previously set challenge data .
8,741
private String authenticate ( Credentials credentials , URI requestURI ) throws AuthenticationException { if ( ! ( credentials instanceof SpnegoCredentials ) ) { throw new AuthenticationException ( "Invalid credentials type provided to " + this . getClass ( ) . getName ( ) + "." + "Expected " + SpnegoCredentials . class . getName ( ) + " but got " + credentials . getClass ( ) . getName ( ) ) ; } final SpnegoCredentials spnegoCredentials = ( SpnegoCredentials ) credentials ; try { initializeNegotiator ( requestURI , spnegoCredentials ) ; return getNegotiateToken ( ) ; } catch ( GSSException e ) { throw new AuthenticationException ( "Could not authenticate" , e ) ; } catch ( UnknownHostException e ) { throw new AuthenticationException ( "Could not authenticate" , e ) ; } }
Implementation method that returns the text to send via the Authenticate header on the next request .
8,742
public void ensureMutualAuth ( String returnChallenge ) throws AuthenticationException { try { processChallenge ( returnChallenge ) ; } catch ( MalformedChallengeException mce ) { throw new AuthenticationException ( "Received invalid response header for mutual authentication" , mce ) ; } try { String token = getNegotiateToken ( ) ; if ( ! spnegoNegotiator . established ( ) || token != null ) { throw new AuthenticationException ( "Could not complete SPNEGO Authentication, Mutual Authentication Failed" ) ; } } catch ( GSSException gsse ) { throw new AuthenticationException ( "Could not complete SPNEGO Authentication" , gsse ) ; } }
Authenticating requests with SPNEGO means that a request will execute before the client is sure that the server is mutually authenticated . This means that at best if mutual auth is requested the client cannot trust that the server is giving accurate information or in the case that the client has already sent data further communication with the server should not happen .
8,743
public ClusterInfo getClusterInfoOrNull ( ) { String clusterName = getProperty ( InternalConfigurationOptions . INTERNAL_ES_CLUSTER_NAME ) ; if ( clusterName == null ) { return null ; } String clusterUUID = getProperty ( InternalConfigurationOptions . INTERNAL_ES_CLUSTER_UUID ) ; EsMajorVersion version = getInternalVersionOrThrow ( ) ; return new ClusterInfo ( new ClusterName ( clusterName , clusterUUID ) , version ) ; }
Get the internal cluster name and version or null if not present in the settings
8,744
Bitmap generateGradient ( ) { int [ ] [ ] pixels = new int [ 1080 ] [ 1920 ] ; for ( int y = 0 ; y < 1080 ; y ++ ) { for ( int x = 0 ; x < 1920 ; x ++ ) { int r = ( int ) ( y / 1080f * 255 ) ; int g = ( int ) ( x / 1920f * 255 ) ; int b = ( int ) ( ( Math . hypot ( x , y ) / Math . hypot ( 1080 , 1920 ) ) * 255 ) ; pixels [ y ] [ x ] = r << 16 | g << 8 | b ; } } return new Bitmap ( pixels ) ; }
Returns a bitmap that lights up red subpixels at the bottom green subpixels on the right and blue subpixels in bottom - right .
8,745
private void fillBuf ( ) throws IOException { int result = in . read ( buf , 0 , buf . length ) ; if ( result == - 1 ) { throw new EOFException ( ) ; } pos = 0 ; end = result ; }
Reads new input data into the buffer . Call only with pos == end or end == - 1 depending on the desired outcome if the function throws .
8,746
void submit ( final LoadAndDisplayImageTask task ) { taskDistributor . execute ( new Runnable ( ) { public void run ( ) { File image = configuration . diskCache . get ( task . getLoadingUri ( ) ) ; boolean isImageCachedOnDisk = image != null && image . exists ( ) ; initExecutorsIfNeed ( ) ; if ( isImageCachedOnDisk ) { taskExecutorForCachedImages . execute ( task ) ; } else { taskExecutor . execute ( task ) ; } } } ) ; }
Submits task to execution pool
8,747
public static boolean copyStream ( InputStream is , OutputStream os , CopyListener listener , int bufferSize ) throws IOException { int current = 0 ; int total = is . available ( ) ; if ( total <= 0 ) { total = DEFAULT_IMAGE_TOTAL_SIZE ; } final byte [ ] bytes = new byte [ bufferSize ] ; int count ; if ( shouldStopLoading ( listener , current , total ) ) return false ; while ( ( count = is . read ( bytes , 0 , bufferSize ) ) != - 1 ) { os . write ( bytes , 0 , count ) ; current += count ; if ( shouldStopLoading ( listener , current , total ) ) return false ; } os . flush ( ) ; return true ; }
Copies stream fires progress events by listener can be interrupted by listener .
8,748
public static void readAndCloseStream ( InputStream is ) { final byte [ ] bytes = new byte [ DEFAULT_BUFFER_SIZE ] ; try { while ( is . read ( bytes , 0 , DEFAULT_BUFFER_SIZE ) != - 1 ) ; } catch ( IOException ignored ) { } finally { closeSilently ( is ) ; } }
Reads all data from stream and close it silently
8,749
private E unlinkFirst ( ) { Node < E > f = first ; if ( f == null ) return null ; Node < E > n = f . next ; E item = f . item ; f . item = null ; f . next = f ; first = n ; if ( n == null ) last = null ; else n . prev = null ; -- count ; notFull . signal ( ) ; return item ; }
Removes and returns first element or null if empty .
8,750
private E unlinkLast ( ) { Node < E > l = last ; if ( l == null ) return null ; Node < E > p = l . prev ; E item = l . item ; l . item = null ; l . prev = l ; last = p ; if ( p == null ) first = null ; else p . next = null ; -- count ; notFull . signal ( ) ; return item ; }
Removes and returns last element or null if empty .
8,751
public void clear ( ) { final ReentrantLock lock = this . lock ; lock . lock ( ) ; try { for ( Node < E > f = first ; f != null ; ) { f . item = null ; Node < E > n = f . next ; f . prev = null ; f . next = null ; f = n ; } first = last = null ; count = 0 ; notFull . signalAll ( ) ; } finally { lock . unlock ( ) ; } }
Atomically removes all of the elements from this deque . The deque will be empty after this call returns .
8,752
private void processJournal ( ) throws IOException { deleteIfExists ( journalFileTmp ) ; for ( Iterator < Entry > i = lruEntries . values ( ) . iterator ( ) ; i . hasNext ( ) ; ) { Entry entry = i . next ( ) ; if ( entry . currentEditor == null ) { for ( int t = 0 ; t < valueCount ; t ++ ) { size += entry . lengths [ t ] ; fileCount ++ ; } } else { entry . currentEditor = null ; for ( int t = 0 ; t < valueCount ; t ++ ) { deleteIfExists ( entry . getCleanFile ( t ) ) ; deleteIfExists ( entry . getDirtyFile ( t ) ) ; } i . remove ( ) ; } } }
Computes the initial size and collects garbage as a part of opening the cache . Dirty entries are assumed to be inconsistent and will be deleted .
8,753
private synchronized void rebuildJournal ( ) throws IOException { if ( journalWriter != null ) { journalWriter . close ( ) ; } Writer writer = new BufferedWriter ( new OutputStreamWriter ( new FileOutputStream ( journalFileTmp ) , Util . US_ASCII ) ) ; try { writer . write ( MAGIC ) ; writer . write ( "\n" ) ; writer . write ( VERSION_1 ) ; writer . write ( "\n" ) ; writer . write ( Integer . toString ( appVersion ) ) ; writer . write ( "\n" ) ; writer . write ( Integer . toString ( valueCount ) ) ; writer . write ( "\n" ) ; writer . write ( "\n" ) ; for ( Entry entry : lruEntries . values ( ) ) { if ( entry . currentEditor != null ) { writer . write ( DIRTY + ' ' + entry . key + '\n' ) ; } else { writer . write ( CLEAN + ' ' + entry . key + entry . getLengths ( ) + '\n' ) ; } } } finally { writer . close ( ) ; } if ( journalFile . exists ( ) ) { renameTo ( journalFile , journalFileBackup , true ) ; } renameTo ( journalFileTmp , journalFile , false ) ; journalFileBackup . delete ( ) ; journalWriter = new BufferedWriter ( new OutputStreamWriter ( new FileOutputStream ( journalFile , true ) , Util . US_ASCII ) ) ; }
Creates a new journal that omits redundant information . This replaces the current journal if it exists .
8,754
public static Executor createExecutor ( int threadPoolSize , int threadPriority , QueueProcessingType tasksProcessingType ) { boolean lifo = tasksProcessingType == QueueProcessingType . LIFO ; BlockingQueue < Runnable > taskQueue = lifo ? new LIFOLinkedBlockingDeque < Runnable > ( ) : new LinkedBlockingQueue < Runnable > ( ) ; return new ThreadPoolExecutor ( threadPoolSize , threadPoolSize , 0L , TimeUnit . MILLISECONDS , taskQueue , createThreadFactory ( threadPriority , "uil-pool-" ) ) ; }
Creates default implementation of task executor
8,755
private static File createReserveDiskCacheDir ( Context context ) { File cacheDir = StorageUtils . getCacheDirectory ( context , false ) ; File individualDir = new File ( cacheDir , "uil-images" ) ; if ( individualDir . exists ( ) || individualDir . mkdir ( ) ) { cacheDir = individualDir ; } return cacheDir ; }
Creates reserve disk cache folder which will be used if primary disk cache folder becomes unavailable
8,756
public String generate ( String imageUri ) { byte [ ] md5 = getMD5 ( imageUri . getBytes ( ) ) ; BigInteger bi = new BigInteger ( md5 ) . abs ( ) ; return bi . toString ( RADIX ) ; }
Generates a name from the MD5 hash of the image URI encoded in radix 36 ( 10 digits + 26 letters )
8,757
public static File getOwnCacheDirectory ( Context context , String cacheDir ) { File appCacheDir = null ; if ( MEDIA_MOUNTED . equals ( Environment . getExternalStorageState ( ) ) && hasExternalStoragePermission ( context ) ) { appCacheDir = new File ( Environment . getExternalStorageDirectory ( ) , cacheDir ) ; } if ( appCacheDir == null || ( ! appCacheDir . exists ( ) && ! appCacheDir . mkdirs ( ) ) ) { appCacheDir = context . getCacheDir ( ) ; } return appCacheDir ; }
Returns specified application cache directory . Cache directory will be created on SD card by defined path if card is mounted and app has appropriate permission . Else - Android defines cache directory on device s file system .
8,758
private boolean resizeAndSaveImage ( int maxWidth , int maxHeight ) throws IOException { boolean saved = false ; File targetFile = configuration . diskCache . get ( uri ) ; if ( targetFile != null && targetFile . exists ( ) ) { ImageSize targetImageSize = new ImageSize ( maxWidth , maxHeight ) ; DisplayImageOptions specialOptions = new DisplayImageOptions . Builder ( ) . cloneFrom ( options ) . imageScaleType ( ImageScaleType . IN_SAMPLE_INT ) . build ( ) ; ImageDecodingInfo decodingInfo = new ImageDecodingInfo ( memoryCacheKey , Scheme . FILE . wrap ( targetFile . getAbsolutePath ( ) ) , uri , targetImageSize , ViewScaleType . FIT_INSIDE , getDownloader ( ) , specialOptions ) ; Bitmap bmp = decoder . decode ( decodingInfo ) ; if ( bmp != null && configuration . processorForDiskCache != null ) { L . d ( LOG_PROCESS_IMAGE_BEFORE_CACHE_ON_DISK , memoryCacheKey ) ; bmp = configuration . processorForDiskCache . process ( bmp ) ; if ( bmp == null ) { L . e ( ERROR_PROCESSOR_FOR_DISK_CACHE_NULL , memoryCacheKey ) ; } } if ( bmp != null ) { saved = configuration . diskCache . save ( uri , bmp ) ; bmp . recycle ( ) ; } } return saved ; }
Decodes image file into Bitmap resize it and save it back
8,759
private boolean canMergeLegacy ( String dataStructureName ) { Object mergePolicy = getMergePolicy ( dataStructureName ) ; InMemoryFormat inMemoryFormat = getInMemoryFormat ( dataStructureName ) ; return checkMergePolicySupportsInMemoryFormat ( dataStructureName , mergePolicy , inMemoryFormat , false , logger ) ; }
Check if data structures in - memory - format appropriate to merge with legacy policies
8,760
public void executeWithDelay ( ) { long now = Clock . currentTimeMillis ( ) ; if ( delay + now > hardLimit ) { scheduleNewExecution ( now ) ; } else if ( ! tryPostponeExecution ( ) ) { scheduleNewExecution ( now ) ; } }
invoke delayed execution .
8,761
public static < K , V > EntryView < K , V > createNullEntryView ( K key ) { return new NullEntryView < > ( key ) ; }
Creates a null entry view that has only key and no value .
8,762
private void calculateStoreTime ( DelayedEntry delayedEntry ) { Data key = ( Data ) delayedEntry . getKey ( ) ; DelayedEntry currentEntry = map . get ( key ) ; if ( currentEntry != null ) { long currentStoreTime = currentEntry . getStoreTime ( ) ; delayedEntry . setStoreTime ( currentStoreTime ) ; } }
If this is an existing key in this queue use previously set store time ; since we do not want to shift store time of an existing key on every update .
8,763
public < T > Future < T > submitToMember ( Callable < T > task , Member member ) { final Address memberAddress = getMemberAddress ( member ) ; return submitToTargetInternal ( task , memberAddress , null , false ) ; }
submit to member
8,764
public void submitToMember ( Runnable command , Member member , ExecutionCallback callback ) { Callable < ? > callable = createRunnableAdapter ( command ) ; submitToMember ( callable , member , callback ) ; }
submit to member with callback
8,765
public < T > Future < T > submitToKeyOwner ( Callable < T > task , Object key ) { return submitToKeyOwnerInternal ( task , key , null , false ) ; }
submit to key
8,766
private void handleWanSyncMap ( HttpPostCommand command ) throws UnsupportedEncodingException { String res ; final String [ ] params = decodeParams ( command , 3 ) ; final String wanRepName = params [ 0 ] ; final String publisherId = params [ 1 ] ; final String mapName = params [ 2 ] ; try { textCommandService . getNode ( ) . getNodeEngine ( ) . getWanReplicationService ( ) . syncMap ( wanRepName , publisherId , mapName ) ; res = response ( ResponseType . SUCCESS , "message" , "Sync initiated" ) ; } catch ( Exception ex ) { logger . warning ( "Error occurred while syncing map" , ex ) ; res = exceptionResponse ( ex ) ; } sendResponse ( command , res ) ; }
Initiates a WAN sync for a single map and the wan replication name and publisher ID defined by the command parameters .
8,767
private void handleWanPausePublisher ( HttpPostCommand command ) throws UnsupportedEncodingException { String res ; String [ ] params = decodeParams ( command , 2 ) ; String wanReplicationName = params [ 0 ] ; String publisherId = params [ 1 ] ; WanReplicationService service = textCommandService . getNode ( ) . getNodeEngine ( ) . getWanReplicationService ( ) ; try { service . pause ( wanReplicationName , publisherId ) ; res = response ( ResponseType . SUCCESS , "message" , "WAN publisher paused" ) ; } catch ( Exception ex ) { logger . warning ( "Error occurred while pausing WAN publisher" , ex ) ; res = exceptionResponse ( ex ) ; } sendResponse ( command , res ) ; }
Pauses a WAN publisher on this member only . The publisher is identified by the WAN replication name and publisher ID passed as parameters to the HTTP command .
8,768
PartitionReplica checkAndGetPrimaryReplicaOwner ( int partitionId , int replicaIndex ) { InternalPartitionImpl partition = partitionStateManager . getPartitionImpl ( partitionId ) ; PartitionReplica owner = partition . getOwnerReplicaOrNull ( ) ; if ( owner == null ) { logger . info ( "Sync replica target is null, no need to sync -> partitionId=" + partitionId + ", replicaIndex=" + replicaIndex ) ; return null ; } PartitionReplica localReplica = PartitionReplica . from ( nodeEngine . getLocalMember ( ) ) ; if ( owner . equals ( localReplica ) ) { if ( logger . isFinestEnabled ( ) ) { logger . finest ( "This node is now owner of partition, cannot sync replica -> partitionId=" + partitionId + ", replicaIndex=" + replicaIndex + ", partition-info=" + partitionStateManager . getPartitionImpl ( partitionId ) ) ; } return null ; } if ( ! partition . isOwnerOrBackup ( localReplica ) ) { if ( logger . isFinestEnabled ( ) ) { logger . finest ( "This node is not backup replica of partitionId=" + partitionId + ", replicaIndex=" + replicaIndex + " anymore." ) ; } return null ; } return owner ; }
Checks preconditions for replica sync - if we don t know the owner yet if this node is the owner or not a replica
8,769
public void releaseReplicaSyncPermits ( int permits ) { assert permits > 0 : "Invalid permits: " + permits ; replicaSyncSemaphore . release ( permits ) ; if ( logger . isFinestEnabled ( ) ) { logger . finest ( "Released " + permits + " replica sync permits. Available permits: " + replicaSyncSemaphore . availablePermits ( ) ) ; } assert availableReplicaSyncPermits ( ) <= maxParallelReplications : "Number of replica sync permits exceeded the configured number!" ; }
Releases the previously acquired permits .
8,770
public final < B extends Data > B toData ( Object obj ) { return toData ( obj , globalPartitioningStrategy ) ; }
Serializes the given object to Data using the global partitioning strategy
8,771
public final void register ( Class type , Serializer serializer ) { if ( type == null ) { throw new IllegalArgumentException ( "Class type information is required!" ) ; } if ( serializer . getTypeId ( ) <= 0 ) { throw new IllegalArgumentException ( "Type ID must be positive! Current: " + serializer . getTypeId ( ) + ", Serializer: " + serializer ) ; } safeRegister ( type , createSerializerAdapter ( serializer , this ) ) ; }
Registers the given serializer for the given class type
8,772
public Result runPartitionIndexOrPartitionScanQueryOnGivenOwnedPartition ( Query query , int partitionId ) { MapContainer mapContainer = mapServiceContext . getMapContainer ( query . getMapName ( ) ) ; PartitionIdSet partitions = singletonPartitionIdSet ( partitionCount , partitionId ) ; Predicate predicate = queryOptimizer . optimize ( query . getPredicate ( ) , mapContainer . getIndexes ( partitionId ) ) ; Collection < QueryableEntry > entries = null ; Indexes indexes = mapContainer . getIndexes ( partitionId ) ; if ( indexes != null && ! indexes . isGlobal ( ) ) { entries = indexes . query ( predicate ) ; } Result result ; if ( entries == null ) { result = createResult ( query , partitions ) ; partitionScanExecutor . execute ( query . getMapName ( ) , predicate , partitions , result ) ; result . completeConstruction ( partitions ) ; } else { result = populateNonEmptyResult ( query , entries , partitions ) ; } return result ; }
Runs an index or scan query for a single partition . If the index is global it won t be asked
8,773
public JsonValue get ( String name ) { if ( name == null ) { throw new NullPointerException ( "name is null" ) ; } int index = indexOf ( name ) ; return index != - 1 ? values . get ( index ) : null ; }
Returns the value of the member with the specified name in this object . If this object contains multiple members with the given name this method will return the last one .
8,774
public Iterator < Member > iterator ( ) { final Iterator < String > namesIterator = names . iterator ( ) ; final Iterator < JsonValue > valuesIterator = values . iterator ( ) ; return new Iterator < JsonObject . Member > ( ) { public boolean hasNext ( ) { return namesIterator . hasNext ( ) ; } public Member next ( ) { String name = namesIterator . next ( ) ; JsonValue value = valuesIterator . next ( ) ; return new Member ( name , value ) ; } public void remove ( ) { throw new UnsupportedOperationException ( ) ; } } ; }
Returns an iterator over the members of this object in document order . The returned iterator cannot be used to modify this object .
8,775
public final void init ( NodeEngineImpl nodeEngine , Set < Operation > asyncOperations ) { this . nodeEngine = nodeEngine ; this . operationService = nodeEngine . getOperationService ( ) ; this . serializationService = nodeEngine . getSerializationService ( ) ; this . asyncOperations = asyncOperations ; this . executionService = nodeEngine . getExecutionService ( ) ; asyncOperations . add ( offloadedOperation ) ; offloadedOperation . setOperationResponseHandler ( newOperationResponseHandler ( ) ) ; }
Initializes the Offload .
8,776
private List < DelayedEntry > callHandler ( Collection < DelayedEntry > delayedEntries , StoreOperationType operationType ) { final int size = delayedEntries . size ( ) ; if ( size == 0 ) { return Collections . emptyList ( ) ; } if ( size == 1 || ! writeCoalescing ) { return processEntriesOneByOne ( delayedEntries , operationType ) ; } final DelayedEntry [ ] delayedEntriesArray = delayedEntries . toArray ( new DelayedEntry [ 0 ] ) ; final Map < Object , DelayedEntry > batchMap = prepareBatchMap ( delayedEntriesArray ) ; if ( batchMap . size ( ) == 1 ) { final DelayedEntry delayedEntry = delayedEntriesArray [ delayedEntriesArray . length - 1 ] ; return callSingleStoreWithListeners ( delayedEntry , operationType ) ; } final List < DelayedEntry > failedEntryList = callBatchStoreWithListeners ( batchMap , operationType ) ; final List < DelayedEntry > failedTries = new ArrayList < > ( ) ; for ( DelayedEntry entry : failedEntryList ) { final Collection < DelayedEntry > tmpFails = callSingleStoreWithListeners ( entry , operationType ) ; failedTries . addAll ( tmpFails ) ; } return failedTries ; }
Decides how entries should be passed to handlers . It passes entries to handler s single or batch handling methods .
8,777
public static String checkHasText ( String argument , String errorMessage ) { if ( argument == null || argument . isEmpty ( ) ) { throw new IllegalArgumentException ( errorMessage ) ; } return argument ; }
Tests if a string contains text .
8,778
public static < T > T checkNotNull ( T argument , String errorMessage ) { if ( argument == null ) { throw new NullPointerException ( errorMessage ) ; } return argument ; }
Tests if an argument is not null .
8,779
public static < T > Iterable < T > checkNoNullInside ( Iterable < T > argument , String errorMessage ) { if ( argument == null ) { return argument ; } for ( T element : argument ) { checkNotNull ( element , errorMessage ) ; } return argument ; }
Tests if the elements inside the argument collection are not null . If collection is null or empty the test is ignored .
8,780
public static < E > E isNotNull ( E argument , String argName ) { if ( argument == null ) { throw new IllegalArgumentException ( format ( "argument '%s' can't be null" , argName ) ) ; } return argument ; }
Tests if an argument is not null .
8,781
public static int checkBackupCount ( int newBackupCount , int currentAsyncBackupCount ) { if ( newBackupCount < 0 ) { throw new IllegalArgumentException ( "backup-count can't be smaller than 0" ) ; } if ( currentAsyncBackupCount < 0 ) { throw new IllegalArgumentException ( "async-backup-count can't be smaller than 0" ) ; } if ( newBackupCount > MAX_BACKUP_COUNT ) { throw new IllegalArgumentException ( "backup-count can't be larger than than " + MAX_BACKUP_COUNT ) ; } if ( newBackupCount + currentAsyncBackupCount > MAX_BACKUP_COUNT ) { throw new IllegalArgumentException ( "the sum of backup-count and async-backup-count can't be larger than than " + MAX_BACKUP_COUNT ) ; } return newBackupCount ; }
Tests if the newBackupCount count is valid .
8,782
public static int checkAsyncBackupCount ( int currentBackupCount , int newAsyncBackupCount ) { if ( currentBackupCount < 0 ) { throw new IllegalArgumentException ( "backup-count can't be smaller than 0" ) ; } if ( newAsyncBackupCount < 0 ) { throw new IllegalArgumentException ( "async-backup-count can't be smaller than 0" ) ; } if ( newAsyncBackupCount > MAX_BACKUP_COUNT ) { throw new IllegalArgumentException ( "async-backup-count can't be larger than than " + MAX_BACKUP_COUNT ) ; } if ( currentBackupCount + newAsyncBackupCount > MAX_BACKUP_COUNT ) { throw new IllegalArgumentException ( "the sum of backup-count and async-backup-count can't be larger than than " + MAX_BACKUP_COUNT ) ; } return newAsyncBackupCount ; }
Tests if the newAsyncBackupCount count is valid .
8,783
public static < E > E checkInstanceOf ( Class < E > type , Object object , String errorMessage ) { isNotNull ( type , "type" ) ; if ( ! type . isInstance ( object ) ) { throw new IllegalArgumentException ( errorMessage ) ; } return ( E ) object ; }
Tests whether the supplied object is an instance of the supplied class type .
8,784
public static < E > E checkNotInstanceOf ( Class type , E object , String errorMessage ) { isNotNull ( type , "type" ) ; if ( type . isInstance ( object ) ) { throw new IllegalArgumentException ( errorMessage ) ; } return object ; }
Tests the supplied object to see if it is not a type of the supplied class .
8,785
public static < T > Iterator < T > checkHasNext ( Iterator < T > iterator , String message ) throws NoSuchElementException { if ( ! iterator . hasNext ( ) ) { throw new NoSuchElementException ( message ) ; } return iterator ; }
Check if iterator has next element . If not throw NoSuchElementException
8,786
@ SuppressFBWarnings ( "EI_EXPOSE_REP" ) public < K , V > void setPredicates ( Predicate < K , V > [ ] predicates ) { if ( this . predicates == null ) { this . predicates = predicates ; } else { throw new IllegalStateException ( "Cannot reset predicates in an OrPredicate after they have been already set." ) ; } }
Visitable predicates are treated as effectively immutable therefore callers should not make any changes to the array passed as argument after is has been set .
8,787
private int findMinIfNot ( int a , int b , int notMin ) { if ( a <= notMin ) { return b ; } if ( b <= notMin ) { return a ; } return Math . min ( a , b ) ; }
finds the min of two values , excluding any value that does not exceed the given lower bound
8,788
void markNewRepartition ( ) { lastRepartitionTime . set ( Clock . currentTimeMillis ( ) ) ; elapsedMigrationOperationTime . set ( 0 ) ; elapsedDestinationCommitTime . set ( 0 ) ; elapsedMigrationTime . set ( 0 ) ; completedMigrations . set ( 0 ) ; }
Marks start of new repartitioning . Resets stats from previous repartitioning round .
8,789
public StateMachine < T > withTransition ( T from , T to , T ... moreTo ) { transitions . put ( from , EnumSet . of ( to , moreTo ) ) ; return this ; }
Add a valid transition from one state to one or more states
8,790
public StateMachine < T > next ( T nextState ) throws IllegalStateException { Set < T > allowed = transitions . get ( currentState ) ; checkNotNull ( allowed , "No transitions from state " + currentState ) ; checkState ( allowed . contains ( nextState ) , "Transition not allowed from state " + currentState + " to " + nextState ) ; currentState = nextState ; return this ; }
Transition to next state
8,791
public boolean is ( T state , T ... otherStates ) { return EnumSet . of ( state , otherStates ) . contains ( currentState ) ; }
Check if current state is one of given states
8,792
public void voteFor ( WeightedItem < T > weightedItem ) { reorganizationCounter ++ ; weightedItem . vote ( ) ; if ( reorganizationCounter == maxVotesBeforeReorganization ) { reorganizationCounter = 0 ; organizeAndAdd ( null ) ; } }
Casts a vote for given list node . This vote is added to the item s weight .
8,793
public final void start ( ) { if ( ! enabled ) { return ; } long periodSeconds = properties . getSeconds ( PERIOD_SECONDS ) ; if ( periodSeconds <= 0 ) { long defaultValue = Long . parseLong ( PERIOD_SECONDS . getDefaultValue ( ) ) ; logger . warning ( "Provided client statistics " + PERIOD_SECONDS . getName ( ) + " cannot be less than or equal to 0. You provided " + periodSeconds + " seconds as the configuration. Client will use the default value of " + defaultValue + " instead." ) ; periodSeconds = defaultValue ; } periodicStats = new PeriodicStatistics ( ) ; schedulePeriodicStatisticsSendTask ( periodSeconds ) ; logger . info ( "Client statistics is enabled with period " + periodSeconds + " seconds." ) ; }
Registers all client statistics and schedules periodic collection of stats .
8,794
public static long timeInMsOrTimeIfNullUnit ( long time , TimeUnit timeUnit ) { return timeUnit != null ? timeUnit . toMillis ( time ) : time ; }
Convert time to milliseconds based on the given time unit . If time unit is null then input time is treated as milliseconds .
8,795
public static String bytesToString ( byte [ ] bytes , int offset , int length ) { return new String ( bytes , offset , length , UTF8_CHARSET ) ; }
Creates a UTF8_CHARSET string from a byte array .
8,796
public static String upperCaseInternal ( String s ) { if ( isNullOrEmpty ( s ) ) { return s ; } return s . toUpperCase ( LOCALE_INTERNAL ) ; }
HC specific settings operands etc . use this method . Creates an uppercase string from the given string .
8,797
public static String lowerCaseInternal ( String s ) { if ( isNullOrEmpty ( s ) ) { return s ; } return s . toLowerCase ( LOCALE_INTERNAL ) ; }
HC specific settings operands etc . use this method . Creates a lowercase string from the given string .
8,798
public static int indexOf ( String input , char ch , int offset ) { for ( int i = offset ; i < input . length ( ) ; i ++ ) { if ( input . charAt ( i ) == ch ) { return i ; } } return - 1 ; }
Like a String . indexOf but without MIN_SUPPLEMENTARY_CODE_POINT handling
8,799
public static int lastIndexOf ( String input , char ch , int offset ) { for ( int i = input . length ( ) - 1 - offset ; i >= 0 ; i -- ) { if ( input . charAt ( i ) == ch ) { return i ; } } return - 1 ; }
Like a String . lastIndexOf but without MIN_SUPPLEMENTARY_CODE_POINT handling