Dataset schema (column, type, value range):

    id         int32          0 to 165k
    repo       stringlengths  7 to 58
    path       stringlengths  12 to 218
    func_name  stringlengths  3 to 140
    code       stringlengths  73 to 34.1k
    language   stringclasses  1 value
    docstring  stringlengths  3 to 16k
    sha        stringlengths  40 to 40
    url        stringlengths  105 to 339
15,000
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/mr/security/TokenUtil.java
TokenUtil.getEsAuthToken
private static EsToken getEsAuthToken(ClusterName clusterName, User user) {
    return user.getEsToken(clusterName.getName());
}
java
Get the authentication token of the user for the provided cluster name in its ES-Hadoop specific form. @return null if the user does not have the token, otherwise the auth token for the cluster.
[ "Get", "the", "authentication", "token", "of", "the", "user", "for", "the", "provided", "cluster", "name", "in", "its", "ES", "-", "Hadoop", "specific", "form", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/mr/security/TokenUtil.java#L213-L215
15,001
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/InitializationUtils.java
InitializationUtils.discoverClusterInfo
public static ClusterInfo discoverClusterInfo(Settings settings, Log log) {
    ClusterName remoteClusterName = null;
    EsMajorVersion remoteVersion = null;
    String clusterName = settings.getProperty(InternalConfigurationOptions.INTERNAL_ES_CLUSTER_NAME);
    String clusterUUID = settings.getProperty(InternalConfigurationOptions.INTERNAL_ES_CLUSTER_UUID);
    String version = settings.getProperty(InternalConfigurationOptions.INTERNAL_ES_VERSION);
    if (StringUtils.hasText(clusterName) && StringUtils.hasText(version)) { // UUID is optional for now
        if (log.isDebugEnabled()) {
            log.debug(String.format("Elasticsearch cluster [NAME:%s][UUID:%s][VERSION:%s] already present in configuration; skipping discovery",
                    clusterName, clusterUUID, version));
        }
        remoteClusterName = new ClusterName(clusterName, clusterUUID);
        remoteVersion = EsMajorVersion.parse(version);
        return new ClusterInfo(remoteClusterName, remoteVersion);
    }
    RestClient bootstrap = new RestClient(settings);
    // first get ES main action info
    try {
        ClusterInfo mainInfo = bootstrap.mainInfo();
        if (log.isDebugEnabled()) {
            log.debug(String.format("Discovered Elasticsearch cluster [%s/%s], version [%s]",
                    mainInfo.getClusterName().getName(), mainInfo.getClusterName().getUUID(), mainInfo.getMajorVersion()));
        }
        settings.setInternalClusterInfo(mainInfo);
        return mainInfo;
    } catch (EsHadoopException ex) {
        throw new EsHadoopIllegalArgumentException(String.format("Cannot detect ES version - "
                + "typically this happens if the network/Elasticsearch cluster is not accessible or when targeting "
                + "a WAN/Cloud instance without the proper setting '%s'", ConfigurationOptions.ES_NODES_WAN_ONLY), ex);
    } finally {
        bootstrap.close();
    }
}
java
Retrieves the Elasticsearch cluster name and version from the settings, or, if they should be missing, creates a bootstrap client and obtains their values.
[ "Retrieves", "the", "Elasticsearch", "cluster", "name", "and", "version", "from", "the", "settings", "or", "if", "they", "should", "be", "missing", "creates", "a", "bootstrap", "client", "and", "obtains", "their", "values", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/InitializationUtils.java#L311-L346
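As a usage illustration (not code from the repository), a job-setup caller might bootstrap discovery like this; PropertiesSettings and the chosen logger are assumptions, while the getter calls follow directly from the method body above:

// Minimal sketch: configure a Settings object and discover the target cluster.
Settings settings = new PropertiesSettings();                        // assumed settings implementation
settings.setProperty(ConfigurationOptions.ES_NODES, "localhost:9200");

Log log = LogFactory.getLog(InitializationUtils.class);              // commons-logging, as used above
ClusterInfo info = InitializationUtils.discoverClusterInfo(settings, log);
System.out.println("Cluster " + info.getClusterName().getName()
        + ", major version " + info.getMajorVersion());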
15,002
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/RestRepository.java
RestRepository.lazyInitWriting
private void lazyInitWriting() {
    if (!writeInitialized) {
        this.writeInitialized = true;
        this.bulkProcessor = new BulkProcessor(client, resources.getResourceWrite(), settings);
        this.trivialBytesRef = new BytesRef();
        this.bulkEntryWriter = new BulkEntryWriter(settings,
                BulkCommands.create(settings, metaExtractor, client.clusterInfo.getMajorVersion()));
    }
}
java
Postpones write initialization: the repository may be used only for reading, in which case there is no need to allocate write buffers.
[ "postpone", "writing", "initialization", "since", "we", "can", "do", "only", "reading", "so", "there", "s", "no", "need", "to", "allocate", "buffers" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/RestRepository.java#L133-L140
15,003
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/RestRepository.java
RestRepository.scroll
Scroll scroll(String scrollId, ScrollReader reader) throws IOException {
    InputStream scroll = client.scroll(scrollId);
    try {
        return reader.read(scroll);
    } finally {
        if (scroll instanceof StatsAware) {
            stats.aggregate(((StatsAware) scroll).stats());
        }
    }
}
java
consume the scroll
[ "consume", "the", "scroll" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/RestRepository.java#L322-L331
15,004
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/util/encoding/HttpEncodingTools.java
HttpEncodingTools.encodePath
@Deprecated
public static String encodePath(String path) {
    try {
        return URIUtil.encodePath(path, "UTF-8");
    } catch (URIException ex) {
        throw new EsHadoopIllegalArgumentException("Cannot encode path segment [" + path + "]", ex);
    }
}
java
Encodes characters in the string except for those allowed in an absolute path. @deprecated Prefer to use {@link HttpEncodingTools#encode(String)} instead for encoding specific pieces of the URI. This method does not escape certain reserved characters, like '/' and ':'. As such, this is not safe to use on paths that may contain these reserved characters in the wrong places.
[ "Encodes", "characters", "in", "the", "string", "except", "for", "those", "allowed", "in", "an", "absolute", "path", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/util/encoding/HttpEncodingTools.java#L68-L75
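To make the deprecation note concrete, here is an illustrative comparison; the expected outputs are inferred from the docstring, not verified against the library:

String path = "/artists/type a/_search";
HttpEncodingTools.encodePath(path); // keeps '/' and ':' as-is, e.g. "/artists/type%20a/_search"
HttpEncodingTools.encode("type a"); // encodes one URI piece at a time, e.g. "type%20a" - the preferred approach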
15,005
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/util/encoding/HttpEncodingTools.java
HttpEncodingTools.concatenateAndUriEncode
public static String concatenateAndUriEncode(Collection<?> list, String delimiter) {
    Collection<String> escaped = new ArrayList<String>();
    if (list != null) {
        for (Object object : list) {
            escaped.add(encode(object.toString()));
        }
    }
    return StringUtils.concatenate(escaped, delimiter);
}
java
Encodes each string value of the list and concatenates the results using the supplied delimiter. @param list To be encoded and concatenated. @param delimiter Separator for concatenation. @return Concatenated and encoded string representation.
[ "Encodes", "each", "string", "value", "of", "the", "list", "and", "concatenates", "the", "results", "using", "the", "supplied", "delimiter", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/util/encoding/HttpEncodingTools.java#L115-L124
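A small usage sketch; the expected output is inferred from the method's contract:

List<String> indices = Arrays.asList("logs-2019.01.01", "my index");
String joined = HttpEncodingTools.concatenateAndUriEncode(indices, ",");
// joined is expected to be "logs-2019.01.01,my%20index"; a null list yields an empty string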
15,006
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/query/QueryUtils.java
QueryUtils.isExplicitlyRequested
public static boolean isExplicitlyRequested(String candidate, String... indices) {
    boolean result = false;
    for (String indexOrAlias : indices) {
        boolean include = true;
        if (indexOrAlias.charAt(0) == '+' || indexOrAlias.charAt(0) == '-') {
            include = indexOrAlias.charAt(0) == '+';
            indexOrAlias = indexOrAlias.substring(1);
        }
        if (indexOrAlias.equals("*") || indexOrAlias.equals("_all")) {
            return false;
        }
        if (Regex.isSimpleMatchPattern(indexOrAlias)) {
            if (Regex.simpleMatch(indexOrAlias, candidate)) {
                if (include) {
                    result = true;
                } else {
                    return false;
                }
            }
        } else {
            if (candidate.equals(indexOrAlias)) {
                if (include) {
                    result = true;
                } else {
                    return false;
                }
            }
        }
    }
    return result;
}
java
Checks if the provided candidate is explicitly contained in the provided indices.
[ "Checks", "if", "the", "provided", "candidate", "is", "explicitly", "contained", "in", "the", "provided", "indices", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/query/QueryUtils.java#L92-L122
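The inclusion/exclusion semantics are easiest to see with examples; the expected values below follow directly from the branches above:

QueryUtils.isExplicitlyRequested("logs", "logs");           // true: exact match
QueryUtils.isExplicitlyRequested("logs", "logs*");          // true: wildcard pattern match
QueryUtils.isExplicitlyRequested("logs", "-logs", "other"); // false: explicitly excluded
QueryUtils.isExplicitlyRequested("logs", "_all");           // false: "_all" and "*" are never explicit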
15,007
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/RestClient.java
RestClient.bulk
public BulkActionResponse bulk(Resource resource, TrackingBytesArray data) {
    // NB: dynamically get the stats since the transport can change
    long start = network.transportStats().netTotalTime;
    Response response = execute(PUT, resource.bulk(), data);
    long spent = network.transportStats().netTotalTime - start;

    stats.bulkTotal++;
    stats.docsSent += data.entries();
    stats.bulkTotalTime += spent;
    // bytes will be counted by the transport layer

    return new BulkActionResponse(parseBulkActionResponse(response), response.status(), spent);
}
java
Executes a single bulk operation against the provided resource, using the passed data as the request body. This method will retry bulk requests if the entire bulk request fails, but will not retry singular document failures. @param resource target of the bulk request. @param data bulk request body. This body will be cleared of entries on any successful bulk request. @return a BulkActionResponse object that will detail if there were failing documents that should be retried.
[ "Executes", "a", "single", "bulk", "operation", "against", "the", "provided", "resource", "using", "the", "passed", "data", "as", "the", "request", "body", ".", "This", "method", "will", "retry", "bulk", "requests", "if", "the", "entire", "bulk", "request", "fails", "but", "will", "not", "retry", "singular", "document", "failures", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/RestClient.java#L223-L235
15,008
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/query/SimpleQueryParser.java
SimpleQueryParser.parse
public static QueryBuilder parse(String raw, boolean isQuery) throws IOException {
    if (raw.startsWith("?")) {
        return parseURI(raw.substring(1));
    } else if (raw.startsWith("{")) {
        return new RawQueryBuilder(raw, isQuery);
    } else {
        throw new IllegalArgumentException("Failed to parse query: " + raw);
    }
}
java
Builds a QueryBuilder from the given string. @param raw a JSON string or a URI search @param isQuery true if the string is a query; otherwise the string is treated as a filter (only relevant for Elasticsearch versions prior to 2.x). @return the resulting QueryBuilder @throws IOException if the string cannot be parsed.
[ "Builds", "a", "QueryBuilder", "from", "the", "given", "string" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/query/SimpleQueryParser.java#L47-L56
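Both accepted input shapes, plus the failure case (illustrative only; the behavior follows from the three branches above):

QueryBuilder uriQuery = SimpleQueryParser.parse("?q=user:costin", true);                    // URI search, leading '?' stripped
QueryBuilder dslQuery = SimpleQueryParser.parse("{\"match\":{\"user\":\"costin\"}}", true); // raw JSON DSL
SimpleQueryParser.parse("user:costin", true); // throws IllegalArgumentException: neither '?' nor '{' prefix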
15,009
elastic/elasticsearch-hadoop
hive/src/main/java/org/elasticsearch/hadoop/hive/EsSerDe.java
EsSerDe.initialize
@Override
public void initialize(Configuration conf, Properties tbl, Properties partitionProperties) throws SerDeException {
    inspector = HiveUtils.structObjectInspector(tbl);
    structTypeInfo = HiveUtils.typeInfo(inspector);
    cfg = conf;
    List<Settings> settingSources = new ArrayList<>();
    settingSources.add(HadoopSettingsManager.loadFrom(tbl));
    if (cfg != null) {
        settingSources.add(HadoopSettingsManager.loadFrom(cfg));
    }
    settings = new CompositeSettings(settingSources);
    alias = HiveUtils.alias(settings);
    HiveUtils.fixHive13InvalidComments(settings, tbl);
    trace = log.isTraceEnabled();
    outputJSON = settings.getOutputAsJson();
    if (outputJSON) {
        jsonFieldName = new Text(HiveUtils.discoverJsonFieldName(settings, alias));
    }
}
java
implemented to actually get access to the raw properties
[ "implemented", "to", "actually", "get", "access", "to", "the", "raw", "properties" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/hive/src/main/java/org/elasticsearch/hadoop/hive/EsSerDe.java#L79-L99
15,010
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/PerEntityPoolingMetadataExtractor.java
PerEntityPoolingMetadataExtractor._createExtractorFor
private FieldExtractor _createExtractorFor(Metadata metadata) {
    // Boot metadata tags that are not supported in this version of Elasticsearch
    if (version.onOrAfter(EsMajorVersion.V_6_X)) {
        // 6.0 Removed support for TTL and Timestamp metadata on index and update requests.
        switch (metadata) {
            case TTL: // Fall through
            case TIMESTAMP:
                return new UnsupportedMetadataFieldExtractor(metadata, version);
        }
    }
    return createExtractorFor(metadata);
}
java
If a metadata tag is unsupported for this version of Elasticsearch, then an UnsupportedMetadataFieldExtractor is returned in its place.
[ "If", "a", "metadata", "tag", "is", "unsupported", "for", "this", "version", "of", "Elasticsearch", "then", "a" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/PerEntityPoolingMetadataExtractor.java#L117-L129
15,011
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/UpdateBulkFactory.java
UpdateBulkFactory.writeLegacyFormatting
private void writeLegacyFormatting(List<Object> list, Object paramExtractor) {
    if (paramExtractor != null) {
        list.add("{\"params\":");
        list.add(paramExtractor);
        list.add(",");
    } else {
        list.add("{");
    }
    if (HAS_SCRIPT) {
        /*
         * {
         *   "params": ...,
         *   "lang": "...",
         *   "script": "...",
         *   "upsert": {...}
         * }
         */
        if (HAS_LANG) {
            list.add(SCRIPT_LANG_1X);
        }
        list.add(SCRIPT_1X);
        if (UPSERT) {
            list.add(",\"upsert\":");
        }
    } else {
        /*
         * {
         *   "doc_as_upsert": true,
         *   "doc": {...}
         * }
         */
        if (UPSERT) {
            list.add("\"doc_as_upsert\":true,");
        }
        list.add("\"doc\":");
    }
}
java
Script format meant for versions 1.x to 2.x. Required format for 1.x and below. @param list Consumer of snippets @param paramExtractor Extracts parameters from documents or constants
[ "Script", "format", "meant", "for", "versions", "1", ".", "x", "to", "2", ".", "x", ".", "Required", "format", "for", "1", ".", "x", "and", "below", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/UpdateBulkFactory.java#L129-L168
15,012
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/UpdateBulkFactory.java
UpdateBulkFactory.writeStrictFormatting
private void writeStrictFormatting(List<Object> list, Object paramExtractor, String scriptToUse) {
    if (HAS_SCRIPT) {
        /*
         * {
         *   "script": {
         *     "inline": "...",
         *     "lang": "...",
         *     "params": ...,
         *   },
         *   "upsert": {...}
         * }
         */
        list.add(scriptToUse);
        if (HAS_LANG) {
            list.add(SCRIPT_LANG_5X);
        }
        if (paramExtractor != null) {
            list.add(",\"params\":");
            list.add(paramExtractor);
        }
        list.add("}");
        if (UPSERT) {
            list.add(",\"upsert\":");
        }
    } else {
        /*
         * {
         *   "doc_as_upsert": true,
         *   "doc": {...}
         * }
         */
        list.add("{");
        if (UPSERT) {
            list.add("\"doc_as_upsert\":true,");
        }
        list.add("\"doc\":");
    }
}
java
Script format meant for versions 2.x to 5.x. Required format for 5.x and above. @param list Consumer of snippets @param paramExtractor Extracts parameters from documents or constants
[ "Script", "format", "meant", "for", "versions", "2", ".", "x", "to", "5", ".", "x", ".", "Required", "format", "for", "5", ".", "x", "and", "above", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/UpdateBulkFactory.java#L175-L212
15,013
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java
BulkProcessor.add
public void add(BytesRef payload) {
    // check space first
    // ba is the backing array for data
    if (payload.length() > ba.available()) {
        if (autoFlush) {
            flush();
        } else {
            throw new EsHadoopIllegalStateException(
                    String.format("Auto-flush disabled and bulk buffer full; disable manual flush or increase "
                            + "capacity [current size %s]; bailing out", ba.capacity()));
        }
    }

    data.copyFrom(payload);
    dataEntries++;

    if (bufferEntriesThreshold > 0 && dataEntries >= bufferEntriesThreshold) {
        if (autoFlush) {
            flush();
        } else {
            // handle the corner case of manual flush that occurs only after the buffer is completely full (think size of 1)
            if (dataEntries > bufferEntriesThreshold) {
                throw new EsHadoopIllegalStateException(
                        String.format("Auto-flush disabled and maximum number of entries surpassed; disable manual "
                                + "flush or increase capacity [current size %s]; bailing out", bufferEntriesThreshold));
            }
        }
    }
}
java
Adds an entry to the bulk request, potentially flushing if the request reaches capacity. @param payload the entire bulk entry in JSON format, including the header and payload.
[ "Adds", "an", "entry", "to", "the", "bulk", "request", "potentially", "flushing", "if", "the", "request", "reaches", "capacity", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java#L127-L159
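The buffer capacity and entry threshold checked above come from the job configuration; a hedged sketch using the documented es-hadoop batch options ('settings' stands for the job's Settings instance):

// Option names are the standard es-hadoop batch settings; the values are examples only.
settings.setProperty("es.batch.size.bytes", "1mb");    // sizes the backing array (ba)
settings.setProperty("es.batch.size.entries", "1000"); // sets bufferEntriesThreshold
// With auto-flush enabled (the behavior implied above), add() flushes when either limit is reached.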
15,014
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java
BulkProcessor.validateEditedEntry
private BytesRef validateEditedEntry(byte[] retryDataBuffer) {
    BytesRef result = new BytesRef();
    byte closeBrace = '}';
    byte newline = '\n';
    int newlines = 0;
    for (byte b : retryDataBuffer) {
        if (b == newline) {
            newlines++;
        }
    }
    result.add(retryDataBuffer);
    // Check to make sure that either the last byte is a closed brace or a new line.
    byte lastByte = retryDataBuffer[retryDataBuffer.length - 1];
    if (lastByte == newline) {
        // If last byte is a newline, make sure there are two newlines present in the data
        if (newlines != 2) {
            throw new EsHadoopIllegalArgumentException("Encountered malformed data entry for bulk write retry. "
                    + "Data contains [" + newlines + "] newline characters (\\n) but expected to have [2].");
        }
    } else if (lastByte == closeBrace) {
        // If the last byte is a closed brace, make sure there is only one newline in the data
        if (newlines != 1) {
            throw new EsHadoopIllegalArgumentException("Encountered malformed data entry for bulk write retry. "
                    + "Data contains [" + newlines + "] newline characters (\\n) but expected to have [1].");
        }
        // Add a newline to the entry in this case.
        byte[] trailingNewline = new byte[]{newline};
        result.add(trailingNewline);
    }
    // Further checks are probably intrusive to performance
    return result;
}
java
Validate the byte contents of a bulk entry that has been edited before being submitted for retry. @param retryDataBuffer The new entry contents @return A BytesRef that contains the entry contents, potentially cleaned up. @throws EsHadoopIllegalArgumentException In the event that the document data cannot be simply cleaned up.
[ "Validate", "the", "byte", "contents", "of", "a", "bulk", "entry", "that", "has", "been", "edited", "before", "being", "submitted", "for", "retry", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java#L430-L466
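The validator encodes the bulk API's NDJSON framing: an action line plus a source line, terminated by exactly one trailing newline. A sketch of the two accepted shapes, with hypothetical payloads:

import java.nio.charset.StandardCharsets;

// Ends with '\n': must contain exactly two newlines (action line and source line, each terminated).
byte[] complete = "{\"index\":{}}\n{\"field\":\"value\"}\n".getBytes(StandardCharsets.UTF_8);
// Ends with '}': must contain exactly one newline; the method appends the trailing '\n' itself.
byte[] bare = "{\"index\":{}}\n{\"field\":\"value\"}".getBytes(StandardCharsets.UTF_8);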
15,015
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java
BulkProcessor.initFlushOperation
private void initFlushOperation(String bulkLoggingID, boolean retryOperation, long retriedDocs, long waitTime) {
    if (retryOperation) {
        if (waitTime > 0L) {
            debugLog(bulkLoggingID, "Retrying [%d] entries after backing off for [%s] ms", retriedDocs, TimeValue.timeValueMillis(waitTime));
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e) {
                debugLog(bulkLoggingID, "Thread interrupted - giving up on retrying...");
                throw new EsHadoopException("Thread interrupted - giving up on retrying...", e);
            }
        } else {
            debugLog(bulkLoggingID, "Retrying [%d] entries immediately (without backoff)", retriedDocs);
        }
    } else {
        debugLog(bulkLoggingID, "Sending batch of [%d] bytes/[%s] entries", data.length(), dataEntries);
    }
}
java
Logs flushing messages and performs backoff waiting if there is a wait time for retry.
[ "Logs", "flushing", "messages", "and", "performs", "backoff", "waiting", "if", "there", "is", "a", "wait", "time", "for", "retry", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java#L471-L488
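The wait time is computed by the caller's retry policy; purely as an illustration (not the library's actual policy), a capped exponential backoff could feed this method like so, where 'attempt' and 'baseDelayMs' are hypothetical names:

// Hypothetical caller-side backoff computation.
long baseDelayMs = 100L;
long waitTime = baseDelayMs << Math.min(attempt, 6); // 100ms, 200ms, ... capped at 6.4s
initFlushOperation(bulkLoggingID, true, retriedDocs, waitTime);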
15,016
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java
BulkProcessor.createDebugTxnID
private String createDebugTxnID() {
    if (LOG.isDebugEnabled()) {
        // Not required to be unique, just a best effort id here.
        return (Integer.toString(hashCode()) + Long.toString(System.currentTimeMillis()));
    }
    return null;
}
java
Creates a semi-unique string to reasonably identify a bulk transaction. String is not guaranteed to be unique.
[ "Creates", "a", "semi", "-", "unique", "string", "to", "reasonably", "identify", "a", "bulk", "transaction", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java#L495-L501
15,017
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java
BulkProcessor.close
@Override
public void close() {
    try {
        if (!hadWriteErrors) {
            flush();
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Dirty close; ignoring last existing write batch...");
            }
        }
        if (requiresRefreshAfterBulk && executedBulkWrite) {
            // refresh batch
            restClient.refresh(resource);
            if (LOG.isDebugEnabled()) {
                LOG.debug(String.format("Refreshing index [%s]", resource));
            }
        }
    } finally {
        for (IBulkWriteErrorHandler handler : documentBulkErrorHandlers) {
            handler.close();
        }
    }
}
java
Flushes and closes the bulk processor to further writes.
[ "Flushes", "and", "closes", "the", "bulk", "processor", "to", "further", "writes", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/bulk/BulkProcessor.java#L556-L580
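Typical lifecycle, sketched from the constructor call seen in RestRepository.lazyInitWriting above; the variable names are illustrative, not repository code:

BulkProcessor processor = new BulkProcessor(restClient, resource, settings);
try {
    processor.add(payload);   // buffered, possibly auto-flushed
} finally {
    processor.close();        // flushes the last batch (unless write errors occurred), then closes the error handlers
}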
15,018
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/json/BlockAwareJsonParser.java
BlockAwareJsonParser.exitBlock
public void exitBlock() {
    if (open == 0) {
        return;
    }
    if (open < 0) {
        throw new IllegalStateException("Parser is no longer nested in any blocks at the level in which it was "
                + "created. You must create a new block aware parser to track the levels above this one.");
    }
    while (open > 0) {
        Token t = delegate.nextToken();
        if (t == null) {
            // handle EOF?
            return;
        }
        updateLevelBasedOn(t);
    }
}
java
If this parser is reading tokens from an object or an array that is nested below its original nesting level, it will consume and skip all tokens until it reaches the end of the block that it was created on. The underlying parser will be left on the END_X token for the block.
[ "If", "this", "parser", "is", "reading", "tokens", "from", "an", "object", "or", "an", "array", "that", "is", "nested", "below", "its", "original", "nesting", "level", "it", "will", "consume", "and", "skip", "all", "tokens", "until", "it", "reaches", "the", "end", "of", "the", "block", "that", "it", "was", "created", "on", ".", "The", "underlying", "parser", "will", "be", "left", "on", "the", "END_X", "token", "for", "the", "block", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/json/BlockAwareJsonParser.java#L58-L76
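The same depth-tracking technique expressed against Jackson's streaming API, for comparison; this is a generic sketch, not es-hadoop's Parser abstraction:

import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

// Skip tokens until 'open' enclosing blocks have been closed, mirroring what updateLevelBasedOn() does.
static void exitBlocks(JsonParser parser, int open) throws java.io.IOException {
    while (open > 0) {
        JsonToken t = parser.nextToken();
        if (t == null) {
            return; // EOF
        }
        if (t.isStructStart()) {
            open++; // entered a nested object/array
        } else if (t.isStructEnd()) {
            open--; // closed one level
        }
    }
}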
15,019
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/pooling/TransportPool.java
TransportPool.validate
private boolean validate(PooledTransport transport) {
    try {
        Response response = transport.execute(validationRequest);
        return response.hasSucceeded();
    } catch (IOException ioe) {
        log.warn("Could not validate pooled connection on lease. Releasing pooled connection and trying again...", ioe);
        return false;
    }
}
java
Used to validate that an idle pooled transport is still good for consumption. @param transport the transport to test @return whether the transport passed validation.
[ "Used", "to", "validate", "an", "idle", "pooled", "transport", "is", "still", "good", "for", "consumption", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/pooling/TransportPool.java#L100-L108
15,020
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/pooling/TransportPool.java
TransportPool.borrowTransport
synchronized Transport borrowTransport() {
    long now = System.currentTimeMillis();
    List<PooledTransport> garbageTransports = new ArrayList<PooledTransport>();
    PooledTransport candidate = null;

    // Grab a transport
    for (Map.Entry<PooledTransport, Long> entry : idle.entrySet()) {
        PooledTransport transport = entry.getKey();
        if (validate(transport)) {
            candidate = transport;
            break;
        } else {
            garbageTransports.add(transport);
        }
    }

    // Remove any dead connections found
    for (PooledTransport transport : garbageTransports) {
        idle.remove(transport);
        release(transport);
    }

    // Create the connection if we didn't find any, remove it from the pool if we did.
    if (candidate == null) {
        candidate = create();
    } else {
        idle.remove(candidate);
    }

    // Lease.
    leased.put(candidate, now);
    return new LeasedTransport(candidate, this);
}
java
Borrows a Transport from this pool. If there are no pooled Transports available, a new one is created. @return A Transport backed by a pooled resource
[ "Borrows", "a", "Transport", "from", "this", "pool", ".", "If", "there", "are", "no", "pooled", "Transports", "available", "a", "new", "one", "is", "created", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/pooling/TransportPool.java#L122-L155
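In use, borrowTransport pairs naturally with a guaranteed return. The facade below is a hedged sketch of that calling pattern; based on the LeasedTransport wrapper constructed above (which keeps a reference back to the pool), it assumes that closing a lease hands the transport back rather than destroying it:

public class BorrowUsageSketch {
    // Hypothetical minimal facade over TransportPool's borrow/return contract.
    interface Pool {
        AutoCloseable borrow();
    }

    static void withLease(Pool pool) throws Exception {
        try (AutoCloseable lease = pool.borrow()) {
            // ... issue requests over the leased transport ...
        } // close() is assumed to return the lease to the pool, not tear it down
    }
}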
15,021
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/pooling/TransportPool.java
TransportPool.returnTransport
private synchronized void returnTransport(Transport returning) { long now = System.currentTimeMillis(); PooledTransport unwrapped; // check if they're returning a leased transport if (returning instanceof LeasedTransport) { LeasedTransport leasedTransport = (LeasedTransport) returning; unwrapped = leasedTransport.delegate; } else if (returning instanceof PooledTransport) { unwrapped = (PooledTransport) returning; } else { throw new EsHadoopIllegalStateException("Cannot return a non-poolable Transport to the pool"); } // make sure that this is even a leased transport before returning it if (leased.containsKey(unwrapped)) { leased.remove(unwrapped); idle.put(unwrapped, now); } else { throw new EsHadoopIllegalStateException("Cannot return a Transport object to a pool that was not sourced from the pool"); } }
java
private synchronized void returnTransport(Transport returning) { long now = System.currentTimeMillis(); PooledTransport unwrapped; // check if they're returning a leased transport if (returning instanceof LeasedTransport) { LeasedTransport leasedTransport = (LeasedTransport) returning; unwrapped = leasedTransport.delegate; } else if (returning instanceof PooledTransport) { unwrapped = (PooledTransport) returning; } else { throw new EsHadoopIllegalStateException("Cannot return a non-poolable Transport to the pool"); } // make sure that this is even a leased transport before returning it if (leased.containsKey(unwrapped)) { leased.remove(unwrapped); idle.put(unwrapped, now); } else { throw new EsHadoopIllegalStateException("Cannot return a Transport object to a pool that was not sourced from the pool"); } }
[ "private", "synchronized", "void", "returnTransport", "(", "Transport", "returning", ")", "{", "long", "now", "=", "System", ".", "currentTimeMillis", "(", ")", ";", "PooledTransport", "unwrapped", ";", "// check if they're returning a leased transport", "if", "(", "returning", "instanceof", "LeasedTransport", ")", "{", "LeasedTransport", "leasedTransport", "=", "(", "LeasedTransport", ")", "returning", ";", "unwrapped", "=", "leasedTransport", ".", "delegate", ";", "}", "else", "if", "(", "returning", "instanceof", "PooledTransport", ")", "{", "unwrapped", "=", "(", "PooledTransport", ")", "returning", ";", "}", "else", "{", "throw", "new", "EsHadoopIllegalStateException", "(", "\"Cannot return a non-poolable Transport to the pool\"", ")", ";", "}", "// make sure that this is even a leased transport before returning it", "if", "(", "leased", ".", "containsKey", "(", "unwrapped", ")", ")", "{", "leased", ".", "remove", "(", "unwrapped", ")", ";", "idle", ".", "put", "(", "unwrapped", ",", "now", ")", ";", "}", "else", "{", "throw", "new", "EsHadoopIllegalStateException", "(", "\"Cannot return a Transport object to a pool that was not sourced from the pool\"", ")", ";", "}", "}" ]
Returns a transport to the pool. @param returning Transport to be cleaned and returned to the pool.
[ "Returns", "a", "transport", "to", "the", "pool", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/pooling/TransportPool.java#L161-L182
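The leased-map membership check is what turns a double return, or a Transport from another pool, into a fast failure instead of silent pool corruption. The same move-between-maps discipline, reduced to a self-contained sketch:

import java.util.HashMap;
import java.util.Map;

public class ReturnSketch {
    private final Map<Object, Long> leased = new HashMap<Object, Long>();
    private final Map<Object, Long> idle = new HashMap<Object, Long>();

    synchronized void giveBack(Object transport) {
        if (leased.remove(transport) == null) {
            // Either returned twice or never borrowed from this pool.
            throw new IllegalStateException("transport was not leased from this pool");
        }
        idle.put(transport, System.currentTimeMillis()); // timestamp feeds idle expiry
    }
}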
15,022
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/pooling/TransportPool.java
TransportPool.removeOldConnections
synchronized int removeOldConnections() { long now = System.currentTimeMillis(); long expirationTime = now - idleTransportTimeout.millis(); List<PooledTransport> removeFromIdle = new ArrayList<PooledTransport>(); for (Map.Entry<PooledTransport, Long> idleEntry : idle.entrySet()) { long lastUsed = idleEntry.getValue(); if (lastUsed < expirationTime) { PooledTransport removed = idleEntry.getKey(); if (log.isTraceEnabled()) { log.trace("Expiring idle transport for job [" + jobPoolingKey + "], transport: [" + removed.toString() + "]. Last used [" + new TimeValue(now-lastUsed) + "] ago. Expired [" + idleTransportTimeout + "] ago."); } release(removed); removeFromIdle.add(removed); } } for (PooledTransport toRemove : removeFromIdle) { idle.remove(toRemove); } return idle.size() + leased.size(); }
java
synchronized int removeOldConnections() { long now = System.currentTimeMillis(); long expirationTime = now - idleTransportTimeout.millis(); List<PooledTransport> removeFromIdle = new ArrayList<PooledTransport>(); for (Map.Entry<PooledTransport, Long> idleEntry : idle.entrySet()) { long lastUsed = idleEntry.getValue(); if (lastUsed < expirationTime) { PooledTransport removed = idleEntry.getKey(); if (log.isTraceEnabled()) { log.trace("Expiring idle transport for job [" + jobPoolingKey + "], transport: [" + removed.toString() + "]. Last used [" + new TimeValue(now-lastUsed) + "] ago. Expired [" + idleTransportTimeout + "] ago."); } release(removed); removeFromIdle.add(removed); } } for (PooledTransport toRemove : removeFromIdle) { idle.remove(toRemove); } return idle.size() + leased.size(); }
[ "synchronized", "int", "removeOldConnections", "(", ")", "{", "long", "now", "=", "System", ".", "currentTimeMillis", "(", ")", ";", "long", "expirationTime", "=", "now", "-", "idleTransportTimeout", ".", "millis", "(", ")", ";", "List", "<", "PooledTransport", ">", "removeFromIdle", "=", "new", "ArrayList", "<", "PooledTransport", ">", "(", ")", ";", "for", "(", "Map", ".", "Entry", "<", "PooledTransport", ",", "Long", ">", "idleEntry", ":", "idle", ".", "entrySet", "(", ")", ")", "{", "long", "lastUsed", "=", "idleEntry", ".", "getValue", "(", ")", ";", "if", "(", "lastUsed", "<", "expirationTime", ")", "{", "PooledTransport", "removed", "=", "idleEntry", ".", "getKey", "(", ")", ";", "if", "(", "log", ".", "isTraceEnabled", "(", ")", ")", "{", "log", ".", "trace", "(", "\"Expiring idle transport for job [\"", "+", "jobPoolingKey", "+", "\"], transport: [\"", "+", "removed", ".", "toString", "(", ")", "+", "\"]. Last used [\"", "+", "new", "TimeValue", "(", "now", "-", "lastUsed", ")", "+", "\"] ago. Expired [\"", "+", "idleTransportTimeout", "+", "\"] ago.\"", ")", ";", "}", "release", "(", "removed", ")", ";", "removeFromIdle", ".", "add", "(", "removed", ")", ";", "}", "}", "for", "(", "PooledTransport", "toRemove", ":", "removeFromIdle", ")", "{", "idle", ".", "remove", "(", "toRemove", ")", ";", "}", "return", "idle", ".", "size", "(", ")", "+", "leased", ".", "size", "(", ")", ";", "}" ]
Cleans the pool by removing any resources that have been idle for longer than the configured transport pool idle time. @return how many connections in the pool still exist (idle AND leased).
[ "Cleans", "the", "pool", "by", "removing", "any", "resources", "that", "have", "been", "idle", "for", "longer", "than", "the", "configured", "transport", "pool", "idle", "time", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/pooling/TransportPool.java#L188-L212
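The expiry rule above is lastUsed < now - idleTimeout. A stand-alone version of the sweep over plain string keys (the key type and timeout values are arbitrary):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class ExpirySketch {
    static int sweep(Map<String, Long> idle, long idleTimeoutMillis) {
        long expirationTime = System.currentTimeMillis() - idleTimeoutMillis;
        Iterator<Map.Entry<String, Long>> entries = idle.entrySet().iterator();
        while (entries.hasNext()) {
            if (entries.next().getValue() < expirationTime) {
                entries.remove(); // last use predates the cutoff: expire the connection
            }
        }
        return idle.size(); // survivors; the real method also adds the leased count
    }

    public static void main(String[] args) {
        Map<String, Long> idle = new HashMap<String, Long>();
        idle.put("old", System.currentTimeMillis() - 60_000L);
        idle.put("fresh", System.currentTimeMillis());
        System.out.println(sweep(idle, 30_000L)); // prints 1: only "fresh" survives
    }
}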
15,023
elastic/elasticsearch-hadoop
pig/src/main/java/org/elasticsearch/hadoop/pig/PigValueWriter.java
PigValueWriter.isPopulatedMixedValueMap
private boolean isPopulatedMixedValueMap(ResourceFieldSchema schema, int field, Tuple object) { if (schema.getType() != DataType.MAP) { // Can't be a mixed value map if it's not a map at all. return false; } try { Object fieldValue = object.get(field); Map<?, ?> map = (Map<?, ?>) fieldValue; return schema.getSchema() == null && !(map == null || map.isEmpty()); } catch (ExecException e) { throw new EsHadoopIllegalStateException(e); } }
java
private boolean isPopulatedMixedValueMap(ResourceFieldSchema schema, int field, Tuple object) { if (schema.getType() != DataType.MAP) { // Can't be a mixed value map if it's not a map at all. return false; } try { Object fieldValue = object.get(field); Map<?, ?> map = (Map<?, ?>) fieldValue; return schema.getSchema() == null && !(map == null || map.isEmpty()); } catch (ExecException e) { throw new EsHadoopIllegalStateException(e); } }
[ "private", "boolean", "isPopulatedMixedValueMap", "(", "ResourceFieldSchema", "schema", ",", "int", "field", ",", "Tuple", "object", ")", "{", "if", "(", "schema", ".", "getType", "(", ")", "!=", "DataType", ".", "MAP", ")", "{", "// Can't be a mixed value map if it's not a map at all.", "return", "false", ";", "}", "try", "{", "Object", "fieldValue", "=", "object", ".", "get", "(", "field", ")", ";", "Map", "<", "?", ",", "?", ">", "map", "=", "(", "Map", "<", "?", ",", "?", ">", ")", "fieldValue", ";", "return", "schema", ".", "getSchema", "(", ")", "==", "null", "&&", "!", "(", "map", "==", "null", "||", "map", ".", "isEmpty", "(", ")", ")", ";", "}", "catch", "(", "ExecException", "e", ")", "{", "throw", "new", "EsHadoopIllegalStateException", "(", "e", ")", ";", "}", "}" ]
Checks to see if the given field is a schema-less Map that has values. @return true if Map has no schema but has values (mixed schema map). false if not a Map or if Map is just empty.
[ "Checks", "to", "see", "if", "the", "given", "field", "is", "a", "schema", "-", "less", "Map", "that", "has", "values", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/pig/src/main/java/org/elasticsearch/hadoop/pig/PigValueWriter.java#L266-L279
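Stripped of the Pig types, the predicate is: declared as a map, no inner schema, and actually populated. A restatement with plain Java types, where isMapType and innerSchema stand in for DataType.MAP and ResourceFieldSchema.getSchema():

import java.util.Collections;
import java.util.Map;

public class MixedMapSketch {
    static boolean isPopulatedMixedValueMap(boolean isMapType, Object innerSchema, Map<?, ?> value) {
        if (!isMapType) {
            return false; // can't be a mixed-value map if it isn't a map at all
        }
        return innerSchema == null && value != null && !value.isEmpty();
    }

    public static void main(String[] args) {
        System.out.println(isPopulatedMixedValueMap(true, null, Collections.singletonMap("k", 1))); // true
        System.out.println(isPopulatedMixedValueMap(true, null, Collections.emptyMap()));           // false
    }
}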
15,024
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/dto/mapping/FieldParser.java
FieldParser.parseMappings
public static MappingSet parseMappings(Map<String, Object> content, boolean includeTypeName) { Iterator<Map.Entry<String, Object>> indices = content.entrySet().iterator(); List<Mapping> indexMappings = new ArrayList<Mapping>(); while(indices.hasNext()) { // These mappings are ordered by index, then optionally type. parseIndexMappings(indices.next(), indexMappings, includeTypeName); } return new MappingSet(indexMappings); }
java
public static MappingSet parseMappings(Map<String, Object> content, boolean includeTypeName) { Iterator<Map.Entry<String, Object>> indices = content.entrySet().iterator(); List<Mapping> indexMappings = new ArrayList<Mapping>(); while(indices.hasNext()) { // These mappings are ordered by index, then optionally type. parseIndexMappings(indices.next(), indexMappings, includeTypeName); } return new MappingSet(indexMappings); }
[ "public", "static", "MappingSet", "parseMappings", "(", "Map", "<", "String", ",", "Object", ">", "content", ",", "boolean", "includeTypeName", ")", "{", "Iterator", "<", "Map", ".", "Entry", "<", "String", ",", "Object", ">", ">", "indices", "=", "content", ".", "entrySet", "(", ")", ".", "iterator", "(", ")", ";", "List", "<", "Mapping", ">", "indexMappings", "=", "new", "ArrayList", "<", "Mapping", ">", "(", ")", ";", "while", "(", "indices", ".", "hasNext", "(", ")", ")", "{", "// These mappings are ordered by index, then optionally type.", "parseIndexMappings", "(", "indices", ".", "next", "(", ")", ",", "indexMappings", ",", "includeTypeName", ")", ";", "}", "return", "new", "MappingSet", "(", "indexMappings", ")", ";", "}" ]
Convert the deserialized mapping request body into an object @param content entire mapping request body for all indices and types @param includeTypeName true if the given content to be parsed includes type names within the structure, or false if it is in the typeless format @return MappingSet for that response.
[ "Convert", "the", "deserialized", "mapping", "request", "body", "into", "an", "object" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/dto/mapping/FieldParser.java#L54-L62
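The includeTypeName flag tracks the two shapes a get-mapping response can take. The sketch below shows both layouts (the index and type names are invented) together with the outer per-index iteration the parser performs:

import java.util.HashMap;
import java.util.Map;

public class MappingShapeSketch {
    public static void main(String[] args) {
        // Typed format (includeTypeName = true):
        //   { "my-index": { "mappings": { "my-type": { "properties": { ... } } } } }
        // Typeless format (includeTypeName = false):
        //   { "my-index": { "mappings": { "properties": { ... } } } }
        Map<String, Object> content = new HashMap<String, Object>();
        content.put("my-index", new HashMap<String, Object>());
        for (Map.Entry<String, Object> index : content.entrySet()) {
            // parseMappings delegates each entry to parseIndexMappings
            System.out.println("parse mappings for index: " + index.getKey());
        }
    }
}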
15,025
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/AbstractBulkFactory.java
AbstractBulkFactory.writeObjectHeader
protected void writeObjectHeader(List<Object> list) { // action list.add("{\"" + getOperation() + "\":{"); // flag indicating whether a comma needs to be added between fields boolean commaMightBeNeeded = false; commaMightBeNeeded = addExtractorOrDynamicValue(list, getMetadataExtractorOrFallback(Metadata.INDEX, indexExtractor), "", commaMightBeNeeded); commaMightBeNeeded = addExtractorOrDynamicValue(list, getMetadataExtractorOrFallback(Metadata.TYPE, typeExtractor), "\"_type\":", commaMightBeNeeded); commaMightBeNeeded = id(list, commaMightBeNeeded); commaMightBeNeeded = addExtractorOrDynamicValue(list, getMetadataExtractorOrFallback(Metadata.PARENT, parentExtractor), requestParameterNames.parent, commaMightBeNeeded); commaMightBeNeeded = addExtractorOrDynamicValueAsFieldWriter(list, getMetadataExtractorOrFallback(Metadata.ROUTING, routingExtractor), requestParameterNames.routing, commaMightBeNeeded); commaMightBeNeeded = addExtractorOrDynamicValue(list, getMetadataExtractorOrFallback(Metadata.TTL, ttlExtractor), "\"_ttl\":", commaMightBeNeeded); commaMightBeNeeded = addExtractorOrDynamicValue(list, getMetadataExtractorOrFallback(Metadata.TIMESTAMP, timestampExtractor), "\"_timestamp\":", commaMightBeNeeded); // version & version_type fields Object versionField = getMetadataExtractorOrFallback(Metadata.VERSION, versionExtractor); if (versionField != null) { if (commaMightBeNeeded) { list.add(","); commaMightBeNeeded = false; } commaMightBeNeeded = true; list.add(requestParameterNames.version); list.add(versionField); // version_type - only needed when a version is specified Object versionTypeField = getMetadataExtractorOrFallback(Metadata.VERSION_TYPE, versionTypeExtractor); if (versionTypeField != null) { if (commaMightBeNeeded) { list.add(","); commaMightBeNeeded = false; } commaMightBeNeeded = true; list.add(requestParameterNames.versionType); list.add(versionTypeField); } } // useful for update command otherHeader(list, commaMightBeNeeded); list.add("}}\n"); }
java
protected void writeObjectHeader(List<Object> list) { // action list.add("{\"" + getOperation() + "\":{"); // flag indicating whether a comma needs to be added between fields boolean commaMightBeNeeded = false; commaMightBeNeeded = addExtractorOrDynamicValue(list, getMetadataExtractorOrFallback(Metadata.INDEX, indexExtractor), "", commaMightBeNeeded); commaMightBeNeeded = addExtractorOrDynamicValue(list, getMetadataExtractorOrFallback(Metadata.TYPE, typeExtractor), "\"_type\":", commaMightBeNeeded); commaMightBeNeeded = id(list, commaMightBeNeeded); commaMightBeNeeded = addExtractorOrDynamicValue(list, getMetadataExtractorOrFallback(Metadata.PARENT, parentExtractor), requestParameterNames.parent, commaMightBeNeeded); commaMightBeNeeded = addExtractorOrDynamicValueAsFieldWriter(list, getMetadataExtractorOrFallback(Metadata.ROUTING, routingExtractor), requestParameterNames.routing, commaMightBeNeeded); commaMightBeNeeded = addExtractorOrDynamicValue(list, getMetadataExtractorOrFallback(Metadata.TTL, ttlExtractor), "\"_ttl\":", commaMightBeNeeded); commaMightBeNeeded = addExtractorOrDynamicValue(list, getMetadataExtractorOrFallback(Metadata.TIMESTAMP, timestampExtractor), "\"_timestamp\":", commaMightBeNeeded); // version & version_type fields Object versionField = getMetadataExtractorOrFallback(Metadata.VERSION, versionExtractor); if (versionField != null) { if (commaMightBeNeeded) { list.add(","); commaMightBeNeeded = false; } commaMightBeNeeded = true; list.add(requestParameterNames.version); list.add(versionField); // version_type - only needed when a version is specified Object versionTypeField = getMetadataExtractorOrFallback(Metadata.VERSION_TYPE, versionTypeExtractor); if (versionTypeField != null) { if (commaMightBeNeeded) { list.add(","); commaMightBeNeeded = false; } commaMightBeNeeded = true; list.add(requestParameterNames.versionType); list.add(versionTypeField); } } // useful for update command otherHeader(list, commaMightBeNeeded); list.add("}}\n"); }
[ "protected", "void", "writeObjectHeader", "(", "List", "<", "Object", ">", "list", ")", "{", "// action", "list", ".", "add", "(", "\"{\\\"\"", "+", "getOperation", "(", ")", "+", "\"\\\":{\"", ")", ";", "// flag indicating whether a comma needs to be added between fields", "boolean", "commaMightBeNeeded", "=", "false", ";", "commaMightBeNeeded", "=", "addExtractorOrDynamicValue", "(", "list", ",", "getMetadataExtractorOrFallback", "(", "Metadata", ".", "INDEX", ",", "indexExtractor", ")", ",", "\"\"", ",", "commaMightBeNeeded", ")", ";", "commaMightBeNeeded", "=", "addExtractorOrDynamicValue", "(", "list", ",", "getMetadataExtractorOrFallback", "(", "Metadata", ".", "TYPE", ",", "typeExtractor", ")", ",", "\"\\\"_type\\\":\"", ",", "commaMightBeNeeded", ")", ";", "commaMightBeNeeded", "=", "id", "(", "list", ",", "commaMightBeNeeded", ")", ";", "commaMightBeNeeded", "=", "addExtractorOrDynamicValue", "(", "list", ",", "getMetadataExtractorOrFallback", "(", "Metadata", ".", "PARENT", ",", "parentExtractor", ")", ",", "requestParameterNames", ".", "parent", ",", "commaMightBeNeeded", ")", ";", "commaMightBeNeeded", "=", "addExtractorOrDynamicValueAsFieldWriter", "(", "list", ",", "getMetadataExtractorOrFallback", "(", "Metadata", ".", "ROUTING", ",", "routingExtractor", ")", ",", "requestParameterNames", ".", "routing", ",", "commaMightBeNeeded", ")", ";", "commaMightBeNeeded", "=", "addExtractorOrDynamicValue", "(", "list", ",", "getMetadataExtractorOrFallback", "(", "Metadata", ".", "TTL", ",", "ttlExtractor", ")", ",", "\"\\\"_ttl\\\":\"", ",", "commaMightBeNeeded", ")", ";", "commaMightBeNeeded", "=", "addExtractorOrDynamicValue", "(", "list", ",", "getMetadataExtractorOrFallback", "(", "Metadata", ".", "TIMESTAMP", ",", "timestampExtractor", ")", ",", "\"\\\"_timestamp\\\":\"", ",", "commaMightBeNeeded", ")", ";", "// version & version_type fields", "Object", "versionField", "=", "getMetadataExtractorOrFallback", "(", "Metadata", ".", "VERSION", ",", "versionExtractor", ")", ";", "if", "(", "versionField", "!=", "null", ")", "{", "if", "(", "commaMightBeNeeded", ")", "{", "list", ".", "add", "(", "\",\"", ")", ";", "commaMightBeNeeded", "=", "false", ";", "}", "commaMightBeNeeded", "=", "true", ";", "list", ".", "add", "(", "requestParameterNames", ".", "version", ")", ";", "list", ".", "add", "(", "versionField", ")", ";", "// version_type - only needed when a version is specified", "Object", "versionTypeField", "=", "getMetadataExtractorOrFallback", "(", "Metadata", ".", "VERSION_TYPE", ",", "versionTypeExtractor", ")", ";", "if", "(", "versionTypeField", "!=", "null", ")", "{", "if", "(", "commaMightBeNeeded", ")", "{", "list", ".", "add", "(", "\",\"", ")", ";", "commaMightBeNeeded", "=", "false", ";", "}", "commaMightBeNeeded", "=", "true", ";", "list", ".", "add", "(", "requestParameterNames", ".", "versionType", ")", ";", "list", ".", "add", "(", "versionTypeField", ")", ";", "}", "}", "// useful for update command", "otherHeader", "(", "list", ",", "commaMightBeNeeded", ")", ";", "list", ".", "add", "(", "\"}}\\n\"", ")", ";", "}" ]
write action & metadata header
[ "write", "action", "&", "metadata", "header" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/AbstractBulkFactory.java#L397-L439
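For orientation, the template assembled here renders to the action-and-metadata line of a bulk entry. The exact keys depend on which extractors are configured; the line below is invented but representative:

public class BulkHeaderSketch {
    public static void main(String[] args) {
        // writeObjectHeader produces a single line such as (values invented):
        String header = "{\"index\":{\"_index\":\"my-index\",\"_type\":\"my-type\",\"_id\":\"42\"}}\n";
        System.out.print(header);
        // The document source follows on the next line of the bulk body.
    }
}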
15,026
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/AbstractBulkFactory.java
AbstractBulkFactory.addExtractorOrDynamicValue
private boolean addExtractorOrDynamicValue(List<Object> list, Object extractor, String header, boolean commaMightBeNeeded) { if (extractor != null) { if (commaMightBeNeeded) { list.add(","); } list.add(header); list.add(extractor); return true; } return commaMightBeNeeded; }
java
private boolean addExtractorOrDynamicValue(List<Object> list, Object extractor, String header, boolean commaMightBeNeeded) { if (extractor != null) { if (commaMightBeNeeded) { list.add(","); } list.add(header); list.add(extractor); return true; } return commaMightBeNeeded; }
[ "private", "boolean", "addExtractorOrDynamicValue", "(", "List", "<", "Object", ">", "list", ",", "Object", "extractor", ",", "String", "header", ",", "boolean", "commaMightBeNeeded", ")", "{", "if", "(", "extractor", "!=", "null", ")", "{", "if", "(", "commaMightBeNeeded", ")", "{", "list", ".", "add", "(", "\",\"", ")", ";", "}", "list", ".", "add", "(", "header", ")", ";", "list", ".", "add", "(", "extractor", ")", ";", "return", "true", ";", "}", "return", "commaMightBeNeeded", ";", "}" ]
If extractor is present, this will add the header to the template, followed by the extractor. If a comma is needed, the comma will be inserted before the header. @return true if a comma may be needed on the next call.
[ "If", "extractor", "is", "present", "this", "will", "add", "the", "header", "to", "the", "template", "followed", "by", "the", "extractor", ".", "If", "a", "comma", "is", "needed", "the", "comma", "will", "be", "inserted", "before", "the", "header", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/AbstractBulkFactory.java#L451-L461
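The threaded boolean is a tiny state machine: each helper reports whether the next field must be preceded by a comma. A self-contained restatement with string values in place of extractors:

import java.util.ArrayList;
import java.util.List;

public class CommaSketch {
    static boolean addField(List<Object> out, Object value, String header, boolean commaMightBeNeeded) {
        if (value == null) {
            return commaMightBeNeeded; // nothing written, state unchanged
        }
        if (commaMightBeNeeded) {
            out.add(",");
        }
        out.add(header);
        out.add(value);
        return true; // something was written, so the next field needs a comma
    }

    public static void main(String[] args) {
        List<Object> out = new ArrayList<Object>();
        boolean comma = false;
        comma = addField(out, "\"a\"", "\"_id\":", comma);
        comma = addField(out, null, "\"_routing\":", comma); // skipped: no stray comma
        comma = addField(out, "2", "\"_version\":", comma);
        StringBuilder line = new StringBuilder();
        for (Object piece : out) {
            line.append(piece);
        }
        System.out.println(line); // "_id":"a","_version":2
    }
}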
15,027
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/AbstractBulkFactory.java
AbstractBulkFactory.addExtractorOrDynamicValueAsFieldWriter
private boolean addExtractorOrDynamicValueAsFieldWriter(List<Object> list, FieldExtractor extractor, String header, boolean commaMightBeNeeded) { if (extractor != null) { String head = header; if (commaMightBeNeeded) { head = "," + head; } list.add(new FieldWriter(head, extractor)); return true; } return commaMightBeNeeded; }
java
private boolean addExtractorOrDynamicValueAsFieldWriter(List<Object> list, FieldExtractor extractor, String header, boolean commaMightBeNeeded) { if (extractor != null) { String head = header; if (commaMightBeNeeded) { head = "," + head; } list.add(new FieldWriter(head, extractor)); return true; } return commaMightBeNeeded; }
[ "private", "boolean", "addExtractorOrDynamicValueAsFieldWriter", "(", "List", "<", "Object", ">", "list", ",", "FieldExtractor", "extractor", ",", "String", "header", ",", "boolean", "commaMightBeNeeded", ")", "{", "if", "(", "extractor", "!=", "null", ")", "{", "String", "head", "=", "header", ";", "if", "(", "commaMightBeNeeded", ")", "{", "head", "=", "\",\"", "+", "head", ";", "}", "list", ".", "add", "(", "new", "FieldWriter", "(", "head", ",", "extractor", ")", ")", ";", "return", "true", ";", "}", "return", "commaMightBeNeeded", ";", "}" ]
If extractor is present, this will combine the header and extractor into a FieldWriter, allowing the FieldWriter to determine when and if to write the header value based on the given document's data. If a comma is needed, it is appended to the header string before being passed to the FieldWriter. @return true if a comma may be needed on the next call
[ "If", "extractor", "is", "present", "this", "will", "combine", "the", "header", "and", "extractor", "into", "a", "FieldWriter", "allowing", "the", "FieldWriter", "to", "determine", "when", "and", "if", "to", "write", "the", "header", "value", "based", "on", "the", "given", "document", "s", "data", ".", "If", "a", "comma", "is", "needed", "it", "is", "appended", "to", "the", "header", "string", "before", "being", "passed", "to", "the", "FieldWriter", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/AbstractBulkFactory.java#L471-L481
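Folding the comma into the header and deferring both to a FieldWriter matters for metadata such as routing, which may be present in some documents and absent in others; only at serialization time is it known whether anything should be written at all. A minimal stand-in (the Extractor interface here is hypothetical):

public class LazyHeaderSketch {
    interface Extractor {
        Object field(Object document); // value for this document, or null if absent
    }

    static String render(String headerWithComma, Extractor extractor, Object document) {
        Object value = extractor.field(document);
        // Header (comma included) is emitted only when the document has the value.
        return value == null ? "" : headerWithComma + value;
    }
}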
15,028
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/AbstractBulkFactory.java
AbstractBulkFactory.getMetadataExtractorOrFallback
protected FieldExtractor getMetadataExtractorOrFallback(Metadata meta, FieldExtractor fallbackExtractor) { if (metaExtractor != null) { FieldExtractor metaFE = metaExtractor.get(meta); if (metaFE != null) { return metaFE; } } return fallbackExtractor; }
java
protected FieldExtractor getMetadataExtractorOrFallback(Metadata meta, FieldExtractor fallbackExtractor) { if (metaExtractor != null) { FieldExtractor metaFE = metaExtractor.get(meta); if (metaFE != null) { return metaFE; } } return fallbackExtractor; }
[ "protected", "FieldExtractor", "getMetadataExtractorOrFallback", "(", "Metadata", "meta", ",", "FieldExtractor", "fallbackExtractor", ")", "{", "if", "(", "metaExtractor", "!=", "null", ")", "{", "FieldExtractor", "metaFE", "=", "metaExtractor", ".", "get", "(", "meta", ")", ";", "if", "(", "metaFE", "!=", "null", ")", "{", "return", "metaFE", ";", "}", "}", "return", "fallbackExtractor", ";", "}" ]
Get the extractor for a given field, trying first one from a MetadataExtractor, and failing that, falling back to the provided 'static' one
[ "Get", "the", "extractor", "for", "a", "given", "field", "trying", "first", "one", "from", "a", "MetadataExtractor", "and", "failing", "that", "falling", "back", "to", "the", "provided", "static", "one" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/bulk/AbstractBulkFactory.java#L491-L499
15,029
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/dto/mapping/Mapping.java
Mapping.filter
public Mapping filter(Collection<String> includes, Collection<String> excludes) { if (includes.isEmpty() && excludes.isEmpty()) { return this; } List<Field> filtered = new ArrayList<Field>(); List<FieldFilter.NumberedInclude> convertedIncludes = FieldFilter.toNumberedFilter(includes); boolean intact = true; for (Field fl : this.getFields()) { intact &= filterField(fl, null, filtered, convertedIncludes, excludes); } return (intact ? this : new Mapping(this.getIndex(), this.getType(), filtered)); }
java
public Mapping filter(Collection<String> includes, Collection<String> excludes) { if (includes.isEmpty() && excludes.isEmpty()) { return this; } List<Field> filtered = new ArrayList<Field>(); List<FieldFilter.NumberedInclude> convertedIncludes = FieldFilter.toNumberedFilter(includes); boolean intact = true; for (Field fl : this.getFields()) { intact &= filterField(fl, null, filtered, convertedIncludes, excludes); } return (intact ? this : new Mapping(this.getIndex(), this.getType(), filtered)); }
[ "public", "Mapping", "filter", "(", "Collection", "<", "String", ">", "includes", ",", "Collection", "<", "String", ">", "excludes", ")", "{", "if", "(", "includes", ".", "isEmpty", "(", ")", "&&", "excludes", ".", "isEmpty", "(", ")", ")", "{", "return", "this", ";", "}", "List", "<", "Field", ">", "filtered", "=", "new", "ArrayList", "<", "Field", ">", "(", ")", ";", "List", "<", "FieldFilter", ".", "NumberedInclude", ">", "convertedIncludes", "=", "FieldFilter", ".", "toNumberedFilter", "(", "includes", ")", ";", "boolean", "intact", "=", "true", ";", "for", "(", "Field", "fl", ":", "this", ".", "getFields", "(", ")", ")", "{", "intact", "&=", "filterField", "(", "fl", ",", "null", ",", "filtered", ",", "convertedIncludes", ",", "excludes", ")", ";", "}", "return", "(", "intact", "?", "this", ":", "new", "Mapping", "(", "this", ".", "getIndex", "(", ")", ",", "this", ".", "getType", "(", ")", ",", "filtered", ")", ")", ";", "}" ]
Filters out fields based on the provided include and exclude information and returns a Mapping object @param includes Field names to explicitly include in the mapping @param excludes Field names to explicitly exclude in the mapping @return this if no fields were filtered, or a new Mapping with the modified field lists.
[ "Filters", "out", "fields", "based", "on", "the", "provided", "include", "and", "exclude", "information", "and", "returns", "a", "Mapping", "object" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/dto/mapping/Mapping.java#L80-L94
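A worked miniature of the include/exclude semantics over dotted field names. Plain exact matching stands in for the real FieldFilter (which this record does not show and which also supports patterns), and the hand-back-the-original-when-intact behavior mirrors the method above:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;

public class FilterSketch {
    static List<String> filter(List<String> fields, Set<String> includes, Set<String> excludes) {
        if (includes.isEmpty() && excludes.isEmpty()) {
            return fields; // nothing to filter: hand back the original, like returning 'this'
        }
        List<String> kept = new ArrayList<String>();
        for (String field : fields) {
            boolean included = includes.isEmpty() || includes.contains(field);
            if (included && !excludes.contains(field)) {
                kept.add(field);
            }
        }
        return kept.size() == fields.size() ? fields : kept;
    }

    public static void main(String[] args) {
        List<String> fields = Arrays.asList("name", "address.city", "address.zip");
        Set<String> excludes = Collections.singleton("address.zip");
        System.out.println(filter(fields, Collections.<String>emptySet(), excludes));
        // [name, address.city]
    }
}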
15,030
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/serialization/dto/mapping/Mapping.java
Mapping.flatten
public Map<String, FieldType> flatten() { if (fields == null || fields.length == 0) { return Collections.<String, FieldType> emptyMap(); } Map<String, FieldType> map = new LinkedHashMap<String, FieldType>(); for (Field nestedField : fields) { addSubFieldToMap(map, nestedField, null); } return map; }
java
public Map<String, FieldType> flatten() { if (fields == null || fields.length == 0) { return Collections.<String, FieldType> emptyMap(); } Map<String, FieldType> map = new LinkedHashMap<String, FieldType>(); for (Field nestedField : fields) { addSubFieldToMap(map, nestedField, null); } return map; }
[ "public", "Map", "<", "String", ",", "FieldType", ">", "flatten", "(", ")", "{", "if", "(", "fields", "==", "null", "||", "fields", ".", "length", "==", "0", ")", "{", "return", "Collections", ".", "<", "String", ",", "FieldType", ">", "emptyMap", "(", ")", ";", "}", "Map", "<", "String", ",", "FieldType", ">", "map", "=", "new", "LinkedHashMap", "<", "String", ",", "FieldType", ">", "(", ")", ";", "for", "(", "Field", "nestedField", ":", "fields", ")", "{", "addSubFieldToMap", "(", "map", ",", "nestedField", ",", "null", ")", ";", "}", "return", "map", ";", "}" ]
Takes a mapping tree and returns a map of all of its fields flattened, and paired with their field types.
[ "Takes", "a", "mapping", "tree", "and", "returns", "a", "map", "of", "all", "of", "its", "fields", "flattened", "and", "paired", "with", "their", "field", "types", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/serialization/dto/mapping/Mapping.java#L123-L135
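The recursion lives in addSubFieldToMap, which this record omits; the idea is to join parent and child names with dots while recording each field's type. A self-contained sketch (exactly how object-typed parents are recorded is an assumption):

import java.util.LinkedHashMap;
import java.util.Map;

public class FlattenSketch {
    static class Field {
        final String name;
        final String type;
        final Field[] children;
        Field(String name, String type, Field... children) {
            this.name = name;
            this.type = type;
            this.children = children;
        }
    }

    static void addSubField(Map<String, String> out, Field field, String parent) {
        String path = (parent == null) ? field.name : parent + "." + field.name;
        out.put(path, field.type);
        for (Field child : field.children) {
            addSubField(out, child, path); // recurse with the dotted prefix
        }
    }

    public static void main(String[] args) {
        Map<String, String> flat = new LinkedHashMap<String, String>();
        addSubField(flat, new Field("address", "object", new Field("city", "keyword")), null);
        System.out.println(flat); // {address=object, address.city=keyword}
    }
}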
15,031
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/RestService.java
RestService.assignPartitions
public static List<PartitionDefinition> assignPartitions(List<PartitionDefinition> partitions, int currentTask, int totalTasks) { int esPartitions = partitions.size(); if (totalTasks >= esPartitions) { return (currentTask >= esPartitions ? Collections.<PartitionDefinition>emptyList() : Collections.singletonList(partitions.get(currentTask))); } else { int partitionsPerTask = esPartitions / totalTasks; int remainder = esPartitions % totalTasks; int partitionsPerCurrentTask = partitionsPerTask; // spread the remainder against the tasks if (currentTask < remainder) { partitionsPerCurrentTask++; } // find the offset inside the collection int offset = partitionsPerTask * currentTask; if (currentTask != 0) { offset += (remainder > currentTask ? 1 : remainder); } // common case if (partitionsPerCurrentTask == 1) { return Collections.singletonList(partitions.get(offset)); } List<PartitionDefinition> pa = new ArrayList<PartitionDefinition>(partitionsPerCurrentTask); for (int index = offset; index < offset + partitionsPerCurrentTask; index++) { pa.add(partitions.get(index)); } return pa; } }
java
public static List<PartitionDefinition> assignPartitions(List<PartitionDefinition> partitions, int currentTask, int totalTasks) { int esPartitions = partitions.size(); if (totalTasks >= esPartitions) { return (currentTask >= esPartitions ? Collections.<PartitionDefinition>emptyList() : Collections.singletonList(partitions.get(currentTask))); } else { int partitionsPerTask = esPartitions / totalTasks; int remainder = esPartitions % totalTasks; int partitionsPerCurrentTask = partitionsPerTask; // spread the remainder against the tasks if (currentTask < remainder) { partitionsPerCurrentTask++; } // find the offset inside the collection int offset = partitionsPerTask * currentTask; if (currentTask != 0) { offset += (remainder > currentTask ? 1 : remainder); } // common case if (partitionsPerCurrentTask == 1) { return Collections.singletonList(partitions.get(offset)); } List<PartitionDefinition> pa = new ArrayList<PartitionDefinition>(partitionsPerCurrentTask); for (int index = offset; index < offset + partitionsPerCurrentTask; index++) { pa.add(partitions.get(index)); } return pa; } }
[ "public", "static", "List", "<", "PartitionDefinition", ">", "assignPartitions", "(", "List", "<", "PartitionDefinition", ">", "partitions", ",", "int", "currentTask", ",", "int", "totalTasks", ")", "{", "int", "esPartitions", "=", "partitions", ".", "size", "(", ")", ";", "if", "(", "totalTasks", ">=", "esPartitions", ")", "{", "return", "(", "currentTask", ">=", "esPartitions", "?", "Collections", ".", "<", "PartitionDefinition", ">", "emptyList", "(", ")", ":", "Collections", ".", "singletonList", "(", "partitions", ".", "get", "(", "currentTask", ")", ")", ")", ";", "}", "else", "{", "int", "partitionsPerTask", "=", "esPartitions", "/", "totalTasks", ";", "int", "remainder", "=", "esPartitions", "%", "totalTasks", ";", "int", "partitionsPerCurrentTask", "=", "partitionsPerTask", ";", "// spread the reminder against the tasks", "if", "(", "currentTask", "<", "remainder", ")", "{", "partitionsPerCurrentTask", "++", ";", "}", "// find the offset inside the collection", "int", "offset", "=", "partitionsPerTask", "*", "currentTask", ";", "if", "(", "currentTask", "!=", "0", ")", "{", "offset", "+=", "(", "remainder", ">", "currentTask", "?", "1", ":", "remainder", ")", ";", "}", "// common case", "if", "(", "partitionsPerCurrentTask", "==", "1", ")", "{", "return", "Collections", ".", "singletonList", "(", "partitions", ".", "get", "(", "offset", ")", ")", ";", "}", "List", "<", "PartitionDefinition", ">", "pa", "=", "new", "ArrayList", "<", "PartitionDefinition", ">", "(", "partitionsPerCurrentTask", ")", ";", "for", "(", "int", "index", "=", "offset", ";", "index", "<", "offset", "+", "partitionsPerCurrentTask", ";", "index", "++", ")", "{", "pa", ".", "add", "(", "partitions", ".", "get", "(", "index", ")", ")", ";", "}", "return", "pa", ";", "}", "}" ]
expects currentTask to start from 0
[ "expects", "currentTask", "to", "start", "from", "0" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/RestService.java#L538-L570
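A worked example of the split: 10 partitions across 4 zero-based tasks gives sizes 3, 3, 2, 2 at offsets 0, 3, 6, 8. The sketch computes each offset as partitionsPerTask * task plus the number of earlier tasks that absorbed a remainder partition, i.e. min(remainder, task); for remainder <= 2, as here, that agrees with the expression in the method above (for remainder >= 3 the original '? 1 : remainder' branch appears to understate the offset of the middle tasks):

public class AssignSketch {
    public static void main(String[] args) {
        int esPartitions = 10, totalTasks = 4;
        int perTask = esPartitions / totalTasks;   // 2
        int remainder = esPartitions % totalTasks; // 2
        for (int task = 0; task < totalTasks; task++) {
            int size = perTask + (task < remainder ? 1 : 0);
            int offset = perTask * task + Math.min(remainder, task);
            System.out.println("task " + task + " -> partitions [" + offset + ", " + (offset + size) + ")");
        }
        // task 0 -> [0, 3)   task 1 -> [3, 6)   task 2 -> [6, 8)   task 3 -> [8, 10)
    }
}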
15,032
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/RestService.java
RestService.initSingleIndex
private static RestRepository initSingleIndex(Settings settings, long currentInstance, Resource resource, Log log) { if (log.isDebugEnabled()) { log.debug(String.format("Resource [%s] resolves as a single index", resource)); } RestRepository repository = new RestRepository(settings); // create the index if needed if (repository.touch()) { if (repository.waitForYellow()) { log.warn(String.format("Timed out waiting for index [%s] to reach yellow health", resource)); } } // if WAN mode is used, use an already selected node if (settings.getNodesWANOnly()) { String node = SettingsUtils.getPinnedNode(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return repository; } // if client-nodes are used, simply use the underlying nodes if (settings.getNodesClientOnly()) { String clientNode = repository.getRestClient().getCurrentNode(); if (log.isDebugEnabled()) { log.debug(String.format("Client-node routing detected; partition writer instance [%s] assigned to [%s]", currentInstance, clientNode)); } return repository; } // no routing necessary; select the relevant target shard/node Map<ShardInfo, NodeInfo> targetShards = repository.getWriteTargetPrimaryShards(settings.getNodesClientOnly()); repository.close(); Assert.isTrue(!targetShards.isEmpty(), String.format("Cannot determine write shards for [%s]; likely its format is incorrect (maybe it contains illegal characters? or all shards failed?)", resource)); List<ShardInfo> orderedShards = new ArrayList<ShardInfo>(targetShards.keySet()); // make sure the order is strict Collections.sort(orderedShards); if (log.isTraceEnabled()) { log.trace(String.format("Partition writer instance [%s] discovered [%s] primary shards %s", currentInstance, orderedShards.size(), orderedShards)); } // if there's no task info, just pick a random bucket if (currentInstance <= 0) { currentInstance = new Random().nextInt(targetShards.size()) + 1; } int bucket = (int)(currentInstance % targetShards.size()); ShardInfo chosenShard = orderedShards.get(bucket); NodeInfo targetNode = targetShards.get(chosenShard); // pin settings SettingsUtils.pinNode(settings, targetNode.getPublishAddress()); String node = SettingsUtils.getPinnedNode(settings); repository = new RestRepository(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to primary shard [%s] at address [%s]", currentInstance, chosenShard.getName(), node)); } return repository; }
java
private static RestRepository initSingleIndex(Settings settings, long currentInstance, Resource resource, Log log) { if (log.isDebugEnabled()) { log.debug(String.format("Resource [%s] resolves as a single index", resource)); } RestRepository repository = new RestRepository(settings); // create the index if needed if (repository.touch()) { if (repository.waitForYellow()) { log.warn(String.format("Timed out waiting for index [%s] to reach yellow health", resource)); } } // if WAN mode is used, use an already selected node if (settings.getNodesWANOnly()) { String node = SettingsUtils.getPinnedNode(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return repository; } // if client-nodes are used, simply use the underlying nodes if (settings.getNodesClientOnly()) { String clientNode = repository.getRestClient().getCurrentNode(); if (log.isDebugEnabled()) { log.debug(String.format("Client-node routing detected; partition writer instance [%s] assigned to [%s]", currentInstance, clientNode)); } return repository; } // no routing necessary; select the relevant target shard/node Map<ShardInfo, NodeInfo> targetShards = repository.getWriteTargetPrimaryShards(settings.getNodesClientOnly()); repository.close(); Assert.isTrue(!targetShards.isEmpty(), String.format("Cannot determine write shards for [%s]; likely its format is incorrect (maybe it contains illegal characters? or all shards failed?)", resource)); List<ShardInfo> orderedShards = new ArrayList<ShardInfo>(targetShards.keySet()); // make sure the order is strict Collections.sort(orderedShards); if (log.isTraceEnabled()) { log.trace(String.format("Partition writer instance [%s] discovered [%s] primary shards %s", currentInstance, orderedShards.size(), orderedShards)); } // if there's no task info, just pick a random bucket if (currentInstance <= 0) { currentInstance = new Random().nextInt(targetShards.size()) + 1; } int bucket = (int)(currentInstance % targetShards.size()); ShardInfo chosenShard = orderedShards.get(bucket); NodeInfo targetNode = targetShards.get(chosenShard); // pin settings SettingsUtils.pinNode(settings, targetNode.getPublishAddress()); String node = SettingsUtils.getPinnedNode(settings); repository = new RestRepository(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to primary shard [%s] at address [%s]", currentInstance, chosenShard.getName(), node)); } return repository; }
[ "private", "static", "RestRepository", "initSingleIndex", "(", "Settings", "settings", ",", "long", "currentInstance", ",", "Resource", "resource", ",", "Log", "log", ")", "{", "if", "(", "log", ".", "isDebugEnabled", "(", ")", ")", "{", "log", ".", "debug", "(", "String", ".", "format", "(", "\"Resource [%s] resolves as a single index\"", ",", "resource", ")", ")", ";", "}", "RestRepository", "repository", "=", "new", "RestRepository", "(", "settings", ")", ";", "// create the index if needed", "if", "(", "repository", ".", "touch", "(", ")", ")", "{", "if", "(", "repository", ".", "waitForYellow", "(", ")", ")", "{", "log", ".", "warn", "(", "String", ".", "format", "(", "\"Timed out waiting for index [%s] to reach yellow health\"", ",", "resource", ")", ")", ";", "}", "}", "// if WAN mode is used, use an already selected node", "if", "(", "settings", ".", "getNodesWANOnly", "(", ")", ")", "{", "String", "node", "=", "SettingsUtils", ".", "getPinnedNode", "(", "settings", ")", ";", "if", "(", "log", ".", "isDebugEnabled", "(", ")", ")", "{", "log", ".", "debug", "(", "String", ".", "format", "(", "\"Partition writer instance [%s] assigned to [%s]\"", ",", "currentInstance", ",", "node", ")", ")", ";", "}", "return", "repository", ";", "}", "// if client-nodes are used, simply use the underlying nodes", "if", "(", "settings", ".", "getNodesClientOnly", "(", ")", ")", "{", "String", "clientNode", "=", "repository", ".", "getRestClient", "(", ")", ".", "getCurrentNode", "(", ")", ";", "if", "(", "log", ".", "isDebugEnabled", "(", ")", ")", "{", "log", ".", "debug", "(", "String", ".", "format", "(", "\"Client-node routing detected; partition writer instance [%s] assigned to [%s]\"", ",", "currentInstance", ",", "clientNode", ")", ")", ";", "}", "return", "repository", ";", "}", "// no routing necessary; select the relevant target shard/node", "Map", "<", "ShardInfo", ",", "NodeInfo", ">", "targetShards", "=", "repository", ".", "getWriteTargetPrimaryShards", "(", "settings", ".", "getNodesClientOnly", "(", ")", ")", ";", "repository", ".", "close", "(", ")", ";", "Assert", ".", "isTrue", "(", "!", "targetShards", ".", "isEmpty", "(", ")", ",", "String", ".", "format", "(", "\"Cannot determine write shards for [%s]; likely its format is incorrect (maybe it contains illegal characters? 
or all shards failed?)\"", ",", "resource", ")", ")", ";", "List", "<", "ShardInfo", ">", "orderedShards", "=", "new", "ArrayList", "<", "ShardInfo", ">", "(", "targetShards", ".", "keySet", "(", ")", ")", ";", "// make sure the order is strict", "Collections", ".", "sort", "(", "orderedShards", ")", ";", "if", "(", "log", ".", "isTraceEnabled", "(", ")", ")", "{", "log", ".", "trace", "(", "String", ".", "format", "(", "\"Partition writer instance [%s] discovered [%s] primary shards %s\"", ",", "currentInstance", ",", "orderedShards", ".", "size", "(", ")", ",", "orderedShards", ")", ")", ";", "}", "// if there's no task info, just pick a random bucket", "if", "(", "currentInstance", "<=", "0", ")", "{", "currentInstance", "=", "new", "Random", "(", ")", ".", "nextInt", "(", "targetShards", ".", "size", "(", ")", ")", "+", "1", ";", "}", "int", "bucket", "=", "(", "int", ")", "(", "currentInstance", "%", "targetShards", ".", "size", "(", ")", ")", ";", "ShardInfo", "chosenShard", "=", "orderedShards", ".", "get", "(", "bucket", ")", ";", "NodeInfo", "targetNode", "=", "targetShards", ".", "get", "(", "chosenShard", ")", ";", "// pin settings", "SettingsUtils", ".", "pinNode", "(", "settings", ",", "targetNode", ".", "getPublishAddress", "(", ")", ")", ";", "String", "node", "=", "SettingsUtils", ".", "getPinnedNode", "(", "settings", ")", ";", "repository", "=", "new", "RestRepository", "(", "settings", ")", ";", "if", "(", "log", ".", "isDebugEnabled", "(", ")", ")", "{", "log", ".", "debug", "(", "String", ".", "format", "(", "\"Partition writer instance [%s] assigned to primary shard [%s] at address [%s]\"", ",", "currentInstance", ",", "chosenShard", ".", "getName", "(", ")", ",", "node", ")", ")", ";", "}", "return", "repository", ";", "}" ]
Validate and configure a rest repository for writing to an index. The index is potentially created if it does not exist, and the client is pinned to a node that hosts one of the index's primary shards based on its currentInstance number. @param settings Job settings @param currentInstance Partition number @param resource Configured write resource @param log Logger to use @return The RestRepository to be used by the partition writer
[ "Validate", "and", "configure", "a", "rest", "repository", "for", "writing", "to", "an", "index", ".", "The", "index", "is", "potentially", "created", "if", "it", "does", "not", "exist", "and", "the", "client", "is", "pinned", "to", "a", "node", "that", "hosts", "one", "of", "the", "index", "s", "primary", "shards", "based", "on", "its", "currentInstance", "number", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/RestService.java#L653-L721
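The pinning step reduces to a strict sort plus a modulo bucket, so every writer instance deterministically lands on one primary shard. A sketch with strings standing in for ShardInfo:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class PinSketch {
    static String pickShard(List<String> shards, long currentInstance) {
        List<String> ordered = new ArrayList<String>(shards);
        Collections.sort(ordered); // strict order so every writer numbers buckets identically
        int bucket = (int) (currentInstance % ordered.size());
        return ordered.get(bucket);
    }

    public static void main(String[] args) {
        List<String> shards = Arrays.asList("shard-2", "shard-0", "shard-1");
        System.out.println(pickShard(shards, 4L)); // 4 % 3 = 1 -> shard-1
    }
}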
15,033
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/RestService.java
RestService.initMultiIndices
private static RestRepository initMultiIndices(Settings settings, long currentInstance, Resource resource, Log log) { if (log.isDebugEnabled()) { log.debug(String.format("Resource [%s] resolves as an index pattern", resource)); } // multi-index write - since we don't know beforehand what index will be used, use an already selected node String node = SettingsUtils.getPinnedNode(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return new RestRepository(settings); }
java
private static RestRepository initMultiIndices(Settings settings, long currentInstance, Resource resource, Log log) { if (log.isDebugEnabled()) { log.debug(String.format("Resource [%s] resolves as an index pattern", resource)); } // multi-index write - since we don't know beforehand what index will be used, use an already selected node String node = SettingsUtils.getPinnedNode(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return new RestRepository(settings); }
[ "private", "static", "RestRepository", "initMultiIndices", "(", "Settings", "settings", ",", "long", "currentInstance", ",", "Resource", "resource", ",", "Log", "log", ")", "{", "if", "(", "log", ".", "isDebugEnabled", "(", ")", ")", "{", "log", ".", "debug", "(", "String", ".", "format", "(", "\"Resource [%s] resolves as an index pattern\"", ",", "resource", ")", ")", ";", "}", "// multi-index write - since we don't know before hand what index will be used, use an already selected node", "String", "node", "=", "SettingsUtils", ".", "getPinnedNode", "(", "settings", ")", ";", "if", "(", "log", ".", "isDebugEnabled", "(", ")", ")", "{", "log", ".", "debug", "(", "String", ".", "format", "(", "\"Partition writer instance [%s] assigned to [%s]\"", ",", "currentInstance", ",", "node", ")", ")", ";", "}", "return", "new", "RestRepository", "(", "settings", ")", ";", "}" ]
Creates a RestRepository for use with a multi-index resource pattern. The client is left pinned to the original node that it was pinned to since the shard locations cannot be determined at all. @param settings Job settings @param currentInstance Partition number @param resource Configured write resource @param log Logger to use @return The RestRepository to be used by the partition writer
[ "Creates", "a", "RestRepository", "for", "use", "with", "a", "multi", "-", "index", "resource", "pattern", ".", "The", "client", "is", "left", "pinned", "to", "the", "original", "node", "that", "it", "was", "pinned", "to", "since", "the", "shard", "locations", "cannot", "be", "determined", "at", "all", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/RestService.java#L732-L744
15,034
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/RestService.java
RestService.initAliasWrite
private static RestRepository initAliasWrite(GetAliasesRequestBuilder.Response response, Settings settings, long currentInstance, Resource resource, Log log) { if (log.isDebugEnabled()) { log.debug(String.format("Resource [%s] resolves as an index alias", resource)); } // indexName -> aliasName -> alias definition Map<String, Map<String, IndicesAliases.Alias>> indexAliasTable = response.getIndices().getAll(); if (indexAliasTable.size() < 1) { // Sanity check throw new EsHadoopIllegalArgumentException("Cannot initialize alias write resource [" + resource.index() + "] if it does not have any alias entries."); } else if (indexAliasTable.size() > 1) { // Multiple indices, validate that one index-alias relation has its write index flag set String currentWriteIndex = null; for (Map.Entry<String, Map<String, IndicesAliases.Alias>> indexRow : indexAliasTable.entrySet()) { String indexName = indexRow.getKey(); Map<String, IndicesAliases.Alias> aliases = indexRow.getValue(); IndicesAliases.Alias aliasInfo = aliases.get(resource.index()); if (aliasInfo.isWriteIndex()) { currentWriteIndex = indexName; break; } } if (currentWriteIndex == null) { throw new EsHadoopIllegalArgumentException("Attempting to write to alias [" + resource.index() + "], " + "but detected multiple indices [" + indexAliasTable.size() + "] with no write index selected. " + "Bailing out..."); } else { if (log.isDebugEnabled()) { log.debug(String.format("Writing to currently configured write-index [%s]", currentWriteIndex)); } } } else { // Single index in the alias, but we should still not pin the nodes if (log.isDebugEnabled()) { log.debug(String.format("Writing to the alias's single configured index [%s]", indexAliasTable.keySet().iterator().next())); } } // alias-index write - since we don't know beforehand what concrete index will be used at any // given time during the job, use an already selected node String node = SettingsUtils.getPinnedNode(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return new RestRepository(settings); }
java
private static RestRepository initAliasWrite(GetAliasesRequestBuilder.Response response, Settings settings, long currentInstance, Resource resource, Log log) { if (log.isDebugEnabled()) { log.debug(String.format("Resource [%s] resolves as an index alias", resource)); } // indexName -> aliasName -> alias definition Map<String, Map<String, IndicesAliases.Alias>> indexAliasTable = response.getIndices().getAll(); if (indexAliasTable.size() < 1) { // Sanity check throw new EsHadoopIllegalArgumentException("Cannot initialize alias write resource [" + resource.index() + "] if it does not have any alias entries."); } else if (indexAliasTable.size() > 1) { // Multiple indices, validate that one index-alias relation has its write index flag set String currentWriteIndex = null; for (Map.Entry<String, Map<String, IndicesAliases.Alias>> indexRow : indexAliasTable.entrySet()) { String indexName = indexRow.getKey(); Map<String, IndicesAliases.Alias> aliases = indexRow.getValue(); IndicesAliases.Alias aliasInfo = aliases.get(resource.index()); if (aliasInfo.isWriteIndex()) { currentWriteIndex = indexName; break; } } if (currentWriteIndex == null) { throw new EsHadoopIllegalArgumentException("Attempting to write to alias [" + resource.index() + "], " + "but detected multiple indices [" + indexAliasTable.size() + "] with no write index selected. " + "Bailing out..."); } else { if (log.isDebugEnabled()) { log.debug(String.format("Writing to currently configured write-index [%s]", currentWriteIndex)); } } } else { // Single index in the alias, but we should still not pin the nodes if (log.isDebugEnabled()) { log.debug(String.format("Writing to the alias's single configured index [%s]", indexAliasTable.keySet().iterator().next())); } } // alias-index write - since we don't know beforehand what concrete index will be used at any // given time during the job, use an already selected node String node = SettingsUtils.getPinnedNode(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return new RestRepository(settings); }
[ "private", "static", "RestRepository", "initAliasWrite", "(", "GetAliasesRequestBuilder", ".", "Response", "response", ",", "Settings", "settings", ",", "long", "currentInstance", ",", "Resource", "resource", ",", "Log", "log", ")", "{", "if", "(", "log", ".", "isDebugEnabled", "(", ")", ")", "{", "log", ".", "debug", "(", "String", ".", "format", "(", "\"Resource [%s] resolves as an index alias\"", ",", "resource", ")", ")", ";", "}", "// indexName -> aliasName -> alias definition", "Map", "<", "String", ",", "Map", "<", "String", ",", "IndicesAliases", ".", "Alias", ">", ">", "indexAliasTable", "=", "response", ".", "getIndices", "(", ")", ".", "getAll", "(", ")", ";", "if", "(", "indexAliasTable", ".", "size", "(", ")", "<", "1", ")", "{", "// Sanity check", "throw", "new", "EsHadoopIllegalArgumentException", "(", "\"Cannot initialize alias write resource [\"", "+", "resource", ".", "index", "(", ")", "+", "\"] if it does not have any alias entries.\"", ")", ";", "}", "else", "if", "(", "indexAliasTable", ".", "size", "(", ")", ">", "1", ")", "{", "// Multiple indices, validate that one index-alias relation has its write index flag set", "String", "currentWriteIndex", "=", "null", ";", "for", "(", "Map", ".", "Entry", "<", "String", ",", "Map", "<", "String", ",", "IndicesAliases", ".", "Alias", ">", ">", "indexRow", ":", "indexAliasTable", ".", "entrySet", "(", ")", ")", "{", "String", "indexName", "=", "indexRow", ".", "getKey", "(", ")", ";", "Map", "<", "String", ",", "IndicesAliases", ".", "Alias", ">", "aliases", "=", "indexRow", ".", "getValue", "(", ")", ";", "IndicesAliases", ".", "Alias", "aliasInfo", "=", "aliases", ".", "get", "(", "resource", ".", "index", "(", ")", ")", ";", "if", "(", "aliasInfo", ".", "isWriteIndex", "(", ")", ")", "{", "currentWriteIndex", "=", "indexName", ";", "break", ";", "}", "}", "if", "(", "currentWriteIndex", "==", "null", ")", "{", "throw", "new", "EsHadoopIllegalArgumentException", "(", "\"Attempting to write to alias [\"", "+", "resource", ".", "index", "(", ")", "+", "\"], \"", "+", "\"but detected multiple indices [\"", "+", "indexAliasTable", ".", "size", "(", ")", "+", "\"] with no write index selected. \"", "+", "\"Bailing out...\"", ")", ";", "}", "else", "{", "if", "(", "log", ".", "isDebugEnabled", "(", ")", ")", "{", "log", ".", "debug", "(", "String", ".", "format", "(", "\"Writing to currently configured write-index [%s]\"", ",", "currentWriteIndex", ")", ")", ";", "}", "}", "}", "else", "{", "// Single index in the alias, but we should still not pin the nodes", "if", "(", "log", ".", "isDebugEnabled", "(", ")", ")", "{", "log", ".", "debug", "(", "String", ".", "format", "(", "\"Writing to the alias's single configured index [%s]\"", ",", "indexAliasTable", ".", "keySet", "(", ")", ".", "iterator", "(", ")", ".", "next", "(", ")", ")", ")", ";", "}", "}", "// alias-index write - since we don't know beforehand what concrete index will be used at any", "// given time during the job, use an already selected node", "String", "node", "=", "SettingsUtils", ".", "getPinnedNode", "(", "settings", ")", ";", "if", "(", "log", ".", "isDebugEnabled", "(", ")", ")", "{", "log", ".", "debug", "(", "String", ".", "format", "(", "\"Partition writer instance [%s] assigned to [%s]\"", ",", "currentInstance", ",", "node", ")", ")", ";", "}", "return", "new", "RestRepository", "(", "settings", ")", ";", "}" ]
Validate and configure a rest repository for writing to an alias backed by a valid write-index. This validation only checks that an alias is valid at time of job start, and makes no guarantees about the alias changing during the execution. @param response Response from the get alias call @param settings Job settings @param currentInstance Partition number @param resource Configured write resource @param log Logger to use @return The RestRepository to be used by the partition writer
[ "Validate", "and", "configure", "a", "rest", "repository", "for", "writing", "to", "an", "alias", "backed", "by", "a", "valid", "write", "-", "index", ".", "This", "validation", "only", "checks", "that", "an", "alias", "is", "valid", "at", "time", "of", "job", "start", "and", "makes", "no", "guarantees", "about", "the", "alias", "changing", "during", "the", "execution", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/RestService.java#L757-L806
15,035
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/util/IOUtils.java
IOUtils.toCanonicalFilePath
public static String toCanonicalFilePath(URL fileURL) throws URISyntaxException, IOException { if (fileURL == null) { return null; } // Only handle jar: and file: schemes if (!"jar".equals(fileURL.getProtocol()) && !"file".equals(fileURL.getProtocol())) { return null; } // Parse the jar file location from the jar url. Doesn't open any resources. if ("jar".equals(fileURL.getProtocol())) { JarURLConnection jarURLConnection = (JarURLConnection) fileURL.openConnection(); fileURL = jarURLConnection.getJarFileURL(); } URI fileURI = fileURL.toURI(); File file = new File(fileURI); // Use filesystem to resolve any sym links or dots in the path to // a singular unique file path File canonicalFile = file.getCanonicalFile(); return canonicalFile.toURI().toString(); }
java
public static String toCanonicalFilePath(URL fileURL) throws URISyntaxException, IOException { if (fileURL == null) { return null; } // Only handle jar: and file: schemes if (!"jar".equals(fileURL.getProtocol()) && !"file".equals(fileURL.getProtocol())) { return null; } // Parse the jar file location from the jar url. Doesn't open any resources. if ("jar".equals(fileURL.getProtocol())) { JarURLConnection jarURLConnection = (JarURLConnection) fileURL.openConnection(); fileURL = jarURLConnection.getJarFileURL(); } URI fileURI = fileURL.toURI(); File file = new File(fileURI); // Use filesystem to resolve any sym links or dots in the path to // a singular unique file path File canonicalFile = file.getCanonicalFile(); return canonicalFile.toURI().toString(); }
[ "public", "static", "String", "toCanonicalFilePath", "(", "URL", "fileURL", ")", "throws", "URISyntaxException", ",", "IOException", "{", "if", "(", "fileURL", "==", "null", ")", "{", "return", "null", ";", "}", "// Only handle jar: and file: schemes", "if", "(", "!", "\"jar\"", ".", "equals", "(", "fileURL", ".", "getProtocol", "(", ")", ")", "&&", "!", "\"file\"", ".", "equals", "(", "fileURL", ".", "getProtocol", "(", ")", ")", ")", "{", "return", "null", ";", "}", "// Parse the jar file location from the jar url. Doesn't open any resources.", "if", "(", "\"jar\"", ".", "equals", "(", "fileURL", ".", "getProtocol", "(", ")", ")", ")", "{", "JarURLConnection", "jarURLConnection", "=", "(", "JarURLConnection", ")", "fileURL", ".", "openConnection", "(", ")", ";", "fileURL", "=", "jarURLConnection", ".", "getJarFileURL", "(", ")", ";", "}", "URI", "fileURI", "=", "fileURL", ".", "toURI", "(", ")", ";", "File", "file", "=", "new", "File", "(", "fileURI", ")", ";", "// Use filesystem to resolve any sym links or dots in the path to", "// a singular unique file path", "File", "canonicalFile", "=", "file", ".", "getCanonicalFile", "(", ")", ";", "return", "canonicalFile", ".", "toURI", "(", ")", ".", "toString", "(", ")", ";", "}" ]
Convert either a file or jar URL into a local canonical file, or null if the URL uses a different scheme. @param fileURL the url to resolve to a canonical file. @return null if the given URL is null or uses a scheme other than jar or file. Otherwise, returns the String path to the local canonical file. @throws URISyntaxException If the given URL cannot be transformed into a URI @throws IOException If the jar cannot be read or if the canonical file cannot be determined
[ "Convert", "either", "a", "file", "or", "jar", "URL", "into", "a", "local", "canonical", "file", "or", "null", "if", "the", "URL", "uses", "a", "different", "scheme", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/util/IOUtils.java#L225-L249
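A minimal usage sketch for toCanonicalFilePath above: resolving the canonical on-disk location of the jar or directory that loaded a class. The demo class name is made up, and the code-source lookup can return null under some class loaders; this is an illustration, not part of the library.

import java.net.URL;
import org.elasticsearch.hadoop.util.IOUtils;

public class CanonicalPathDemo {
    public static void main(String[] args) throws Exception {
        // Code-source location of this class: a file: URL for a directory or a jar.
        URL location = CanonicalPathDemo.class.getProtectionDomain()
                .getCodeSource().getLocation();
        // Resolves symlinks and dots, returning a file: URI string,
        // or null for schemes other than jar: and file:.
        System.out.println(IOUtils.toCanonicalFilePath(location));
    }
}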
15,036
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/mr/EsMapReduceUtil.java
EsMapReduceUtil.initCredentials
public static void initCredentials(Job job) { Configuration configuration = job.getConfiguration(); Settings settings = HadoopSettingsManager.loadFrom(configuration); InitializationUtils.setUserProviderIfNotSet(settings, HadoopUserProvider.class, LOG); UserProvider userProvider = UserProvider.create(settings); if (userProvider.isEsKerberosEnabled()) { User user = userProvider.getUser(); ClusterInfo clusterInfo = settings.getClusterInfoOrNull(); RestClient bootstrap = new RestClient(settings); try { // first get ES main action info if it's missing if (clusterInfo == null) { clusterInfo = bootstrap.mainInfo(); } // Add the token to the job TokenUtil.addTokenForJob(bootstrap, clusterInfo.getClusterName(), user, job); } catch (EsHadoopException ex) { throw new EsHadoopIllegalArgumentException(String.format("Cannot detect ES version - " + "typically this happens if the network/Elasticsearch cluster is not accessible or when targeting " + "a WAN/Cloud instance without the proper setting '%s'", ConfigurationOptions.ES_NODES_WAN_ONLY), ex); } finally { bootstrap.close(); } } else { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring Elasticsearch credentials since Kerberos Auth is not enabled."); } } }
java
public static void initCredentials(Job job) { Configuration configuration = job.getConfiguration(); Settings settings = HadoopSettingsManager.loadFrom(configuration); InitializationUtils.setUserProviderIfNotSet(settings, HadoopUserProvider.class, LOG); UserProvider userProvider = UserProvider.create(settings); if (userProvider.isEsKerberosEnabled()) { User user = userProvider.getUser(); ClusterInfo clusterInfo = settings.getClusterInfoOrNull(); RestClient bootstrap = new RestClient(settings); try { // first get ES main action info if it's missing if (clusterInfo == null) { clusterInfo = bootstrap.mainInfo(); } // Add the token to the job TokenUtil.addTokenForJob(bootstrap, clusterInfo.getClusterName(), user, job); } catch (EsHadoopException ex) { throw new EsHadoopIllegalArgumentException(String.format("Cannot detect ES version - " + "typically this happens if the network/Elasticsearch cluster is not accessible or when targeting " + "a WAN/Cloud instance without the proper setting '%s'", ConfigurationOptions.ES_NODES_WAN_ONLY), ex); } finally { bootstrap.close(); } } else { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring Elasticsearch credentials since Kerberos Auth is not enabled."); } } }
[ "public", "static", "void", "initCredentials", "(", "Job", "job", ")", "{", "Configuration", "configuration", "=", "job", ".", "getConfiguration", "(", ")", ";", "Settings", "settings", "=", "HadoopSettingsManager", ".", "loadFrom", "(", "configuration", ")", ";", "InitializationUtils", ".", "setUserProviderIfNotSet", "(", "settings", ",", "HadoopUserProvider", ".", "class", ",", "LOG", ")", ";", "UserProvider", "userProvider", "=", "UserProvider", ".", "create", "(", "settings", ")", ";", "if", "(", "userProvider", ".", "isEsKerberosEnabled", "(", ")", ")", "{", "User", "user", "=", "userProvider", ".", "getUser", "(", ")", ";", "ClusterInfo", "clusterInfo", "=", "settings", ".", "getClusterInfoOrNull", "(", ")", ";", "RestClient", "bootstrap", "=", "new", "RestClient", "(", "settings", ")", ";", "try", "{", "// first get ES main action info if it's missing", "if", "(", "clusterInfo", "==", "null", ")", "{", "clusterInfo", "=", "bootstrap", ".", "mainInfo", "(", ")", ";", "}", "// Add the token to the job", "TokenUtil", ".", "addTokenForJob", "(", "bootstrap", ",", "clusterInfo", ".", "getClusterName", "(", ")", ",", "user", ",", "job", ")", ";", "}", "catch", "(", "EsHadoopException", "ex", ")", "{", "throw", "new", "EsHadoopIllegalArgumentException", "(", "String", ".", "format", "(", "\"Cannot detect ES version - \"", "+", "\"typically this happens if the network/Elasticsearch cluster is not accessible or when targeting \"", "+", "\"a WAN/Cloud instance without the proper setting '%s'\"", ",", "ConfigurationOptions", ".", "ES_NODES_WAN_ONLY", ")", ",", "ex", ")", ";", "}", "finally", "{", "bootstrap", ".", "close", "(", ")", ";", "}", "}", "else", "{", "if", "(", "LOG", ".", "isDebugEnabled", "(", ")", ")", "{", "LOG", ".", "debug", "(", "\"Ignoring Elasticsearch credentials since Kerberos Auth is not enabled.\"", ")", ";", "}", "}", "}" ]
Given the settings contained within a job object, retrieve an authentication token from either the currently logged in user or from the Elasticsearch cluster and add it to the job's credential set. @param job for collecting the settings to connect to Elasticsearch, as well as for storing the authentication token
[ "Given", "the", "settings", "contained", "within", "a", "job", "object", "retrieve", "an", "authentication", "token", "from", "either", "the", "currently", "logged", "in", "user", "or", "from", "the", "Elasticsearch", "cluster", "and", "add", "it", "to", "the", "job", "s", "credential", "set", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/mr/EsMapReduceUtil.java#L57-L86
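A hedged driver sketch for EsMapReduceUtil.initCredentials (this record and the JobConf variant that follows): obtain an Elasticsearch auth token before submitting the job. The es.nodes endpoint is illustrative, and Kerberos authentication must already be enabled in the connector settings for a token to actually be fetched.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.elasticsearch.hadoop.mr.EsMapReduceUtil;

public class SubmitWithEsToken {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("es.nodes", "es-node.example.com:9200"); // illustrative endpoint
        Job job = Job.getInstance(conf, "es-write-job");
        // If Kerberos auth is enabled in the settings, this fetches an ES token
        // and adds it to the job's credential set; otherwise it is a no-op.
        EsMapReduceUtil.initCredentials(job);
        job.submit();
    }
}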
15,037
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/mr/EsMapReduceUtil.java
EsMapReduceUtil.initCredentials
public static void initCredentials(JobConf jobConf) { Settings settings = HadoopSettingsManager.loadFrom(jobConf); InitializationUtils.setUserProviderIfNotSet(settings, HadoopUserProvider.class, LOG); UserProvider userProvider = UserProvider.create(settings); if (userProvider.isEsKerberosEnabled()) { User user = userProvider.getUser(); ClusterInfo clusterInfo = settings.getClusterInfoOrNull(); RestClient bootstrap = new RestClient(settings); try { // first get ES main action info if it's missing if (clusterInfo == null) { clusterInfo = bootstrap.mainInfo(); } // Add the token to the job TokenUtil.addTokenForJobConf(bootstrap, clusterInfo.getClusterName(), user, jobConf); } catch (EsHadoopException ex) { throw new EsHadoopIllegalArgumentException(String.format("Cannot detect ES version - " + "typically this happens if the network/Elasticsearch cluster is not accessible or when targeting " + "a WAN/Cloud instance without the proper setting '%s'", ConfigurationOptions.ES_NODES_WAN_ONLY), ex); } finally { bootstrap.close(); } } else { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring Elasticsearch credentials since Kerberos Auth is not enabled."); } } }
java
public static void initCredentials(JobConf jobConf) { Settings settings = HadoopSettingsManager.loadFrom(jobConf); InitializationUtils.setUserProviderIfNotSet(settings, HadoopUserProvider.class, LOG); UserProvider userProvider = UserProvider.create(settings); if (userProvider.isEsKerberosEnabled()) { User user = userProvider.getUser(); ClusterInfo clusterInfo = settings.getClusterInfoOrNull(); RestClient bootstrap = new RestClient(settings); try { // first get ES main action info if it's missing if (clusterInfo == null) { clusterInfo = bootstrap.mainInfo(); } // Add the token to the job TokenUtil.addTokenForJobConf(bootstrap, clusterInfo.getClusterName(), user, jobConf); } catch (EsHadoopException ex) { throw new EsHadoopIllegalArgumentException(String.format("Cannot detect ES version - " + "typically this happens if the network/Elasticsearch cluster is not accessible or when targeting " + "a WAN/Cloud instance without the proper setting '%s'", ConfigurationOptions.ES_NODES_WAN_ONLY), ex); } finally { bootstrap.close(); } } else { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring Elasticsearch credentials since Kerberos Auth is not enabled."); } } }
[ "public", "static", "void", "initCredentials", "(", "JobConf", "jobConf", ")", "{", "Settings", "settings", "=", "HadoopSettingsManager", ".", "loadFrom", "(", "jobConf", ")", ";", "InitializationUtils", ".", "setUserProviderIfNotSet", "(", "settings", ",", "HadoopUserProvider", ".", "class", ",", "LOG", ")", ";", "UserProvider", "userProvider", "=", "UserProvider", ".", "create", "(", "settings", ")", ";", "if", "(", "userProvider", ".", "isEsKerberosEnabled", "(", ")", ")", "{", "User", "user", "=", "userProvider", ".", "getUser", "(", ")", ";", "ClusterInfo", "clusterInfo", "=", "settings", ".", "getClusterInfoOrNull", "(", ")", ";", "RestClient", "bootstrap", "=", "new", "RestClient", "(", "settings", ")", ";", "try", "{", "// first get ES main action info if it's missing", "if", "(", "clusterInfo", "==", "null", ")", "{", "clusterInfo", "=", "bootstrap", ".", "mainInfo", "(", ")", ";", "}", "// Add the token to the job", "TokenUtil", ".", "addTokenForJobConf", "(", "bootstrap", ",", "clusterInfo", ".", "getClusterName", "(", ")", ",", "user", ",", "jobConf", ")", ";", "}", "catch", "(", "EsHadoopException", "ex", ")", "{", "throw", "new", "EsHadoopIllegalArgumentException", "(", "String", ".", "format", "(", "\"Cannot detect ES version - \"", "+", "\"typically this happens if the network/Elasticsearch cluster is not accessible or when targeting \"", "+", "\"a WAN/Cloud instance without the proper setting '%s'\"", ",", "ConfigurationOptions", ".", "ES_NODES_WAN_ONLY", ")", ",", "ex", ")", ";", "}", "finally", "{", "bootstrap", ".", "close", "(", ")", ";", "}", "}", "else", "{", "if", "(", "LOG", ".", "isDebugEnabled", "(", ")", ")", "{", "LOG", ".", "debug", "(", "\"Ignoring Elasticsearch credentials since Kerberos Auth is not enabled.\"", ")", ";", "}", "}", "}" ]
Given the settings contained within the job conf, retrieve an authentication token from either the currently logged in user or from the Elasticsearch cluster and add it to the job's credential set. @param jobConf containing the settings to connect to Elasticsearch, as well as for storing the authentication token
[ "Given", "the", "settings", "contained", "within", "the", "job", "conf", "retrieve", "an", "authentication", "token", "from", "either", "the", "currently", "logged", "in", "user", "or", "from", "the", "Elasticsearch", "cluster", "and", "add", "it", "to", "the", "job", "s", "credential", "set", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/mr/EsMapReduceUtil.java#L93-L121
15,038
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/util/StringUtils.java
StringUtils.levenshteinDistance
public static int levenshteinDistance(CharSequence one, CharSequence another, int threshold) { int n = one.length(); int m = another.length(); // if one string is empty, the edit distance is necessarily the length of the other if (n == 0) { return m <= threshold ? m : -1; } else if (m == 0) { return n <= threshold ? n : -1; } if (n > m) { // swap the two strings to consume less memory final CharSequence tmp = one; one = another; another = tmp; n = m; m = another.length(); } int p[] = new int[n + 1]; // 'previous' cost array, horizontally int d[] = new int[n + 1]; // cost array, horizontally int _d[]; // placeholder to assist in swapping p and d // fill in starting table values final int boundary = Math.min(n, threshold) + 1; for (int i = 0; i < boundary; i++) { p[i] = i; } // these fills ensure that the value above the rightmost entry of our // stripe will be ignored in following loop iterations Arrays.fill(p, boundary, p.length, Integer.MAX_VALUE); Arrays.fill(d, Integer.MAX_VALUE); for (int j = 1; j <= m; j++) { final char t_j = another.charAt(j - 1); d[0] = j; // compute stripe indices, constrain to array size final int min = Math.max(1, j - threshold); final int max = (j > Integer.MAX_VALUE - threshold) ? n : Math.min(n, j + threshold); // the stripe may lead off of the table if s and t are of different sizes if (min > max) { return -1; } // ignore entry left of leftmost if (min > 1) { d[min - 1] = Integer.MAX_VALUE; } // iterates through [min, max] in s for (int i = min; i <= max; i++) { if (one.charAt(i - 1) == t_j) { // diagonally left and up d[i] = p[i - 1]; } else { // 1 + minimum of cell to the left, to the top, diagonally left and up d[i] = 1 + Math.min(Math.min(d[i - 1], p[i]), p[i - 1]); } } // copy current distance counts to 'previous row' distance counts _d = p; p = d; d = _d; } // if p[n] is greater than the threshold, there's no guarantee on it being the correct // distance if (p[n] <= threshold) { return p[n]; } return -1; }
java
public static int levenshteinDistance(CharSequence one, CharSequence another, int threshold) { int n = one.length(); int m = another.length(); // if one string is empty, the edit distance is necessarily the length of the other if (n == 0) { return m <= threshold ? m : -1; } else if (m == 0) { return n <= threshold ? n : -1; } if (n > m) { // swap the two strings to consume less memory final CharSequence tmp = one; one = another; another = tmp; n = m; m = another.length(); } int p[] = new int[n + 1]; // 'previous' cost array, horizontally int d[] = new int[n + 1]; // cost array, horizontally int _d[]; // placeholder to assist in swapping p and d // fill in starting table values final int boundary = Math.min(n, threshold) + 1; for (int i = 0; i < boundary; i++) { p[i] = i; } // these fills ensure that the value above the rightmost entry of our // stripe will be ignored in following loop iterations Arrays.fill(p, boundary, p.length, Integer.MAX_VALUE); Arrays.fill(d, Integer.MAX_VALUE); for (int j = 1; j <= m; j++) { final char t_j = another.charAt(j - 1); d[0] = j; // compute stripe indices, constrain to array size final int min = Math.max(1, j - threshold); final int max = (j > Integer.MAX_VALUE - threshold) ? n : Math.min(n, j + threshold); // the stripe may lead off of the table if s and t are of different sizes if (min > max) { return -1; } // ignore entry left of leftmost if (min > 1) { d[min - 1] = Integer.MAX_VALUE; } // iterates through [min, max] in s for (int i = min; i <= max; i++) { if (one.charAt(i - 1) == t_j) { // diagonally left and up d[i] = p[i - 1]; } else { // 1 + minimum of cell to the left, to the top, diagonally left and up d[i] = 1 + Math.min(Math.min(d[i - 1], p[i]), p[i - 1]); } } // copy current distance counts to 'previous row' distance counts _d = p; p = d; d = _d; } // if p[n] is greater than the threshold, there's no guarantee on it being the correct // distance if (p[n] <= threshold) { return p[n]; } return -1; }
[ "public", "static", "int", "levenshteinDistance", "(", "CharSequence", "one", ",", "CharSequence", "another", ",", "int", "threshold", ")", "{", "int", "n", "=", "one", ".", "length", "(", ")", ";", "int", "m", "=", "another", ".", "length", "(", ")", ";", "// if one string is empty, the edit distance is necessarily the length of the other", "if", "(", "n", "==", "0", ")", "{", "return", "m", "<=", "threshold", "?", "m", ":", "-", "1", ";", "}", "else", "if", "(", "m", "==", "0", ")", "{", "return", "n", "<=", "threshold", "?", "n", ":", "-", "1", ";", "}", "if", "(", "n", ">", "m", ")", "{", "// swap the two strings to consume less memory", "final", "CharSequence", "tmp", "=", "one", ";", "one", "=", "another", ";", "another", "=", "tmp", ";", "n", "=", "m", ";", "m", "=", "another", ".", "length", "(", ")", ";", "}", "int", "p", "[", "]", "=", "new", "int", "[", "n", "+", "1", "]", ";", "// 'previous' cost array, horizontally", "int", "d", "[", "]", "=", "new", "int", "[", "n", "+", "1", "]", ";", "// cost array, horizontally", "int", "_d", "[", "]", ";", "// placeholder to assist in swapping p and d", "// fill in starting table values", "final", "int", "boundary", "=", "Math", ".", "min", "(", "n", ",", "threshold", ")", "+", "1", ";", "for", "(", "int", "i", "=", "0", ";", "i", "<", "boundary", ";", "i", "++", ")", "{", "p", "[", "i", "]", "=", "i", ";", "}", "// these fills ensure that the value above the rightmost entry of our", "// stripe will be ignored in following loop iterations", "Arrays", ".", "fill", "(", "p", ",", "boundary", ",", "p", ".", "length", ",", "Integer", ".", "MAX_VALUE", ")", ";", "Arrays", ".", "fill", "(", "d", ",", "Integer", ".", "MAX_VALUE", ")", ";", "for", "(", "int", "j", "=", "1", ";", "j", "<=", "m", ";", "j", "++", ")", "{", "final", "char", "t_j", "=", "another", ".", "charAt", "(", "j", "-", "1", ")", ";", "d", "[", "0", "]", "=", "j", ";", "// compute stripe indices, constrain to array size", "final", "int", "min", "=", "Math", ".", "max", "(", "1", ",", "j", "-", "threshold", ")", ";", "final", "int", "max", "=", "(", "j", ">", "Integer", ".", "MAX_VALUE", "-", "threshold", ")", "?", "n", ":", "Math", ".", "min", "(", "n", ",", "j", "+", "threshold", ")", ";", "// the stripe may lead off of the table if s and t are of different sizes", "if", "(", "min", ">", "max", ")", "{", "return", "-", "1", ";", "}", "// ignore entry left of leftmost", "if", "(", "min", ">", "1", ")", "{", "d", "[", "min", "-", "1", "]", "=", "Integer", ".", "MAX_VALUE", ";", "}", "// iterates through [min, max] in s", "for", "(", "int", "i", "=", "min", ";", "i", "<=", "max", ";", "i", "++", ")", "{", "if", "(", "one", ".", "charAt", "(", "i", "-", "1", ")", "==", "t_j", ")", "{", "// diagonally left and up", "d", "[", "i", "]", "=", "p", "[", "i", "-", "1", "]", ";", "}", "else", "{", "// 1 + minimum of cell to the left, to the top, diagonally left and up", "d", "[", "i", "]", "=", "1", "+", "Math", ".", "min", "(", "Math", ".", "min", "(", "d", "[", "i", "-", "1", "]", ",", "p", "[", "i", "]", ")", ",", "p", "[", "i", "-", "1", "]", ")", ";", "}", "}", "// copy current distance counts to 'previous row' distance counts", "_d", "=", "p", ";", "p", "=", "d", ";", "d", "=", "_d", ";", "}", "// if p[n] is greater than the threshold, there's no guarantee on it being the correct", "// distance", "if", "(", "p", "[", "n", "]", "<=", "threshold", ")", "{", "return", "p", "[", "n", "]", ";", "}", "return", "-", "1", ";", "}" ]
returns the Levenshtein distance if the two strings are within the given threshold of each other, -1 otherwise
[ "returns", "the", "Levenshtein", "distance", "if", "the", "two", "strings", "are", "within", "the", "given", "threshold", "of", "each", "other", "-", "1", "otherwise" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/util/StringUtils.java#L230-L308
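A worked example of the threshold contract: "kitten" -> "sitting" needs 3 edits, so the call reports the distance with threshold 5 and bails out with -1 when the threshold is 2.

import org.elasticsearch.hadoop.util.StringUtils;

public class LevenshteinDemo {
    public static void main(String[] args) {
        // 3 edits: substitute k->s, substitute e->i, append g.
        System.out.println(StringUtils.levenshteinDistance("kitten", "sitting", 5)); // 3
        // Distance exceeds the threshold, so the striped computation returns -1.
        System.out.println(StringUtils.levenshteinDistance("kitten", "sitting", 2)); // -1
    }
}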
15,039
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/util/StringUtils.java
StringUtils.toJsonString
public static String toJsonString(Object value) { if (value == null) { return "null"; } else if (value.getClass().equals(String.class)) { return "\"" + StringUtils.jsonEncoding(value.toString()) + "\""; } // else it's a RawJson, Boolean or Number so no escaping or quotes else { return value.toString(); } }
java
public static String toJsonString(Object value) { if (value == null) { return "null"; } else if (value.getClass().equals(String.class)) { return "\"" + StringUtils.jsonEncoding(value.toString()) + "\""; } // else it's a RawJson, Boolean or Number so no escaping or quotes else { return value.toString(); } }
[ "public", "static", "String", "toJsonString", "(", "Object", "value", ")", "{", "if", "(", "value", "==", "null", ")", "{", "return", "\"null\"", ";", "}", "else", "if", "(", "value", ".", "getClass", "(", ")", ".", "equals", "(", "String", ".", "class", ")", ")", "{", "return", "\"\\\"\"", "+", "StringUtils", ".", "jsonEncoding", "(", "value", ".", "toString", "(", ")", ")", "+", "\"\\\"\"", ";", "}", "// else it's a RawJson, Boolean or Number so no escaping or quotes", "else", "{", "return", "value", ".", "toString", "(", ")", ";", "}", "}" ]
return the value in a JSON friendly way
[ "return", "the", "value", "in", "a", "JSON", "friendly", "way" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/util/StringUtils.java#L411-L422
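Expected outputs for the branches above, as a quick sketch; it assumes jsonEncoding escapes quotes and control characters in string values.

import org.elasticsearch.hadoop.util.StringUtils;

public class ToJsonStringDemo {
    public static void main(String[] args) {
        System.out.println(StringUtils.toJsonString(null));         // null
        System.out.println(StringUtils.toJsonString("a\"b"));       // "a\"b" (quoted and escaped)
        System.out.println(StringUtils.toJsonString(42));           // 42 (no quotes)
        System.out.println(StringUtils.toJsonString(Boolean.TRUE)); // true
    }
}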
15,040
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/NetworkUtils.java
NetworkUtils.getGlobalInterfaces
static InetAddress[] getGlobalInterfaces() throws SocketException { List<InetAddress> list = new ArrayList<InetAddress> (); for (NetworkInterface intf : getInterfaces()) { if (intf.isUp()) { for (InetAddress address : Collections.list(intf.getInetAddresses())) { if (address.isLoopbackAddress() == false && address.isSiteLocalAddress() == false && address.isLinkLocalAddress() == false) { list.add(address); } } } } return list.toArray(new InetAddress[list.size()]); }
java
static InetAddress[] getGlobalInterfaces() throws SocketException { List<InetAddress> list = new ArrayList<InetAddress> (); for (NetworkInterface intf : getInterfaces()) { if (intf.isUp()) { for (InetAddress address : Collections.list(intf.getInetAddresses())) { if (address.isLoopbackAddress() == false && address.isSiteLocalAddress() == false && address.isLinkLocalAddress() == false) { list.add(address); } } } } return list.toArray(new InetAddress[list.size()]); }
[ "static", "InetAddress", "[", "]", "getGlobalInterfaces", "(", ")", "throws", "SocketException", "{", "List", "<", "InetAddress", ">", "list", "=", "new", "ArrayList", "<", "InetAddress", ">", "(", ")", ";", "for", "(", "NetworkInterface", "intf", ":", "getInterfaces", "(", ")", ")", "{", "if", "(", "intf", ".", "isUp", "(", ")", ")", "{", "for", "(", "InetAddress", "address", ":", "Collections", ".", "list", "(", "intf", ".", "getInetAddresses", "(", ")", ")", ")", "{", "if", "(", "address", ".", "isLoopbackAddress", "(", ")", "==", "false", "&&", "address", ".", "isSiteLocalAddress", "(", ")", "==", "false", "&&", "address", ".", "isLinkLocalAddress", "(", ")", "==", "false", ")", "{", "list", ".", "add", "(", "address", ")", ";", "}", "}", "}", "}", "return", "list", ".", "toArray", "(", "new", "InetAddress", "[", "list", ".", "size", "(", ")", "]", ")", ";", "}" ]
Returns all global scope addresses for interfaces that are up.
[ "Returns", "all", "global", "scope", "addresses", "for", "interfaces", "that", "are", "up", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/NetworkUtils.java#L50-L64
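getGlobalInterfaces is package-private, but the same filter can be reproduced with the standard java.net API; a standalone sketch:

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.util.Collections;

public class GlobalAddressesDemo {
    public static void main(String[] args) throws Exception {
        for (NetworkInterface intf : Collections.list(NetworkInterface.getNetworkInterfaces())) {
            if (!intf.isUp()) continue;
            for (InetAddress address : Collections.list(intf.getInetAddresses())) {
                // Same predicate as getGlobalInterfaces:
                // skip loopback, site-local, and link-local addresses.
                if (!address.isLoopbackAddress() && !address.isSiteLocalAddress()
                        && !address.isLinkLocalAddress()) {
                    System.out.println(intf.getName() + " -> " + address.getHostAddress());
                }
            }
        }
    }
}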
15,041
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/auth/spnego/SpnegoAuthScheme.java
SpnegoAuthScheme.getFQDN
protected String getFQDN(URI requestURI) throws UnknownHostException { String host = requestURI.getHost(); InetAddress address = InetAddress.getByName(host); return address.getCanonicalHostName(); }
java
protected String getFQDN(URI requestURI) throws UnknownHostException { String host = requestURI.getHost(); InetAddress address = InetAddress.getByName(host); return address.getCanonicalHostName(); }
[ "protected", "String", "getFQDN", "(", "URI", "requestURI", ")", "throws", "UnknownHostException", "{", "String", "host", "=", "requestURI", ".", "getHost", "(", ")", ";", "InetAddress", "address", "=", "InetAddress", ".", "getByName", "(", "host", ")", ";", "return", "address", ".", "getCanonicalHostName", "(", ")", ";", "}" ]
Get the FQDN of the request uri's address, reverse resolving if needed. @param requestURI URI of the request that needs authentication @return FQDN of the uri @throws UnknownHostException if the address cannot be resolved
[ "Get", "the", "FQDN", "of", "the", "request", "uri", "s", "address", "reverse", "resolving", "if", "needed", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/auth/spnego/SpnegoAuthScheme.java#L87-L91
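The resolution step is plain java.net; a standalone sketch of what getFQDN does for a request URI (the URI here is illustrative):

import java.net.InetAddress;
import java.net.URI;

public class FqdnDemo {
    public static void main(String[] args) throws Exception {
        URI requestURI = URI.create("http://localhost:9200/");
        // Resolve the host, then ask for its canonical (fully qualified) name.
        InetAddress address = InetAddress.getByName(requestURI.getHost());
        System.out.println(address.getCanonicalHostName());
    }
}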
15,042
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/auth/spnego/SpnegoAuthScheme.java
SpnegoAuthScheme.initializeNegotiator
private void initializeNegotiator(URI requestURI, SpnegoCredentials spnegoCredentials) throws UnknownHostException, AuthenticationException, GSSException { // Initialize negotiator if (spnegoNegotiator == null) { // Determine host principal String servicePrincipal = spnegoCredentials.getServicePrincipalName(); if (spnegoCredentials.getServicePrincipalName().contains(HOSTNAME_PATTERN)) { String fqdn = getFQDN(requestURI); String[] components = spnegoCredentials.getServicePrincipalName().split("[/@]"); if (components.length != 3 || !components[1].equals(HOSTNAME_PATTERN)) { throw new AuthenticationException("Malformed service principal name [" + spnegoCredentials.getServicePrincipalName() + "]. To use host substitution, the principal must be of the format [serviceName/_HOST@REALM.NAME]."); } servicePrincipal = components[0] + "/" + fqdn.toLowerCase() + "@" + components[2]; } User userInfo = spnegoCredentials.getUserProvider().getUser(); KerberosPrincipal principal = userInfo.getKerberosPrincipal(); if (principal == null) { throw new EsHadoopIllegalArgumentException("Could not locate Kerberos Principal on currently logged in user."); } spnegoNegotiator = new SpnegoNegotiator(principal.getName(), servicePrincipal); } }
java
private void initializeNegotiator(URI requestURI, SpnegoCredentials spnegoCredentials) throws UnknownHostException, AuthenticationException, GSSException { // Initialize negotiator if (spnegoNegotiator == null) { // Determine host principal String servicePrincipal = spnegoCredentials.getServicePrincipalName(); if (spnegoCredentials.getServicePrincipalName().contains(HOSTNAME_PATTERN)) { String fqdn = getFQDN(requestURI); String[] components = spnegoCredentials.getServicePrincipalName().split("[/@]"); if (components.length != 3 || !components[1].equals(HOSTNAME_PATTERN)) { throw new AuthenticationException("Malformed service principal name [" + spnegoCredentials.getServicePrincipalName() + "]. To use host substitution, the principal must be of the format [serviceName/_HOST@REALM.NAME]."); } servicePrincipal = components[0] + "/" + fqdn.toLowerCase() + "@" + components[2]; } User userInfo = spnegoCredentials.getUserProvider().getUser(); KerberosPrincipal principal = userInfo.getKerberosPrincipal(); if (principal == null) { throw new EsHadoopIllegalArgumentException("Could not locate Kerberos Principal on currently logged in user."); } spnegoNegotiator = new SpnegoNegotiator(principal.getName(), servicePrincipal); } }
[ "private", "void", "initializeNegotiator", "(", "URI", "requestURI", ",", "SpnegoCredentials", "spnegoCredentials", ")", "throws", "UnknownHostException", ",", "AuthenticationException", ",", "GSSException", "{", "// Initialize negotiator", "if", "(", "spnegoNegotiator", "==", "null", ")", "{", "// Determine host principal", "String", "servicePrincipal", "=", "spnegoCredentials", ".", "getServicePrincipalName", "(", ")", ";", "if", "(", "spnegoCredentials", ".", "getServicePrincipalName", "(", ")", ".", "contains", "(", "HOSTNAME_PATTERN", ")", ")", "{", "String", "fqdn", "=", "getFQDN", "(", "requestURI", ")", ";", "String", "[", "]", "components", "=", "spnegoCredentials", ".", "getServicePrincipalName", "(", ")", ".", "split", "(", "\"[/@]\"", ")", ";", "if", "(", "components", ".", "length", "!=", "3", "||", "!", "components", "[", "1", "]", ".", "equals", "(", "HOSTNAME_PATTERN", ")", ")", "{", "throw", "new", "AuthenticationException", "(", "\"Malformed service principal name [\"", "+", "spnegoCredentials", ".", "getServicePrincipalName", "(", ")", "+", "\"]. To use host substitution, the principal must be of the format [serviceName/_HOST@REALM.NAME].\"", ")", ";", "}", "servicePrincipal", "=", "components", "[", "0", "]", "+", "\"/\"", "+", "fqdn", ".", "toLowerCase", "(", ")", "+", "\"@\"", "+", "components", "[", "2", "]", ";", "}", "User", "userInfo", "=", "spnegoCredentials", ".", "getUserProvider", "(", ")", ".", "getUser", "(", ")", ";", "KerberosPrincipal", "principal", "=", "userInfo", ".", "getKerberosPrincipal", "(", ")", ";", "if", "(", "principal", "==", "null", ")", "{", "throw", "new", "EsHadoopIllegalArgumentException", "(", "\"Could not locate Kerberos Principal on currently logged in user.\"", ")", ";", "}", "spnegoNegotiator", "=", "new", "SpnegoNegotiator", "(", "principal", ".", "getName", "(", ")", ",", "servicePrincipal", ")", ";", "}", "}" ]
Creates the negotiator if it is not yet created, or does nothing if the negotiator is already initialized. @param requestURI request being authenticated @param spnegoCredentials The user and service principals @throws UnknownHostException If the service principal is host based, and if the request URI cannot be resolved to a FQDN @throws AuthenticationException If the service principal is malformed @throws GSSException If the negotiator cannot be created.
[ "Creates", "the", "negotiator", "if", "it", "is", "not", "yet", "created", "or", "does", "nothing", "if", "the", "negotiator", "is", "already", "initialized", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/auth/spnego/SpnegoAuthScheme.java#L101-L122
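The _HOST substitution validated above, isolated as a sketch; the service name, host, and realm are made-up values:

public class HostSubstitutionDemo {
    public static void main(String[] args) {
        String template = "es/_HOST@EXAMPLE.COM";
        String[] components = template.split("[/@]"); // ["es", "_HOST", "EXAMPLE.COM"]
        String fqdn = "node-1.example.com";
        // serviceName "/" lower-cased FQDN "@" realm, as in initializeNegotiator.
        String servicePrincipal = components[0] + "/" + fqdn.toLowerCase() + "@" + components[2];
        System.out.println(servicePrincipal); // es/node-1.example.com@EXAMPLE.COM
    }
}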
15,043
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/auth/spnego/SpnegoAuthScheme.java
SpnegoAuthScheme.getNegotiateToken
private String getNegotiateToken() throws GSSException { if (spnegoNegotiator == null) { throw new IllegalStateException("Negotiator not yet initialized."); } // Perform GSS Dance String authString; if (StringUtils.hasText(challenge)) { authString = spnegoNegotiator.send(challenge); } else { authString = spnegoNegotiator.send(); } this.challenge = null; // Prepend the authentication scheme to use if (authString != null) { authString = EsHadoopAuthPolicies.NEGOTIATE + " " + authString; } return authString; }
java
private String getNegotiateToken() throws GSSException { if (spnegoNegotiator == null) { throw new IllegalStateException("Negotiator not yet initialized."); } // Perform GSS Dance String authString; if (StringUtils.hasText(challenge)) { authString = spnegoNegotiator.send(challenge); } else { authString = spnegoNegotiator.send(); } this.challenge = null; // Prepend the authentication scheme to use if (authString != null) { authString = EsHadoopAuthPolicies.NEGOTIATE + " " + authString; } return authString; }
[ "private", "String", "getNegotiateToken", "(", ")", "throws", "GSSException", "{", "if", "(", "spnegoNegotiator", "==", "null", ")", "{", "throw", "new", "IllegalStateException", "(", "\"Negotiator not yet initialized.\"", ")", ";", "}", "// Perform GSS Dance", "String", "authString", ";", "if", "(", "StringUtils", ".", "hasText", "(", "challenge", ")", ")", "{", "authString", "=", "spnegoNegotiator", ".", "send", "(", "challenge", ")", ";", "}", "else", "{", "authString", "=", "spnegoNegotiator", ".", "send", "(", ")", ";", "}", "this", ".", "challenge", "=", "null", ";", "// Prepend the authentication scheme to use", "if", "(", "authString", "!=", "null", ")", "{", "authString", "=", "EsHadoopAuthPolicies", ".", "NEGOTIATE", "+", "\" \"", "+", "authString", ";", "}", "return", "authString", ";", "}" ]
Attempts to retrieve the next negotiation token to send, consuming any previously set challenge data. @return A negotiate header to be sent in the following request, or null if the negotiations have concluded. @throws GSSException If the negotiation encounters any problems, such as malformed tokens, or invalid keys.
[ "Attempts", "to", "retrieve", "the", "next", "negotiation", "token", "to", "send", "consuming", "any", "previously", "set", "challenge", "data", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/auth/spnego/SpnegoAuthScheme.java#L129-L147
15,044
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/auth/spnego/SpnegoAuthScheme.java
SpnegoAuthScheme.authenticate
private String authenticate(Credentials credentials, URI requestURI) throws AuthenticationException { if (!(credentials instanceof SpnegoCredentials)) { throw new AuthenticationException("Invalid credentials type provided to " + this.getClass().getName() + "." + "Expected " + SpnegoCredentials.class.getName() + " but got " + credentials.getClass().getName()); } final SpnegoCredentials spnegoCredentials = (SpnegoCredentials) credentials; try { initializeNegotiator(requestURI, spnegoCredentials); return getNegotiateToken(); } catch (GSSException e) { throw new AuthenticationException("Could not authenticate", e); } catch (UnknownHostException e) { throw new AuthenticationException("Could not authenticate", e); } }
java
private String authenticate(Credentials credentials, URI requestURI) throws AuthenticationException { if (!(credentials instanceof SpnegoCredentials)) { throw new AuthenticationException("Invalid credentials type provided to " + this.getClass().getName() + "." + "Expected " + SpnegoCredentials.class.getName() + " but got " + credentials.getClass().getName()); } final SpnegoCredentials spnegoCredentials = (SpnegoCredentials) credentials; try { initializeNegotiator(requestURI, spnegoCredentials); return getNegotiateToken(); } catch (GSSException e) { throw new AuthenticationException("Could not authenticate", e); } catch (UnknownHostException e) { throw new AuthenticationException("Could not authenticate", e); } }
[ "private", "String", "authenticate", "(", "Credentials", "credentials", ",", "URI", "requestURI", ")", "throws", "AuthenticationException", "{", "if", "(", "!", "(", "credentials", "instanceof", "SpnegoCredentials", ")", ")", "{", "throw", "new", "AuthenticationException", "(", "\"Invalid credentials type provided to \"", "+", "this", ".", "getClass", "(", ")", ".", "getName", "(", ")", "+", "\".\"", "+", "\"Expected \"", "+", "SpnegoCredentials", ".", "class", ".", "getName", "(", ")", "+", "\" but got \"", "+", "credentials", ".", "getClass", "(", ")", ".", "getName", "(", ")", ")", ";", "}", "final", "SpnegoCredentials", "spnegoCredentials", "=", "(", "SpnegoCredentials", ")", "credentials", ";", "try", "{", "initializeNegotiator", "(", "requestURI", ",", "spnegoCredentials", ")", ";", "return", "getNegotiateToken", "(", ")", ";", "}", "catch", "(", "GSSException", "e", ")", "{", "throw", "new", "AuthenticationException", "(", "\"Could not authenticate\"", ",", "e", ")", ";", "}", "catch", "(", "UnknownHostException", "e", ")", "{", "throw", "new", "AuthenticationException", "(", "\"Could not authenticate\"", ",", "e", ")", ";", "}", "}" ]
Implementation method that returns the text to send via the Authorization header on the next request.
[ "Implementation", "method", "that", "returns", "the", "text", "to", "send", "via", "the", "Authorization", "header", "on", "the", "next", "request", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/auth/spnego/SpnegoAuthScheme.java#L152-L166
15,045
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/auth/spnego/SpnegoAuthScheme.java
SpnegoAuthScheme.ensureMutualAuth
public void ensureMutualAuth(String returnChallenge) throws AuthenticationException { try { processChallenge(returnChallenge); } catch (MalformedChallengeException mce) { throw new AuthenticationException("Received invalid response header for mutual authentication", mce); } try { String token = getNegotiateToken(); if (!spnegoNegotiator.established() || token != null) { throw new AuthenticationException("Could not complete SPNEGO Authentication, Mutual Authentication Failed"); } } catch (GSSException gsse) { throw new AuthenticationException("Could not complete SPNEGO Authentication", gsse); } }
java
public void ensureMutualAuth(String returnChallenge) throws AuthenticationException { try { processChallenge(returnChallenge); } catch (MalformedChallengeException mce) { throw new AuthenticationException("Received invalid response header for mutual authentication", mce); } try { String token = getNegotiateToken(); if (!spnegoNegotiator.established() || token != null) { throw new AuthenticationException("Could not complete SPNEGO Authentication, Mutual Authentication Failed"); } } catch (GSSException gsse) { throw new AuthenticationException("Could not complete SPNEGO Authentication", gsse); } }
[ "public", "void", "ensureMutualAuth", "(", "String", "returnChallenge", ")", "throws", "AuthenticationException", "{", "try", "{", "processChallenge", "(", "returnChallenge", ")", ";", "}", "catch", "(", "MalformedChallengeException", "mce", ")", "{", "throw", "new", "AuthenticationException", "(", "\"Received invalid response header for mutual authentication\"", ",", "mce", ")", ";", "}", "try", "{", "String", "token", "=", "getNegotiateToken", "(", ")", ";", "if", "(", "!", "spnegoNegotiator", ".", "established", "(", ")", "||", "token", "!=", "null", ")", "{", "throw", "new", "AuthenticationException", "(", "\"Could not complete SPNEGO Authentication, Mutual Authentication Failed\"", ")", ";", "}", "}", "catch", "(", "GSSException", "gsse", ")", "{", "throw", "new", "AuthenticationException", "(", "\"Could not complete SPNEGO Authentication\"", ",", "gsse", ")", ";", "}", "}" ]
Authenticating requests with SPNEGO means that a request will execute before the client is sure that the server is mutually authenticated. This means that, at best, if mutual auth is requested, the client cannot trust that the server is giving accurate information, or in the case that the client has already sent data, further communication with the server should not happen. @param returnChallenge The Negotiate challenge from the response headers of a successful executed request @throws AuthenticationException If the response header does not allow for mutual authentication to be established.
[ "Authenticating", "requests", "with", "SPNEGO", "means", "that", "a", "request", "will", "execute", "before", "the", "client", "is", "sure", "that", "the", "server", "is", "mutually", "authenticated", ".", "This", "means", "that", "at", "best", "if", "mutual", "auth", "is", "requested", "the", "client", "cannot", "trust", "that", "the", "server", "is", "giving", "accurate", "information", "or", "in", "the", "case", "that", "the", "client", "has", "already", "sent", "data", "further", "communication", "with", "the", "server", "should", "not", "happen", "." ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/rest/commonshttp/auth/spnego/SpnegoAuthScheme.java#L196-L210
15,046
elastic/elasticsearch-hadoop
mr/src/main/java/org/elasticsearch/hadoop/cfg/Settings.java
Settings.getClusterInfoOrNull
public ClusterInfo getClusterInfoOrNull() { String clusterName = getProperty(InternalConfigurationOptions.INTERNAL_ES_CLUSTER_NAME); if (clusterName == null) { return null; } String clusterUUID = getProperty(InternalConfigurationOptions.INTERNAL_ES_CLUSTER_UUID); EsMajorVersion version = getInternalVersionOrThrow(); return new ClusterInfo(new ClusterName(clusterName, clusterUUID), version); }
java
public ClusterInfo getClusterInfoOrNull() { String clusterName = getProperty(InternalConfigurationOptions.INTERNAL_ES_CLUSTER_NAME); if (clusterName == null) { return null; } String clusterUUID = getProperty(InternalConfigurationOptions.INTERNAL_ES_CLUSTER_UUID); EsMajorVersion version = getInternalVersionOrThrow(); return new ClusterInfo(new ClusterName(clusterName, clusterUUID), version); }
[ "public", "ClusterInfo", "getClusterInfoOrNull", "(", ")", "{", "String", "clusterName", "=", "getProperty", "(", "InternalConfigurationOptions", ".", "INTERNAL_ES_CLUSTER_NAME", ")", ";", "if", "(", "clusterName", "==", "null", ")", "{", "return", "null", ";", "}", "String", "clusterUUID", "=", "getProperty", "(", "InternalConfigurationOptions", ".", "INTERNAL_ES_CLUSTER_UUID", ")", ";", "EsMajorVersion", "version", "=", "getInternalVersionOrThrow", "(", ")", ";", "return", "new", "ClusterInfo", "(", "new", "ClusterName", "(", "clusterName", ",", "clusterUUID", ")", ",", "version", ")", ";", "}" ]
Get the internal cluster name and version or null if not present in the settings @return the {@link ClusterInfo} extracted from the properties or null if not present
[ "Get", "the", "internal", "cluster", "name", "and", "version", "or", "null", "if", "not", "present", "in", "the", "settings" ]
f3acaba268ff96efae8eb946088c748c777c22cc
https://github.com/elastic/elasticsearch-hadoop/blob/f3acaba268ff96efae8eb946088c748c777c22cc/mr/src/main/java/org/elasticsearch/hadoop/cfg/Settings.java#L93-L101
15,047
square/okio
samples/src/main/java/okio/samples/BitmapEncoder.java
BitmapEncoder.generateGradient
Bitmap generateGradient() { int[][] pixels = new int[1080][1920]; for (int y = 0; y < 1080; y++) { for (int x = 0; x < 1920; x++) { int r = (int) (y / 1080f * 255); int g = (int) (x / 1920f * 255); int b = (int) ((Math.hypot(x, y) / Math.hypot(1080, 1920)) * 255); pixels[y][x] = r << 16 | g << 8 | b; } } return new Bitmap(pixels); }
java
Bitmap generateGradient() { int[][] pixels = new int[1080][1920]; for (int y = 0; y < 1080; y++) { for (int x = 0; x < 1920; x++) { int r = (int) (y / 1080f * 255); int g = (int) (x / 1920f * 255); int b = (int) ((Math.hypot(x, y) / Math.hypot(1080, 1920)) * 255); pixels[y][x] = r << 16 | g << 8 | b; } } return new Bitmap(pixels); }
[ "Bitmap", "generateGradient", "(", ")", "{", "int", "[", "]", "[", "]", "pixels", "=", "new", "int", "[", "1080", "]", "[", "1920", "]", ";", "for", "(", "int", "y", "=", "0", ";", "y", "<", "1080", ";", "y", "++", ")", "{", "for", "(", "int", "x", "=", "0", ";", "x", "<", "1920", ";", "x", "++", ")", "{", "int", "r", "=", "(", "int", ")", "(", "y", "/", "1080f", "*", "255", ")", ";", "int", "g", "=", "(", "int", ")", "(", "x", "/", "1920f", "*", "255", ")", ";", "int", "b", "=", "(", "int", ")", "(", "(", "Math", ".", "hypot", "(", "x", ",", "y", ")", "/", "Math", ".", "hypot", "(", "1080", ",", "1920", ")", ")", "*", "255", ")", ";", "pixels", "[", "y", "]", "[", "x", "]", "=", "r", "<<", "16", "|", "g", "<<", "8", "|", "b", ";", "}", "}", "return", "new", "Bitmap", "(", "pixels", ")", ";", "}" ]
Returns a bitmap that lights up red subpixels at the bottom, green subpixels on the right, and blue subpixels in bottom-right.
[ "Returns", "a", "bitmap", "that", "lights", "up", "red", "subpixels", "at", "the", "bottom", "green", "subpixels", "on", "the", "right", "and", "blue", "subpixels", "in", "bottom", "-", "right", "." ]
3e5824816a27434ebc1db5f3b7e45eb853e1d7da
https://github.com/square/okio/blob/3e5824816a27434ebc1db5f3b7e45eb853e1d7da/samples/src/main/java/okio/samples/BitmapEncoder.java#L56-L67
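The 0xRRGGBB packing used in the inner loop above, unpacked for clarity:

public class PixelPackingDemo {
    public static void main(String[] args) {
        int r = 255, g = 128, b = 0;
        int pixel = r << 16 | g << 8 | b;         // 0xFF8000
        System.out.printf("0x%06X%n", pixel);
        // Recover the channels by shifting and masking.
        System.out.println((pixel >> 16) & 0xFF); // 255
        System.out.println((pixel >> 8) & 0xFF);  // 128
        System.out.println(pixel & 0xFF);         // 0
    }
}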
15,048
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/cache/disc/impl/ext/StrictLineReader.java
StrictLineReader.fillBuf
private void fillBuf() throws IOException { int result = in.read(buf, 0, buf.length); if (result == -1) { throw new EOFException(); } pos = 0; end = result; }
java
private void fillBuf() throws IOException { int result = in.read(buf, 0, buf.length); if (result == -1) { throw new EOFException(); } pos = 0; end = result; }
[ "private", "void", "fillBuf", "(", ")", "throws", "IOException", "{", "int", "result", "=", "in", ".", "read", "(", "buf", ",", "0", ",", "buf", ".", "length", ")", ";", "if", "(", "result", "==", "-", "1", ")", "{", "throw", "new", "EOFException", "(", ")", ";", "}", "pos", "=", "0", ";", "end", "=", "result", ";", "}" ]
Reads new input data into the buffer. Call only with pos == end or end == -1, depending on the desired outcome if the function throws.
[ "Reads", "new", "input", "data", "into", "the", "buffer", ".", "Call", "only", "with", "pos", "==", "end", "or", "end", "==", "-", "1", "depending", "on", "the", "desired", "outcome", "if", "the", "function", "throws", "." ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/cache/disc/impl/ext/StrictLineReader.java#L182-L189
15,049
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/core/ImageLoaderEngine.java
ImageLoaderEngine.submit
void submit(final LoadAndDisplayImageTask task) { taskDistributor.execute(new Runnable() { @Override public void run() { File image = configuration.diskCache.get(task.getLoadingUri()); boolean isImageCachedOnDisk = image != null && image.exists(); initExecutorsIfNeed(); if (isImageCachedOnDisk) { taskExecutorForCachedImages.execute(task); } else { taskExecutor.execute(task); } } }); }
java
void submit(final LoadAndDisplayImageTask task) { taskDistributor.execute(new Runnable() { @Override public void run() { File image = configuration.diskCache.get(task.getLoadingUri()); boolean isImageCachedOnDisk = image != null && image.exists(); initExecutorsIfNeed(); if (isImageCachedOnDisk) { taskExecutorForCachedImages.execute(task); } else { taskExecutor.execute(task); } } }); }
[ "void", "submit", "(", "final", "LoadAndDisplayImageTask", "task", ")", "{", "taskDistributor", ".", "execute", "(", "new", "Runnable", "(", ")", "{", "@", "Override", "public", "void", "run", "(", ")", "{", "File", "image", "=", "configuration", ".", "diskCache", ".", "get", "(", "task", ".", "getLoadingUri", "(", ")", ")", ";", "boolean", "isImageCachedOnDisk", "=", "image", "!=", "null", "&&", "image", ".", "exists", "(", ")", ";", "initExecutorsIfNeed", "(", ")", ";", "if", "(", "isImageCachedOnDisk", ")", "{", "taskExecutorForCachedImages", ".", "execute", "(", "task", ")", ";", "}", "else", "{", "taskExecutor", ".", "execute", "(", "task", ")", ";", "}", "}", "}", ")", ";", "}" ]
Submits task to execution pool
[ "Submits", "task", "to", "execution", "pool" ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/core/ImageLoaderEngine.java#L68-L82
15,050
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/utils/IoUtils.java
IoUtils.copyStream
public static boolean copyStream(InputStream is, OutputStream os, CopyListener listener, int bufferSize) throws IOException { int current = 0; int total = is.available(); if (total <= 0) { total = DEFAULT_IMAGE_TOTAL_SIZE; } final byte[] bytes = new byte[bufferSize]; int count; if (shouldStopLoading(listener, current, total)) return false; while ((count = is.read(bytes, 0, bufferSize)) != -1) { os.write(bytes, 0, count); current += count; if (shouldStopLoading(listener, current, total)) return false; } os.flush(); return true; }
java
public static boolean copyStream(InputStream is, OutputStream os, CopyListener listener, int bufferSize) throws IOException { int current = 0; int total = is.available(); if (total <= 0) { total = DEFAULT_IMAGE_TOTAL_SIZE; } final byte[] bytes = new byte[bufferSize]; int count; if (shouldStopLoading(listener, current, total)) return false; while ((count = is.read(bytes, 0, bufferSize)) != -1) { os.write(bytes, 0, count); current += count; if (shouldStopLoading(listener, current, total)) return false; } os.flush(); return true; }
[ "public", "static", "boolean", "copyStream", "(", "InputStream", "is", ",", "OutputStream", "os", ",", "CopyListener", "listener", ",", "int", "bufferSize", ")", "throws", "IOException", "{", "int", "current", "=", "0", ";", "int", "total", "=", "is", ".", "available", "(", ")", ";", "if", "(", "total", "<=", "0", ")", "{", "total", "=", "DEFAULT_IMAGE_TOTAL_SIZE", ";", "}", "final", "byte", "[", "]", "bytes", "=", "new", "byte", "[", "bufferSize", "]", ";", "int", "count", ";", "if", "(", "shouldStopLoading", "(", "listener", ",", "current", ",", "total", ")", ")", "return", "false", ";", "while", "(", "(", "count", "=", "is", ".", "read", "(", "bytes", ",", "0", ",", "bufferSize", ")", ")", "!=", "-", "1", ")", "{", "os", ".", "write", "(", "bytes", ",", "0", ",", "count", ")", ";", "current", "+=", "count", ";", "if", "(", "shouldStopLoading", "(", "listener", ",", "current", ",", "total", ")", ")", "return", "false", ";", "}", "os", ".", "flush", "(", ")", ";", "return", "true", ";", "}" ]
Copies stream, fires progress events by listener, can be interrupted by listener. @param is Input stream @param os Output stream @param listener null-ok; Listener of copying progress and controller of copying interrupting @param bufferSize Buffer size for copying, also represents a step for firing progress listener callback, i.e. progress event will be fired after every copied <b>bufferSize</b> bytes @return <b>true</b> - if stream copied successfully; <b>false</b> - if copying was interrupted by listener @throws IOException
[ "Copies", "stream", "fires", "progress", "events", "by", "listener", "can", "be", "interrupted", "by", "listener", "." ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/utils/IoUtils.java#L66-L84
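A hedged usage sketch for copyStream; it assumes the library's IoUtils.CopyListener callback is boolean onBytesCopied(int current, int total), with false interrupting the copy:

import com.nostra13.universalimageloader.utils.IoUtils;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class CopyStreamDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayInputStream in = new ByteArrayInputStream(new byte[100_000]);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        boolean completed = IoUtils.copyStream(in, out, new IoUtils.CopyListener() {
            @Override
            public boolean onBytesCopied(int current, int total) {
                System.out.println(current + " / " + total);
                return true; // return false to interrupt the copy
            }
        }, 32 * 1024); // progress fires roughly once per 32 KB buffer
        System.out.println("completed = " + completed);
    }
}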
15,051
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/utils/IoUtils.java
IoUtils.readAndCloseStream
public static void readAndCloseStream(InputStream is) { final byte[] bytes = new byte[DEFAULT_BUFFER_SIZE]; try { while (is.read(bytes, 0, DEFAULT_BUFFER_SIZE) != -1); } catch (IOException ignored) { } finally { closeSilently(is); } }
java
public static void readAndCloseStream(InputStream is) { final byte[] bytes = new byte[DEFAULT_BUFFER_SIZE]; try { while (is.read(bytes, 0, DEFAULT_BUFFER_SIZE) != -1); } catch (IOException ignored) { } finally { closeSilently(is); } }
[ "public", "static", "void", "readAndCloseStream", "(", "InputStream", "is", ")", "{", "final", "byte", "[", "]", "bytes", "=", "new", "byte", "[", "DEFAULT_BUFFER_SIZE", "]", ";", "try", "{", "while", "(", "is", ".", "read", "(", "bytes", ",", "0", ",", "DEFAULT_BUFFER_SIZE", ")", "!=", "-", "1", ")", ";", "}", "catch", "(", "IOException", "ignored", ")", "{", "}", "finally", "{", "closeSilently", "(", "is", ")", ";", "}", "}" ]
Reads all data from stream and closes it silently @param is Input stream
[ "Reads", "all", "data", "from", "stream", "and", "closes", "it", "silently" ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/utils/IoUtils.java#L103-L111
15,052
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/core/assist/deque/LinkedBlockingDeque.java
LinkedBlockingDeque.unlinkFirst
private E unlinkFirst() { // assert lock.isHeldByCurrentThread(); Node<E> f = first; if (f == null) return null; Node<E> n = f.next; E item = f.item; f.item = null; f.next = f; // help GC first = n; if (n == null) last = null; else n.prev = null; --count; notFull.signal(); return item; }
java
private E unlinkFirst() { // assert lock.isHeldByCurrentThread(); Node<E> f = first; if (f == null) return null; Node<E> n = f.next; E item = f.item; f.item = null; f.next = f; // help GC first = n; if (n == null) last = null; else n.prev = null; --count; notFull.signal(); return item; }
[ "private", "E", "unlinkFirst", "(", ")", "{", "// assert lock.isHeldByCurrentThread();", "Node", "<", "E", ">", "f", "=", "first", ";", "if", "(", "f", "==", "null", ")", "return", "null", ";", "Node", "<", "E", ">", "n", "=", "f", ".", "next", ";", "E", "item", "=", "f", ".", "item", ";", "f", ".", "item", "=", "null", ";", "f", ".", "next", "=", "f", ";", "// help GC", "first", "=", "n", ";", "if", "(", "n", "==", "null", ")", "last", "=", "null", ";", "else", "n", ".", "prev", "=", "null", ";", "--", "count", ";", "notFull", ".", "signal", "(", ")", ";", "return", "item", ";", "}" ]
Removes and returns first element, or null if empty.
[ "Removes", "and", "returns", "first", "element", "or", "null", "if", "empty", "." ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/core/assist/deque/LinkedBlockingDeque.java#L236-L253
15,053
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/core/assist/deque/LinkedBlockingDeque.java
LinkedBlockingDeque.unlinkLast
private E unlinkLast() { // assert lock.isHeldByCurrentThread(); Node<E> l = last; if (l == null) return null; Node<E> p = l.prev; E item = l.item; l.item = null; l.prev = l; // help GC last = p; if (p == null) first = null; else p.next = null; --count; notFull.signal(); return item; }
java
private E unlinkLast() { // assert lock.isHeldByCurrentThread(); Node<E> l = last; if (l == null) return null; Node<E> p = l.prev; E item = l.item; l.item = null; l.prev = l; // help GC last = p; if (p == null) first = null; else p.next = null; --count; notFull.signal(); return item; }
[ "private", "E", "unlinkLast", "(", ")", "{", "// assert lock.isHeldByCurrentThread();", "Node", "<", "E", ">", "l", "=", "last", ";", "if", "(", "l", "==", "null", ")", "return", "null", ";", "Node", "<", "E", ">", "p", "=", "l", ".", "prev", ";", "E", "item", "=", "l", ".", "item", ";", "l", ".", "item", "=", "null", ";", "l", ".", "prev", "=", "l", ";", "// help GC", "last", "=", "p", ";", "if", "(", "p", "==", "null", ")", "first", "=", "null", ";", "else", "p", ".", "next", "=", "null", ";", "--", "count", ";", "notFull", ".", "signal", "(", ")", ";", "return", "item", ";", "}" ]
Removes and returns last element, or null if empty.
[ "Removes", "and", "returns", "last", "element", "or", "null", "if", "empty", "." ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/core/assist/deque/LinkedBlockingDeque.java#L258-L275
15,054
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/core/assist/deque/LinkedBlockingDeque.java
LinkedBlockingDeque.clear
public void clear() { final ReentrantLock lock = this.lock; lock.lock(); try { for (Node<E> f = first; f != null; ) { f.item = null; Node<E> n = f.next; f.prev = null; f.next = null; f = n; } first = last = null; count = 0; notFull.signalAll(); } finally { lock.unlock(); } }
java
public void clear() { final ReentrantLock lock = this.lock; lock.lock(); try { for (Node<E> f = first; f != null; ) { f.item = null; Node<E> n = f.next; f.prev = null; f.next = null; f = n; } first = last = null; count = 0; notFull.signalAll(); } finally { lock.unlock(); } }
[ "public", "void", "clear", "(", ")", "{", "final", "ReentrantLock", "lock", "=", "this", ".", "lock", ";", "lock", ".", "lock", "(", ")", ";", "try", "{", "for", "(", "Node", "<", "E", ">", "f", "=", "first", ";", "f", "!=", "null", ";", ")", "{", "f", ".", "item", "=", "null", ";", "Node", "<", "E", ">", "n", "=", "f", ".", "next", ";", "f", ".", "prev", "=", "null", ";", "f", ".", "next", "=", "null", ";", "f", "=", "n", ";", "}", "first", "=", "last", "=", "null", ";", "count", "=", "0", ";", "notFull", ".", "signalAll", "(", ")", ";", "}", "finally", "{", "lock", ".", "unlock", "(", ")", ";", "}", "}" ]
Atomically removes all of the elements from this deque. The deque will be empty after this call returns.
[ "Atomically", "removes", "all", "of", "the", "elements", "from", "this", "deque", ".", "The", "deque", "will", "be", "empty", "after", "this", "call", "returns", "." ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/core/assist/deque/LinkedBlockingDeque.java#L968-L985
15,055
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/cache/disc/impl/ext/DiskLruCache.java
DiskLruCache.processJournal
private void processJournal() throws IOException { deleteIfExists(journalFileTmp); for (Iterator<Entry> i = lruEntries.values().iterator(); i.hasNext(); ) { Entry entry = i.next(); if (entry.currentEditor == null) { for (int t = 0; t < valueCount; t++) { size += entry.lengths[t]; fileCount++; } } else { entry.currentEditor = null; for (int t = 0; t < valueCount; t++) { deleteIfExists(entry.getCleanFile(t)); deleteIfExists(entry.getDirtyFile(t)); } i.remove(); } } }
java
private void processJournal() throws IOException { deleteIfExists(journalFileTmp); for (Iterator<Entry> i = lruEntries.values().iterator(); i.hasNext(); ) { Entry entry = i.next(); if (entry.currentEditor == null) { for (int t = 0; t < valueCount; t++) { size += entry.lengths[t]; fileCount++; } } else { entry.currentEditor = null; for (int t = 0; t < valueCount; t++) { deleteIfExists(entry.getCleanFile(t)); deleteIfExists(entry.getDirtyFile(t)); } i.remove(); } } }
[ "private", "void", "processJournal", "(", ")", "throws", "IOException", "{", "deleteIfExists", "(", "journalFileTmp", ")", ";", "for", "(", "Iterator", "<", "Entry", ">", "i", "=", "lruEntries", ".", "values", "(", ")", ".", "iterator", "(", ")", ";", "i", ".", "hasNext", "(", ")", ";", ")", "{", "Entry", "entry", "=", "i", ".", "next", "(", ")", ";", "if", "(", "entry", ".", "currentEditor", "==", "null", ")", "{", "for", "(", "int", "t", "=", "0", ";", "t", "<", "valueCount", ";", "t", "++", ")", "{", "size", "+=", "entry", ".", "lengths", "[", "t", "]", ";", "fileCount", "++", ";", "}", "}", "else", "{", "entry", ".", "currentEditor", "=", "null", ";", "for", "(", "int", "t", "=", "0", ";", "t", "<", "valueCount", ";", "t", "++", ")", "{", "deleteIfExists", "(", "entry", ".", "getCleanFile", "(", "t", ")", ")", ";", "deleteIfExists", "(", "entry", ".", "getDirtyFile", "(", "t", ")", ")", ";", "}", "i", ".", "remove", "(", ")", ";", "}", "}", "}" ]
Computes the initial size and collects garbage as a part of opening the cache. Dirty entries are assumed to be inconsistent and will be deleted.
[ "Computes", "the", "initial", "size", "and", "collects", "garbage", "as", "a", "part", "of", "opening", "the", "cache", ".", "Dirty", "entries", "are", "assumed", "to", "be", "inconsistent", "and", "will", "be", "deleted", "." ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/cache/disc/impl/ext/DiskLruCache.java#L329-L347
15,056
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/cache/disc/impl/ext/DiskLruCache.java
DiskLruCache.rebuildJournal
private synchronized void rebuildJournal() throws IOException { if (journalWriter != null) { journalWriter.close(); } Writer writer = new BufferedWriter( new OutputStreamWriter(new FileOutputStream(journalFileTmp), Util.US_ASCII)); try { writer.write(MAGIC); writer.write("\n"); writer.write(VERSION_1); writer.write("\n"); writer.write(Integer.toString(appVersion)); writer.write("\n"); writer.write(Integer.toString(valueCount)); writer.write("\n"); writer.write("\n"); for (Entry entry : lruEntries.values()) { if (entry.currentEditor != null) { writer.write(DIRTY + ' ' + entry.key + '\n'); } else { writer.write(CLEAN + ' ' + entry.key + entry.getLengths() + '\n'); } } } finally { writer.close(); } if (journalFile.exists()) { renameTo(journalFile, journalFileBackup, true); } renameTo(journalFileTmp, journalFile, false); journalFileBackup.delete(); journalWriter = new BufferedWriter( new OutputStreamWriter(new FileOutputStream(journalFile, true), Util.US_ASCII)); }
java
private synchronized void rebuildJournal() throws IOException { if (journalWriter != null) { journalWriter.close(); } Writer writer = new BufferedWriter( new OutputStreamWriter(new FileOutputStream(journalFileTmp), Util.US_ASCII)); try { writer.write(MAGIC); writer.write("\n"); writer.write(VERSION_1); writer.write("\n"); writer.write(Integer.toString(appVersion)); writer.write("\n"); writer.write(Integer.toString(valueCount)); writer.write("\n"); writer.write("\n"); for (Entry entry : lruEntries.values()) { if (entry.currentEditor != null) { writer.write(DIRTY + ' ' + entry.key + '\n'); } else { writer.write(CLEAN + ' ' + entry.key + entry.getLengths() + '\n'); } } } finally { writer.close(); } if (journalFile.exists()) { renameTo(journalFile, journalFileBackup, true); } renameTo(journalFileTmp, journalFile, false); journalFileBackup.delete(); journalWriter = new BufferedWriter( new OutputStreamWriter(new FileOutputStream(journalFile, true), Util.US_ASCII)); }
[ "private", "synchronized", "void", "rebuildJournal", "(", ")", "throws", "IOException", "{", "if", "(", "journalWriter", "!=", "null", ")", "{", "journalWriter", ".", "close", "(", ")", ";", "}", "Writer", "writer", "=", "new", "BufferedWriter", "(", "new", "OutputStreamWriter", "(", "new", "FileOutputStream", "(", "journalFileTmp", ")", ",", "Util", ".", "US_ASCII", ")", ")", ";", "try", "{", "writer", ".", "write", "(", "MAGIC", ")", ";", "writer", ".", "write", "(", "\"\\n\"", ")", ";", "writer", ".", "write", "(", "VERSION_1", ")", ";", "writer", ".", "write", "(", "\"\\n\"", ")", ";", "writer", ".", "write", "(", "Integer", ".", "toString", "(", "appVersion", ")", ")", ";", "writer", ".", "write", "(", "\"\\n\"", ")", ";", "writer", ".", "write", "(", "Integer", ".", "toString", "(", "valueCount", ")", ")", ";", "writer", ".", "write", "(", "\"\\n\"", ")", ";", "writer", ".", "write", "(", "\"\\n\"", ")", ";", "for", "(", "Entry", "entry", ":", "lruEntries", ".", "values", "(", ")", ")", "{", "if", "(", "entry", ".", "currentEditor", "!=", "null", ")", "{", "writer", ".", "write", "(", "DIRTY", "+", "'", "'", "+", "entry", ".", "key", "+", "'", "'", ")", ";", "}", "else", "{", "writer", ".", "write", "(", "CLEAN", "+", "'", "'", "+", "entry", ".", "key", "+", "entry", ".", "getLengths", "(", ")", "+", "'", "'", ")", ";", "}", "}", "}", "finally", "{", "writer", ".", "close", "(", ")", ";", "}", "if", "(", "journalFile", ".", "exists", "(", ")", ")", "{", "renameTo", "(", "journalFile", ",", "journalFileBackup", ",", "true", ")", ";", "}", "renameTo", "(", "journalFileTmp", ",", "journalFile", ",", "false", ")", ";", "journalFileBackup", ".", "delete", "(", ")", ";", "journalWriter", "=", "new", "BufferedWriter", "(", "new", "OutputStreamWriter", "(", "new", "FileOutputStream", "(", "journalFile", ",", "true", ")", ",", "Util", ".", "US_ASCII", ")", ")", ";", "}" ]
Creates a new journal that omits redundant information. This replaces the current journal if it exists.
[ "Creates", "a", "new", "journal", "that", "omits", "redundant", "information", ".", "This", "replaces", "the", "current", "journal", "if", "it", "exists", "." ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/cache/disc/impl/ext/DiskLruCache.java#L353-L390
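Concretely, the header is five lines (magic, journal version, app version, value count, blank line) followed by one state line per entry. Assuming the constants of the upstream DiskLruCache this class derives from (magic string "libcore.io.DiskLruCache", version "1"), a rebuilt journal for appVersion 100 with one value per entry, one clean entry and one entry mid-edit, would look roughly like:

libcore.io.DiskLruCache
1
100
1

CLEAN 3400330d1dfc7f3f7f4b8d4d803dfcf6 832
DIRTY 335c4c6028171cfddfbaae1a9c313c52

Because the rebuild writes to journalFileTmp and swaps it in with renames, a crash mid-rebuild leaves the previous journal (or its backup) intact.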
15,057
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/core/DefaultConfigurationFactory.java
DefaultConfigurationFactory.createExecutor
public static Executor createExecutor(int threadPoolSize, int threadPriority, QueueProcessingType tasksProcessingType) { boolean lifo = tasksProcessingType == QueueProcessingType.LIFO; BlockingQueue<Runnable> taskQueue = lifo ? new LIFOLinkedBlockingDeque<Runnable>() : new LinkedBlockingQueue<Runnable>(); return new ThreadPoolExecutor(threadPoolSize, threadPoolSize, 0L, TimeUnit.MILLISECONDS, taskQueue, createThreadFactory(threadPriority, "uil-pool-")); }
java
public static Executor createExecutor(int threadPoolSize, int threadPriority, QueueProcessingType tasksProcessingType) { boolean lifo = tasksProcessingType == QueueProcessingType.LIFO; BlockingQueue<Runnable> taskQueue = lifo ? new LIFOLinkedBlockingDeque<Runnable>() : new LinkedBlockingQueue<Runnable>(); return new ThreadPoolExecutor(threadPoolSize, threadPoolSize, 0L, TimeUnit.MILLISECONDS, taskQueue, createThreadFactory(threadPriority, "uil-pool-")); }
[ "public", "static", "Executor", "createExecutor", "(", "int", "threadPoolSize", ",", "int", "threadPriority", ",", "QueueProcessingType", "tasksProcessingType", ")", "{", "boolean", "lifo", "=", "tasksProcessingType", "==", "QueueProcessingType", ".", "LIFO", ";", "BlockingQueue", "<", "Runnable", ">", "taskQueue", "=", "lifo", "?", "new", "LIFOLinkedBlockingDeque", "<", "Runnable", ">", "(", ")", ":", "new", "LinkedBlockingQueue", "<", "Runnable", ">", "(", ")", ";", "return", "new", "ThreadPoolExecutor", "(", "threadPoolSize", ",", "threadPoolSize", ",", "0L", ",", "TimeUnit", ".", "MILLISECONDS", ",", "taskQueue", ",", "createThreadFactory", "(", "threadPriority", ",", "\"uil-pool-\"", ")", ")", ";", "}" ]
Creates default implementation of task executor
[ "Creates", "default", "implementation", "of", "task", "executor" ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/core/DefaultConfigurationFactory.java#L61-L68
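Both branches produce a fixed-size pool; only the work queue differs, and a LIFO queue makes the most recently submitted task run first, which suits an image list being scrolled. A self-contained approximation using only the JDK, with a LinkedBlockingDeque subclass standing in for UIL's LIFOLinkedBlockingDeque (an assumption about its mechanism, not a copy of it):

import java.util.concurrent.*;

public class LifoExecutorSketch {
    // Newest submissions jump to the head of the queue, so they are taken first.
    static class LifoDeque<T> extends LinkedBlockingDeque<T> {
        @Override
        public boolean offer(T t) {
            return offerFirst(t);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Runnable> queue = new LifoDeque<>();
        ExecutorService pool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, queue);
        for (int i = 0; i < 5; i++) {
            final int n = i;
            pool.execute(() -> System.out.println("task " + n));
        }
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
        // With the single worker busy on task 0, tasks 1..4 typically print in reverse order.
    }
}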
15,058
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/core/DefaultConfigurationFactory.java
DefaultConfigurationFactory.createReserveDiskCacheDir
private static File createReserveDiskCacheDir(Context context) { File cacheDir = StorageUtils.getCacheDirectory(context, false); File individualDir = new File(cacheDir, "uil-images"); if (individualDir.exists() || individualDir.mkdir()) { cacheDir = individualDir; } return cacheDir; }
java
private static File createReserveDiskCacheDir(Context context) { File cacheDir = StorageUtils.getCacheDirectory(context, false); File individualDir = new File(cacheDir, "uil-images"); if (individualDir.exists() || individualDir.mkdir()) { cacheDir = individualDir; } return cacheDir; }
[ "private", "static", "File", "createReserveDiskCacheDir", "(", "Context", "context", ")", "{", "File", "cacheDir", "=", "StorageUtils", ".", "getCacheDirectory", "(", "context", ",", "false", ")", ";", "File", "individualDir", "=", "new", "File", "(", "cacheDir", ",", "\"uil-images\"", ")", ";", "if", "(", "individualDir", ".", "exists", "(", ")", "||", "individualDir", ".", "mkdir", "(", ")", ")", "{", "cacheDir", "=", "individualDir", ";", "}", "return", "cacheDir", ";", "}" ]
Creates reserve disk cache folder which will be used if primary disk cache folder becomes unavailable
[ "Creates", "reserve", "disk", "cache", "folder", "which", "will", "be", "used", "if", "primary", "disk", "cache", "folder", "becomes", "unavailable" ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/core/DefaultConfigurationFactory.java#L101-L108
15,059
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/cache/disc/naming/Md5FileNameGenerator.java
Md5FileNameGenerator.generate
@Override public String generate(String imageUri) { byte[] md5 = getMD5(imageUri.getBytes()); BigInteger bi = new BigInteger(md5).abs(); return bi.toString(RADIX); }
java
@Override public String generate(String imageUri) { byte[] md5 = getMD5(imageUri.getBytes()); BigInteger bi = new BigInteger(md5).abs(); return bi.toString(RADIX); }
[ "@", "Override", "public", "String", "generate", "(", "String", "imageUri", ")", "{", "byte", "[", "]", "md5", "=", "getMD5", "(", "imageUri", ".", "getBytes", "(", ")", ")", ";", "BigInteger", "bi", "=", "new", "BigInteger", "(", "md5", ")", ".", "abs", "(", ")", ";", "return", "bi", ".", "toString", "(", "RADIX", ")", ";", "}" ]
10 digits + 26 letters
[ "10", "digits", "+", "26", "letters" ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/cache/disc/naming/Md5FileNameGenerator.java#L35-L40
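The "10 digits + 26 letters" note refers to RADIX being 36, so the digest is rendered with 0-9 and a-z and stays filesystem-safe. A self-contained equivalent using only the JDK (the RADIX value is assumed from that comment):

import java.math.BigInteger;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class Md5FileNameSketch {
    private static final int RADIX = 10 + 26; // base 36: digits plus lowercase letters

    static String generate(String imageUri) {
        try {
            byte[] md5 = MessageDigest.getInstance("MD5").digest(imageUri.getBytes());
            return new BigInteger(md5).abs().toString(RADIX);
        } catch (NoSuchAlgorithmException e) {
            throw new IllegalStateException(e); // MD5 is available on every conforming JVM
        }
    }

    public static void main(String[] args) {
        System.out.println(generate("https://example.com/image.png"));
    }
}

Note that abs() folds the sign, so two digests differing only in sign would map to the same file name; with MD5 that collision is vanishingly unlikely in practice.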
15,060
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/utils/StorageUtils.java
StorageUtils.getOwnCacheDirectory
public static File getOwnCacheDirectory(Context context, String cacheDir) { File appCacheDir = null; if (MEDIA_MOUNTED.equals(Environment.getExternalStorageState()) && hasExternalStoragePermission(context)) { appCacheDir = new File(Environment.getExternalStorageDirectory(), cacheDir); } if (appCacheDir == null || (!appCacheDir.exists() && !appCacheDir.mkdirs())) { appCacheDir = context.getCacheDir(); } return appCacheDir; }
java
public static File getOwnCacheDirectory(Context context, String cacheDir) { File appCacheDir = null; if (MEDIA_MOUNTED.equals(Environment.getExternalStorageState()) && hasExternalStoragePermission(context)) { appCacheDir = new File(Environment.getExternalStorageDirectory(), cacheDir); } if (appCacheDir == null || (!appCacheDir.exists() && !appCacheDir.mkdirs())) { appCacheDir = context.getCacheDir(); } return appCacheDir; }
[ "public", "static", "File", "getOwnCacheDirectory", "(", "Context", "context", ",", "String", "cacheDir", ")", "{", "File", "appCacheDir", "=", "null", ";", "if", "(", "MEDIA_MOUNTED", ".", "equals", "(", "Environment", ".", "getExternalStorageState", "(", ")", ")", "&&", "hasExternalStoragePermission", "(", "context", ")", ")", "{", "appCacheDir", "=", "new", "File", "(", "Environment", ".", "getExternalStorageDirectory", "(", ")", ",", "cacheDir", ")", ";", "}", "if", "(", "appCacheDir", "==", "null", "||", "(", "!", "appCacheDir", ".", "exists", "(", ")", "&&", "!", "appCacheDir", ".", "mkdirs", "(", ")", ")", ")", "{", "appCacheDir", "=", "context", ".", "getCacheDir", "(", ")", ";", "}", "return", "appCacheDir", ";", "}" ]
Returns specified application cache directory. Cache directory will be created on SD card by defined path if card is mounted and app has appropriate permission. Else - Android defines cache directory on device's file system. @param context Application context @param cacheDir Cache directory path (e.g.: "AppCacheDir", "AppDir/cache/images") @return Cache {@link File directory}
[ "Returns", "specified", "application", "cache", "directory", ".", "Cache", "directory", "will", "be", "created", "on", "SD", "card", "by", "defined", "path", "if", "card", "is", "mounted", "and", "app", "has", "appropriate", "permission", ".", "Else", "-", "Android", "defines", "cache", "directory", "on", "device", "s", "file", "system", "." ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/utils/StorageUtils.java#L130-L139
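The shape of the logic, prefer a location if it is available and creatable, otherwise fall back to one that always works, is reusable outside Android. A plain-JDK sketch with illustrative paths standing in for the SD card and the internal cache dir:

import java.io.File;

public class CacheDirFallbackSketch {
    static File getCacheDirectory(File preferred, File fallback) {
        // Use the preferred directory only if it already exists or can be created.
        if (preferred != null && (preferred.exists() || preferred.mkdirs())) {
            return preferred;
        }
        return fallback; // e.g. Context#getCacheDir() on Android
    }

    public static void main(String[] args) {
        File tmp = new File(System.getProperty("java.io.tmpdir"));
        File preferred = new File(tmp, "AppDir/cache/images");
        File fallback = new File(tmp, "fallback-cache");
        System.out.println(getCacheDirectory(preferred, fallback));
    }
}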
15,061
nostra13/Android-Universal-Image-Loader
library/src/main/java/com/nostra13/universalimageloader/core/LoadAndDisplayImageTask.java
LoadAndDisplayImageTask.resizeAndSaveImage
private boolean resizeAndSaveImage(int maxWidth, int maxHeight) throws IOException { // Decode image file, compress and re-save it boolean saved = false; File targetFile = configuration.diskCache.get(uri); if (targetFile != null && targetFile.exists()) { ImageSize targetImageSize = new ImageSize(maxWidth, maxHeight); DisplayImageOptions specialOptions = new DisplayImageOptions.Builder().cloneFrom(options) .imageScaleType(ImageScaleType.IN_SAMPLE_INT).build(); ImageDecodingInfo decodingInfo = new ImageDecodingInfo(memoryCacheKey, Scheme.FILE.wrap(targetFile.getAbsolutePath()), uri, targetImageSize, ViewScaleType.FIT_INSIDE, getDownloader(), specialOptions); Bitmap bmp = decoder.decode(decodingInfo); if (bmp != null && configuration.processorForDiskCache != null) { L.d(LOG_PROCESS_IMAGE_BEFORE_CACHE_ON_DISK, memoryCacheKey); bmp = configuration.processorForDiskCache.process(bmp); if (bmp == null) { L.e(ERROR_PROCESSOR_FOR_DISK_CACHE_NULL, memoryCacheKey); } } if (bmp != null) { saved = configuration.diskCache.save(uri, bmp); bmp.recycle(); } } return saved; }
java
private boolean resizeAndSaveImage(int maxWidth, int maxHeight) throws IOException { // Decode image file, compress and re-save it boolean saved = false; File targetFile = configuration.diskCache.get(uri); if (targetFile != null && targetFile.exists()) { ImageSize targetImageSize = new ImageSize(maxWidth, maxHeight); DisplayImageOptions specialOptions = new DisplayImageOptions.Builder().cloneFrom(options) .imageScaleType(ImageScaleType.IN_SAMPLE_INT).build(); ImageDecodingInfo decodingInfo = new ImageDecodingInfo(memoryCacheKey, Scheme.FILE.wrap(targetFile.getAbsolutePath()), uri, targetImageSize, ViewScaleType.FIT_INSIDE, getDownloader(), specialOptions); Bitmap bmp = decoder.decode(decodingInfo); if (bmp != null && configuration.processorForDiskCache != null) { L.d(LOG_PROCESS_IMAGE_BEFORE_CACHE_ON_DISK, memoryCacheKey); bmp = configuration.processorForDiskCache.process(bmp); if (bmp == null) { L.e(ERROR_PROCESSOR_FOR_DISK_CACHE_NULL, memoryCacheKey); } } if (bmp != null) { saved = configuration.diskCache.save(uri, bmp); bmp.recycle(); } } return saved; }
[ "private", "boolean", "resizeAndSaveImage", "(", "int", "maxWidth", ",", "int", "maxHeight", ")", "throws", "IOException", "{", "// Decode image file, compress and re-save it", "boolean", "saved", "=", "false", ";", "File", "targetFile", "=", "configuration", ".", "diskCache", ".", "get", "(", "uri", ")", ";", "if", "(", "targetFile", "!=", "null", "&&", "targetFile", ".", "exists", "(", ")", ")", "{", "ImageSize", "targetImageSize", "=", "new", "ImageSize", "(", "maxWidth", ",", "maxHeight", ")", ";", "DisplayImageOptions", "specialOptions", "=", "new", "DisplayImageOptions", ".", "Builder", "(", ")", ".", "cloneFrom", "(", "options", ")", ".", "imageScaleType", "(", "ImageScaleType", ".", "IN_SAMPLE_INT", ")", ".", "build", "(", ")", ";", "ImageDecodingInfo", "decodingInfo", "=", "new", "ImageDecodingInfo", "(", "memoryCacheKey", ",", "Scheme", ".", "FILE", ".", "wrap", "(", "targetFile", ".", "getAbsolutePath", "(", ")", ")", ",", "uri", ",", "targetImageSize", ",", "ViewScaleType", ".", "FIT_INSIDE", ",", "getDownloader", "(", ")", ",", "specialOptions", ")", ";", "Bitmap", "bmp", "=", "decoder", ".", "decode", "(", "decodingInfo", ")", ";", "if", "(", "bmp", "!=", "null", "&&", "configuration", ".", "processorForDiskCache", "!=", "null", ")", "{", "L", ".", "d", "(", "LOG_PROCESS_IMAGE_BEFORE_CACHE_ON_DISK", ",", "memoryCacheKey", ")", ";", "bmp", "=", "configuration", ".", "processorForDiskCache", ".", "process", "(", "bmp", ")", ";", "if", "(", "bmp", "==", "null", ")", "{", "L", ".", "e", "(", "ERROR_PROCESSOR_FOR_DISK_CACHE_NULL", ",", "memoryCacheKey", ")", ";", "}", "}", "if", "(", "bmp", "!=", "null", ")", "{", "saved", "=", "configuration", ".", "diskCache", ".", "save", "(", "uri", ",", "bmp", ")", ";", "bmp", ".", "recycle", "(", ")", ";", "}", "}", "return", "saved", ";", "}" ]
Decodes image file into Bitmap, resize it and save it back
[ "Decodes", "image", "file", "into", "Bitmap", "resize", "it", "and", "save", "it", "back" ]
fc3c5f6779bb4f702e233653b61bd9d559e345cc
https://github.com/nostra13/Android-Universal-Image-Loader/blob/fc3c5f6779bb4f702e233653b61bd9d559e345cc/library/src/main/java/com/nostra13/universalimageloader/core/LoadAndDisplayImageTask.java#L305-L330
15,062
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/spi/impl/merge/AbstractMergeRunnable.java
AbstractMergeRunnable.canMergeLegacy
private boolean canMergeLegacy(String dataStructureName) { Object mergePolicy = getMergePolicy(dataStructureName); InMemoryFormat inMemoryFormat = getInMemoryFormat(dataStructureName); return checkMergePolicySupportsInMemoryFormat(dataStructureName, mergePolicy, inMemoryFormat, false, logger); }
java
private boolean canMergeLegacy(String dataStructureName) { Object mergePolicy = getMergePolicy(dataStructureName); InMemoryFormat inMemoryFormat = getInMemoryFormat(dataStructureName); return checkMergePolicySupportsInMemoryFormat(dataStructureName, mergePolicy, inMemoryFormat, false, logger); }
[ "private", "boolean", "canMergeLegacy", "(", "String", "dataStructureName", ")", "{", "Object", "mergePolicy", "=", "getMergePolicy", "(", "dataStructureName", ")", ";", "InMemoryFormat", "inMemoryFormat", "=", "getInMemoryFormat", "(", "dataStructureName", ")", ";", "return", "checkMergePolicySupportsInMemoryFormat", "(", "dataStructureName", ",", "mergePolicy", ",", "inMemoryFormat", ",", "false", ",", "logger", ")", ";", "}" ]
Check if data structures in-memory-format appropriate to merge with legacy policies
[ "Check", "if", "data", "structures", "in", "-", "memory", "-", "format", "appropriate", "to", "merge", "with", "legacy", "policies" ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/spi/impl/merge/AbstractMergeRunnable.java#L197-L203
15,063
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/scheduler/CoalescingDelayedTrigger.java
CoalescingDelayedTrigger.executeWithDelay
public void executeWithDelay() { long now = Clock.currentTimeMillis(); if (delay + now > hardLimit) { scheduleNewExecution(now); } else if (!tryPostponeExecution()) { scheduleNewExecution(now); } }
java
public void executeWithDelay() { long now = Clock.currentTimeMillis(); if (delay + now > hardLimit) { scheduleNewExecution(now); } else if (!tryPostponeExecution()) { scheduleNewExecution(now); } }
[ "public", "void", "executeWithDelay", "(", ")", "{", "long", "now", "=", "Clock", ".", "currentTimeMillis", "(", ")", ";", "if", "(", "delay", "+", "now", ">", "hardLimit", ")", "{", "scheduleNewExecution", "(", "now", ")", ";", "}", "else", "if", "(", "!", "tryPostponeExecution", "(", ")", ")", "{", "scheduleNewExecution", "(", "now", ")", ";", "}", "}" ]
Invokes delayed execution, postponing an already scheduled run when the hard limit allows it.
[ "Invokes", "delayed", "execution", "postponing", "an", "already", "scheduled", "run", "when", "the", "hard", "limit", "allows", "it", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/scheduler/CoalescingDelayedTrigger.java#L75-L82
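The contract: every call pushes the execution out by delay, but postponing stops once that would cross hardLimit, so a steady stream of calls cannot starve the task forever. A minimal sketch of the same coalescing idea on top of ScheduledExecutorService; the names and the cancellation policy are simplified assumptions, not the Hazelcast implementation.

import java.util.concurrent.*;

public class CoalescingTriggerSketch {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final long delayMs;
    private final long maxDelayMs;
    private final Runnable task;
    private ScheduledFuture<?> pending; // guarded by synchronized
    private long hardLimit;             // latest point the task may be pushed to

    CoalescingTriggerSketch(long delayMs, long maxDelayMs, Runnable task) {
        this.delayMs = delayMs;
        this.maxDelayMs = maxDelayMs;
        this.task = task;
    }

    synchronized void executeWithDelay() {
        long now = System.currentTimeMillis();
        boolean postponed = pending != null && !pending.isDone()
                && now + delayMs <= hardLimit && pending.cancel(false);
        if (!postponed) {
            hardLimit = now + maxDelayMs; // start a new execution window
        }
        pending = scheduler.schedule(task, delayMs, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        CoalescingTriggerSketch trigger = new CoalescingTriggerSketch(200, 500,
                () -> System.out.println("flushed at " + System.currentTimeMillis()));
        for (int i = 0; i < 5; i++) { // rapid calls coalesce into one or two executions
            trigger.executeWithDelay();
            Thread.sleep(50);
        }
        Thread.sleep(600);
        trigger.scheduler.shutdown();
    }
}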
15,064
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/map/impl/EntryViews.java
EntryViews.createNullEntryView
public static <K, V> EntryView<K, V> createNullEntryView(K key) { return new NullEntryView<>(key); }
java
public static <K, V> EntryView<K, V> createNullEntryView(K key) { return new NullEntryView<>(key); }
[ "public", "static", "<", "K", ",", "V", ">", "EntryView", "<", "K", ",", "V", ">", "createNullEntryView", "(", "K", "key", ")", "{", "return", "new", "NullEntryView", "<>", "(", "key", ")", ";", "}" ]
Creates a null entry view that has only key and no value. @param key the key object which will be wrapped in {@link com.hazelcast.core.EntryView}. @param <K> the type of key. @param <V> the type of value. @return returns null entry view.
[ "Creates", "a", "null", "entry", "view", "that", "has", "only", "key", "and", "no", "value", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/map/impl/EntryViews.java#L41-L43
15,065
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/map/impl/mapstore/writebehind/CoalescedWriteBehindQueue.java
CoalescedWriteBehindQueue.calculateStoreTime
private void calculateStoreTime(DelayedEntry delayedEntry) { Data key = (Data) delayedEntry.getKey(); DelayedEntry currentEntry = map.get(key); if (currentEntry != null) { long currentStoreTime = currentEntry.getStoreTime(); delayedEntry.setStoreTime(currentStoreTime); } }
java
private void calculateStoreTime(DelayedEntry delayedEntry) { Data key = (Data) delayedEntry.getKey(); DelayedEntry currentEntry = map.get(key); if (currentEntry != null) { long currentStoreTime = currentEntry.getStoreTime(); delayedEntry.setStoreTime(currentStoreTime); } }
[ "private", "void", "calculateStoreTime", "(", "DelayedEntry", "delayedEntry", ")", "{", "Data", "key", "=", "(", "Data", ")", "delayedEntry", ".", "getKey", "(", ")", ";", "DelayedEntry", "currentEntry", "=", "map", ".", "get", "(", "key", ")", ";", "if", "(", "currentEntry", "!=", "null", ")", "{", "long", "currentStoreTime", "=", "currentEntry", ".", "getStoreTime", "(", ")", ";", "delayedEntry", ".", "setStoreTime", "(", "currentStoreTime", ")", ";", "}", "}" ]
If this is an existing key in this queue, use previously set store time; since we do not want to shift store time of an existing key on every update.
[ "If", "this", "is", "an", "existing", "key", "in", "this", "queue", "use", "previously", "set", "store", "time", ";", "since", "we", "do", "not", "want", "to", "shift", "store", "time", "of", "an", "existing", "key", "on", "every", "update", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/map/impl/mapstore/writebehind/CoalescedWriteBehindQueue.java#L154-L161
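Because the queue coalesces per key, carrying the old entry's store time forward keeps a frequently updated key from having its write-behind deadline reset on every put. A minimal sketch of that bookkeeping with a plain HashMap; the types are simplified stand-ins, not the Hazelcast ones.

import java.util.HashMap;
import java.util.Map;

public class CoalescedStoreTimeSketch {
    static final class DelayedEntry {
        final String value;
        long storeTime;
        DelayedEntry(String value, long storeTime) { this.value = value; this.storeTime = storeTime; }
    }

    private final Map<String, DelayedEntry> map = new HashMap<>();

    void offer(String key, DelayedEntry entry) {
        DelayedEntry current = map.get(key);
        if (current != null) {
            entry.storeTime = current.storeTime; // keep the original deadline
        }
        map.put(key, entry);
    }

    public static void main(String[] args) {
        CoalescedStoreTimeSketch queue = new CoalescedStoreTimeSketch();
        queue.offer("k", new DelayedEntry("v1", 1000));
        queue.offer("k", new DelayedEntry("v2", 9999)); // deadline stays 1000
        DelayedEntry e = queue.map.get("k");
        System.out.println(e.value + " @ " + e.storeTime); // v2 @ 1000
    }
}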
15,066
hazelcast/hazelcast
hazelcast-client/src/main/java/com/hazelcast/client/proxy/ClientExecutorServiceProxy.java
ClientExecutorServiceProxy.submitToMember
@Override public <T> Future<T> submitToMember(Callable<T> task, Member member) { final Address memberAddress = getMemberAddress(member); return submitToTargetInternal(task, memberAddress, null, false); }
java
@Override public <T> Future<T> submitToMember(Callable<T> task, Member member) { final Address memberAddress = getMemberAddress(member); return submitToTargetInternal(task, memberAddress, null, false); }
[ "@", "Override", "public", "<", "T", ">", "Future", "<", "T", ">", "submitToMember", "(", "Callable", "<", "T", ">", "task", ",", "Member", "member", ")", "{", "final", "Address", "memberAddress", "=", "getMemberAddress", "(", "member", ")", ";", "return", "submitToTargetInternal", "(", "task", ",", "memberAddress", ",", "null", ",", "false", ")", ";", "}" ]
Submits the task to the specified member.
[ "Submits", "the", "task", "to", "the", "specified", "member", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast-client/src/main/java/com/hazelcast/client/proxy/ClientExecutorServiceProxy.java#L147-L151
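Callers reach this through the public IExecutorService API. A hedged usage sketch against the Hazelcast 3.x client (package locations moved in later releases); the executor name, member choice and task are illustrative, and the Callable must be Serializable to travel to the member.

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IExecutorService;
import com.hazelcast.core.Member;

import java.io.Serializable;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;

public class SubmitToMemberSketch {
    static class Ping implements Callable<String>, Serializable {
        @Override
        public String call() {
            return "pong";
        }
    }

    public static void main(String[] args) throws Exception {
        HazelcastInstance client = HazelcastClient.newHazelcastClient();
        IExecutorService executor = client.getExecutorService("default");
        Member member = client.getCluster().getMembers().iterator().next();
        Future<String> result = executor.submitToMember(new Ping(), member);
        System.out.println(result.get());
        client.shutdown();
    }
}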
15,067
hazelcast/hazelcast
hazelcast-client/src/main/java/com/hazelcast/client/proxy/ClientExecutorServiceProxy.java
ClientExecutorServiceProxy.submitToMember
@Override public void submitToMember(Runnable command, Member member, ExecutionCallback callback) { Callable<?> callable = createRunnableAdapter(command); submitToMember(callable, member, callback); }
java
@Override public void submitToMember(Runnable command, Member member, ExecutionCallback callback) { Callable<?> callable = createRunnableAdapter(command); submitToMember(callable, member, callback); }
[ "@", "Override", "public", "void", "submitToMember", "(", "Runnable", "command", ",", "Member", "member", ",", "ExecutionCallback", "callback", ")", "{", "Callable", "<", "?", ">", "callable", "=", "createRunnableAdapter", "(", "command", ")", ";", "submitToMember", "(", "callable", ",", "member", ",", "callback", ")", ";", "}" ]
Submits the task to the specified member with an execution callback.
[ "Submits", "the", "task", "to", "the", "specified", "member", "with", "an", "execution", "callback", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast-client/src/main/java/com/hazelcast/client/proxy/ClientExecutorServiceProxy.java#L189-L193
15,068
hazelcast/hazelcast
hazelcast-client/src/main/java/com/hazelcast/client/proxy/ClientExecutorServiceProxy.java
ClientExecutorServiceProxy.submitToKeyOwner
@Override public <T> Future<T> submitToKeyOwner(Callable<T> task, Object key) { return submitToKeyOwnerInternal(task, key, null, false); }
java
@Override public <T> Future<T> submitToKeyOwner(Callable<T> task, Object key) { return submitToKeyOwnerInternal(task, key, null, false); }
[ "@", "Override", "public", "<", "T", ">", "Future", "<", "T", ">", "submitToKeyOwner", "(", "Callable", "<", "T", ">", "task", ",", "Object", "key", ")", "{", "return", "submitToKeyOwnerInternal", "(", "task", ",", "key", ",", "null", ",", "false", ")", ";", "}" ]
Submits the task to the owner of the specified key.
[ "Submits", "the", "task", "to", "the", "owner", "of", "the", "specified", "key", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast-client/src/main/java/com/hazelcast/client/proxy/ClientExecutorServiceProxy.java#L316-L319
15,069
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/ascii/rest/HttpPostCommandProcessor.java
HttpPostCommandProcessor.handleWanSyncMap
private void handleWanSyncMap(HttpPostCommand command) throws UnsupportedEncodingException { String res; final String[] params = decodeParams(command, 3); final String wanRepName = params[0]; final String publisherId = params[1]; final String mapName = params[2]; try { textCommandService.getNode().getNodeEngine().getWanReplicationService().syncMap(wanRepName, publisherId, mapName); res = response(ResponseType.SUCCESS, "message", "Sync initiated"); } catch (Exception ex) { logger.warning("Error occurred while syncing map", ex); res = exceptionResponse(ex); } sendResponse(command, res); }
java
private void handleWanSyncMap(HttpPostCommand command) throws UnsupportedEncodingException { String res; final String[] params = decodeParams(command, 3); final String wanRepName = params[0]; final String publisherId = params[1]; final String mapName = params[2]; try { textCommandService.getNode().getNodeEngine().getWanReplicationService().syncMap(wanRepName, publisherId, mapName); res = response(ResponseType.SUCCESS, "message", "Sync initiated"); } catch (Exception ex) { logger.warning("Error occurred while syncing map", ex); res = exceptionResponse(ex); } sendResponse(command, res); }
[ "private", "void", "handleWanSyncMap", "(", "HttpPostCommand", "command", ")", "throws", "UnsupportedEncodingException", "{", "String", "res", ";", "final", "String", "[", "]", "params", "=", "decodeParams", "(", "command", ",", "3", ")", ";", "final", "String", "wanRepName", "=", "params", "[", "0", "]", ";", "final", "String", "publisherId", "=", "params", "[", "1", "]", ";", "final", "String", "mapName", "=", "params", "[", "2", "]", ";", "try", "{", "textCommandService", ".", "getNode", "(", ")", ".", "getNodeEngine", "(", ")", ".", "getWanReplicationService", "(", ")", ".", "syncMap", "(", "wanRepName", ",", "publisherId", ",", "mapName", ")", ";", "res", "=", "response", "(", "ResponseType", ".", "SUCCESS", ",", "\"message\"", ",", "\"Sync initiated\"", ")", ";", "}", "catch", "(", "Exception", "ex", ")", "{", "logger", ".", "warning", "(", "\"Error occurred while syncing map\"", ",", "ex", ")", ";", "res", "=", "exceptionResponse", "(", "ex", ")", ";", "}", "sendResponse", "(", "command", ",", "res", ")", ";", "}" ]
Initiates a WAN sync for a single map and the wan replication name and publisher ID defined by the command parameters. @param command the HTTP command @throws UnsupportedEncodingException If character encoding needs to be consulted, but named character encoding is not supported
[ "Initiates", "a", "WAN", "sync", "for", "a", "single", "map", "and", "the", "wan", "replication", "name", "and", "publisher", "ID", "defined", "by", "the", "command", "parameters", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/internal/ascii/rest/HttpPostCommandProcessor.java#L409-L423
15,070
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/ascii/rest/HttpPostCommandProcessor.java
HttpPostCommandProcessor.handleWanPausePublisher
private void handleWanPausePublisher(HttpPostCommand command) throws UnsupportedEncodingException { String res; String[] params = decodeParams(command, 2); String wanReplicationName = params[0]; String publisherId = params[1]; WanReplicationService service = textCommandService.getNode().getNodeEngine().getWanReplicationService(); try { service.pause(wanReplicationName, publisherId); res = response(ResponseType.SUCCESS, "message", "WAN publisher paused"); } catch (Exception ex) { logger.warning("Error occurred while pausing WAN publisher", ex); res = exceptionResponse(ex); } sendResponse(command, res); }
java
private void handleWanPausePublisher(HttpPostCommand command) throws UnsupportedEncodingException { String res; String[] params = decodeParams(command, 2); String wanReplicationName = params[0]; String publisherId = params[1]; WanReplicationService service = textCommandService.getNode().getNodeEngine().getWanReplicationService(); try { service.pause(wanReplicationName, publisherId); res = response(ResponseType.SUCCESS, "message", "WAN publisher paused"); } catch (Exception ex) { logger.warning("Error occurred while pausing WAN publisher", ex); res = exceptionResponse(ex); } sendResponse(command, res); }
[ "private", "void", "handleWanPausePublisher", "(", "HttpPostCommand", "command", ")", "throws", "UnsupportedEncodingException", "{", "String", "res", ";", "String", "[", "]", "params", "=", "decodeParams", "(", "command", ",", "2", ")", ";", "String", "wanReplicationName", "=", "params", "[", "0", "]", ";", "String", "publisherId", "=", "params", "[", "1", "]", ";", "WanReplicationService", "service", "=", "textCommandService", ".", "getNode", "(", ")", ".", "getNodeEngine", "(", ")", ".", "getWanReplicationService", "(", ")", ";", "try", "{", "service", ".", "pause", "(", "wanReplicationName", ",", "publisherId", ")", ";", "res", "=", "response", "(", "ResponseType", ".", "SUCCESS", ",", "\"message\"", ",", "\"WAN publisher paused\"", ")", ";", "}", "catch", "(", "Exception", "ex", ")", "{", "logger", ".", "warning", "(", "\"Error occurred while pausing WAN publisher\"", ",", "ex", ")", ";", "res", "=", "exceptionResponse", "(", "ex", ")", ";", "}", "sendResponse", "(", "command", ",", "res", ")", ";", "}" ]
Pauses a WAN publisher on this member only. The publisher is identified by the WAN replication name and publisher ID passed as parameters to the HTTP command. @param command the HTTP command @throws UnsupportedEncodingException If character encoding needs to be consulted, but named character encoding is not supported @see com.hazelcast.config.WanPublisherState#PAUSED
[ "Pauses", "a", "WAN", "publisher", "on", "this", "member", "only", ".", "The", "publisher", "is", "identified", "by", "the", "WAN", "replication", "name", "and", "publisher", "ID", "passed", "as", "parameters", "to", "the", "HTTP", "command", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/internal/ascii/rest/HttpPostCommandProcessor.java#L538-L553
15,071
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/partition/impl/PartitionReplicaManager.java
PartitionReplicaManager.checkAndGetPrimaryReplicaOwner
PartitionReplica checkAndGetPrimaryReplicaOwner(int partitionId, int replicaIndex) { InternalPartitionImpl partition = partitionStateManager.getPartitionImpl(partitionId); PartitionReplica owner = partition.getOwnerReplicaOrNull(); if (owner == null) { logger.info("Sync replica target is null, no need to sync -> partitionId=" + partitionId + ", replicaIndex=" + replicaIndex); return null; } PartitionReplica localReplica = PartitionReplica.from(nodeEngine.getLocalMember()); if (owner.equals(localReplica)) { if (logger.isFinestEnabled()) { logger.finest("This node is now owner of partition, cannot sync replica -> partitionId=" + partitionId + ", replicaIndex=" + replicaIndex + ", partition-info=" + partitionStateManager.getPartitionImpl(partitionId)); } return null; } if (!partition.isOwnerOrBackup(localReplica)) { if (logger.isFinestEnabled()) { logger.finest("This node is not backup replica of partitionId=" + partitionId + ", replicaIndex=" + replicaIndex + " anymore."); } return null; } return owner; }
java
PartitionReplica checkAndGetPrimaryReplicaOwner(int partitionId, int replicaIndex) { InternalPartitionImpl partition = partitionStateManager.getPartitionImpl(partitionId); PartitionReplica owner = partition.getOwnerReplicaOrNull(); if (owner == null) { logger.info("Sync replica target is null, no need to sync -> partitionId=" + partitionId + ", replicaIndex=" + replicaIndex); return null; } PartitionReplica localReplica = PartitionReplica.from(nodeEngine.getLocalMember()); if (owner.equals(localReplica)) { if (logger.isFinestEnabled()) { logger.finest("This node is now owner of partition, cannot sync replica -> partitionId=" + partitionId + ", replicaIndex=" + replicaIndex + ", partition-info=" + partitionStateManager.getPartitionImpl(partitionId)); } return null; } if (!partition.isOwnerOrBackup(localReplica)) { if (logger.isFinestEnabled()) { logger.finest("This node is not backup replica of partitionId=" + partitionId + ", replicaIndex=" + replicaIndex + " anymore."); } return null; } return owner; }
[ "PartitionReplica", "checkAndGetPrimaryReplicaOwner", "(", "int", "partitionId", ",", "int", "replicaIndex", ")", "{", "InternalPartitionImpl", "partition", "=", "partitionStateManager", ".", "getPartitionImpl", "(", "partitionId", ")", ";", "PartitionReplica", "owner", "=", "partition", ".", "getOwnerReplicaOrNull", "(", ")", ";", "if", "(", "owner", "==", "null", ")", "{", "logger", ".", "info", "(", "\"Sync replica target is null, no need to sync -> partitionId=\"", "+", "partitionId", "+", "\", replicaIndex=\"", "+", "replicaIndex", ")", ";", "return", "null", ";", "}", "PartitionReplica", "localReplica", "=", "PartitionReplica", ".", "from", "(", "nodeEngine", ".", "getLocalMember", "(", ")", ")", ";", "if", "(", "owner", ".", "equals", "(", "localReplica", ")", ")", "{", "if", "(", "logger", ".", "isFinestEnabled", "(", ")", ")", "{", "logger", ".", "finest", "(", "\"This node is now owner of partition, cannot sync replica -> partitionId=\"", "+", "partitionId", "+", "\", replicaIndex=\"", "+", "replicaIndex", "+", "\", partition-info=\"", "+", "partitionStateManager", ".", "getPartitionImpl", "(", "partitionId", ")", ")", ";", "}", "return", "null", ";", "}", "if", "(", "!", "partition", ".", "isOwnerOrBackup", "(", "localReplica", ")", ")", "{", "if", "(", "logger", ".", "isFinestEnabled", "(", ")", ")", "{", "logger", ".", "finest", "(", "\"This node is not backup replica of partitionId=\"", "+", "partitionId", "+", "\", replicaIndex=\"", "+", "replicaIndex", "+", "\" anymore.\"", ")", ";", "}", "return", "null", ";", "}", "return", "owner", ";", "}" ]
Checks preconditions for replica sync - if we don't know the owner yet, if this node is the owner or not a replica
[ "Checks", "preconditions", "for", "replica", "sync", "-", "if", "we", "don", "t", "know", "the", "owner", "yet", "if", "this", "node", "is", "the", "owner", "or", "not", "a", "replica" ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/internal/partition/impl/PartitionReplicaManager.java#L160-L187
15,072
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/partition/impl/PartitionReplicaManager.java
PartitionReplicaManager.releaseReplicaSyncPermits
public void releaseReplicaSyncPermits(int permits) { assert permits > 0 : "Invalid permits: " + permits; replicaSyncSemaphore.release(permits); if (logger.isFinestEnabled()) { logger.finest("Released " + permits + " replica sync permits. Available permits: " + replicaSyncSemaphore.availablePermits()); } assert availableReplicaSyncPermits() <= maxParallelReplications : "Number of replica sync permits exceeded the configured number!"; }
java
public void releaseReplicaSyncPermits(int permits) { assert permits > 0 : "Invalid permits: " + permits; replicaSyncSemaphore.release(permits); if (logger.isFinestEnabled()) { logger.finest("Released " + permits + " replica sync permits. Available permits: " + replicaSyncSemaphore.availablePermits()); } assert availableReplicaSyncPermits() <= maxParallelReplications : "Number of replica sync permits exceeded the configured number!"; }
[ "public", "void", "releaseReplicaSyncPermits", "(", "int", "permits", ")", "{", "assert", "permits", ">", "0", ":", "\"Invalid permits: \"", "+", "permits", ";", "replicaSyncSemaphore", ".", "release", "(", "permits", ")", ";", "if", "(", "logger", ".", "isFinestEnabled", "(", ")", ")", "{", "logger", ".", "finest", "(", "\"Released \"", "+", "permits", "+", "\" replica sync permits. Available permits: \"", "+", "replicaSyncSemaphore", ".", "availablePermits", "(", ")", ")", ";", "}", "assert", "availableReplicaSyncPermits", "(", ")", "<=", "maxParallelReplications", ":", "\"Number of replica sync permits exceeded the configured number!\"", ";", "}" ]
Releases the previously acquired permits. @param permits number of permits
[ "Releases", "the", "previously", "acquired", "permits", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/internal/partition/impl/PartitionReplicaManager.java#L408-L417
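The permit scheme is ordinary java.util.concurrent.Semaphore bookkeeping: a bounded number of concurrent replica syncs, acquired before a sync starts and released when it completes. A generic, self-contained sketch of the same throttling pattern; the limit and names are illustrative.

import java.util.concurrent.Semaphore;

public class SyncPermitsSketch {
    private static final int MAX_PARALLEL = 5;
    private final Semaphore permits = new Semaphore(MAX_PARALLEL);

    boolean tryStartSync() {
        return permits.tryAcquire(); // refuse, rather than block, when saturated
    }

    void finishSync(int acquired) {
        permits.release(acquired);
        assert permits.availablePermits() <= MAX_PARALLEL : "more releases than acquires";
    }

    public static void main(String[] args) {
        SyncPermitsSketch sync = new SyncPermitsSketch();
        for (int i = 0; i < 7; i++) {
            System.out.println("sync " + i + " allowed: " + sync.tryStartSync()); // last two: false
        }
        sync.finishSync(1);
        System.out.println("after release: " + sync.tryStartSync()); // true again
    }
}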
15,073
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/serialization/impl/AbstractSerializationService.java
AbstractSerializationService.toData
@Override public final <B extends Data> B toData(Object obj) { return toData(obj, globalPartitioningStrategy); }
java
@Override public final <B extends Data> B toData(Object obj) { return toData(obj, globalPartitioningStrategy); }
[ "@", "Override", "public", "final", "<", "B", "extends", "Data", ">", "B", "toData", "(", "Object", "obj", ")", "{", "return", "toData", "(", "obj", ",", "globalPartitioningStrategy", ")", ";", "}" ]
region Serialization Service
[ "region", "Serialization", "Service" ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/internal/serialization/impl/AbstractSerializationService.java#L104-L107
15,074
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/serialization/impl/AbstractSerializationService.java
AbstractSerializationService.register
public final void register(Class type, Serializer serializer) { if (type == null) { throw new IllegalArgumentException("Class type information is required!"); } if (serializer.getTypeId() <= 0) { throw new IllegalArgumentException( "Type ID must be positive! Current: " + serializer.getTypeId() + ", Serializer: " + serializer); } safeRegister(type, createSerializerAdapter(serializer, this)); }
java
public final void register(Class type, Serializer serializer) { if (type == null) { throw new IllegalArgumentException("Class type information is required!"); } if (serializer.getTypeId() <= 0) { throw new IllegalArgumentException( "Type ID must be positive! Current: " + serializer.getTypeId() + ", Serializer: " + serializer); } safeRegister(type, createSerializerAdapter(serializer, this)); }
[ "public", "final", "void", "register", "(", "Class", "type", ",", "Serializer", "serializer", ")", "{", "if", "(", "type", "==", "null", ")", "{", "throw", "new", "IllegalArgumentException", "(", "\"Class type information is required!\"", ")", ";", "}", "if", "(", "serializer", ".", "getTypeId", "(", ")", "<=", "0", ")", "{", "throw", "new", "IllegalArgumentException", "(", "\"Type ID must be positive! Current: \"", "+", "serializer", ".", "getTypeId", "(", ")", "+", "\", Serializer: \"", "+", "serializer", ")", ";", "}", "safeRegister", "(", "type", ",", "createSerializerAdapter", "(", "serializer", ",", "this", ")", ")", ";", "}" ]
endregion Serialization Service
[ "endregion", "Serialization", "Service" ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/internal/serialization/impl/AbstractSerializationService.java#L358-L367
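A positive type id is required because non-positive ids are reserved for Hazelcast's built-in serializers. A hedged sketch of a custom serializer that would pass both checks, written against the public 3.x StreamSerializer API; the class, the id and the payload are illustrative.

import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.StreamSerializer;

import java.io.IOException;

public class PointSerializer implements StreamSerializer<PointSerializer.Point> {
    public static class Point {
        final int x, y;
        public Point(int x, int y) { this.x = x; this.y = y; }
    }

    @Override
    public int getTypeId() {
        return 1000; // must be > 0; low ids are taken by built-ins
    }

    @Override
    public void write(ObjectDataOutput out, Point p) throws IOException {
        out.writeInt(p.x);
        out.writeInt(p.y);
    }

    @Override
    public Point read(ObjectDataInput in) throws IOException {
        return new Point(in.readInt(), in.readInt());
    }

    @Override
    public void destroy() {
    }
}

In application code such a serializer is normally wired up declaratively, e.g. through SerializationConfig and SerializerConfig, rather than by calling register directly.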
15,075
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/map/impl/query/QueryRunner.java
QueryRunner.runPartitionIndexOrPartitionScanQueryOnGivenOwnedPartition
public Result runPartitionIndexOrPartitionScanQueryOnGivenOwnedPartition(Query query, int partitionId) { MapContainer mapContainer = mapServiceContext.getMapContainer(query.getMapName()); PartitionIdSet partitions = singletonPartitionIdSet(partitionCount, partitionId); // first we optimize the query Predicate predicate = queryOptimizer.optimize(query.getPredicate(), mapContainer.getIndexes(partitionId)); Collection<QueryableEntry> entries = null; Indexes indexes = mapContainer.getIndexes(partitionId); if (indexes != null && !indexes.isGlobal()) { entries = indexes.query(predicate); } Result result; if (entries == null) { result = createResult(query, partitions); partitionScanExecutor.execute(query.getMapName(), predicate, partitions, result); result.completeConstruction(partitions); } else { result = populateNonEmptyResult(query, entries, partitions); } return result; }
java
public Result runPartitionIndexOrPartitionScanQueryOnGivenOwnedPartition(Query query, int partitionId) { MapContainer mapContainer = mapServiceContext.getMapContainer(query.getMapName()); PartitionIdSet partitions = singletonPartitionIdSet(partitionCount, partitionId); // first we optimize the query Predicate predicate = queryOptimizer.optimize(query.getPredicate(), mapContainer.getIndexes(partitionId)); Collection<QueryableEntry> entries = null; Indexes indexes = mapContainer.getIndexes(partitionId); if (indexes != null && !indexes.isGlobal()) { entries = indexes.query(predicate); } Result result; if (entries == null) { result = createResult(query, partitions); partitionScanExecutor.execute(query.getMapName(), predicate, partitions, result); result.completeConstruction(partitions); } else { result = populateNonEmptyResult(query, entries, partitions); } return result; }
[ "public", "Result", "runPartitionIndexOrPartitionScanQueryOnGivenOwnedPartition", "(", "Query", "query", ",", "int", "partitionId", ")", "{", "MapContainer", "mapContainer", "=", "mapServiceContext", ".", "getMapContainer", "(", "query", ".", "getMapName", "(", ")", ")", ";", "PartitionIdSet", "partitions", "=", "singletonPartitionIdSet", "(", "partitionCount", ",", "partitionId", ")", ";", "// first we optimize the query", "Predicate", "predicate", "=", "queryOptimizer", ".", "optimize", "(", "query", ".", "getPredicate", "(", ")", ",", "mapContainer", ".", "getIndexes", "(", "partitionId", ")", ")", ";", "Collection", "<", "QueryableEntry", ">", "entries", "=", "null", ";", "Indexes", "indexes", "=", "mapContainer", ".", "getIndexes", "(", "partitionId", ")", ";", "if", "(", "indexes", "!=", "null", "&&", "!", "indexes", ".", "isGlobal", "(", ")", ")", "{", "entries", "=", "indexes", ".", "query", "(", "predicate", ")", ";", "}", "Result", "result", ";", "if", "(", "entries", "==", "null", ")", "{", "result", "=", "createResult", "(", "query", ",", "partitions", ")", ";", "partitionScanExecutor", ".", "execute", "(", "query", ".", "getMapName", "(", ")", ",", "predicate", ",", "partitions", ",", "result", ")", ";", "result", ".", "completeConstruction", "(", "partitions", ")", ";", "}", "else", "{", "result", "=", "populateNonEmptyResult", "(", "query", ",", "entries", ",", "partitions", ")", ";", "}", "return", "result", ";", "}" ]
Runs an index query or a partition scan on a single owned partition. If the index is global it won't be asked; the partition is scanned instead.
[ "Runs", "an", "index", "query", "or", "a", "partition", "scan", "on", "a", "single", "owned", "partition", ".", "If", "the", "index", "is", "global", "it", "won", "t", "be", "asked", ";", "the", "partition", "is", "scanned", "instead", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/map/impl/query/QueryRunner.java#L180-L203
15,076
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/json/JsonObject.java
JsonObject.get
public JsonValue get(String name) { if (name == null) { throw new NullPointerException("name is null"); } int index = indexOf(name); return index != -1 ? values.get(index) : null; }
java
public JsonValue get(String name) { if (name == null) { throw new NullPointerException("name is null"); } int index = indexOf(name); return index != -1 ? values.get(index) : null; }
[ "public", "JsonValue", "get", "(", "String", "name", ")", "{", "if", "(", "name", "==", "null", ")", "{", "throw", "new", "NullPointerException", "(", "\"name is null\"", ")", ";", "}", "int", "index", "=", "indexOf", "(", "name", ")", ";", "return", "index", "!=", "-", "1", "?", "values", ".", "get", "(", "index", ")", ":", "null", ";", "}" ]
Returns the value of the member with the specified name in this object. If this object contains multiple members with the given name, this method will return the last one. @param name the name of the member whose value is to be returned @return the value of the last member with the specified name, or <code>null</code> if this object does not contain a member with that name
[ "Returns", "the", "value", "of", "the", "member", "with", "the", "specified", "name", "in", "this", "object", ".", "If", "this", "object", "contains", "multiple", "members", "with", "the", "given", "name", "this", "method", "will", "return", "the", "last", "one", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/internal/json/JsonObject.java#L559-L565
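Hazelcast's internal JsonObject is a repackaged minimal-json, so duplicate member names are legal and get returns the last occurrence. A small usage sketch, assuming the minimal-json style chainable add methods:

import com.hazelcast.internal.json.JsonObject;

public class JsonObjectGetSketch {
    public static void main(String[] args) {
        JsonObject object = new JsonObject()
                .add("color", "red")
                .add("color", "blue"); // duplicate names are allowed
        System.out.println(object.get("color"));   // "blue" -- the last member wins
        System.out.println(object.get("missing")); // null
    }
}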
15,077
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/json/JsonObject.java
JsonObject.iterator
public Iterator<Member> iterator() { final Iterator<String> namesIterator = names.iterator(); final Iterator<JsonValue> valuesIterator = values.iterator(); return new Iterator<JsonObject.Member>() { public boolean hasNext() { return namesIterator.hasNext(); } public Member next() { String name = namesIterator.next(); JsonValue value = valuesIterator.next(); return new Member(name, value); } public void remove() { throw new UnsupportedOperationException(); } }; }
java
public Iterator<Member> iterator() { final Iterator<String> namesIterator = names.iterator(); final Iterator<JsonValue> valuesIterator = values.iterator(); return new Iterator<JsonObject.Member>() { public boolean hasNext() { return namesIterator.hasNext(); } public Member next() { String name = namesIterator.next(); JsonValue value = valuesIterator.next(); return new Member(name, value); } public void remove() { throw new UnsupportedOperationException(); } }; }
[ "public", "Iterator", "<", "Member", ">", "iterator", "(", ")", "{", "final", "Iterator", "<", "String", ">", "namesIterator", "=", "names", ".", "iterator", "(", ")", ";", "final", "Iterator", "<", "JsonValue", ">", "valuesIterator", "=", "values", ".", "iterator", "(", ")", ";", "return", "new", "Iterator", "<", "JsonObject", ".", "Member", ">", "(", ")", "{", "public", "boolean", "hasNext", "(", ")", "{", "return", "namesIterator", ".", "hasNext", "(", ")", ";", "}", "public", "Member", "next", "(", ")", "{", "String", "name", "=", "namesIterator", ".", "next", "(", ")", ";", "JsonValue", "value", "=", "valuesIterator", ".", "next", "(", ")", ";", "return", "new", "Member", "(", "name", ",", "value", ")", ";", "}", "public", "void", "remove", "(", ")", "{", "throw", "new", "UnsupportedOperationException", "(", ")", ";", "}", "}", ";", "}" ]
Returns an iterator over the members of this object in document order. The returned iterator cannot be used to modify this object. @return an iterator over the members of this object
[ "Returns", "an", "iterator", "over", "the", "members", "of", "this", "object", "in", "document", "order", ".", "The", "returned", "iterator", "cannot", "be", "used", "to", "modify", "this", "object", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/internal/json/JsonObject.java#L715-L735
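Iteration is read-only and in document order; the returned iterator's remove() always throws. A short usage sketch against the same internal JsonObject API:

import com.hazelcast.internal.json.JsonObject;

public class JsonObjectIterationSketch {
    public static void main(String[] args) {
        JsonObject object = new JsonObject().add("a", 1).add("b", 2);
        for (JsonObject.Member member : object) {
            System.out.println(member.getName() + " = " + member.getValue());
        }
        // Calling remove() on the iterator throws UnsupportedOperationException,
        // so this object cannot be modified during iteration.
    }
}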
15,078
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/spi/Offload.java
Offload.init
public final void init(NodeEngineImpl nodeEngine, Set<Operation> asyncOperations) { this.nodeEngine = nodeEngine; this.operationService = nodeEngine.getOperationService(); this.serializationService = nodeEngine.getSerializationService(); this.asyncOperations = asyncOperations; this.executionService = nodeEngine.getExecutionService(); asyncOperations.add(offloadedOperation); offloadedOperation.setOperationResponseHandler(newOperationResponseHandler()); }
java
public final void init(NodeEngineImpl nodeEngine, Set<Operation> asyncOperations) { this.nodeEngine = nodeEngine; this.operationService = nodeEngine.getOperationService(); this.serializationService = nodeEngine.getSerializationService(); this.asyncOperations = asyncOperations; this.executionService = nodeEngine.getExecutionService(); asyncOperations.add(offloadedOperation); offloadedOperation.setOperationResponseHandler(newOperationResponseHandler()); }
[ "public", "final", "void", "init", "(", "NodeEngineImpl", "nodeEngine", ",", "Set", "<", "Operation", ">", "asyncOperations", ")", "{", "this", ".", "nodeEngine", "=", "nodeEngine", ";", "this", ".", "operationService", "=", "nodeEngine", ".", "getOperationService", "(", ")", ";", "this", ".", "serializationService", "=", "nodeEngine", ".", "getSerializationService", "(", ")", ";", "this", ".", "asyncOperations", "=", "asyncOperations", ";", "this", ".", "executionService", "=", "nodeEngine", ".", "getExecutionService", "(", ")", ";", "asyncOperations", ".", "add", "(", "offloadedOperation", ")", ";", "offloadedOperation", ".", "setOperationResponseHandler", "(", "newOperationResponseHandler", "(", ")", ")", ";", "}" ]
Initializes the Offload. As part of the initialization, the {@link OperationResponseHandler} of the offloaded {@link Operation} is replaced by a decorated version that takes care of automatic deregistration of the operation on completion. This method is called before {@link #start()} is called by the Operation infrastructure. An implementor of {@link Offload} doesn't need to deal with this method. @param nodeEngine the {@link NodeEngineImpl} @param asyncOperations the set in which in-flight offloaded operations are registered
[ "Initializes", "the", "Offload", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/spi/Offload.java#L113-L122
15,079
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/map/impl/mapstore/writebehind/DefaultWriteBehindProcessor.java
DefaultWriteBehindProcessor.callHandler
private List<DelayedEntry> callHandler(Collection<DelayedEntry> delayedEntries, StoreOperationType operationType) { final int size = delayedEntries.size(); if (size == 0) { return Collections.emptyList(); } // if we want to write all store operations on a key into the MapStore, not same as write-coalescing, we don't call // batch processing methods e.g., MapStore{#storeAll,#deleteAll}, instead we call methods which process single entries // e.g. MapStore{#store,#delete}. This is because MapStore#storeAll requires a Map type in its signature and Map type // can only contain one store operation type per key, so only last update on a key can be included when batching. // Due to that limitation it is not possible to provide a correct no-write-coalescing write-behind behavior. // Under that limitation of current MapStore interface, we are making a workaround and persisting all // entries one by one for no-write-coalescing write-behind map-stores and as a result not doing batching // when writeCoalescing is false. if (size == 1 || !writeCoalescing) { return processEntriesOneByOne(delayedEntries, operationType); } final DelayedEntry[] delayedEntriesArray = delayedEntries.toArray(new DelayedEntry[0]); final Map<Object, DelayedEntry> batchMap = prepareBatchMap(delayedEntriesArray); // if all batch is on same key, call single store. if (batchMap.size() == 1) { final DelayedEntry delayedEntry = delayedEntriesArray[delayedEntriesArray.length - 1]; return callSingleStoreWithListeners(delayedEntry, operationType); } final List<DelayedEntry> failedEntryList = callBatchStoreWithListeners(batchMap, operationType); final List<DelayedEntry> failedTries = new ArrayList<>(); for (DelayedEntry entry : failedEntryList) { final Collection<DelayedEntry> tmpFails = callSingleStoreWithListeners(entry, operationType); failedTries.addAll(tmpFails); } return failedTries; }
java
private List<DelayedEntry> callHandler(Collection<DelayedEntry> delayedEntries, StoreOperationType operationType) { final int size = delayedEntries.size(); if (size == 0) { return Collections.emptyList(); } // if we want to write all store operations on a key into the MapStore, not same as write-coalescing, we don't call // batch processing methods e.g., MapStore{#storeAll,#deleteAll}, instead we call methods which process single entries // e.g. MapStore{#store,#delete}. This is because MapStore#storeAll requires a Map type in its signature and Map type // can only contain one store operation type per key, so only last update on a key can be included when batching. // Due to that limitation it is not possible to provide a correct no-write-coalescing write-behind behavior. // Under that limitation of current MapStore interface, we are making a workaround and persisting all // entries one by one for no-write-coalescing write-behind map-stores and as a result not doing batching // when writeCoalescing is false. if (size == 1 || !writeCoalescing) { return processEntriesOneByOne(delayedEntries, operationType); } final DelayedEntry[] delayedEntriesArray = delayedEntries.toArray(new DelayedEntry[0]); final Map<Object, DelayedEntry> batchMap = prepareBatchMap(delayedEntriesArray); // if all batch is on same key, call single store. if (batchMap.size() == 1) { final DelayedEntry delayedEntry = delayedEntriesArray[delayedEntriesArray.length - 1]; return callSingleStoreWithListeners(delayedEntry, operationType); } final List<DelayedEntry> failedEntryList = callBatchStoreWithListeners(batchMap, operationType); final List<DelayedEntry> failedTries = new ArrayList<>(); for (DelayedEntry entry : failedEntryList) { final Collection<DelayedEntry> tmpFails = callSingleStoreWithListeners(entry, operationType); failedTries.addAll(tmpFails); } return failedTries; }
[ "private", "List", "<", "DelayedEntry", ">", "callHandler", "(", "Collection", "<", "DelayedEntry", ">", "delayedEntries", ",", "StoreOperationType", "operationType", ")", "{", "final", "int", "size", "=", "delayedEntries", ".", "size", "(", ")", ";", "if", "(", "size", "==", "0", ")", "{", "return", "Collections", ".", "emptyList", "(", ")", ";", "}", "// if we want to write all store operations on a key into the MapStore, not same as write-coalescing, we don't call", "// batch processing methods e.g., MapStore{#storeAll,#deleteAll}, instead we call methods which process single entries", "// e.g. MapStore{#store,#delete}. This is because MapStore#storeAll requires a Map type in its signature and Map type", "// can only contain one store operation type per key, so only last update on a key can be included when batching.", "// Due to that limitation it is not possible to provide a correct no-write-coalescing write-behind behavior.", "// Under that limitation of current MapStore interface, we are making a workaround and persisting all", "// entries one by one for no-write-coalescing write-behind map-stores and as a result not doing batching", "// when writeCoalescing is false.", "if", "(", "size", "==", "1", "||", "!", "writeCoalescing", ")", "{", "return", "processEntriesOneByOne", "(", "delayedEntries", ",", "operationType", ")", ";", "}", "final", "DelayedEntry", "[", "]", "delayedEntriesArray", "=", "delayedEntries", ".", "toArray", "(", "new", "DelayedEntry", "[", "0", "]", ")", ";", "final", "Map", "<", "Object", ",", "DelayedEntry", ">", "batchMap", "=", "prepareBatchMap", "(", "delayedEntriesArray", ")", ";", "// if all batch is on same key, call single store.", "if", "(", "batchMap", ".", "size", "(", ")", "==", "1", ")", "{", "final", "DelayedEntry", "delayedEntry", "=", "delayedEntriesArray", "[", "delayedEntriesArray", ".", "length", "-", "1", "]", ";", "return", "callSingleStoreWithListeners", "(", "delayedEntry", ",", "operationType", ")", ";", "}", "final", "List", "<", "DelayedEntry", ">", "failedEntryList", "=", "callBatchStoreWithListeners", "(", "batchMap", ",", "operationType", ")", ";", "final", "List", "<", "DelayedEntry", ">", "failedTries", "=", "new", "ArrayList", "<>", "(", ")", ";", "for", "(", "DelayedEntry", "entry", ":", "failedEntryList", ")", "{", "final", "Collection", "<", "DelayedEntry", ">", "tmpFails", "=", "callSingleStoreWithListeners", "(", "entry", ",", "operationType", ")", ";", "failedTries", ".", "addAll", "(", "tmpFails", ")", ";", "}", "return", "failedTries", ";", "}" ]
Decides how entries should be passed to handlers. It passes entries to the handler's single or batch handling methods. @param delayedEntries sorted entries to be processed. @param operationType the store operation type to apply to the entries. @return failed entry list if any.
[ "Decides", "how", "entries", "should", "be", "passed", "to", "handlers", ".", "It", "passes", "entries", "to", "handler", "s", "single", "or", "batch", "handling", "methods", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/map/impl/mapstore/writebehind/DefaultWriteBehindProcessor.java#L121-L153
15,080
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/Preconditions.java
Preconditions.checkHasText
public static String checkHasText(String argument, String errorMessage) { if (argument == null || argument.isEmpty()) { throw new IllegalArgumentException(errorMessage); } return argument; }
java
public static String checkHasText(String argument, String errorMessage) { if (argument == null || argument.isEmpty()) { throw new IllegalArgumentException(errorMessage); } return argument; }
[ "public", "static", "String", "checkHasText", "(", "String", "argument", ",", "String", "errorMessage", ")", "{", "if", "(", "argument", "==", "null", "||", "argument", ".", "isEmpty", "(", ")", ")", "{", "throw", "new", "IllegalArgumentException", "(", "errorMessage", ")", ";", "}", "return", "argument", ";", "}" ]
Tests if a string contains text. @param argument the string tested to see if it contains text. @param errorMessage the errorMessage @return the string argument that was tested. @throws java.lang.IllegalArgumentException if the string is null or empty
[ "Tests", "if", "a", "string", "contains", "text", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/Preconditions.java#L41-L47
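A short usage sketch for checkHasText, assuming only that com.hazelcast.util.Preconditions is on the classpath; the demo class and message strings are made up for illustration.

import static com.hazelcast.util.Preconditions.checkHasText;

public class CheckHasTextDemo {
    public static void main(String[] args) {
        // Returns the argument unchanged when it contains text,
        // so the check composes with assignment.
        String mapName = checkHasText("orders", "map name must contain text");
        System.out.println(mapName); // orders

        // Null or empty input fails with the supplied message.
        checkHasText("", "map name must contain text"); // throws IllegalArgumentException
    }
}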
15,081
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/Preconditions.java
Preconditions.checkNotNull
public static <T> T checkNotNull(T argument, String errorMessage) { if (argument == null) { throw new NullPointerException(errorMessage); } return argument; }
java
public static <T> T checkNotNull(T argument, String errorMessage) { if (argument == null) { throw new NullPointerException(errorMessage); } return argument; }
[ "public", "static", "<", "T", ">", "T", "checkNotNull", "(", "T", "argument", ",", "String", "errorMessage", ")", "{", "if", "(", "argument", "==", "null", ")", "{", "throw", "new", "NullPointerException", "(", "errorMessage", ")", ";", "}", "return", "argument", ";", "}" ]
Tests if an argument is not null. @param argument the argument tested to see if it is not null. @param errorMessage the errorMessage @return the argument that was tested. @throws java.lang.NullPointerException if argument is null
[ "Tests", "if", "an", "argument", "is", "not", "null", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/Preconditions.java#L57-L62
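A matching sketch for checkNotNull; again the demo class and messages are hypothetical.

import static com.hazelcast.util.Preconditions.checkNotNull;

public class CheckNotNullDemo {
    public static void main(String[] args) {
        // The tested argument is returned, so the check can be inlined.
        String key = checkNotNull("customer-42", "key must not be null");
        System.out.println(key); // customer-42

        checkNotNull(null, "key must not be null"); // throws NullPointerException
    }
}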
15,082
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/Preconditions.java
Preconditions.checkNoNullInside
public static <T> Iterable<T> checkNoNullInside(Iterable<T> argument, String errorMessage) { if (argument == null) { return argument; } for (T element : argument) { checkNotNull(element, errorMessage); } return argument; }
java
public static <T> Iterable<T> checkNoNullInside(Iterable<T> argument, String errorMessage) { if (argument == null) { return argument; } for (T element : argument) { checkNotNull(element, errorMessage); } return argument; }
[ "public", "static", "<", "T", ">", "Iterable", "<", "T", ">", "checkNoNullInside", "(", "Iterable", "<", "T", ">", "argument", ",", "String", "errorMessage", ")", "{", "if", "(", "argument", "==", "null", ")", "{", "return", "argument", ";", "}", "for", "(", "T", "element", ":", "argument", ")", "{", "checkNotNull", "(", "element", ",", "errorMessage", ")", ";", "}", "return", "argument", ";", "}" ]
Tests if the elements inside the argument collection are not null. If the collection is null or empty, the test is skipped. @param argument the iterable tested to see if it does not contain null elements; may be null or empty @param errorMessage the errorMessage @return the argument that was tested. @throws java.lang.NullPointerException if argument contains a null element inside
[ "Tests", "if", "the", "elements", "inside", "the", "argument", "collection", "are", "not", "null", ".", "If", "the", "collection", "is", "null", "or", "empty", ",", "the", "test", "is", "skipped", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/Preconditions.java#L73-L81
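A sketch for checkNoNullInside showing all three outcomes; sample data is hypothetical.

import static com.hazelcast.util.Preconditions.checkNoNullInside;

import java.util.Arrays;
import java.util.List;

public class CheckNoNullInsideDemo {
    public static void main(String[] args) {
        List<String> members = Arrays.asList("node-a", "node-b");
        checkNoNullInside(members, "member must not be null"); // passes

        // A null iterable is returned as-is; the element check is skipped.
        checkNoNullInside(null, "member must not be null");

        List<String> broken = Arrays.asList("node-a", null);
        checkNoNullInside(broken, "member must not be null"); // throws NullPointerException
    }
}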
15,083
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/Preconditions.java
Preconditions.isNotNull
public static <E> E isNotNull(E argument, String argName) { if (argument == null) { throw new IllegalArgumentException(format("argument '%s' can't be null", argName)); } return argument; }
java
public static <E> E isNotNull(E argument, String argName) { if (argument == null) { throw new IllegalArgumentException(format("argument '%s' can't be null", argName)); } return argument; }
[ "public", "static", "<", "E", ">", "E", "isNotNull", "(", "E", "argument", ",", "String", "argName", ")", "{", "if", "(", "argument", "==", "null", ")", "{", "throw", "new", "IllegalArgumentException", "(", "format", "(", "\"argument '%s' can't be null\"", ",", "argName", ")", ")", ";", "}", "return", "argument", ";", "}" ]
Tests if an argument is not null. @param argument the argument tested to see if it is not null. @param argName the argument name (used in the message if an error is thrown). @return the argument that was tested. @throws java.lang.IllegalArgumentException if the argument is null.
[ "Tests", "if", "an", "argument", "is", "not", "null", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/Preconditions.java#L105-L111
15,084
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/Preconditions.java
Preconditions.checkBackupCount
public static int checkBackupCount(int newBackupCount, int currentAsyncBackupCount) { if (newBackupCount < 0) { throw new IllegalArgumentException("backup-count can't be smaller than 0"); } if (currentAsyncBackupCount < 0) { throw new IllegalArgumentException("async-backup-count can't be smaller than 0"); } if (newBackupCount > MAX_BACKUP_COUNT) { throw new IllegalArgumentException("backup-count can't be larger than " + MAX_BACKUP_COUNT); } if (newBackupCount + currentAsyncBackupCount > MAX_BACKUP_COUNT) { throw new IllegalArgumentException("the sum of backup-count and async-backup-count can't be larger than " + MAX_BACKUP_COUNT); } return newBackupCount; }
java
public static int checkBackupCount(int newBackupCount, int currentAsyncBackupCount) { if (newBackupCount < 0) { throw new IllegalArgumentException("backup-count can't be smaller than 0"); } if (currentAsyncBackupCount < 0) { throw new IllegalArgumentException("async-backup-count can't be smaller than 0"); } if (newBackupCount > MAX_BACKUP_COUNT) { throw new IllegalArgumentException("backup-count can't be larger than " + MAX_BACKUP_COUNT); } if (newBackupCount + currentAsyncBackupCount > MAX_BACKUP_COUNT) { throw new IllegalArgumentException("the sum of backup-count and async-backup-count can't be larger than " + MAX_BACKUP_COUNT); } return newBackupCount; }
[ "public", "static", "int", "checkBackupCount", "(", "int", "newBackupCount", ",", "int", "currentAsyncBackupCount", ")", "{", "if", "(", "newBackupCount", "<", "0", ")", "{", "throw", "new", "IllegalArgumentException", "(", "\"backup-count can't be smaller than 0\"", ")", ";", "}", "if", "(", "currentAsyncBackupCount", "<", "0", ")", "{", "throw", "new", "IllegalArgumentException", "(", "\"async-backup-count can't be smaller than 0\"", ")", ";", "}", "if", "(", "newBackupCount", ">", "MAX_BACKUP_COUNT", ")", "{", "throw", "new", "IllegalArgumentException", "(", "\"backup-count can't be larger than than \"", "+", "MAX_BACKUP_COUNT", ")", ";", "}", "if", "(", "newBackupCount", "+", "currentAsyncBackupCount", ">", "MAX_BACKUP_COUNT", ")", "{", "throw", "new", "IllegalArgumentException", "(", "\"the sum of backup-count and async-backup-count can't be larger than than \"", "+", "MAX_BACKUP_COUNT", ")", ";", "}", "return", "newBackupCount", ";", "}" ]
Tests if the newBackupCount count is valid. @param newBackupCount the number of sync backups @param currentAsyncBackupCount the current number of async backups @return the newBackupCount @throws java.lang.IllegalArgumentException if newBackupCount is smaller than 0, if it is larger than the maximum number of backups, or if the sum of backup-count and async-backup-count exceeds the maximum number of backups.
[ "Tests", "if", "the", "newBackupCount", "count", "is", "valid", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/Preconditions.java#L212-L231
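A usage sketch for checkBackupCount. The concrete limit of 6 total backups (MAX_BACKUP_COUNT) is an assumption about the constant's value, not something given in the record.

import static com.hazelcast.util.Preconditions.checkBackupCount;

public class CheckBackupCountDemo {
    public static void main(String[] args) {
        // 2 sync + 3 async = 5 total backups, within the assumed limit of 6.
        int backupCount = checkBackupCount(2, 3);
        System.out.println(backupCount); // 2

        // 4 sync + 3 async = 7 total, which exceeds the assumed limit.
        checkBackupCount(4, 3); // throws IllegalArgumentException
    }
}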
15,085
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/Preconditions.java
Preconditions.checkAsyncBackupCount
public static int checkAsyncBackupCount(int currentBackupCount, int newAsyncBackupCount) { if (currentBackupCount < 0) { throw new IllegalArgumentException("backup-count can't be smaller than 0"); } if (newAsyncBackupCount < 0) { throw new IllegalArgumentException("async-backup-count can't be smaller than 0"); } if (newAsyncBackupCount > MAX_BACKUP_COUNT) { throw new IllegalArgumentException("async-backup-count can't be larger than " + MAX_BACKUP_COUNT); } if (currentBackupCount + newAsyncBackupCount > MAX_BACKUP_COUNT) { throw new IllegalArgumentException("the sum of backup-count and async-backup-count can't be larger than " + MAX_BACKUP_COUNT); } return newAsyncBackupCount; }
java
public static int checkAsyncBackupCount(int currentBackupCount, int newAsyncBackupCount) { if (currentBackupCount < 0) { throw new IllegalArgumentException("backup-count can't be smaller than 0"); } if (newAsyncBackupCount < 0) { throw new IllegalArgumentException("async-backup-count can't be smaller than 0"); } if (newAsyncBackupCount > MAX_BACKUP_COUNT) { throw new IllegalArgumentException("async-backup-count can't be larger than " + MAX_BACKUP_COUNT); } if (currentBackupCount + newAsyncBackupCount > MAX_BACKUP_COUNT) { throw new IllegalArgumentException("the sum of backup-count and async-backup-count can't be larger than " + MAX_BACKUP_COUNT); } return newAsyncBackupCount; }
[ "public", "static", "int", "checkAsyncBackupCount", "(", "int", "currentBackupCount", ",", "int", "newAsyncBackupCount", ")", "{", "if", "(", "currentBackupCount", "<", "0", ")", "{", "throw", "new", "IllegalArgumentException", "(", "\"backup-count can't be smaller than 0\"", ")", ";", "}", "if", "(", "newAsyncBackupCount", "<", "0", ")", "{", "throw", "new", "IllegalArgumentException", "(", "\"async-backup-count can't be smaller than 0\"", ")", ";", "}", "if", "(", "newAsyncBackupCount", ">", "MAX_BACKUP_COUNT", ")", "{", "throw", "new", "IllegalArgumentException", "(", "\"async-backup-count can't be larger than than \"", "+", "MAX_BACKUP_COUNT", ")", ";", "}", "if", "(", "currentBackupCount", "+", "newAsyncBackupCount", ">", "MAX_BACKUP_COUNT", ")", "{", "throw", "new", "IllegalArgumentException", "(", "\"the sum of backup-count and async-backup-count can't be larger than than \"", "+", "MAX_BACKUP_COUNT", ")", ";", "}", "return", "newAsyncBackupCount", ";", "}" ]
Tests if the newAsyncBackupCount count is valid. @param currentBackupCount the current number of backups @param newAsyncBackupCount the new number of async backups @return the newAsyncBackupCount @throws java.lang.IllegalArgumentException if newAsyncBackupCount is smaller than 0, if it is larger than the maximum number of backups, or if the sum of backup-count and async-backup-count exceeds the maximum number of backups.
[ "Tests", "if", "the", "newAsyncBackupCount", "count", "is", "valid", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/Preconditions.java#L242-L261
15,086
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/Preconditions.java
Preconditions.checkInstanceOf
public static <E> E checkInstanceOf(Class<E> type, Object object, String errorMessage) { isNotNull(type, "type"); if (!type.isInstance(object)) { throw new IllegalArgumentException(errorMessage); } return (E) object; }
java
public static <E> E checkInstanceOf(Class<E> type, Object object, String errorMessage) { isNotNull(type, "type"); if (!type.isInstance(object)) { throw new IllegalArgumentException(errorMessage); } return (E) object; }
[ "public", "static", "<", "E", ">", "E", "checkInstanceOf", "(", "Class", "<", "E", ">", "type", ",", "Object", "object", ",", "String", "errorMessage", ")", "{", "isNotNull", "(", "type", ",", "\"type\"", ")", ";", "if", "(", "!", "type", ".", "isInstance", "(", "object", ")", ")", "{", "throw", "new", "IllegalArgumentException", "(", "errorMessage", ")", ";", "}", "return", "(", "E", ")", "object", ";", "}" ]
Tests whether the supplied object is an instance of the supplied class type. @param type the expected type. @param object the object tested against the expected type. @param errorMessage the errorMessage @return the object argument. @throws java.lang.IllegalArgumentException if the object is not an instance of the expected type.
[ "Tests", "whether", "the", "supplied", "object", "is", "an", "instance", "of", "the", "supplied", "class", "type", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/Preconditions.java#L272-L278
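A sketch for checkInstanceOf; note that the return value is already cast to the expected type, so no explicit cast is needed at the call site. The sample values are hypothetical.

import static com.hazelcast.util.Preconditions.checkInstanceOf;

public class CheckInstanceOfDemo {
    public static void main(String[] args) {
        Object value = "hello";
        // Passes and returns the value cast to the expected type.
        String s = checkInstanceOf(String.class, value, "value must be a String");
        System.out.println(s.length()); // 5

        checkInstanceOf(Integer.class, value, "value must be an Integer"); // throws IllegalArgumentException
    }
}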
15,087
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/Preconditions.java
Preconditions.checkNotInstanceOf
public static <E> E checkNotInstanceOf(Class type, E object, String errorMessage) { isNotNull(type, "type"); if (type.isInstance(object)) { throw new IllegalArgumentException(errorMessage); } return object; }
java
public static <E> E checkNotInstanceOf(Class type, E object, String errorMessage) { isNotNull(type, "type"); if (type.isInstance(object)) { throw new IllegalArgumentException(errorMessage); } return object; }
[ "public", "static", "<", "E", ">", "E", "checkNotInstanceOf", "(", "Class", "type", ",", "E", "object", ",", "String", "errorMessage", ")", "{", "isNotNull", "(", "type", ",", "\"type\"", ")", ";", "if", "(", "type", ".", "isInstance", "(", "object", ")", ")", "{", "throw", "new", "IllegalArgumentException", "(", "errorMessage", ")", ";", "}", "return", "object", ";", "}" ]
Tests that the supplied object is not an instance of the supplied class type. @param type the type the object must not be an instance of. @param object the object tested against the type. @param errorMessage the errorMessage @return the object argument. @throws java.lang.IllegalArgumentException if the object is an instance of the given type.
[ "Tests", "that", "the", "supplied", "object", "is", "not", "an", "instance", "of", "the", "supplied", "class", "type", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/Preconditions.java#L297-L303
15,088
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/Preconditions.java
Preconditions.checkHasNext
public static <T> Iterator<T> checkHasNext(Iterator<T> iterator, String message) throws NoSuchElementException { if (!iterator.hasNext()) { throw new NoSuchElementException(message); } return iterator; }
java
public static <T> Iterator<T> checkHasNext(Iterator<T> iterator, String message) throws NoSuchElementException { if (!iterator.hasNext()) { throw new NoSuchElementException(message); } return iterator; }
[ "public", "static", "<", "T", ">", "Iterator", "<", "T", ">", "checkHasNext", "(", "Iterator", "<", "T", ">", "iterator", ",", "String", "message", ")", "throws", "NoSuchElementException", "{", "if", "(", "!", "iterator", ".", "hasNext", "(", ")", ")", "{", "throw", "new", "NoSuchElementException", "(", "message", ")", ";", "}", "return", "iterator", ";", "}" ]
Checks if the iterator has a next element; if not, throws NoSuchElementException. @param iterator the iterator to check @param message the exception message @return the iterator itself @throws java.util.NoSuchElementException if iterator.hasNext returns false
[ "Checks", "if", "the", "iterator", "has", "a", "next", "element", ";", "if", "not", ",", "throws", "NoSuchElementException", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/Preconditions.java#L339-L344
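A sketch for checkHasNext, exploiting the fact that the same iterator is returned so the check chains with next(); the sample list is hypothetical.

import static com.hazelcast.util.Preconditions.checkHasNext;

import java.util.Collections;
import java.util.Iterator;

public class CheckHasNextDemo {
    public static void main(String[] args) {
        Iterator<String> it = Collections.singletonList("head").iterator();
        // The check returns the iterator, so it composes with next().
        String first = checkHasNext(it, "expected at least one element").next();
        System.out.println(first); // head

        checkHasNext(it, "iterator is exhausted"); // throws NoSuchElementException
    }
}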
15,089
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/query/impl/predicates/OrPredicate.java
OrPredicate.setPredicates
@Override @SuppressFBWarnings("EI_EXPOSE_REP") public <K, V> void setPredicates(Predicate<K, V>[] predicates) { if (this.predicates == null) { this.predicates = predicates; } else { throw new IllegalStateException("Cannot reset predicates in an OrPredicate after they have been already set."); } }
java
@Override @SuppressFBWarnings("EI_EXPOSE_REP") public <K, V> void setPredicates(Predicate<K, V>[] predicates) { if (this.predicates == null) { this.predicates = predicates; } else { throw new IllegalStateException("Cannot reset predicates in an OrPredicate after they have been already set."); } }
[ "@", "Override", "@", "SuppressFBWarnings", "(", "\"EI_EXPOSE_REP\"", ")", "public", "<", "K", ",", "V", ">", "void", "setPredicates", "(", "Predicate", "<", "K", ",", "V", ">", "[", "]", "predicates", ")", "{", "if", "(", "this", ".", "predicates", "==", "null", ")", "{", "this", ".", "predicates", "=", "predicates", ";", "}", "else", "{", "throw", "new", "IllegalStateException", "(", "\"Cannot reset predicates in an OrPredicate after they have been already set.\"", ")", ";", "}", "}" ]
Visitable predicates are treated as effectively immutable, therefore callers should not make any changes to the array passed as argument after is has been set. @param predicates the array of sub-predicates for this {@code Or} operator. It is not safe to make any changes to this array after it has been set.
[ "Visitable", "predicates", "are", "treated", "as", "effectively", "immutable", "therefore", "callers", "should", "not", "make", "any", "changes", "to", "the", "array", "passed", "as", "argument", "after", "is", "has", "been", "set", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/query/impl/predicates/OrPredicate.java#L189-L197
15,090
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/query/Parser.java
Parser.findMinIfNot
private int findMinIfNot(int a, int b, int notMin) { if (a <= notMin) { return b; } if (b <= notMin) { return a; } return Math.min(a, b); }
java
private int findMinIfNot(int a, int b, int notMin) { if (a <= notMin) { return b; } if (b <= notMin) { return a; } return Math.min(a, b); }
[ "private", "int", "findMinIfNot", "(", "int", "a", ",", "int", "b", ",", "int", "notMin", ")", "{", "if", "(", "a", "<=", "notMin", ")", "{", "return", "b", ";", "}", "if", "(", "b", "<=", "notMin", ")", "{", "return", "a", ";", "}", "return", "Math", ".", "min", "(", "a", ",", "b", ")", ";", "}" ]
Finds the minimum of two values while excluding values at or below a lower bound. @param a first number @param b second number @param notMin lower bound; a value less than or equal to it is ignored @return the smaller of a and b, ignoring any value that is less than or equal to notMin; if both are at or below the bound, b is returned
[ "Finds", "the", "minimum", "of", "two", "values", "while", "excluding", "values", "at", "or", "below", "a", "lower", "bound", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/query/Parser.java#L226-L234
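Since findMinIfNot is private, the sketch below re-implements it verbatim to illustrate the lower-bound behavior; the demo class and sample inputs are hypothetical.

public class FindMinIfNotDemo {
    // Standalone copy of the private helper, reproduced only for illustration:
    // returns the smaller of a and b while treating values <= notMin as "absent".
    static int findMinIfNot(int a, int b, int notMin) {
        if (a <= notMin) {
            return b;
        }
        if (b <= notMin) {
            return a;
        }
        return Math.min(a, b);
    }

    public static void main(String[] args) {
        // Typical inputs are indexOf results, where -1 means "not found".
        System.out.println(findMinIfNot(5, 9, -1));  // 5: both valid, plain minimum
        System.out.println(findMinIfNot(-1, 9, -1)); // 9: a is at the bound, so excluded
        System.out.println(findMinIfNot(5, -1, -1)); // 5: b is excluded
    }
}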
15,091
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/partition/impl/MigrationStats.java
MigrationStats.markNewRepartition
void markNewRepartition() { lastRepartitionTime.set(Clock.currentTimeMillis()); elapsedMigrationOperationTime.set(0); elapsedDestinationCommitTime.set(0); elapsedMigrationTime.set(0); completedMigrations.set(0); }
java
void markNewRepartition() { lastRepartitionTime.set(Clock.currentTimeMillis()); elapsedMigrationOperationTime.set(0); elapsedDestinationCommitTime.set(0); elapsedMigrationTime.set(0); completedMigrations.set(0); }
[ "void", "markNewRepartition", "(", ")", "{", "lastRepartitionTime", ".", "set", "(", "Clock", ".", "currentTimeMillis", "(", ")", ")", ";", "elapsedMigrationOperationTime", ".", "set", "(", "0", ")", ";", "elapsedDestinationCommitTime", ".", "set", "(", "0", ")", ";", "elapsedMigrationTime", ".", "set", "(", "0", ")", ";", "completedMigrations", ".", "set", "(", "0", ")", ";", "}" ]
Marks start of new repartitioning. Resets stats from previous repartitioning round.
[ "Marks", "start", "of", "new", "repartitioning", ".", "Resets", "stats", "from", "previous", "repartitioning", "round", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/internal/partition/impl/MigrationStats.java#L62-L68
15,092
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/StateMachine.java
StateMachine.withTransition
public StateMachine<T> withTransition(T from, T to, T... moreTo) { transitions.put(from, EnumSet.of(to, moreTo)); return this; }
java
public StateMachine<T> withTransition(T from, T to, T... moreTo) { transitions.put(from, EnumSet.of(to, moreTo)); return this; }
[ "public", "StateMachine", "<", "T", ">", "withTransition", "(", "T", "from", ",", "T", "to", ",", "T", "...", "moreTo", ")", "{", "transitions", ".", "put", "(", "from", ",", "EnumSet", ".", "of", "(", "to", ",", "moreTo", ")", ")", ";", "return", "this", ";", "}" ]
Adds a valid transition from one state to one or more states. @param from the source state @param to a target state @param moreTo additional target states @return this state machine, for chaining
[ "Adds", "a", "valid", "transition", "from", "one", "state", "to", "one", "or", "more", "states", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/StateMachine.java#L48-L51
15,093
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/StateMachine.java
StateMachine.next
public StateMachine<T> next(T nextState) throws IllegalStateException { Set<T> allowed = transitions.get(currentState); checkNotNull(allowed, "No transitions from state " + currentState); checkState(allowed.contains(nextState), "Transition not allowed from state " + currentState + " to " + nextState); currentState = nextState; return this; }
java
public StateMachine<T> next(T nextState) throws IllegalStateException { Set<T> allowed = transitions.get(currentState); checkNotNull(allowed, "No transitions from state " + currentState); checkState(allowed.contains(nextState), "Transition not allowed from state " + currentState + " to " + nextState); currentState = nextState; return this; }
[ "public", "StateMachine", "<", "T", ">", "next", "(", "T", "nextState", ")", "throws", "IllegalStateException", "{", "Set", "<", "T", ">", "allowed", "=", "transitions", ".", "get", "(", "currentState", ")", ";", "checkNotNull", "(", "allowed", ",", "\"No transitions from state \"", "+", "currentState", ")", ";", "checkState", "(", "allowed", ".", "contains", "(", "nextState", ")", ",", "\"Transition not allowed from state \"", "+", "currentState", "+", "\" to \"", "+", "nextState", ")", ";", "currentState", "=", "nextState", ";", "return", "this", ";", "}" ]
Transitions to the next state. @param nextState the state to transition to @return this state machine @throws IllegalStateException if the transition is not allowed @throws NullPointerException if no transitions are registered from the current state
[ "Transitions", "to", "the", "next", "state", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/StateMachine.java#L59-L65
15,094
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/StateMachine.java
StateMachine.is
public boolean is(T state, T... otherStates) { return EnumSet.of(state, otherStates).contains(currentState); }
java
public boolean is(T state, T... otherStates) { return EnumSet.of(state, otherStates).contains(currentState); }
[ "public", "boolean", "is", "(", "T", "state", ",", "T", "...", "otherStates", ")", "{", "return", "EnumSet", ".", "of", "(", "state", ",", "otherStates", ")", ".", "contains", "(", "currentState", ")", ";", "}" ]
Checks if the current state is one of the given states. @param state a state to check against @param otherStates further states to check against @return true if the current state is among the given states
[ "Checks", "if", "the", "current", "state", "is", "one", "of", "the", "given", "states", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/StateMachine.java#L82-L84
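The three StateMachine records above (withTransition, next, is) compose naturally; a hedged sketch follows. The creation call StateMachine.of(...) is an assumption about how the initial state is supplied, since the record does not show the class's construction API.

import com.hazelcast.util.StateMachine;

public class LifecycleDemo {
    enum State { NEW, STARTED, STOPPED }

    public static void main(String[] args) {
        // NOTE: StateMachine.of(...) is assumed here; the class may expose a
        // different factory or constructor for setting the initial state.
        StateMachine<State> machine = StateMachine.of(State.NEW)
                .withTransition(State.NEW, State.STARTED)
                .withTransition(State.STARTED, State.STOPPED);

        machine.next(State.STARTED);                   // allowed: NEW -> STARTED
        System.out.println(machine.is(State.STARTED)); // true
        machine.next(State.NEW);                       // throws IllegalStateException
    }
}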
15,095
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/collection/WeightedEvictableList.java
WeightedEvictableList.voteFor
public void voteFor(WeightedItem<T> weightedItem) { reorganizationCounter++; weightedItem.vote(); if (reorganizationCounter == maxVotesBeforeReorganization) { reorganizationCounter = 0; organizeAndAdd(null); } }
java
public void voteFor(WeightedItem<T> weightedItem) { reorganizationCounter++; weightedItem.vote(); if (reorganizationCounter == maxVotesBeforeReorganization) { reorganizationCounter = 0; organizeAndAdd(null); } }
[ "public", "void", "voteFor", "(", "WeightedItem", "<", "T", ">", "weightedItem", ")", "{", "reorganizationCounter", "++", ";", "weightedItem", ".", "vote", "(", ")", ";", "if", "(", "reorganizationCounter", "==", "maxVotesBeforeReorganization", ")", "{", "reorganizationCounter", "=", "0", ";", "organizeAndAdd", "(", "null", ")", ";", "}", "}" ]
Casts a vote for the given list item. The vote is added to the item's weight. @param weightedItem the item to vote for
[ "Casts", "a", "vote", "for", "the", "given", "list", "item", ".", "The", "vote", "is", "added", "to", "the", "item", "s", "weight", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/collection/WeightedEvictableList.java#L75-L82
15,096
hazelcast/hazelcast
hazelcast-client/src/main/java/com/hazelcast/client/impl/statistics/Statistics.java
Statistics.start
public final void start() { if (!enabled) { return; } long periodSeconds = properties.getSeconds(PERIOD_SECONDS); if (periodSeconds <= 0) { long defaultValue = Long.parseLong(PERIOD_SECONDS.getDefaultValue()); logger.warning("Provided client statistics " + PERIOD_SECONDS.getName() + " cannot be less than or equal to 0. You provided " + periodSeconds + " seconds as the configuration. Client will use the default value of " + defaultValue + " instead."); periodSeconds = defaultValue; } // Note that the OperatingSystemMetricSet and RuntimeMetricSet are already registered during client start, // hence we do not re-register periodicStats = new PeriodicStatistics(); schedulePeriodicStatisticsSendTask(periodSeconds); logger.info("Client statistics is enabled with period " + periodSeconds + " seconds."); }
java
public final void start() { if (!enabled) { return; } long periodSeconds = properties.getSeconds(PERIOD_SECONDS); if (periodSeconds <= 0) { long defaultValue = Long.parseLong(PERIOD_SECONDS.getDefaultValue()); logger.warning("Provided client statistics " + PERIOD_SECONDS.getName() + " cannot be less than or equal to 0. You provided " + periodSeconds + " seconds as the configuration. Client will use the default value of " + defaultValue + " instead."); periodSeconds = defaultValue; } // Note that the OperatingSystemMetricSet and RuntimeMetricSet are already registered during client start, // hence we do not re-register periodicStats = new PeriodicStatistics(); schedulePeriodicStatisticsSendTask(periodSeconds); logger.info("Client statistics is enabled with period " + periodSeconds + " seconds."); }
[ "public", "final", "void", "start", "(", ")", "{", "if", "(", "!", "enabled", ")", "{", "return", ";", "}", "long", "periodSeconds", "=", "properties", ".", "getSeconds", "(", "PERIOD_SECONDS", ")", ";", "if", "(", "periodSeconds", "<=", "0", ")", "{", "long", "defaultValue", "=", "Long", ".", "parseLong", "(", "PERIOD_SECONDS", ".", "getDefaultValue", "(", ")", ")", ";", "logger", ".", "warning", "(", "\"Provided client statistics \"", "+", "PERIOD_SECONDS", ".", "getName", "(", ")", "+", "\" cannot be less than or equal to 0. You provided \"", "+", "periodSeconds", "+", "\" seconds as the configuration. Client will use the default value of \"", "+", "defaultValue", "+", "\" instead.\"", ")", ";", "periodSeconds", "=", "defaultValue", ";", "}", "// Note that the OperatingSystemMetricSet and RuntimeMetricSet are already registered during client start,", "// hence we do not re-register", "periodicStats", "=", "new", "PeriodicStatistics", "(", ")", ";", "schedulePeriodicStatisticsSendTask", "(", "periodSeconds", ")", ";", "logger", ".", "info", "(", "\"Client statistics is enabled with period \"", "+", "periodSeconds", "+", "\" seconds.\"", ")", ";", "}" ]
Registers all client statistics and schedules periodic collection of stats.
[ "Registers", "all", "client", "statistics", "and", "schedules", "periodic", "collection", "of", "stats", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast-client/src/main/java/com/hazelcast/client/impl/statistics/Statistics.java#L95-L116
15,097
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/TimeUtil.java
TimeUtil.timeInMsOrTimeIfNullUnit
public static long timeInMsOrTimeIfNullUnit(long time, TimeUnit timeUnit) { return timeUnit != null ? timeUnit.toMillis(time) : time; }
java
public static long timeInMsOrTimeIfNullUnit(long time, TimeUnit timeUnit) { return timeUnit != null ? timeUnit.toMillis(time) : time; }
[ "public", "static", "long", "timeInMsOrTimeIfNullUnit", "(", "long", "time", ",", "TimeUnit", "timeUnit", ")", "{", "return", "timeUnit", "!=", "null", "?", "timeUnit", ".", "toMillis", "(", "time", ")", ":", "time", ";", "}" ]
Convert time to milliseconds based on the given time unit. If time unit is null, then input time is treated as milliseconds. @param time The input time @param timeUnit The time unit to base the conversion on @return The millisecond representation of the time based on the unit, or the time itself if the unit is <code>null</code>
[ "Convert", "time", "to", "milliseconds", "based", "on", "the", "given", "time", "unit", ".", "If", "time", "unit", "is", "null", "then", "input", "time", "is", "treated", "as", "milliseconds", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/TimeUtil.java#L59-L61
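A sketch for timeInMsOrTimeIfNullUnit showing both branches; the demo class is hypothetical.

import static com.hazelcast.util.TimeUtil.timeInMsOrTimeIfNullUnit;

import java.util.concurrent.TimeUnit;

public class TimeConversionDemo {
    public static void main(String[] args) {
        // A unit is given, so the value is converted to milliseconds.
        System.out.println(timeInMsOrTimeIfNullUnit(5, TimeUnit.SECONDS)); // 5000
        // No unit: the value is treated as milliseconds already.
        System.out.println(timeInMsOrTimeIfNullUnit(5, null));             // 5
    }
}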
15,098
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/StringUtil.java
StringUtil.bytesToString
public static String bytesToString(byte[] bytes, int offset, int length) { return new String(bytes, offset, length, UTF8_CHARSET); }
java
public static String bytesToString(byte[] bytes, int offset, int length) { return new String(bytes, offset, length, UTF8_CHARSET); }
[ "public", "static", "String", "bytesToString", "(", "byte", "[", "]", "bytes", ",", "int", "offset", ",", "int", "length", ")", "{", "return", "new", "String", "(", "bytes", ",", "offset", ",", "length", ",", "UTF8_CHARSET", ")", ";", "}" ]
Creates a UTF8_CHARSET string from a byte array. @param bytes the byte array. @param offset the index of the first byte to decode @param length the number of bytes to decode @return the string created from the byte array.
[ "Creates", "a", "UTF8_CHARSET", "string", "from", "a", "byte", "array", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/StringUtil.java#L75-L77
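A sketch for bytesToString decoding a slice of a byte array; the sample string is hypothetical.

import static com.hazelcast.util.StringUtil.bytesToString;

import java.nio.charset.StandardCharsets;

public class BytesToStringDemo {
    public static void main(String[] args) {
        byte[] bytes = "hazelcast".getBytes(StandardCharsets.UTF_8);
        // Decode only the 4-byte slice starting at offset 5, yielding "cast".
        System.out.println(bytesToString(bytes, 5, 4));
    }
}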
15,099
hazelcast/hazelcast
hazelcast/src/main/java/com/hazelcast/util/StringUtil.java
StringUtil.upperCaseInternal
public static String upperCaseInternal(String s) { if (isNullOrEmpty(s)) { return s; } return s.toUpperCase(LOCALE_INTERNAL); }
java
public static String upperCaseInternal(String s) { if (isNullOrEmpty(s)) { return s; } return s.toUpperCase(LOCALE_INTERNAL); }
[ "public", "static", "String", "upperCaseInternal", "(", "String", "s", ")", "{", "if", "(", "isNullOrEmpty", "(", "s", ")", ")", "{", "return", "s", ";", "}", "return", "s", ".", "toUpperCase", "(", "LOCALE_INTERNAL", ")", ";", "}" ]
Hazelcast-specific settings, operands, etc. use this method. Creates an uppercase string from the given string using a fixed internal locale. @param s the given string @return an uppercase string, or {@code null}/empty if the string is {@code null}/empty
[ "Hazelcast", "-", "specific", "settings", "operands", "etc", ".", "use", "this", "method", ".", "Creates", "an", "uppercase", "string", "from", "the", "given", "string", "using", "a", "fixed", "internal", "locale", "." ]
8c4bc10515dbbfb41a33e0302c0caedf3cda1baf
https://github.com/hazelcast/hazelcast/blob/8c4bc10515dbbfb41a33e0302c0caedf3cda1baf/hazelcast/src/main/java/com/hazelcast/util/StringUtil.java#L135-L140
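A sketch for upperCaseInternal; the inputs are hypothetical. The design point is that a fixed internal locale keeps results stable regardless of the JVM's default locale (for example, no Turkish dotless-i surprises when uppercasing identifiers).

import static com.hazelcast.util.StringUtil.upperCaseInternal;

public class UpperCaseInternalDemo {
    public static void main(String[] args) {
        System.out.println(upperCaseInternal("put_if_absent")); // PUT_IF_ABSENT
        System.out.println(upperCaseInternal(""));              // "" is returned as-is
    }
}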